Diffstat (limited to 'drivers')
1052 files changed, 85832 insertions, 28669 deletions
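A pattern that recurs in several hunks below (solos-pci, bcma host_pci): PM and sysfs callbacks stop bouncing through to_pci_dev()/pci_get_drvdata() and read the driver data straight off the struct device with dev_get_drvdata(). This works because pci_set_drvdata() stores through dev_set_drvdata() on the embedded device. A minimal sketch of the resulting callback shape; struct my_priv and example_suspend() are illustrative, only dev_get_drvdata() is the real API:

#include <linux/device.h>

struct my_priv {
	int state;	/* hypothetical driver state */
};

static int example_suspend(struct device *dev)
{
	/* pci_set_drvdata(pdev, priv) stored this via the embedded
	 * struct device, so no cast to struct pci_dev is needed here.
	 */
	struct my_priv *priv = dev_get_drvdata(dev);

	priv->state = 0;	/* ... quiesce hardware using priv ... */
	return 0;
}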
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c index 79b718430cd1..b23d1e4bad33 100644 --- a/drivers/atm/eni.c +++ b/drivers/atm/eni.c @@ -1136,7 +1136,7 @@ DPRINTK("doing direct send\n"); /* @@@ well, this doesn't work anyway */ else put_dma(tx->index,eni_dev->dma,&j,(unsigned long) skb_frag_page(&skb_shinfo(skb)->frags[i]) + - skb_shinfo(skb)->frags[i].page_offset, + skb_frag_off(&skb_shinfo(skb)->frags[i]), skb_frag_size(&skb_shinfo(skb)->frags[i])); } if (skb->len & 3) { diff --git a/drivers/atm/he.c b/drivers/atm/he.c index 211607986134..70b00ae4ec38 100644 --- a/drivers/atm/he.c +++ b/drivers/atm/he.c @@ -2580,10 +2580,9 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb) slot = 0; } - tpd->iovec[slot].addr = dma_map_single(&he_dev->pci_dev->dev, - (void *) page_address(frag->page) + frag->page_offset, - frag->size, DMA_TO_DEVICE); - tpd->iovec[slot].len = frag->size; + tpd->iovec[slot].addr = skb_frag_dma_map(&he_dev->pci_dev->dev, + frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); + tpd->iovec[slot].len = skb_frag_size(frag); ++slot; } diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c index 5c4c6eeb505c..c32f7dd9879a 100644 --- a/drivers/atm/solos-pci.c +++ b/drivers/atm/solos-pci.c @@ -516,9 +516,8 @@ struct geos_gpio_attr { static ssize_t geos_gpio_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct pci_dev *pdev = to_pci_dev(dev); struct geos_gpio_attr *gattr = container_of(attr, struct geos_gpio_attr, attr); - struct solos_card *card = pci_get_drvdata(pdev); + struct solos_card *card = dev_get_drvdata(dev); uint32_t data32; if (count != 1 && (count != 2 || buf[1] != '\n')) @@ -542,9 +541,8 @@ static ssize_t geos_gpio_store(struct device *dev, struct device_attribute *attr static ssize_t geos_gpio_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct pci_dev *pdev = to_pci_dev(dev); struct geos_gpio_attr *gattr = container_of(attr, struct geos_gpio_attr, attr); - struct solos_card *card = pci_get_drvdata(pdev); + struct solos_card *card = dev_get_drvdata(dev); uint32_t data32; data32 = ioread32(card->config_regs + GPIO_STATUS); @@ -556,9 +554,8 @@ static ssize_t geos_gpio_show(struct device *dev, struct device_attribute *attr, static ssize_t hardware_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct pci_dev *pdev = to_pci_dev(dev); struct geos_gpio_attr *gattr = container_of(attr, struct geos_gpio_attr, attr); - struct solos_card *card = pci_get_drvdata(pdev); + struct solos_card *card = dev_get_drvdata(dev); uint32_t data32; data32 = ioread32(card->config_regs + GPIO_STATUS); diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c index 27e9686b6d3a..87760aa60446 100644 --- a/drivers/bcma/driver_mips.c +++ b/drivers/bcma/driver_mips.c @@ -59,22 +59,6 @@ static inline void mips_write32(struct bcma_drv_mips *mcore, bcma_write32(mcore->core, offset, value); } -static const u32 ipsflag_irq_mask[] = { - 0, - BCMA_MIPS_IPSFLAG_IRQ1, - BCMA_MIPS_IPSFLAG_IRQ2, - BCMA_MIPS_IPSFLAG_IRQ3, - BCMA_MIPS_IPSFLAG_IRQ4, -}; - -static const u32 ipsflag_irq_shift[] = { - 0, - BCMA_MIPS_IPSFLAG_IRQ1_SHIFT, - BCMA_MIPS_IPSFLAG_IRQ2_SHIFT, - BCMA_MIPS_IPSFLAG_IRQ3_SHIFT, - BCMA_MIPS_IPSFLAG_IRQ4_SHIFT, -}; - static u32 bcma_core_mips_irqflag(struct bcma_device *dev) { u32 flag; diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c index f499a469e66d..12b2cc9a3fbe 100644 --- a/drivers/bcma/driver_pci.c +++ b/drivers/bcma/driver_pci.c @@ -78,7 +78,7 @@ static u16 
bcma_pcie_mdio_read(struct bcma_drv_pci *pc, u16 device, u8 address) v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF_OLD); } - v = BCMA_CORE_PCI_MDIODATA_START; + v |= BCMA_CORE_PCI_MDIODATA_START; v |= BCMA_CORE_PCI_MDIODATA_READ; v |= BCMA_CORE_PCI_MDIODATA_TA; @@ -121,7 +121,7 @@ static void bcma_pcie_mdio_write(struct bcma_drv_pci *pc, u16 device, v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF_OLD); } - v = BCMA_CORE_PCI_MDIODATA_START; + v |= BCMA_CORE_PCI_MDIODATA_START; v |= BCMA_CORE_PCI_MDIODATA_WRITE; v |= BCMA_CORE_PCI_MDIODATA_TA; v |= data; diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c index f52239feb4cb..69c10a7b7c61 100644 --- a/drivers/bcma/host_pci.c +++ b/drivers/bcma/host_pci.c @@ -260,8 +260,7 @@ static void bcma_host_pci_remove(struct pci_dev *dev) #ifdef CONFIG_PM_SLEEP static int bcma_host_pci_suspend(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct bcma_bus *bus = pci_get_drvdata(pdev); + struct bcma_bus *bus = dev_get_drvdata(dev); bus->mapped_core = NULL; @@ -270,8 +269,7 @@ static int bcma_host_pci_suspend(struct device *dev) static int bcma_host_pci_resume(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct bcma_bus *bus = pci_get_drvdata(pdev); + struct bcma_bus *bus = dev_get_drvdata(dev); return bcma_bus_resume(bus); } diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c index 206edd3ba668..bd2c923a6586 100644 --- a/drivers/bcma/sprom.c +++ b/drivers/bcma/sprom.c @@ -222,7 +222,7 @@ static void bcma_sprom_extract_r8(struct bcma_bus *bus, const u16 *sprom) { u16 v, o; int i; - u16 pwr_info_offset[] = { + static const u16 pwr_info_offset[] = { SSB_SROM8_PWR_INFO_CORE0, SSB_SROM8_PWR_INFO_CORE1, SSB_SROM8_PWR_INFO_CORE2, SSB_SROM8_PWR_INFO_CORE3 }; @@ -578,9 +578,11 @@ int bcma_sprom_get(struct bcma_bus *bus) { u16 offset = BCMA_CC_SPROM; u16 *sprom; - size_t sprom_sizes[] = { SSB_SPROMSIZE_WORDS_R4, - SSB_SPROMSIZE_WORDS_R10, - SSB_SPROMSIZE_WORDS_R11, }; + static const size_t sprom_sizes[] = { + SSB_SPROMSIZE_WORDS_R4, + SSB_SPROMSIZE_WORDS_R10, + SSB_SPROMSIZE_WORDS_R11, + }; int i, err = 0; if (!bus->drv_cc.core) diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c index 124ef0a3e1dd..2d2e6d862068 100644 --- a/drivers/bluetooth/btbcm.c +++ b/drivers/bluetooth/btbcm.c @@ -23,6 +23,7 @@ #define BDADDR_BCM43430A0 (&(bdaddr_t) {{0xac, 0x1f, 0x12, 0xa0, 0x43, 0x43}}) #define BDADDR_BCM4324B3 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb3, 0x24, 0x43}}) #define BDADDR_BCM4330B1 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb1, 0x30, 0x43}}) +#define BDADDR_BCM4345C5 (&(bdaddr_t) {{0xac, 0x1f, 0x00, 0xc5, 0x45, 0x43}}) #define BDADDR_BCM43341B (&(bdaddr_t) {{0xac, 0x1f, 0x00, 0x1b, 0x34, 0x43}}) int btbcm_check_bdaddr(struct hci_dev *hdev) @@ -73,6 +74,7 @@ int btbcm_check_bdaddr(struct hci_dev *hdev) !bacmp(&bda->bdaddr, BDADDR_BCM2076B1) || !bacmp(&bda->bdaddr, BDADDR_BCM4324B3) || !bacmp(&bda->bdaddr, BDADDR_BCM4330B1) || + !bacmp(&bda->bdaddr, BDADDR_BCM4345C5) || !bacmp(&bda->bdaddr, BDADDR_BCM43430A0) || !bacmp(&bda->bdaddr, BDADDR_BCM43341B)) { bt_dev_info(hdev, "BCM: Using default device address (%pMR)", @@ -332,6 +334,7 @@ static const struct bcm_subver_table bcm_uart_subver_table[] = { { 0x2122, "BCM4343A0" }, /* 001.001.034 */ { 0x2209, "BCM43430A1" }, /* 001.002.009 */ { 0x6119, "BCM4345C0" }, /* 003.001.025 */ + { 0x6606, "BCM4345C5" }, /* 003.006.006 */ { 0x230f, "BCM4356A2" }, /* 001.003.015 */ { 0x220e, "BCM20702A1" }, /* 001.002.014 */ { 0x4217, "BCM4329B1" }, /* 002.002.023 */ 
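The ATM hunks above (eni.c, he.c) are instances of the skb_frag_t accessor conversion that repeats throughout this series (ssi_protocol.c, hfi1, ipoib, chtls further down): direct reads of frag->page_offset and frag->size give way to skb_frag_off(), skb_frag_size() and skb_frag_dma_map(). A minimal sketch of the target pattern, assuming only the standard skbuff/DMA APIs; example_map_frags() and the flat mapping array are illustrative, and unwinding of already-mapped frags on error is omitted for brevity:

#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

static int example_map_frags(struct device *dev, struct sk_buff *skb,
			     dma_addr_t *mapping)
{
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		/* skb_frag_dma_map() wraps dma_map_page() and applies
		 * skb_frag_off() internally, so the caller never touches
		 * the frag fields directly.
		 */
		mapping[i] = skb_frag_dma_map(dev, frag, 0,
					      skb_frag_size(frag),
					      DMA_TO_DEVICE);
		if (dma_mapping_error(dev, mapping[i]))
			return -ENOMEM;
	}
	return 0;
}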
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c index 0875470a7806..8cc21ad7cf29 100644 --- a/drivers/bluetooth/btqca.c +++ b/drivers/bluetooth/btqca.c @@ -106,8 +106,9 @@ int qca_send_pre_shutdown_cmd(struct hci_dev *hdev) bt_dev_dbg(hdev, "QCA pre shutdown cmd"); - skb = __hci_cmd_sync(hdev, QCA_PRE_SHUTDOWN_CMD, 0, - NULL, HCI_INIT_TIMEOUT); + skb = __hci_cmd_sync_ev(hdev, QCA_PRE_SHUTDOWN_CMD, 0, + NULL, HCI_EV_CMD_COMPLETE, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { err = PTR_ERR(skb); bt_dev_err(hdev, "QCA preshutdown_cmd failed (%d)", err); diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c index 4f75a9b61d09..bf3c02be6930 100644 --- a/drivers/bluetooth/btrtl.c +++ b/drivers/bluetooth/btrtl.c @@ -178,6 +178,27 @@ static const struct id_table *btrtl_match_ic(u16 lmp_subver, u16 hci_rev, return &ic_id_table[i]; } +static struct sk_buff *btrtl_read_local_version(struct hci_dev *hdev) +{ + struct sk_buff *skb; + + skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + rtl_dev_err(hdev, "HCI_OP_READ_LOCAL_VERSION failed (%ld)", + PTR_ERR(skb)); + return skb; + } + + if (skb->len != sizeof(struct hci_rp_read_local_version)) { + rtl_dev_err(hdev, "HCI_OP_READ_LOCAL_VERSION event length mismatch"); + kfree_skb(skb); + return ERR_PTR(-EIO); + } + + return skb; +} + static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version) { struct rtl_rom_version_evt *rom_version; @@ -186,19 +207,19 @@ static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version) /* Read RTL ROM version command */ skb = __hci_cmd_sync(hdev, 0xfc6d, 0, NULL, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { - rtl_dev_err(hdev, "Read ROM version failed (%ld)\n", + rtl_dev_err(hdev, "Read ROM version failed (%ld)", PTR_ERR(skb)); return PTR_ERR(skb); } if (skb->len != sizeof(*rom_version)) { - rtl_dev_err(hdev, "RTL version event length mismatch\n"); + rtl_dev_err(hdev, "version event length mismatch"); kfree_skb(skb); return -EIO; } rom_version = (struct rtl_rom_version_evt *)skb->data; - rtl_dev_info(hdev, "rom_version status=%x version=%x\n", + rtl_dev_info(hdev, "rom_version status=%x version=%x", rom_version->status, rom_version->version); *version = rom_version->version; @@ -242,7 +263,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, fwptr = btrtl_dev->fw_data + btrtl_dev->fw_len - sizeof(extension_sig); if (memcmp(fwptr, extension_sig, sizeof(extension_sig)) != 0) { - rtl_dev_err(hdev, "extension section signature mismatch\n"); + rtl_dev_err(hdev, "extension section signature mismatch"); return -EINVAL; } @@ -263,7 +284,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, break; if (length == 0) { - rtl_dev_err(hdev, "found instruction with length 0\n"); + rtl_dev_err(hdev, "found instruction with length 0"); return -EINVAL; } @@ -276,7 +297,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, } if (project_id < 0) { - rtl_dev_err(hdev, "failed to find version instruction\n"); + rtl_dev_err(hdev, "failed to find version instruction"); return -EINVAL; } @@ -287,13 +308,13 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, } if (i >= ARRAY_SIZE(project_id_to_lmp_subver)) { - rtl_dev_err(hdev, "unknown project id %d\n", project_id); + rtl_dev_err(hdev, "unknown project id %d", project_id); return -EINVAL; } if (btrtl_dev->ic_info->lmp_subver != project_id_to_lmp_subver[i].lmp_subver) { - rtl_dev_err(hdev, "firmware is for %x but this is a %x\n", + rtl_dev_err(hdev, "firmware is for %x but this is a %x", 
project_id_to_lmp_subver[i].lmp_subver, btrtl_dev->ic_info->lmp_subver); return -EINVAL; @@ -301,7 +322,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, epatch_info = (struct rtl_epatch_header *)btrtl_dev->fw_data; if (memcmp(epatch_info->signature, RTL_EPATCH_SIGNATURE, 8) != 0) { - rtl_dev_err(hdev, "bad EPATCH signature\n"); + rtl_dev_err(hdev, "bad EPATCH signature"); return -EINVAL; } @@ -368,6 +389,8 @@ static int rtl_download_firmware(struct hci_dev *hdev, int frag_len = RTL_FRAG_LEN; int ret = 0; int i; + struct sk_buff *skb; + struct hci_rp_read_local_version *rp; dl_cmd = kmalloc(sizeof(struct rtl_download_cmd), GFP_KERNEL); if (!dl_cmd) @@ -378,7 +401,11 @@ static int rtl_download_firmware(struct hci_dev *hdev, BT_DBG("download fw (%d/%d)", i, frag_num); - dl_cmd->index = i; + if (i > 0x7f) + dl_cmd->index = (i & 0x7f) + 1; + else + dl_cmd->index = i; + if (i == (frag_num - 1)) { dl_cmd->index |= 0x80; /* data end */ frag_len = fw_len % RTL_FRAG_LEN; @@ -389,14 +416,14 @@ static int rtl_download_firmware(struct hci_dev *hdev, skb = __hci_cmd_sync(hdev, 0xfc20, frag_len + 1, dl_cmd, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { - rtl_dev_err(hdev, "download fw command failed (%ld)\n", + rtl_dev_err(hdev, "download fw command failed (%ld)", PTR_ERR(skb)); ret = -PTR_ERR(skb); goto out; } if (skb->len != sizeof(struct rtl_download_response)) { - rtl_dev_err(hdev, "download fw event length mismatch\n"); + rtl_dev_err(hdev, "download fw event length mismatch"); kfree_skb(skb); ret = -EIO; goto out; @@ -406,6 +433,18 @@ static int rtl_download_firmware(struct hci_dev *hdev, data += RTL_FRAG_LEN; } + skb = btrtl_read_local_version(hdev); + if (IS_ERR(skb)) { + ret = PTR_ERR(skb); + rtl_dev_err(hdev, "read local version failed"); + goto out; + } + + rp = (struct hci_rp_read_local_version *)skb->data; + rtl_dev_info(hdev, "fw version 0x%04x%04x", + __le16_to_cpu(rp->hci_rev), __le16_to_cpu(rp->lmp_subver)); + kfree_skb(skb); + out: kfree(dl_cmd); return ret; @@ -416,7 +455,7 @@ static int rtl_load_file(struct hci_dev *hdev, const char *name, u8 **buff) const struct firmware *fw; int ret; - rtl_dev_info(hdev, "rtl: loading %s\n", name); + rtl_dev_info(hdev, "loading %s", name); ret = request_firmware(&fw, name, &hdev->dev); if (ret < 0) return ret; @@ -440,7 +479,7 @@ static int btrtl_setup_rtl8723a(struct hci_dev *hdev, * (which is only for RTL8723B and newer). 
*/ if (!memcmp(btrtl_dev->fw_data, RTL_EPATCH_SIGNATURE, 8)) { - rtl_dev_err(hdev, "unexpected EPATCH signature!\n"); + rtl_dev_err(hdev, "unexpected EPATCH signature!"); return -EINVAL; } @@ -475,7 +514,7 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev, fw_data = tbuff; } - rtl_dev_info(hdev, "cfg_sz %d, total sz %d\n", btrtl_dev->cfg_len, ret); + rtl_dev_info(hdev, "cfg_sz %d, total sz %d", btrtl_dev->cfg_len, ret); ret = rtl_download_firmware(hdev, fw_data, ret); @@ -484,27 +523,6 @@ out: return ret; } -static struct sk_buff *btrtl_read_local_version(struct hci_dev *hdev) -{ - struct sk_buff *skb; - - skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL, - HCI_INIT_TIMEOUT); - if (IS_ERR(skb)) { - rtl_dev_err(hdev, "HCI_OP_READ_LOCAL_VERSION failed (%ld)\n", - PTR_ERR(skb)); - return skb; - } - - if (skb->len != sizeof(struct hci_rp_read_local_version)) { - rtl_dev_err(hdev, "HCI_OP_READ_LOCAL_VERSION event length mismatch\n"); - kfree_skb(skb); - return ERR_PTR(-EIO); - } - - return skb; -} - void btrtl_free(struct btrtl_device_info *btrtl_dev) { kfree(btrtl_dev->fw_data); @@ -537,7 +555,7 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev, } resp = (struct hci_rp_read_local_version *)skb->data; - rtl_dev_info(hdev, "rtl: examining hci_ver=%02x hci_rev=%04x lmp_ver=%02x lmp_subver=%04x\n", + rtl_dev_info(hdev, "examining hci_ver=%02x hci_rev=%04x lmp_ver=%02x lmp_subver=%04x", resp->hci_ver, resp->hci_rev, resp->lmp_ver, resp->lmp_subver); @@ -550,7 +568,7 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev, hdev->bus); if (!btrtl_dev->ic_info) { - rtl_dev_info(hdev, "rtl: unknown IC info, lmp subver %04x, hci rev %04x, hci ver %04x", + rtl_dev_info(hdev, "unknown IC info, lmp subver %04x, hci rev %04x, hci ver %04x", lmp_subver, hci_rev, hci_ver); return btrtl_dev; } @@ -564,7 +582,7 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev, btrtl_dev->fw_len = rtl_load_file(hdev, btrtl_dev->ic_info->fw_name, &btrtl_dev->fw_data); if (btrtl_dev->fw_len < 0) { - rtl_dev_err(hdev, "firmware file %s not found\n", + rtl_dev_err(hdev, "firmware file %s not found", btrtl_dev->ic_info->fw_name); ret = btrtl_dev->fw_len; goto err_free; @@ -582,7 +600,7 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev, &btrtl_dev->cfg_data); if (btrtl_dev->ic_info->config_needed && btrtl_dev->cfg_len <= 0) { - rtl_dev_err(hdev, "mandatory config file %s not found\n", + rtl_dev_err(hdev, "mandatory config file %s not found", btrtl_dev->ic_info->cfg_name); ret = btrtl_dev->cfg_len; goto err_free; @@ -608,7 +626,7 @@ int btrtl_download_firmware(struct hci_dev *hdev, * to a different value. */ if (!btrtl_dev->ic_info) { - rtl_dev_info(hdev, "rtl: assuming no firmware upload needed\n"); + rtl_dev_info(hdev, "assuming no firmware upload needed"); return 0; } @@ -622,7 +640,7 @@ int btrtl_download_firmware(struct hci_dev *hdev, case RTL_ROM_LMP_8822B: return btrtl_setup_rtl8723b(hdev, btrtl_dev); default: - rtl_dev_info(hdev, "rtl: assuming no firmware upload needed\n"); + rtl_dev_info(hdev, "assuming no firmware upload needed"); return 0; } } @@ -641,6 +659,11 @@ int btrtl_setup_realtek(struct hci_dev *hdev) btrtl_free(btrtl_dev); + /* Enable controller to do both LE scan and BR/EDR inquiry + * simultaneously. 
+ */ + set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); + return ret; } EXPORT_SYMBOL_GPL(btrtl_setup_realtek); @@ -714,18 +737,18 @@ int btrtl_get_uart_settings(struct hci_dev *hdev, total_data_len = btrtl_dev->cfg_len - sizeof(*config); if (total_data_len <= 0) { - rtl_dev_warn(hdev, "no config loaded\n"); + rtl_dev_warn(hdev, "no config loaded"); return -EINVAL; } config = (struct rtl_vendor_config *)btrtl_dev->cfg_data; if (le32_to_cpu(config->signature) != RTL_CONFIG_MAGIC) { - rtl_dev_err(hdev, "invalid config magic\n"); + rtl_dev_err(hdev, "invalid config magic"); return -EINVAL; } if (total_data_len < le16_to_cpu(config->total_len)) { - rtl_dev_err(hdev, "config is too short\n"); + rtl_dev_err(hdev, "config is too short"); return -EINVAL; } @@ -735,7 +758,7 @@ int btrtl_get_uart_settings(struct hci_dev *hdev, switch (le16_to_cpu(entry->offset)) { case 0xc: if (entry->len < sizeof(*device_baudrate)) { - rtl_dev_err(hdev, "invalid UART config entry\n"); + rtl_dev_err(hdev, "invalid UART config entry"); return -EINVAL; } @@ -752,7 +775,7 @@ int btrtl_get_uart_settings(struct hci_dev *hdev, break; default: - rtl_dev_dbg(hdev, "skipping config entry 0x%x (len %u)\n", + rtl_dev_dbg(hdev, "skipping config entry 0x%x (len %u)", le16_to_cpu(entry->offset), entry->len); break; }; @@ -761,13 +784,13 @@ int btrtl_get_uart_settings(struct hci_dev *hdev, } if (!found) { - rtl_dev_err(hdev, "no UART config entry found\n"); + rtl_dev_err(hdev, "no UART config entry found"); return -ENOENT; } - rtl_dev_dbg(hdev, "device baudrate = 0x%08x\n", *device_baudrate); - rtl_dev_dbg(hdev, "controller baudrate = %u\n", *controller_baudrate); - rtl_dev_dbg(hdev, "flow control %d\n", *flow_control); + rtl_dev_dbg(hdev, "device baudrate = 0x%08x", *device_baudrate); + rtl_dev_dbg(hdev, "controller baudrate = %u", *controller_baudrate); + rtl_dev_dbg(hdev, "flow control %d", *flow_control); return 0; } diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index ba4149054304..a9c35ebb30f8 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -438,6 +438,7 @@ static const struct dmi_system_id btusb_needs_reset_resume_table[] = { #define BTUSB_OOB_WAKE_ENABLED 11 #define BTUSB_HW_RESET_ACTIVE 12 #define BTUSB_TX_WAIT_VND_EVT 13 +#define BTUSB_WAKEUP_DISABLE 14 struct btusb_data { struct hci_dev *hdev; @@ -526,6 +527,36 @@ static void btusb_intel_cmd_timeout(struct hci_dev *hdev) gpiod_set_value_cansleep(reset_gpio, 0); } +static void btusb_rtl_cmd_timeout(struct hci_dev *hdev) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + struct gpio_desc *reset_gpio = data->reset_gpio; + + if (++data->cmd_timeout_cnt < 5) + return; + + if (!reset_gpio) { + bt_dev_err(hdev, "No gpio to reset Realtek device, ignoring"); + return; + } + + /* Toggle the hard reset line. The Realtek device is going to + * yank itself off the USB and then replug. The cleanup is handled + * correctly on the way out (standard USB disconnect), and the new + * device is detected cleanly and bound to the driver again like + * it should be. + */ + if (test_and_set_bit(BTUSB_HW_RESET_ACTIVE, &data->flags)) { + bt_dev_err(hdev, "last reset failed? 
Not resetting again"); + return; + } + + bt_dev_err(hdev, "Reset Realtek device via gpio"); + gpiod_set_value_cansleep(reset_gpio, 0); + msleep(200); + gpiod_set_value_cansleep(reset_gpio, 1); +} + static inline void btusb_free_frags(struct btusb_data *data) { unsigned long flags; @@ -1174,6 +1205,13 @@ static int btusb_open(struct hci_dev *hdev) data->intf->needs_remote_wakeup = 1; + /* Disable device remote wakeup when host is suspended + * For Realtek chips, global suspend without + * SET_FEATURE (DEVICE_REMOTE_WAKEUP) can save more power in device. + */ + if (test_bit(BTUSB_WAKEUP_DISABLE, &data->flags)) + device_wakeup_disable(&data->udev->dev); + if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags)) goto done; @@ -1237,6 +1275,11 @@ static int btusb_close(struct hci_dev *hdev) goto failed; data->intf->needs_remote_wakeup = 0; + + /* Enable remote wake up for auto-suspend */ + if (test_bit(BTUSB_WAKEUP_DISABLE, &data->flags)) + data->intf->needs_remote_wakeup = 1; + usb_autopm_put_interface(data->intf); failed: @@ -3768,12 +3811,13 @@ static int btusb_probe(struct usb_interface *intf, if (id->driver_info & BTUSB_REALTEK) { hdev->setup = btrtl_setup_realtek; hdev->shutdown = btrtl_shutdown_realtek; + hdev->cmd_timeout = btusb_rtl_cmd_timeout; - /* Realtek devices lose their updated firmware over suspend, - * but the USB hub doesn't notice any status change. - * Explicitly request a device reset on resume. + /* Realtek devices lose their updated firmware over global + * suspend that means host doesn't send SET_FEATURE + * (DEVICE_REMOTE_WAKEUP) */ - interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME; + set_bit(BTUSB_WAKEUP_DISABLE, &data->flags); } #endif @@ -3947,6 +3991,19 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message) enable_irq(data->oob_wake_irq); } + /* For global suspend, Realtek devices lose the loaded fw + * in them. But for autosuspend, firmware should remain. + * Actually, it depends on whether the usb host sends + * set feature (enable wakeup) or not. + */ + if (test_bit(BTUSB_WAKEUP_DISABLE, &data->flags)) { + if (PMSG_IS_AUTO(message) && + device_can_wakeup(&data->udev->dev)) + data->udev->do_remote_wakeup = 1; + else if (!PMSG_IS_AUTO(message)) + data->udev->reset_resume = 1; + } + return 0; } diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c index ae2624fce913..7646636f2d18 100644 --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c @@ -260,7 +260,7 @@ static int bcm_gpio_set_power(struct bcm_device *dev, bool powered) } /* wait for device to power on and come out of reset */ - usleep_range(10000, 20000); + usleep_range(100000, 120000); dev->res_enabled = powered; @@ -824,6 +824,21 @@ unlock: } #endif +/* Some firmware reports an IRQ which does not work (wrong pin in fw table?) */ +static const struct dmi_system_id bcm_broken_irq_dmi_table[] = { + { + .ident = "Meegopad T08", + .matches = { + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, + "To be filled by OEM."), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "T3 MRD"), + DMI_EXACT_MATCH(DMI_BOARD_VERSION, "V1.1"), + }, + }, + { } +}; + +#ifdef CONFIG_ACPI static const struct acpi_gpio_params first_gpio = { 0, 0, false }; static const struct acpi_gpio_params second_gpio = { 1, 0, false }; static const struct acpi_gpio_params third_gpio = { 2, 0, false }; @@ -842,21 +857,6 @@ static const struct acpi_gpio_mapping acpi_bcm_int_first_gpios[] = { { }, }; -/* Some firmware reports an IRQ which does not work (wrong pin in fw table?) 
*/ -static const struct dmi_system_id bcm_broken_irq_dmi_table[] = { - { - .ident = "Meegopad T08", - .matches = { - DMI_EXACT_MATCH(DMI_BOARD_VENDOR, - "To be filled by OEM."), - DMI_EXACT_MATCH(DMI_BOARD_NAME, "T3 MRD"), - DMI_EXACT_MATCH(DMI_BOARD_VERSION, "V1.1"), - }, - }, - { } -}; - -#ifdef CONFIG_ACPI static int bcm_resource(struct acpi_resource *ares, void *data) { struct bcm_device *dev = data; @@ -1419,6 +1419,7 @@ static void bcm_serdev_remove(struct serdev_device *serdev) #ifdef CONFIG_OF static const struct of_device_id bcm_bluetooth_of_match[] = { { .compatible = "brcm,bcm20702a1" }, + { .compatible = "brcm,bcm4345c5" }, { .compatible = "brcm,bcm4330-bt" }, { .compatible = "brcm,bcm43438-bt" }, { }, diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 31dec435767b..e3164c200eac 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -504,26 +504,7 @@ static int qca_open(struct hci_uart *hu) qca->tx_ibs_state = HCI_IBS_TX_ASLEEP; qca->rx_ibs_state = HCI_IBS_RX_ASLEEP; - /* clocks actually on, but we start votes off */ - qca->tx_vote = false; - qca->rx_vote = false; - qca->flags = 0; - - qca->ibs_sent_wacks = 0; - qca->ibs_sent_slps = 0; - qca->ibs_sent_wakes = 0; - qca->ibs_recv_wacks = 0; - qca->ibs_recv_slps = 0; - qca->ibs_recv_wakes = 0; qca->vote_last_jif = jiffies; - qca->vote_on_ms = 0; - qca->vote_off_ms = 0; - qca->votes_on = 0; - qca->votes_off = 0; - qca->tx_votes_on = 0; - qca->tx_votes_off = 0; - qca->rx_votes_on = 0; - qca->rx_votes_off = 0; hu->priv = qca; @@ -1263,6 +1244,11 @@ static int qca_setup(struct hci_uart *hu) /* Patch downloading has to be done without IBS mode */ clear_bit(QCA_IBS_ENABLED, &qca->flags); + /* Enable controller to do both LE scan and BR/EDR inquiry + * simultaneously. + */ + set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); + if (qca_is_wcn399x(soc_type)) { bt_dev_info(hdev, "setting up wcn3990"); @@ -1328,7 +1314,7 @@ static int qca_setup(struct hci_uart *hu) return ret; } -static struct hci_uart_proto qca_proto = { +static const struct hci_uart_proto qca_proto = { .id = HCI_UART_QCA, .name = "QCA", .manufacturer = 29, @@ -1391,6 +1377,8 @@ static int qca_power_off(struct hci_dev *hdev) /* Perform pre shutdown command */ qca_send_pre_shutdown_cmd(hdev); + usleep_range(8000, 10000); + qca_power_shutdown(hu); return 0; } diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c index 551bca6fef24..c70cb5f272cf 100644 --- a/drivers/crypto/chelsio/chtls/chtls_io.c +++ b/drivers/crypto/chelsio/chtls/chtls_io.c @@ -1134,7 +1134,9 @@ copy: } /* Update the skb. 
*/ if (merge) { - skb_shinfo(skb)->frags[i - 1].size += copy; + skb_frag_size_add( + &skb_shinfo(skb)->frags[i - 1], + copy); } else { skb_fill_page_desc(skb, i, page, off, copy); if (off + copy < pg_size) { @@ -1247,7 +1249,7 @@ new_buf: i = skb_shinfo(skb)->nr_frags; if (skb_can_coalesce(skb, i, page, offset)) { - skb_shinfo(skb)->frags[i - 1].size += copy; + skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); } else if (i < MAX_SKB_FRAGS) { get_page(page); skb_fill_page_desc(skb, i, page, offset, copy); diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c index 635bb4b447fb..e6df5b95ed47 100644 --- a/drivers/crypto/chelsio/chtls/chtls_main.c +++ b/drivers/crypto/chelsio/chtls/chtls_main.c @@ -474,7 +474,8 @@ static int chtls_getsockopt(struct sock *sk, int level, int optname, struct tls_context *ctx = tls_get_ctx(sk); if (level != SOL_TLS) - return ctx->getsockopt(sk, level, optname, optval, optlen); + return ctx->sk_proto->getsockopt(sk, level, + optname, optval, optlen); return do_chtls_getsockopt(sk, optval, optlen); } @@ -541,7 +542,8 @@ static int chtls_setsockopt(struct sock *sk, int level, int optname, struct tls_context *ctx = tls_get_ctx(sk); if (level != SOL_TLS) - return ctx->setsockopt(sk, level, optname, optval, optlen); + return ctx->sk_proto->setsockopt(sk, level, + optname, optval, optlen); return do_chtls_setsockopt(sk, optname, optval, optlen); } diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c index 8b3a922bdad3..2cf83856f2e4 100644 --- a/drivers/hid/hid-microsoft.c +++ b/drivers/hid/hid-microsoft.c @@ -303,7 +303,7 @@ static void ms_ff_worker(struct work_struct *work) r->magnitude[MAGNITUDE_WEAK] = ms->weak; /* right actuator */ ret = hid_hw_output_report(hdev, (__u8 *)r, sizeof(*r)); - if (ret) + if (ret < 0) hid_warn(hdev, "failed to send FF report\n"); } diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c index 9aeed98b87a1..0253e76f1df2 100644 --- a/drivers/hsi/clients/ssi_protocol.c +++ b/drivers/hsi/clients/ssi_protocol.c @@ -181,7 +181,8 @@ static void ssip_skb_to_msg(struct sk_buff *skb, struct hsi_msg *msg) sg = sg_next(sg); BUG_ON(!sg); frag = &skb_shinfo(skb)->frags[i]; - sg_set_page(sg, frag->page.p, frag->size, frag->page_offset); + sg_set_page(sg, skb_frag_page(frag), skb_frag_size(frag), + skb_frag_off(frag)); } } diff --git a/drivers/infiniband/hw/hfi1/vnic_sdma.c b/drivers/infiniband/hw/hfi1/vnic_sdma.c index af1b1ffcb38e..7d90b900131b 100644 --- a/drivers/infiniband/hw/hfi1/vnic_sdma.c +++ b/drivers/infiniband/hw/hfi1/vnic_sdma.c @@ -102,13 +102,13 @@ static noinline int build_vnic_ulp_payload(struct sdma_engine *sde, goto bail_txadd; for (i = 0; i < skb_shinfo(tx->skb)->nr_frags; i++) { - struct skb_frag_struct *frag = &skb_shinfo(tx->skb)->frags[i]; + skb_frag_t *frag = &skb_shinfo(tx->skb)->frags[i]; /* combine physically continuous fragments later? 
*/ ret = sdma_txadd_page(sde->dd, &tx->txreq, skb_frag_page(frag), - frag->page_offset, + skb_frag_off(frag), skb_frag_size(frag)); if (unlikely(ret)) goto bail_txadd; diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c index 6c8645033102..4937947400cd 100644 --- a/drivers/infiniband/hw/mlx5/cmd.c +++ b/drivers/infiniband/hw/mlx5/cmd.c @@ -186,136 +186,6 @@ int mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length) return err; } -int mlx5_cmd_alloc_sw_icm(struct mlx5_dm *dm, int type, u64 length, - u16 uid, phys_addr_t *addr, u32 *obj_id) -{ - struct mlx5_core_dev *dev = dm->dev; - u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {}; - u32 in[MLX5_ST_SZ_DW(create_sw_icm_in)] = {}; - unsigned long *block_map; - u64 icm_start_addr; - u32 log_icm_size; - u32 num_blocks; - u32 max_blocks; - u64 block_idx; - void *sw_icm; - int ret; - - MLX5_SET(general_obj_in_cmd_hdr, in, opcode, - MLX5_CMD_OP_CREATE_GENERAL_OBJECT); - MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_SW_ICM); - MLX5_SET(general_obj_in_cmd_hdr, in, uid, uid); - - switch (type) { - case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM: - icm_start_addr = MLX5_CAP64_DEV_MEM(dev, - steering_sw_icm_start_address); - log_icm_size = MLX5_CAP_DEV_MEM(dev, log_steering_sw_icm_size); - block_map = dm->steering_sw_icm_alloc_blocks; - break; - case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM: - icm_start_addr = MLX5_CAP64_DEV_MEM(dev, - header_modify_sw_icm_start_address); - log_icm_size = MLX5_CAP_DEV_MEM(dev, - log_header_modify_sw_icm_size); - block_map = dm->header_modify_sw_icm_alloc_blocks; - break; - default: - return -EINVAL; - } - - num_blocks = (length + MLX5_SW_ICM_BLOCK_SIZE(dev) - 1) >> - MLX5_LOG_SW_ICM_BLOCK_SIZE(dev); - max_blocks = BIT(log_icm_size - MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)); - spin_lock(&dm->lock); - block_idx = bitmap_find_next_zero_area(block_map, - max_blocks, - 0, - num_blocks, 0); - - if (block_idx < max_blocks) - bitmap_set(block_map, - block_idx, num_blocks); - - spin_unlock(&dm->lock); - - if (block_idx >= max_blocks) - return -ENOMEM; - - sw_icm = MLX5_ADDR_OF(create_sw_icm_in, in, sw_icm); - icm_start_addr += block_idx << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev); - MLX5_SET64(sw_icm, sw_icm, sw_icm_start_addr, - icm_start_addr); - MLX5_SET(sw_icm, sw_icm, log_sw_icm_size, ilog2(length)); - - ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); - if (ret) { - spin_lock(&dm->lock); - bitmap_clear(block_map, - block_idx, num_blocks); - spin_unlock(&dm->lock); - - return ret; - } - - *addr = icm_start_addr; - *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); - - return 0; -} - -int mlx5_cmd_dealloc_sw_icm(struct mlx5_dm *dm, int type, u64 length, - u16 uid, phys_addr_t addr, u32 obj_id) -{ - struct mlx5_core_dev *dev = dm->dev; - u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {}; - u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {}; - unsigned long *block_map; - u32 num_blocks; - u64 start_idx; - int err; - - num_blocks = (length + MLX5_SW_ICM_BLOCK_SIZE(dev) - 1) >> - MLX5_LOG_SW_ICM_BLOCK_SIZE(dev); - - switch (type) { - case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM: - start_idx = - (addr - MLX5_CAP64_DEV_MEM( - dev, steering_sw_icm_start_address)) >> - MLX5_LOG_SW_ICM_BLOCK_SIZE(dev); - block_map = dm->steering_sw_icm_alloc_blocks; - break; - case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM: - start_idx = - (addr - - MLX5_CAP64_DEV_MEM( - dev, header_modify_sw_icm_start_address)) >> - MLX5_LOG_SW_ICM_BLOCK_SIZE(dev); - block_map = 
dm->header_modify_sw_icm_alloc_blocks; - break; - default: - return -EINVAL; - } - - MLX5_SET(general_obj_in_cmd_hdr, in, opcode, - MLX5_CMD_OP_DESTROY_GENERAL_OBJECT); - MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_SW_ICM); - MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id); - MLX5_SET(general_obj_in_cmd_hdr, in, uid, uid); - - err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); - if (err) - return err; - - spin_lock(&dm->lock); - bitmap_clear(block_map, - start_idx, num_blocks); - spin_unlock(&dm->lock); - - return 0; -} - int mlx5_cmd_query_ext_ppcnt_counters(struct mlx5_core_dev *dev, void *out) { u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {}; diff --git a/drivers/infiniband/hw/mlx5/cmd.h b/drivers/infiniband/hw/mlx5/cmd.h index 0572dcba6eae..169cab4915e3 100644 --- a/drivers/infiniband/hw/mlx5/cmd.h +++ b/drivers/infiniband/hw/mlx5/cmd.h @@ -65,8 +65,4 @@ int mlx5_cmd_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id, u16 uid); int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb, u16 opmod, u8 port); -int mlx5_cmd_alloc_sw_icm(struct mlx5_dm *dm, int type, u64 length, - u16 uid, phys_addr_t *addr, u32 *obj_id); -int mlx5_cmd_dealloc_sw_icm(struct mlx5_dm *dm, int type, u64 length, - u16 uid, phys_addr_t addr, u32 obj_id); #endif /* MLX5_IB_CMD_H */ diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index af5bbb35c058..25b6482c5368 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c @@ -922,6 +922,7 @@ static bool devx_is_general_cmd(void *in, struct mlx5_ib_dev *dev) case MLX5_CMD_OP_QUERY_CONG_STATUS: case MLX5_CMD_OP_QUERY_CONG_PARAMS: case MLX5_CMD_OP_QUERY_CONG_STATISTICS: + case MLX5_CMD_OP_QUERY_LAG: return true; default: return false; diff --git a/drivers/infiniband/hw/mlx5/flow.c b/drivers/infiniband/hw/mlx5/flow.c index b8841355fcd5..1c8f04abee0c 100644 --- a/drivers/infiniband/hw/mlx5/flow.c +++ b/drivers/infiniband/hw/mlx5/flow.c @@ -322,11 +322,11 @@ void mlx5_ib_destroy_flow_action_raw(struct mlx5_ib_flow_action *maction) switch (maction->flow_action_raw.sub_type) { case MLX5_IB_FLOW_ACTION_MODIFY_HEADER: mlx5_modify_header_dealloc(maction->flow_action_raw.dev->mdev, - maction->flow_action_raw.action_id); + maction->flow_action_raw.modify_hdr); break; case MLX5_IB_FLOW_ACTION_PACKET_REFORMAT: mlx5_packet_reformat_dealloc(maction->flow_action_raw.dev->mdev, - maction->flow_action_raw.action_id); + maction->flow_action_raw.pkt_reformat); break; case MLX5_IB_FLOW_ACTION_DECAP: break; @@ -352,10 +352,11 @@ mlx5_ib_create_modify_header(struct mlx5_ib_dev *dev, if (!maction) return ERR_PTR(-ENOMEM); - ret = mlx5_modify_header_alloc(dev->mdev, namespace, num_actions, in, - &maction->flow_action_raw.action_id); + maction->flow_action_raw.modify_hdr = + mlx5_modify_header_alloc(dev->mdev, namespace, num_actions, in); - if (ret) { + if (IS_ERR(maction->flow_action_raw.modify_hdr)) { + ret = PTR_ERR(maction->flow_action_raw.modify_hdr); kfree(maction); return ERR_PTR(ret); } @@ -479,11 +480,13 @@ static int mlx5_ib_flow_action_create_packet_reformat_ctx( if (ret) return ret; - ret = mlx5_packet_reformat_alloc(dev->mdev, prm_prt, len, - in, namespace, - &maction->flow_action_raw.action_id); - if (ret) + maction->flow_action_raw.pkt_reformat = + mlx5_packet_reformat_alloc(dev->mdev, prm_prt, len, + in, namespace); + if (IS_ERR(maction->flow_action_raw.pkt_reformat)) { + ret = PTR_ERR(maction->flow_action_raw.pkt_reformat); return ret; + } 
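/* Sketch (not part of the flow.c hunk): the calling convention these
 * changes migrate to. mlx5_modify_header_alloc() now returns an
 * ERR_PTR-style object pointer instead of returning a status and
 * filling a u32 action id; 'mdev', 'ns', 'nact' and 'in' are
 * stand-in names:
 *
 *	struct mlx5_modify_hdr *hdr;
 *
 *	hdr = mlx5_modify_header_alloc(mdev, ns, nact, in);
 *	if (IS_ERR(hdr))
 *		return PTR_ERR(hdr);
 *	...
 *	mlx5_modify_header_dealloc(mdev, hdr);
 *
 * The packet-reformat path follows the same shape with
 * mlx5_packet_reformat_alloc()/_dealloc().
 */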
maction->flow_action_raw.sub_type = MLX5_IB_FLOW_ACTION_PACKET_REFORMAT; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 0569bcab02d4..4e9f1507ffd9 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -2280,6 +2280,7 @@ static inline int check_dm_type_support(struct mlx5_ib_dev *dev, return -EOPNOTSUPP; break; case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM: + case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM: if (!capable(CAP_SYS_RAWIO) || !capable(CAP_NET_RAW)) return -EPERM; @@ -2344,20 +2345,20 @@ static int handle_alloc_dm_sw_icm(struct ib_ucontext *ctx, struct uverbs_attr_bundle *attrs, int type) { - struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm; + struct mlx5_core_dev *dev = to_mdev(ctx->device)->mdev; u64 act_size; int err; /* Allocation size must a multiple of the basic block size * and a power of 2. */ - act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dm_db->dev)); + act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dev)); act_size = roundup_pow_of_two(act_size); dm->size = act_size; - err = mlx5_cmd_alloc_sw_icm(dm_db, type, act_size, - to_mucontext(ctx)->devx_uid, &dm->dev_addr, - &dm->icm_dm.obj_id); + err = mlx5_dm_sw_icm_alloc(dev, type, act_size, + to_mucontext(ctx)->devx_uid, &dm->dev_addr, + &dm->icm_dm.obj_id); if (err) return err; @@ -2365,9 +2366,9 @@ static int handle_alloc_dm_sw_icm(struct ib_ucontext *ctx, MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET, &dm->dev_addr, sizeof(dm->dev_addr)); if (err) - mlx5_cmd_dealloc_sw_icm(dm_db, type, dm->size, - to_mucontext(ctx)->devx_uid, - dm->dev_addr, dm->icm_dm.obj_id); + mlx5_dm_sw_icm_dealloc(dev, type, dm->size, + to_mucontext(ctx)->devx_uid, dm->dev_addr, + dm->icm_dm.obj_id); return err; } @@ -2407,8 +2408,14 @@ struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev, attrs); break; case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM: + err = handle_alloc_dm_sw_icm(context, dm, + attr, attrs, + MLX5_SW_ICM_TYPE_STEERING); + break; case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM: - err = handle_alloc_dm_sw_icm(context, dm, attr, attrs, type); + err = handle_alloc_dm_sw_icm(context, dm, + attr, attrs, + MLX5_SW_ICM_TYPE_HEADER_MODIFY); break; default: err = -EOPNOTSUPP; @@ -2428,6 +2435,7 @@ int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs) { struct mlx5_ib_ucontext *ctx = rdma_udata_to_drv_context( &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext); + struct mlx5_core_dev *dev = to_mdev(ibdm->device)->mdev; struct mlx5_dm *dm_db = &to_mdev(ibdm->device)->dm; struct mlx5_ib_dm *dm = to_mdm(ibdm); u32 page_idx; @@ -2439,19 +2447,23 @@ int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs) if (ret) return ret; - page_idx = (dm->dev_addr - - pci_resource_start(dm_db->dev->pdev, 0) - - MLX5_CAP64_DEV_MEM(dm_db->dev, - memic_bar_start_addr)) >> - PAGE_SHIFT; + page_idx = (dm->dev_addr - pci_resource_start(dev->pdev, 0) - + MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr)) >> + PAGE_SHIFT; bitmap_clear(ctx->dm_pages, page_idx, DIV_ROUND_UP(dm->size, PAGE_SIZE)); break; case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM: + ret = mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_STEERING, + dm->size, ctx->devx_uid, dm->dev_addr, + dm->icm_dm.obj_id); + if (ret) + return ret; + break; case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM: - ret = mlx5_cmd_dealloc_sw_icm(dm_db, dm->type, dm->size, - ctx->devx_uid, dm->dev_addr, - dm->icm_dm.obj_id); + ret = mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_HEADER_MODIFY, + dm->size, 
ctx->devx_uid, dm->dev_addr, + dm->icm_dm.obj_id); if (ret) return ret; break; @@ -2646,7 +2658,8 @@ int parse_flow_flow_action(struct mlx5_ib_flow_action *maction, if (action->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) return -EINVAL; action->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; - action->modify_id = maction->flow_action_raw.action_id; + action->modify_hdr = + maction->flow_action_raw.modify_hdr; return 0; } if (maction->flow_action_raw.sub_type == @@ -2663,8 +2676,8 @@ int parse_flow_flow_action(struct mlx5_ib_flow_action *maction, return -EINVAL; action->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; - action->reformat_id = - maction->flow_action_raw.action_id; + action->pkt_reformat = + maction->flow_action_raw.pkt_reformat; return 0; } /* fall through */ @@ -6096,8 +6109,6 @@ static struct ib_counters *mlx5_ib_create_counters(struct ib_device *device, static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev) { - struct mlx5_core_dev *mdev = dev->mdev; - mlx5_ib_cleanup_multiport_master(dev); if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) { srcu_barrier(&dev->mr_srcu); @@ -6105,29 +6116,11 @@ static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev) } WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, MLX5_MAX_MEMIC_PAGES)); - - WARN_ON(dev->dm.steering_sw_icm_alloc_blocks && - !bitmap_empty( - dev->dm.steering_sw_icm_alloc_blocks, - BIT(MLX5_CAP_DEV_MEM(mdev, log_steering_sw_icm_size) - - MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev)))); - - kfree(dev->dm.steering_sw_icm_alloc_blocks); - - WARN_ON(dev->dm.header_modify_sw_icm_alloc_blocks && - !bitmap_empty(dev->dm.header_modify_sw_icm_alloc_blocks, - BIT(MLX5_CAP_DEV_MEM( - mdev, log_header_modify_sw_icm_size) - - MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev)))); - - kfree(dev->dm.header_modify_sw_icm_alloc_blocks); } static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev) { struct mlx5_core_dev *mdev = dev->mdev; - u64 header_modify_icm_blocks = 0; - u64 steering_icm_blocks = 0; int err; int i; @@ -6174,51 +6167,17 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev) INIT_LIST_HEAD(&dev->qp_list); spin_lock_init(&dev->reset_flow_resource_lock); - if (MLX5_CAP_GEN_64(mdev, general_obj_types) & - MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM) { - if (MLX5_CAP64_DEV_MEM(mdev, steering_sw_icm_start_address)) { - steering_icm_blocks = - BIT(MLX5_CAP_DEV_MEM(mdev, - log_steering_sw_icm_size) - - MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev)); - - dev->dm.steering_sw_icm_alloc_blocks = - kcalloc(BITS_TO_LONGS(steering_icm_blocks), - sizeof(unsigned long), GFP_KERNEL); - if (!dev->dm.steering_sw_icm_alloc_blocks) - goto err_mp; - } - - if (MLX5_CAP64_DEV_MEM(mdev, - header_modify_sw_icm_start_address)) { - header_modify_icm_blocks = BIT( - MLX5_CAP_DEV_MEM( - mdev, log_header_modify_sw_icm_size) - - MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev)); - - dev->dm.header_modify_sw_icm_alloc_blocks = - kcalloc(BITS_TO_LONGS(header_modify_icm_blocks), - sizeof(unsigned long), GFP_KERNEL); - if (!dev->dm.header_modify_sw_icm_alloc_blocks) - goto err_dm; - } - } - spin_lock_init(&dev->dm.lock); dev->dm.dev = mdev; if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) { err = init_srcu_struct(&dev->mr_srcu); if (err) - goto err_dm; + goto err_mp; } return 0; -err_dm: - kfree(dev->dm.steering_sw_icm_alloc_blocks); - kfree(dev->dm.header_modify_sw_icm_alloc_blocks); - err_mp: mlx5_ib_cleanup_multiport_master(dev); diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 9ae587b74b12..125a507c10ed 100644 --- 
a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -868,7 +868,10 @@ struct mlx5_ib_flow_action { struct { struct mlx5_ib_dev *dev; u32 sub_type; - u32 action_id; + union { + struct mlx5_modify_hdr *modify_hdr; + struct mlx5_pkt_reformat *pkt_reformat; + }; } flow_action_raw; }; }; @@ -881,8 +884,6 @@ struct mlx5_dm { */ spinlock_t lock; DECLARE_BITMAP(memic_alloc_pages, MLX5_MAX_MEMIC_PAGES); - unsigned long *steering_sw_icm_alloc_blocks; - unsigned long *header_modify_sw_icm_alloc_blocks; }; struct mlx5_read_counters_attr { diff --git a/drivers/infiniband/hw/mlx5/srq_cmd.c b/drivers/infiniband/hw/mlx5/srq_cmd.c index b0d0687c7a68..8fc3630a9d4c 100644 --- a/drivers/infiniband/hw/mlx5/srq_cmd.c +++ b/drivers/infiniband/hw/mlx5/srq_cmd.c @@ -86,7 +86,7 @@ struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn) xa_lock(&table->array); srq = xa_load(&table->array, srqn); if (srq) - atomic_inc(&srq->common.refcount); + refcount_inc(&srq->common.refcount); xa_unlock(&table->array); return srq; @@ -592,7 +592,7 @@ int mlx5_cmd_create_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq, if (err) return err; - atomic_set(&srq->common.refcount, 1); + refcount_set(&srq->common.refcount, 1); init_completion(&srq->common.free); err = xa_err(xa_store_irq(&table->array, srq->srqn, srq, GFP_KERNEL)); @@ -675,7 +675,7 @@ static int srq_event_notifier(struct notifier_block *nb, xa_lock(&table->array); srq = xa_load(&table->array, srqn); if (srq) - atomic_inc(&srq->common.refcount); + refcount_inc(&srq->common.refcount); xa_unlock(&table->array); if (!srq) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 78fa777c87b1..c332b4761816 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -293,7 +293,8 @@ int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req) const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; mapping[i + off] = ib_dma_map_page(ca, skb_frag_page(frag), - frag->page_offset, skb_frag_size(frag), + skb_frag_off(frag), + skb_frag_size(frag), DMA_TO_DEVICE); if (unlikely(ib_dma_mapping_error(ca, mapping[i + off]))) goto partial_error; diff --git a/drivers/isdn/hardware/mISDN/avmfritz.c b/drivers/isdn/hardware/mISDN/avmfritz.c index 81f2b183acc8..1137dd152b5c 100644 --- a/drivers/isdn/hardware/mISDN/avmfritz.c +++ b/drivers/isdn/hardware/mISDN/avmfritz.c @@ -509,8 +509,7 @@ HDLC_irq_xpr(struct bchannel *bch) if (bch->tx_skb && bch->tx_idx < bch->tx_skb->len) { hdlc_fill_fifo(bch); } else { - if (bch->tx_skb) - dev_kfree_skb(bch->tx_skb); + dev_kfree_skb(bch->tx_skb); if (get_next_bframe(bch)) { hdlc_fill_fifo(bch); test_and_clear_bit(FLG_TX_EMPTY, &bch->Flags); diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c index 4a069582fc6b..2330a7d24267 100644 --- a/drivers/isdn/hardware/mISDN/hfcpci.c +++ b/drivers/isdn/hardware/mISDN/hfcpci.c @@ -1119,8 +1119,7 @@ tx_birq(struct bchannel *bch) if (bch->tx_skb && bch->tx_idx < bch->tx_skb->len) hfcpci_fill_fifo(bch); else { - if (bch->tx_skb) - dev_kfree_skb(bch->tx_skb); + dev_kfree_skb(bch->tx_skb); if (get_next_bframe(bch)) hfcpci_fill_fifo(bch); } @@ -1132,8 +1131,7 @@ tx_dirq(struct dchannel *dch) if (dch->tx_skb && dch->tx_idx < dch->tx_skb->len) hfcpci_fill_dfifo(dch->hw); else { - if (dch->tx_skb) - dev_kfree_skb(dch->tx_skb); + dev_kfree_skb(dch->tx_skb); if (get_next_dframe(dch)) hfcpci_fill_dfifo(dch->hw); } diff --git 
a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c index f915399d75ca..bca880213e91 100644 --- a/drivers/isdn/hardware/mISDN/mISDNipac.c +++ b/drivers/isdn/hardware/mISDN/mISDNipac.c @@ -190,8 +190,7 @@ isac_rme_irq(struct isac_hw *isac) #endif } WriteISAC(isac, ISAC_CMDR, 0x80); - if (isac->dch.rx_skb) - dev_kfree_skb(isac->dch.rx_skb); + dev_kfree_skb(isac->dch.rx_skb); isac->dch.rx_skb = NULL; } else { count = ReadISAC(isac, ISAC_RBCL) & 0x1f; @@ -210,8 +209,7 @@ isac_xpr_irq(struct isac_hw *isac) if (isac->dch.tx_skb && isac->dch.tx_idx < isac->dch.tx_skb->len) { isac_fill_fifo(isac); } else { - if (isac->dch.tx_skb) - dev_kfree_skb(isac->dch.tx_skb); + dev_kfree_skb(isac->dch.tx_skb); if (get_next_dframe(&isac->dch)) isac_fill_fifo(isac); } @@ -464,8 +462,7 @@ isacsx_rme_irq(struct isac_hw *isac) isac->dch.err_crc++; #endif WriteISAC(isac, ISACX_CMDRD, ISACX_CMDRD_RMC); - if (isac->dch.rx_skb) - dev_kfree_skb(isac->dch.rx_skb); + dev_kfree_skb(isac->dch.rx_skb); isac->dch.rx_skb = NULL; } else { count = ReadISAC(isac, ISACX_RBCLD) & 0x1f; @@ -1012,8 +1009,7 @@ hscx_xpr(struct hscx_hw *hx) if (hx->bch.tx_skb && hx->bch.tx_idx < hx->bch.tx_skb->len) { hscx_fill_fifo(hx); } else { - if (hx->bch.tx_skb) - dev_kfree_skb(hx->bch.tx_skb); + dev_kfree_skb(hx->bch.tx_skb); if (get_next_bframe(&hx->bch)) { hscx_fill_fifo(hx); test_and_clear_bit(FLG_TX_EMPTY, &hx->bch.Flags); diff --git a/drivers/isdn/hardware/mISDN/mISDNisar.c b/drivers/isdn/hardware/mISDN/mISDNisar.c index fd5c52f37802..4a3e748a1c26 100644 --- a/drivers/isdn/hardware/mISDN/mISDNisar.c +++ b/drivers/isdn/hardware/mISDN/mISDNisar.c @@ -690,8 +690,7 @@ send_next(struct isar_ch *ch) } } } - if (ch->bch.tx_skb) - dev_kfree_skb(ch->bch.tx_skb); + dev_kfree_skb(ch->bch.tx_skb); if (get_next_bframe(&ch->bch)) { isar_fill_fifo(ch); test_and_clear_bit(FLG_TX_EMPTY, &ch->bch.Flags); diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c index 4e30affd1a7c..61caa7e50b9a 100644 --- a/drivers/isdn/hardware/mISDN/netjet.c +++ b/drivers/isdn/hardware/mISDN/netjet.c @@ -605,8 +605,7 @@ bc_next_frame(struct tiger_ch *bc) if (bc->bch.tx_skb && bc->bch.tx_idx < bc->bch.tx_skb->len) { fill_dma(bc); } else { - if (bc->bch.tx_skb) - dev_kfree_skb(bc->bch.tx_skb); + dev_kfree_skb(bc->bch.tx_skb); if (get_next_bframe(&bc->bch)) { fill_dma(bc); test_and_clear_bit(FLG_TX_EMPTY, &bc->bch.Flags); diff --git a/drivers/isdn/hardware/mISDN/w6692.c b/drivers/isdn/hardware/mISDN/w6692.c index 2402608dc98d..bad55fdacd36 100644 --- a/drivers/isdn/hardware/mISDN/w6692.c +++ b/drivers/isdn/hardware/mISDN/w6692.c @@ -356,8 +356,7 @@ handle_rxD(struct w6692_hw *card) { card->dch.err_rx++; #endif } - if (card->dch.rx_skb) - dev_kfree_skb(card->dch.rx_skb); + dev_kfree_skb(card->dch.rx_skb); card->dch.rx_skb = NULL; WriteW6692(card, W_D_CMDR, W_D_CMDR_RACK | W_D_CMDR_RRST); } else { @@ -376,8 +375,7 @@ handle_txD(struct w6692_hw *card) { if (card->dch.tx_skb && card->dch.tx_idx < card->dch.tx_skb->len) { W6692_fill_Dfifo(card); } else { - if (card->dch.tx_skb) - dev_kfree_skb(card->dch.tx_skb); + dev_kfree_skb(card->dch.tx_skb); if (get_next_dframe(&card->dch)) W6692_fill_Dfifo(card); } @@ -636,8 +634,7 @@ send_next(struct w6692_ch *wch) if (wch->bch.tx_skb && wch->bch.tx_idx < wch->bch.tx_skb->len) { W6692_fill_Bfifo(wch); } else { - if (wch->bch.tx_skb) - dev_kfree_skb(wch->bch.tx_skb); + dev_kfree_skb(wch->bch.tx_skb); if (get_next_bframe(&wch->bch)) { W6692_fill_Bfifo(wch); 
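/* Sketch (not part of the w6692.c hunk): the rationale behind the
 * mISDN cleanups in this series. dev_kfree_skb() expands to
 * consume_skb(), which is a no-op for a NULL skb, so the guarded
 * form
 *
 *	if (bch->tx_skb)
 *		dev_kfree_skb(bch->tx_skb);
 *
 * is equivalent to the bare call
 *
 *	dev_kfree_skb(bch->tx_skb);
 */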
test_and_clear_bit(FLG_TX_EMPTY, &wch->bch.Flags); diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c index 447f241467bd..b57dcb834594 100644 --- a/drivers/isdn/mISDN/l1oip_core.c +++ b/drivers/isdn/mISDN/l1oip_core.c @@ -1254,8 +1254,7 @@ release_card(struct l1oip *hc) mISDN_freebchannel(hc->chan[ch].bch); kfree(hc->chan[ch].bch); #ifdef REORDER_DEBUG - if (hc->chan[ch].disorder_skb) - dev_kfree_skb(hc->chan[ch].disorder_skb); + dev_kfree_skb(hc->chan[ch].disorder_skb); #endif } } diff --git a/drivers/isdn/mISDN/layer2.c b/drivers/isdn/mISDN/layer2.c index 68a481516729..5bf7fcb282c4 100644 --- a/drivers/isdn/mISDN/layer2.c +++ b/drivers/isdn/mISDN/layer2.c @@ -900,8 +900,7 @@ l2_disconnect(struct FsmInst *fi, int event, void *arg) send_uframe(l2, NULL, DISC | 0x10, CMD); mISDN_FsmDelTimer(&l2->t203, 1); restart_t200(l2, 2); - if (skb) - dev_kfree_skb(skb); + dev_kfree_skb(skb); } static void @@ -1722,8 +1721,7 @@ l2_set_own_busy(struct FsmInst *fi, int event, void *arg) enquiry_cr(l2, RNR, RSP, 0); test_and_clear_bit(FLG_ACK_PEND, &l2->flag); } - if (skb) - dev_kfree_skb(skb); + dev_kfree_skb(skb); } static void @@ -1736,8 +1734,7 @@ l2_clear_own_busy(struct FsmInst *fi, int event, void *arg) enquiry_cr(l2, RR, RSP, 0); test_and_clear_bit(FLG_ACK_PEND, &l2->flag); } - if (skb) - dev_kfree_skb(skb); + dev_kfree_skb(skb); } static void diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c index fa2237e7bcf8..27aa32914425 100644 --- a/drivers/isdn/mISDN/stack.c +++ b/drivers/isdn/mISDN/stack.c @@ -75,8 +75,7 @@ send_socklist(struct mISDN_sock_list *sl, struct sk_buff *skb) cskb = NULL; } read_unlock(&sl->lock); - if (cskb) - dev_kfree_skb(cskb); + dev_kfree_skb(cskb); } static void @@ -134,8 +133,7 @@ send_layer2(struct mISDNstack *st, struct sk_buff *skb) } out: mutex_unlock(&st->lmutex); - if (skb) - dev_kfree_skb(skb); + dev_kfree_skb(skb); } static inline int diff --git a/drivers/isdn/mISDN/tei.c b/drivers/isdn/mISDN/tei.c index a4fa594e1caf..59d28cb19738 100644 --- a/drivers/isdn/mISDN/tei.c +++ b/drivers/isdn/mISDN/tei.c @@ -1328,10 +1328,8 @@ mgr_bcast(struct mISDNchannel *ch, struct sk_buff *skb) } out: read_unlock_irqrestore(&mgr->lock, flags); - if (cskb) - dev_kfree_skb(cskb); - if (skb) - dev_kfree_skb(skb); + dev_kfree_skb(cskb); + dev_kfree_skb(skb); return 0; } diff --git a/drivers/net/arcnet/capmode.c b/drivers/net/arcnet/capmode.c index b780be6f41ff..c09b567845e1 100644 --- a/drivers/net/arcnet/capmode.c +++ b/drivers/net/arcnet/capmode.c @@ -44,7 +44,7 @@ static void rx(struct net_device *dev, int bufnum, { struct arcnet_local *lp = netdev_priv(dev); struct sk_buff *skb; - struct archdr *pkt = pkthdr; + struct archdr *pkt; char *pktbuf, *pkthdrbuf; int ofs; diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c index 1360f1ffe070..f3f86ef68ae0 100644 --- a/drivers/net/bonding/bond_debugfs.c +++ b/drivers/net/bonding/bond_debugfs.c @@ -55,11 +55,6 @@ void bond_debug_register(struct bonding *bond) bond->debug_dir = debugfs_create_dir(bond->dev->name, bonding_debug_root); - if (!bond->debug_dir) { - netdev_warn(bond->dev, "failed to register to debugfs\n"); - return; - } - debugfs_create_file("rlb_hash_table", 0400, bond->debug_dir, bond, &bond_debug_rlb_hash_fops); } diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c index ed3c437063dc..40b079162804 100644 --- a/drivers/net/caif/caif_serial.c +++ b/drivers/net/caif/caif_serial.c @@ -94,26 +94,20 @@ static inline void 
update_tty_status(struct ser_device *ser) } static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty) { - ser->debugfs_tty_dir = - debugfs_create_dir(tty->name, debugfsdir); - if (!IS_ERR(ser->debugfs_tty_dir)) { - debugfs_create_blob("last_tx_msg", 0400, - ser->debugfs_tty_dir, - &ser->tx_blob); + ser->debugfs_tty_dir = debugfs_create_dir(tty->name, debugfsdir); - debugfs_create_blob("last_rx_msg", 0400, - ser->debugfs_tty_dir, - &ser->rx_blob); + debugfs_create_blob("last_tx_msg", 0400, ser->debugfs_tty_dir, + &ser->tx_blob); - debugfs_create_x32("ser_state", 0400, - ser->debugfs_tty_dir, - (u32 *)&ser->state); + debugfs_create_blob("last_rx_msg", 0400, ser->debugfs_tty_dir, + &ser->rx_blob); - debugfs_create_x8("tty_status", 0400, - ser->debugfs_tty_dir, - &ser->tty_status); + debugfs_create_x32("ser_state", 0400, ser->debugfs_tty_dir, + (u32 *)&ser->state); + + debugfs_create_x8("tty_status", 0400, ser->debugfs_tty_dir, + &ser->tty_status); - } ser->tx_blob.data = ser->tx_data; ser->tx_blob.size = 0; ser->rx_blob.data = ser->rx_data; diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c index 27e93a438dd9..eb426822ad06 100644 --- a/drivers/net/caif/caif_virtio.c +++ b/drivers/net/caif/caif_virtio.c @@ -623,11 +623,7 @@ static void cfv_netdev_setup(struct net_device *netdev) /* Create debugfs counters for the device */ static inline void debugfs_init(struct cfv_info *cfv) { - cfv->debugfs = - debugfs_create_dir(netdev_name(cfv->ndev), NULL); - - if (IS_ERR(cfv->debugfs)) - return; + cfv->debugfs = debugfs_create_dir(netdev_name(cfv->ndev), NULL); debugfs_create_u32("rx-napi-complete", 0400, cfv->debugfs, &cfv->stats.rx_napi_complete); diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index ab585900a057..17c166cc8482 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -120,6 +120,19 @@ config CAN_JANZ_ICAN3 This driver can also be built as a module. If so, the module will be called janz-ican3.ko. +config CAN_KVASER_PCIEFD + depends on PCI + tristate "Kvaser PCIe FD cards" + help + This is a driver for the Kvaser PCI Express CAN FD family. 
+ + Supported devices: + Kvaser PCIEcan 4xHS + Kvaser PCIEcan 2xHS v2 + Kvaser PCIEcan HS v2 + Kvaser Mini PCI Express HS v2 + Kvaser Mini PCI Express 2xHS v2 + config CAN_SUN4I tristate "Allwinner A10 CAN controller" depends on MACH_SUN4I || MACH_SUN7I || COMPILE_TEST diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile index 44922bf29b6a..22164300122d 100644 --- a/drivers/net/can/Makefile +++ b/drivers/net/can/Makefile @@ -25,6 +25,7 @@ obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o obj-$(CONFIG_CAN_GRCAN) += grcan.o obj-$(CONFIG_CAN_IFI_CANFD) += ifi_canfd/ obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o +obj-$(CONFIG_CAN_KVASER_PCIEFD) += kvaser_pciefd.o obj-$(CONFIG_CAN_MSCAN) += mscan/ obj-$(CONFIG_CAN_M_CAN) += m_can/ obj-$(CONFIG_CAN_PEAK_PCIEFD) += peak_canfd/ diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 483d270664cc..ac86be52b461 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2005 Marc Kleine-Budde, Pengutronix +/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix * Copyright (C) 2006 Andrey Volkov, Varma Electronics * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com> */ @@ -12,6 +11,7 @@ #include <linux/if_arp.h> #include <linux/workqueue.h> #include <linux/can.h> +#include <linux/can/can-ml.h> #include <linux/can/dev.h> #include <linux/can/skb.h> #include <linux/can/netlink.h> @@ -62,8 +62,7 @@ EXPORT_SYMBOL_GPL(can_len2dlc); #define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */ #define CAN_CALC_SYNC_SEG 1 -/* - * Bit-timing calculation derived from: +/* Bit-timing calculation derived from: * * Code based on LinCAN sources and H8S2638 project * Copyright 2004-2006 Pavel Pisa - DCE FELK CVUT cz @@ -75,10 +74,11 @@ EXPORT_SYMBOL_GPL(can_len2dlc); * registers of the CAN controller. You can find more information * in the header file linux/can/netlink.h. 
*/ -static int can_update_sample_point(const struct can_bittiming_const *btc, - unsigned int sample_point_nominal, unsigned int tseg, - unsigned int *tseg1_ptr, unsigned int *tseg2_ptr, - unsigned int *sample_point_error_ptr) +static int +can_update_sample_point(const struct can_bittiming_const *btc, + unsigned int sample_point_nominal, unsigned int tseg, + unsigned int *tseg1_ptr, unsigned int *tseg2_ptr, + unsigned int *sample_point_error_ptr) { unsigned int sample_point_error, best_sample_point_error = UINT_MAX; unsigned int sample_point, best_sample_point = 0; @@ -86,7 +86,9 @@ static int can_update_sample_point(const struct can_bittiming_const *btc, int i; for (i = 0; i <= 1; i++) { - tseg2 = tseg + CAN_CALC_SYNC_SEG - (sample_point_nominal * (tseg + CAN_CALC_SYNC_SEG)) / 1000 - i; + tseg2 = tseg + CAN_CALC_SYNC_SEG - + (sample_point_nominal * (tseg + CAN_CALC_SYNC_SEG)) / + 1000 - i; tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max); tseg1 = tseg - tseg2; if (tseg1 > btc->tseg1_max) { @@ -94,10 +96,12 @@ static int can_update_sample_point(const struct can_bittiming_const *btc, tseg2 = tseg - tseg1; } - sample_point = 1000 * (tseg + CAN_CALC_SYNC_SEG - tseg2) / (tseg + CAN_CALC_SYNC_SEG); + sample_point = 1000 * (tseg + CAN_CALC_SYNC_SEG - tseg2) / + (tseg + CAN_CALC_SYNC_SEG); sample_point_error = abs(sample_point_nominal - sample_point); - if ((sample_point <= sample_point_nominal) && (sample_point_error < best_sample_point_error)) { + if (sample_point <= sample_point_nominal && + sample_point_error < best_sample_point_error) { best_sample_point = sample_point; best_sample_point_error = sample_point_error; *tseg1_ptr = tseg1; @@ -148,7 +152,7 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, /* choose brp step which is possible in system */ brp = (brp / btc->brp_inc) * btc->brp_inc; - if ((brp < btc->brp_min) || (brp > btc->brp_max)) + if (brp < btc->brp_min || brp > btc->brp_max) continue; bitrate = priv->clock.freq / (brp * tsegall); @@ -162,7 +166,8 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, if (bitrate_error < best_bitrate_error) best_sample_point_error = UINT_MAX; - can_update_sample_point(btc, sample_point_nominal, tseg / 2, &tseg1, &tseg2, &sample_point_error); + can_update_sample_point(btc, sample_point_nominal, tseg / 2, + &tseg1, &tseg2, &sample_point_error); if (sample_point_error > best_sample_point_error) continue; @@ -191,8 +196,9 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, } /* real sample point */ - bt->sample_point = can_update_sample_point(btc, sample_point_nominal, best_tseg, - &tseg1, &tseg2, NULL); + bt->sample_point = can_update_sample_point(btc, sample_point_nominal, + best_tseg, &tseg1, &tseg2, + NULL); v64 = (u64)best_brp * 1000 * 1000 * 1000; do_div(v64, priv->clock.freq); @@ -216,7 +222,8 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, bt->brp = best_brp; /* real bitrate */ - bt->bitrate = priv->clock.freq / (bt->brp * (CAN_CALC_SYNC_SEG + tseg1 + tseg2)); + bt->bitrate = priv->clock.freq / + (bt->brp * (CAN_CALC_SYNC_SEG + tseg1 + tseg2)); return 0; } @@ -229,8 +236,7 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, } #endif /* CONFIG_CAN_CALC_BITTIMING */ -/* - * Checks the validity of the specified bit-timing parameters prop_seg, +/* Checks the validity of the specified bit-timing parameters prop_seg, * phase_seg1, phase_seg2 and sjw and tries to determine the bitrate * 
prescaler value brp. You can find more information in the header * file linux/can/netlink.h. @@ -270,9 +276,10 @@ static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt, } /* Checks the validity of predefined bitrate settings */ -static int can_validate_bitrate(struct net_device *dev, struct can_bittiming *bt, - const u32 *bitrate_const, - const unsigned int bitrate_const_cnt) +static int +can_validate_bitrate(struct net_device *dev, struct can_bittiming *bt, + const u32 *bitrate_const, + const unsigned int bitrate_const_cnt) { struct can_priv *priv = netdev_priv(dev); unsigned int i; @@ -295,8 +302,7 @@ static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt, { int err; - /* - * Depending on the given can_bittiming parameter structure the CAN + /* Depending on the given can_bittiming parameter structure the CAN * timing parameters are calculated based on the provided bitrate OR * alternatively the CAN timing parameters (tq, prop_seg, etc.) are * provided directly which are then checked and fixed up. @@ -397,8 +403,7 @@ void can_change_state(struct net_device *dev, struct can_frame *cf, } EXPORT_SYMBOL_GPL(can_change_state); -/* - * Local echo of CAN messages +/* Local echo of CAN messages * * CAN network devices *should* support a local echo functionality * (see Documentation/networking/can.rst). To test the handling of CAN @@ -423,8 +428,7 @@ static void can_flush_echo_skb(struct net_device *dev) } } -/* - * Put the skb on the stack to be looped backed locally lateron +/* Put the skb on the stack to be looped backed locally lateron * * The function is typically called in the start_xmit function * of the device driver. The driver must protect access to @@ -446,7 +450,6 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, } if (!priv->echo_skb[idx]) { - skb = can_create_echo_skb(skb); if (!skb) return; @@ -466,7 +469,8 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, } EXPORT_SYMBOL_GPL(can_put_echo_skb); -struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr) +struct sk_buff * +__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr) { struct can_priv *priv = netdev_priv(dev); @@ -493,8 +497,7 @@ struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 return NULL; } -/* - * Get the skb from the stack and loop it back locally +/* Get the skb from the stack and loop it back locally * * The function is typically called when the TX done interrupt * is handled in the device driver. The driver must protect @@ -515,11 +518,10 @@ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx) } EXPORT_SYMBOL_GPL(can_get_echo_skb); -/* - * Remove the skb from the stack and free it. - * - * The function is typically called when TX failed. - */ +/* Remove the skb from the stack and free it. + * + * The function is typically called when TX failed. 
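The echo helpers being reflowed here form a fixed pattern in CAN drivers: can_put_echo_skb() parks the skb at xmit time, can_get_echo_skb() loops it back locally from the TX-done interrupt and returns its length for the stats, and can_free_echo_skb() drops it when the transmit failed. A condensed sketch of that pattern for a hypothetical single-slot driver (my_* names are placeholders; all hardware access is elided):

#include <linux/can/dev.h>
#include <linux/netdevice.h>

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	/* Park the skb in slot 0 before kicking the hardware */
	can_put_echo_skb(skb, dev, 0);
	netif_stop_queue(dev);
	/* ...write the frame to the controller's TX buffer here... */
	return NETDEV_TX_OK;
}

static void my_tx_done_irq(struct net_device *dev)
{
	/* Loop the frame back locally and account for it */
	dev->stats.tx_bytes += can_get_echo_skb(dev, 0);
	dev->stats.tx_packets++;
	netif_wake_queue(dev);
}

static void my_tx_failed(struct net_device *dev)
{
	can_free_echo_skb(dev, 0);	/* no local echo for a lost frame */
	netif_wake_queue(dev);
}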
+ */ void can_free_echo_skb(struct net_device *dev, unsigned int idx) { struct can_priv *priv = netdev_priv(dev); @@ -533,9 +535,7 @@ void can_free_echo_skb(struct net_device *dev, unsigned int idx) } EXPORT_SYMBOL_GPL(can_free_echo_skb); -/* - * CAN device restart for bus-off recovery - */ +/* CAN device restart for bus-off recovery */ static void can_restart(struct net_device *dev) { struct can_priv *priv = netdev_priv(dev); @@ -546,15 +546,14 @@ static void can_restart(struct net_device *dev) BUG_ON(netif_carrier_ok(dev)); - /* - * No synchronization needed because the device is bus-off and + /* No synchronization needed because the device is bus-off and * no messages can come in or go out. */ can_flush_echo_skb(dev); /* send restart message upstream */ skb = alloc_can_err_skb(dev, &cf); - if (skb == NULL) { + if (!skb) { err = -ENOMEM; goto restart; } @@ -580,7 +579,8 @@ restart: static void can_restart_work(struct work_struct *work) { struct delayed_work *dwork = to_delayed_work(work); - struct can_priv *priv = container_of(dwork, struct can_priv, restart_work); + struct can_priv *priv = container_of(dwork, struct can_priv, + restart_work); can_restart(priv->dev); } @@ -589,8 +589,7 @@ int can_restart_now(struct net_device *dev) { struct can_priv *priv = netdev_priv(dev); - /* - * A manual restart is only permitted if automatic restart is + /* A manual restart is only permitted if automatic restart is * disabled and the device is in the bus-off state */ if (priv->restart_ms) @@ -604,8 +603,7 @@ int can_restart_now(struct net_device *dev) return 0; } -/* - * CAN bus-off +/* CAN bus-off * * This functions should be called when the device goes bus-off to * tell the netif layer that no more packets can be sent or received. @@ -708,9 +706,7 @@ struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf) } EXPORT_SYMBOL_GPL(alloc_can_err_skb); -/* - * Allocate and setup space for the CAN network device - */ +/* Allocate and setup space for the CAN network device */ struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max, unsigned int txqs, unsigned int rxqs) { @@ -718,11 +714,24 @@ struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max, struct can_priv *priv; int size; + /* We put the driver's priv, the CAN mid layer priv and the + * echo skb into the netdevice's priv. 
The memory layout for + * the netdev_priv is like this: + * + * +-------------------------+ + * | driver's priv | + * +-------------------------+ + * | struct can_ml_priv | + * +-------------------------+ + * | array of struct sk_buff | + * +-------------------------+ + */ + + size = ALIGN(sizeof_priv, NETDEV_ALIGN) + sizeof(struct can_ml_priv); + if (echo_skb_max) - size = ALIGN(sizeof_priv, sizeof(struct sk_buff *)) + + size = ALIGN(size, sizeof(struct sk_buff *)) + echo_skb_max * sizeof(struct sk_buff *); - else - size = sizeof_priv; dev = alloc_netdev_mqs(size, "can%d", NET_NAME_UNKNOWN, can_setup, txqs, rxqs); @@ -732,10 +741,12 @@ struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max, priv = netdev_priv(dev); priv->dev = dev; + dev->ml_priv = (void *)priv + ALIGN(sizeof_priv, NETDEV_ALIGN); + if (echo_skb_max) { priv->echo_skb_max = echo_skb_max; priv->echo_skb = (void *)priv + - ALIGN(sizeof_priv, sizeof(struct sk_buff *)); + (size - echo_skb_max * sizeof(struct sk_buff *)); } priv->state = CAN_STATE_STOPPED; @@ -746,18 +757,14 @@ struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max, } EXPORT_SYMBOL_GPL(alloc_candev_mqs); -/* - * Free space of the CAN network device - */ +/* Free space of the CAN network device */ void free_candev(struct net_device *dev) { free_netdev(dev); } EXPORT_SYMBOL_GPL(free_candev); -/* - * changing MTU and control mode for CAN/CANFD devices - */ +/* changing MTU and control mode for CAN/CANFD devices */ int can_change_mtu(struct net_device *dev, int new_mtu) { struct can_priv *priv = netdev_priv(dev); @@ -794,8 +801,7 @@ int can_change_mtu(struct net_device *dev, int new_mtu) } EXPORT_SYMBOL_GPL(can_change_mtu); -/* - * Common open function when the device gets opened. +/* Common open function when the device gets opened. * * This function should be called in the open function of the device * driver. @@ -812,7 +818,7 @@ int open_candev(struct net_device *dev) /* For CAN FD the data bitrate has to be >= the arbitration bitrate */ if ((priv->ctrlmode & CAN_CTRLMODE_FD) && (!priv->data_bittiming.bitrate || - (priv->data_bittiming.bitrate < priv->bittiming.bitrate))) { + priv->data_bittiming.bitrate < priv->bittiming.bitrate)) { netdev_err(dev, "incorrect/missing data bit-timing\n"); return -EINVAL; } @@ -848,8 +854,7 @@ void of_can_transceiver(struct net_device *dev) EXPORT_SYMBOL_GPL(of_can_transceiver); #endif -/* - * Common close function for cleanup before the device gets closed. +/* Common close function for cleanup before the device gets closed. * * This function should be called in the close function of the device * driver. 
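With the hunk above, every candev allocation now reserves room for struct can_ml_priv between the driver's private area and the optional echo-skb array, and dev->ml_priv points into that single allocation instead of being set up separately. A user-space sketch of the resulting offset arithmetic, using the same ALIGN() steps as alloc_candev_mqs() (the sizeof values are illustrative stand-ins, not the real structure sizes):

#include <stdio.h>
#include <stddef.h>

/* Mirrors the kernel's ALIGN(); NETDEV_ALIGN is 32 in netdevice.h */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))
#define NETDEV_ALIGN	32

int main(void)
{
	size_t sizeof_priv = 200;	/* driver's priv, example value */
	size_t sizeof_ml_priv = 16;	/* stand-in for sizeof(struct can_ml_priv) */
	size_t echo_skb_max = 4;

	size_t ml_off = ALIGN_UP(sizeof_priv, NETDEV_ALIGN);
	size_t size = ml_off + sizeof_ml_priv;
	size_t echo_off = ALIGN_UP(size, sizeof(void *));

	size = echo_off + echo_skb_max * sizeof(void *);

	printf("ml_priv at +%zu, echo_skb at +%zu, total %zu\n",
	       ml_off, echo_off, size);
	return 0;
}

Note that priv->echo_skb is now derived from the end of the allocation (size minus the array) rather than directly from sizeof_priv, exactly because can_ml_priv sits in between.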
@@ -863,9 +868,7 @@ void close_candev(struct net_device *dev) } EXPORT_SYMBOL_GPL(close_candev); -/* - * CAN netlink interface - */ +/* CAN netlink interface */ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = { [IFLA_CAN_STATE] = { .type = NLA_U32 }, [IFLA_CAN_CTRLMODE] = { .len = sizeof(struct can_ctrlmode) }, @@ -1209,7 +1212,6 @@ static int can_newlink(struct net *src_net, struct net_device *dev, static void can_dellink(struct net_device *dev, struct list_head *head) { - return; } static struct rtnl_link_ops can_link_ops __read_mostly = { @@ -1227,9 +1229,7 @@ static struct rtnl_link_ops can_link_ops __read_mostly = { .fill_xstats = can_fill_xstats, }; -/* - * Register the CAN network device - */ +/* Register the CAN network device */ int register_candev(struct net_device *dev) { struct can_priv *priv = netdev_priv(dev); @@ -1255,22 +1255,19 @@ int register_candev(struct net_device *dev) } EXPORT_SYMBOL_GPL(register_candev); -/* - * Unregister the CAN network device - */ +/* Unregister the CAN network device */ void unregister_candev(struct net_device *dev) { unregister_netdev(dev); } EXPORT_SYMBOL_GPL(unregister_candev); -/* - * Test if a network device is a candev based device +/* Test if a network device is a candev based device * and return the can_priv* if so. */ struct can_priv *safe_candev_priv(struct net_device *dev) { - if ((dev->type != ARPHRD_CAN) || (dev->rtnl_link_ops != &can_link_ops)) + if (dev->type != ARPHRD_CAN || dev->rtnl_link_ops != &can_link_ops) return NULL; return netdev_priv(dev); @@ -1285,7 +1282,7 @@ static __init int can_dev_init(void) err = rtnl_link_register(&can_link_ops); if (!err) - printk(KERN_INFO MOD_DESC "\n"); + pr_info(MOD_DESC "\n"); return err; } diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index fcec8bcb53d6..dc5695dffc2e 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -24,6 +24,7 @@ #include <linux/of.h> #include <linux/of_device.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <linux/regulator/consumer.h> #include <linux/regmap.h> @@ -266,6 +267,7 @@ struct flexcan_stop_mode { struct flexcan_priv { struct can_priv can; struct can_rx_offload offload; + struct device *dev; struct flexcan_regs __iomem *regs; struct flexcan_mb __iomem *tx_mb; @@ -273,6 +275,8 @@ struct flexcan_priv { u8 tx_mb_idx; u8 mb_count; u8 mb_size; + u8 clk_src; /* clock source of CAN Protocol Engine */ + u32 reg_ctrl_default; u32 reg_imask1_default; u32 reg_imask2_default; @@ -462,6 +466,27 @@ static inline void flexcan_error_irq_disable(const struct flexcan_priv *priv) priv->write(reg_ctrl, ®s->ctrl); } +static int flexcan_clks_enable(const struct flexcan_priv *priv) +{ + int err; + + err = clk_prepare_enable(priv->clk_ipg); + if (err) + return err; + + err = clk_prepare_enable(priv->clk_per); + if (err) + clk_disable_unprepare(priv->clk_ipg); + + return err; +} + +static void flexcan_clks_disable(const struct flexcan_priv *priv) +{ + clk_disable_unprepare(priv->clk_per); + clk_disable_unprepare(priv->clk_ipg); +} + static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv) { if (!priv->reg_xceiver) @@ -588,19 +613,13 @@ static int flexcan_get_berr_counter(const struct net_device *dev, const struct flexcan_priv *priv = netdev_priv(dev); int err; - err = clk_prepare_enable(priv->clk_ipg); - if (err) + err = pm_runtime_get_sync(priv->dev); + if (err < 0) return err; - err = clk_prepare_enable(priv->clk_per); - if (err) - goto out_disable_ipg; - err = 
__flexcan_get_berr_counter(dev, bec); - clk_disable_unprepare(priv->clk_per); - out_disable_ipg: - clk_disable_unprepare(priv->clk_ipg); + pm_runtime_put(priv->dev); return err; } @@ -1233,17 +1252,13 @@ static int flexcan_open(struct net_device *dev) struct flexcan_priv *priv = netdev_priv(dev); int err; - err = clk_prepare_enable(priv->clk_ipg); - if (err) + err = pm_runtime_get_sync(priv->dev); + if (err < 0) return err; - err = clk_prepare_enable(priv->clk_per); - if (err) - goto out_disable_ipg; - err = open_candev(dev); if (err) - goto out_disable_per; + goto out_runtime_put; err = request_irq(dev->irq, flexcan_irq, IRQF_SHARED, dev->name, dev); if (err) @@ -1306,10 +1321,8 @@ static int flexcan_open(struct net_device *dev) free_irq(dev->irq, dev); out_close: close_candev(dev); - out_disable_per: - clk_disable_unprepare(priv->clk_per); - out_disable_ipg: - clk_disable_unprepare(priv->clk_ipg); + out_runtime_put: + pm_runtime_put(priv->dev); return err; } @@ -1324,10 +1337,9 @@ static int flexcan_close(struct net_device *dev) can_rx_offload_del(&priv->offload); free_irq(dev->irq, dev); - clk_disable_unprepare(priv->clk_per); - clk_disable_unprepare(priv->clk_ipg); close_candev(dev); + pm_runtime_put(priv->dev); can_led_event(dev, CAN_LED_EVENT_STOP); @@ -1367,20 +1379,20 @@ static int register_flexcandev(struct net_device *dev) struct flexcan_regs __iomem *regs = priv->regs; u32 reg, err; - err = clk_prepare_enable(priv->clk_ipg); + err = flexcan_clks_enable(priv); if (err) return err; - err = clk_prepare_enable(priv->clk_per); - if (err) - goto out_disable_ipg; - /* select "bus clock", chip must be disabled */ err = flexcan_chip_disable(priv); if (err) - goto out_disable_per; + goto out_clks_disable; + reg = priv->read(®s->ctrl); - reg |= FLEXCAN_CTRL_CLK_SRC; + if (priv->clk_src) + reg |= FLEXCAN_CTRL_CLK_SRC; + else + reg &= ~FLEXCAN_CTRL_CLK_SRC; priv->write(reg, ®s->ctrl); err = flexcan_chip_enable(priv); @@ -1406,15 +1418,21 @@ static int register_flexcandev(struct net_device *dev) } err = register_candev(dev); + if (err) + goto out_chip_disable; - /* disable core and turn off clocks */ - out_chip_disable: + /* Disable core and let pm_runtime_put() disable the clocks. + * If CONFIG_PM is not enabled, the clocks will stay powered. 
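From this point on, flexcan no longer toggles its ipg/per clocks directly in the open/close and berr-counter paths; it takes a runtime-PM reference instead, and the new flexcan_runtime_suspend()/flexcan_runtime_resume() callbacks added further down gate the clocks. The shape of that conversion, reduced to its essentials (my_* names are placeholders; the usage-count balancing on error is a common hardening step, not something this patch itself adds):

#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/pm_runtime.h>

struct my_priv {
	struct device *dev;	/* stands in for flexcan_priv::dev */
};

static int my_open(struct net_device *ndev)
{
	struct my_priv *priv = netdev_priv(ndev);
	int err;

	/* Powers the clocks via the driver's runtime_resume callback */
	err = pm_runtime_get_sync(priv->dev);
	if (err < 0) {
		pm_runtime_put_noidle(priv->dev);
		return err;
	}
	/* ...open_candev(), request_irq(), chip start... */
	return 0;
}

static int my_close(struct net_device *ndev)
{
	struct my_priv *priv = netdev_priv(ndev);

	/* ...free_irq(), close_candev()... */
	pm_runtime_put(priv->dev);	/* clocks gated in runtime_suspend */
	return 0;
}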
+ */ flexcan_chip_disable(priv); - out_disable_per: - clk_disable_unprepare(priv->clk_per); - out_disable_ipg: - clk_disable_unprepare(priv->clk_ipg); + pm_runtime_put(priv->dev); + return 0; + + out_chip_disable: + flexcan_chip_disable(priv); + out_clks_disable: + flexcan_clks_disable(priv); return err; } @@ -1473,6 +1491,11 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev) device_set_wakeup_capable(&pdev->dev, true); + if (of_property_read_bool(np, "wakeup-source")) + device_set_wakeup_enable(&pdev->dev, true); + + return 0; + out_put_node: of_node_put(gpr_np); return ret; @@ -1508,6 +1531,7 @@ static int flexcan_probe(struct platform_device *pdev) struct clk *clk_ipg = NULL, *clk_per = NULL; struct flexcan_regs __iomem *regs; int err, irq; + u8 clk_src = 1; u32 clock_freq = 0; reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver"); @@ -1516,9 +1540,12 @@ static int flexcan_probe(struct platform_device *pdev) else if (IS_ERR(reg_xceiver)) reg_xceiver = NULL; - if (pdev->dev.of_node) + if (pdev->dev.of_node) { of_property_read_u32(pdev->dev.of_node, "clock-frequency", &clock_freq); + of_property_read_u8(pdev->dev.of_node, + "fsl,clk-source", &clk_src); + } if (!clock_freq) { clk_ipg = devm_clk_get(&pdev->dev, "ipg"); @@ -1576,6 +1603,7 @@ static int flexcan_probe(struct platform_device *pdev) priv->write = flexcan_write_le; } + priv->dev = &pdev->dev; priv->can.clock.freq = clock_freq; priv->can.bittiming_const = &flexcan_bittiming_const; priv->can.do_set_mode = flexcan_set_mode; @@ -1586,9 +1614,14 @@ static int flexcan_probe(struct platform_device *pdev) priv->regs = regs; priv->clk_ipg = clk_ipg; priv->clk_per = clk_per; + priv->clk_src = clk_src; priv->devtype_data = devtype_data; priv->reg_xceiver = reg_xceiver; + pm_runtime_get_noresume(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + err = register_flexcandev(dev); if (err) { dev_err(&pdev->dev, "registering netdev failed\n"); @@ -1615,6 +1648,7 @@ static int flexcan_remove(struct platform_device *pdev) struct net_device *dev = platform_get_drvdata(pdev); unregister_flexcandev(dev); + pm_runtime_disable(&pdev->dev); free_candev(dev); return 0; @@ -1624,7 +1658,7 @@ static int __maybe_unused flexcan_suspend(struct device *device) { struct net_device *dev = dev_get_drvdata(device); struct flexcan_priv *priv = netdev_priv(dev); - int err; + int err = 0; if (netif_running(dev)) { /* if wakeup is enabled, enter stop mode @@ -1639,20 +1673,22 @@ static int __maybe_unused flexcan_suspend(struct device *device) err = flexcan_chip_disable(priv); if (err) return err; + + err = pm_runtime_force_suspend(device); } netif_stop_queue(dev); netif_device_detach(dev); } priv->can.state = CAN_STATE_SLEEPING; - return 0; + return err; } static int __maybe_unused flexcan_resume(struct device *device) { struct net_device *dev = dev_get_drvdata(device); struct flexcan_priv *priv = netdev_priv(dev); - int err; + int err = 0; priv->can.state = CAN_STATE_ERROR_ACTIVE; if (netif_running(dev)) { @@ -1661,14 +1697,35 @@ static int __maybe_unused flexcan_resume(struct device *device) if (device_may_wakeup(device)) { disable_irq_wake(dev->irq); } else { - err = flexcan_chip_enable(priv); + err = pm_runtime_force_resume(device); if (err) return err; + + err = flexcan_chip_enable(priv); } } + + return err; +} + +static int __maybe_unused flexcan_runtime_suspend(struct device *device) +{ + struct net_device *dev = dev_get_drvdata(device); + struct flexcan_priv *priv = netdev_priv(dev); + + 
flexcan_clks_disable(priv); + return 0; } +static int __maybe_unused flexcan_runtime_resume(struct device *device) +{ + struct net_device *dev = dev_get_drvdata(device); + struct flexcan_priv *priv = netdev_priv(dev); + + return flexcan_clks_enable(priv); +} + static int __maybe_unused flexcan_noirq_suspend(struct device *device) { struct net_device *dev = dev_get_drvdata(device); @@ -1698,6 +1755,7 @@ static int __maybe_unused flexcan_noirq_resume(struct device *device) static const struct dev_pm_ops flexcan_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(flexcan_suspend, flexcan_resume) + SET_RUNTIME_PM_OPS(flexcan_runtime_suspend, flexcan_runtime_resume, NULL) SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(flexcan_noirq_suspend, flexcan_noirq_resume) }; diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c index 19d4f52a8f90..a761092e6ac9 100644 --- a/drivers/net/can/janz-ican3.c +++ b/drivers/net/can/janz-ican3.c @@ -1936,7 +1936,6 @@ static int ican3_probe(struct platform_device *pdev) /* find our IRQ number */ mod->irq = platform_get_irq(pdev, 0); if (mod->irq < 0) { - dev_err(dev, "IRQ line not found\n"); ret = -ENODEV; goto out_free_ndev; } diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c new file mode 100644 index 000000000000..6f766918211a --- /dev/null +++ b/drivers/net/can/kvaser_pciefd.c @@ -0,0 +1,1911 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause +/* Copyright (C) 2018 KVASER AB, Sweden. All rights reserved. + * Parts of this driver are based on the following: + * - Kvaser linux pciefd driver (version 5.25) + * - PEAK linux canfd driver + * - Altera Avalon EPCS flash controller driver + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/pci.h> +#include <linux/can/dev.h> +#include <linux/timer.h> +#include <linux/netdevice.h> +#include <linux/crc32.h> +#include <linux/iopoll.h> + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Kvaser AB <support@kvaser.com>"); +MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices"); + +#define KVASER_PCIEFD_DRV_NAME "kvaser_pciefd" + +#define KVASER_PCIEFD_WAIT_TIMEOUT msecs_to_jiffies(1000) +#define KVASER_PCIEFD_BEC_POLL_FREQ (jiffies + msecs_to_jiffies(200)) +#define KVASER_PCIEFD_MAX_ERR_REP 256 +#define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17 +#define KVASER_PCIEFD_MAX_CAN_CHANNELS 4 +#define KVASER_PCIEFD_DMA_COUNT 2 + +#define KVASER_PCIEFD_DMA_SIZE (4 * 1024) +#define KVASER_PCIEFD_64BIT_DMA_BIT BIT(0) + +#define KVASER_PCIEFD_VENDOR 0x1a07 +#define KVASER_PCIEFD_4HS_ID 0x0d +#define KVASER_PCIEFD_2HS_ID 0x0e +#define KVASER_PCIEFD_HS_ID 0x0f +#define KVASER_PCIEFD_MINIPCIE_HS_ID 0x10 +#define KVASER_PCIEFD_MINIPCIE_2HS_ID 0x11 + +/* PCIe IRQ registers */ +#define KVASER_PCIEFD_IRQ_REG 0x40 +#define KVASER_PCIEFD_IEN_REG 0x50 +/* DMA map */ +#define KVASER_PCIEFD_DMA_MAP_BASE 0x1000 +/* Kvaser KCAN CAN controller registers */ +#define KVASER_PCIEFD_KCAN0_BASE 0x10000 +#define KVASER_PCIEFD_KCAN_BASE_OFFSET 0x1000 +#define KVASER_PCIEFD_KCAN_FIFO_REG 0x100 +#define KVASER_PCIEFD_KCAN_FIFO_LAST_REG 0x180 +#define KVASER_PCIEFD_KCAN_CTRL_REG 0x2c0 +#define KVASER_PCIEFD_KCAN_CMD_REG 0x400 +#define KVASER_PCIEFD_KCAN_IEN_REG 0x408 +#define KVASER_PCIEFD_KCAN_IRQ_REG 0x410 +#define KVASER_PCIEFD_KCAN_TX_NPACKETS_REG 0x414 +#define KVASER_PCIEFD_KCAN_STAT_REG 0x418 +#define KVASER_PCIEFD_KCAN_MODE_REG 0x41c +#define KVASER_PCIEFD_KCAN_BTRN_REG 0x420 +#define KVASER_PCIEFD_KCAN_BTRD_REG 0x428 +#define KVASER_PCIEFD_KCAN_PWM_REG 0x430 +/* Loopback control 
register */ +#define KVASER_PCIEFD_LOOP_REG 0x1f000 +/* System identification and information registers */ +#define KVASER_PCIEFD_SYSID_BASE 0x1f020 +#define KVASER_PCIEFD_SYSID_VERSION_REG (KVASER_PCIEFD_SYSID_BASE + 0x8) +#define KVASER_PCIEFD_SYSID_CANFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0xc) +#define KVASER_PCIEFD_SYSID_BUSFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0x10) +#define KVASER_PCIEFD_SYSID_BUILD_REG (KVASER_PCIEFD_SYSID_BASE + 0x14) +/* Shared receive buffer registers */ +#define KVASER_PCIEFD_SRB_BASE 0x1f200 +#define KVASER_PCIEFD_SRB_CMD_REG (KVASER_PCIEFD_SRB_BASE + 0x200) +#define KVASER_PCIEFD_SRB_IEN_REG (KVASER_PCIEFD_SRB_BASE + 0x204) +#define KVASER_PCIEFD_SRB_IRQ_REG (KVASER_PCIEFD_SRB_BASE + 0x20c) +#define KVASER_PCIEFD_SRB_STAT_REG (KVASER_PCIEFD_SRB_BASE + 0x210) +#define KVASER_PCIEFD_SRB_CTRL_REG (KVASER_PCIEFD_SRB_BASE + 0x218) +/* EPCS flash controller registers */ +#define KVASER_PCIEFD_SPI_BASE 0x1fc00 +#define KVASER_PCIEFD_SPI_RX_REG KVASER_PCIEFD_SPI_BASE +#define KVASER_PCIEFD_SPI_TX_REG (KVASER_PCIEFD_SPI_BASE + 0x4) +#define KVASER_PCIEFD_SPI_STATUS_REG (KVASER_PCIEFD_SPI_BASE + 0x8) +#define KVASER_PCIEFD_SPI_CTRL_REG (KVASER_PCIEFD_SPI_BASE + 0xc) +#define KVASER_PCIEFD_SPI_SSEL_REG (KVASER_PCIEFD_SPI_BASE + 0x14) + +#define KVASER_PCIEFD_IRQ_ALL_MSK 0x1f +#define KVASER_PCIEFD_IRQ_SRB BIT(4) + +#define KVASER_PCIEFD_SYSID_NRCHAN_SHIFT 24 +#define KVASER_PCIEFD_SYSID_MAJOR_VER_SHIFT 16 +#define KVASER_PCIEFD_SYSID_BUILD_VER_SHIFT 1 + +/* Reset DMA buffer 0, 1 and FIFO offset */ +#define KVASER_PCIEFD_SRB_CMD_RDB0 BIT(4) +#define KVASER_PCIEFD_SRB_CMD_RDB1 BIT(5) +#define KVASER_PCIEFD_SRB_CMD_FOR BIT(0) + +/* DMA packet done, buffer 0 and 1 */ +#define KVASER_PCIEFD_SRB_IRQ_DPD0 BIT(8) +#define KVASER_PCIEFD_SRB_IRQ_DPD1 BIT(9) +/* DMA overflow, buffer 0 and 1 */ +#define KVASER_PCIEFD_SRB_IRQ_DOF0 BIT(10) +#define KVASER_PCIEFD_SRB_IRQ_DOF1 BIT(11) +/* DMA underflow, buffer 0 and 1 */ +#define KVASER_PCIEFD_SRB_IRQ_DUF0 BIT(12) +#define KVASER_PCIEFD_SRB_IRQ_DUF1 BIT(13) + +/* DMA idle */ +#define KVASER_PCIEFD_SRB_STAT_DI BIT(15) +/* DMA support */ +#define KVASER_PCIEFD_SRB_STAT_DMA BIT(24) + +/* DMA Enable */ +#define KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE BIT(0) + +/* EPCS flash controller definitions */ +#define KVASER_PCIEFD_CFG_IMG_SZ (64 * 1024) +#define KVASER_PCIEFD_CFG_IMG_OFFSET (31 * 65536L) +#define KVASER_PCIEFD_CFG_MAX_PARAMS 256 +#define KVASER_PCIEFD_CFG_MAGIC 0xcafef00d +#define KVASER_PCIEFD_CFG_PARAM_MAX_SZ 24 +#define KVASER_PCIEFD_CFG_SYS_VER 1 +#define KVASER_PCIEFD_CFG_PARAM_NR_CHAN 130 +#define KVASER_PCIEFD_SPI_TMT BIT(5) +#define KVASER_PCIEFD_SPI_TRDY BIT(6) +#define KVASER_PCIEFD_SPI_RRDY BIT(7) +#define KVASER_PCIEFD_FLASH_ID_EPCS16 0x14 +/* Commands for controlling the onboard flash */ +#define KVASER_PCIEFD_FLASH_RES_CMD 0xab +#define KVASER_PCIEFD_FLASH_READ_CMD 0x3 +#define KVASER_PCIEFD_FLASH_STATUS_CMD 0x5 + +/* Kvaser KCAN definitions */ +#define KVASER_PCIEFD_KCAN_CTRL_EFLUSH (4 << 29) +#define KVASER_PCIEFD_KCAN_CTRL_EFRAME (5 << 29) + +#define KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT 16 +/* Request status packet */ +#define KVASER_PCIEFD_KCAN_CMD_SRQ BIT(0) +/* Abort, flush and reset */ +#define KVASER_PCIEFD_KCAN_CMD_AT BIT(1) + +/* Tx FIFO unaligned read */ +#define KVASER_PCIEFD_KCAN_IRQ_TAR BIT(0) +/* Tx FIFO unaligned end */ +#define KVASER_PCIEFD_KCAN_IRQ_TAE BIT(1) +/* Bus parameter protection error */ +#define KVASER_PCIEFD_KCAN_IRQ_BPP BIT(2) +/* FDF bit when controller is in classic mode */ +#define 
KVASER_PCIEFD_KCAN_IRQ_FDIC BIT(3) +/* Rx FIFO overflow */ +#define KVASER_PCIEFD_KCAN_IRQ_ROF BIT(5) +/* Abort done */ +#define KVASER_PCIEFD_KCAN_IRQ_ABD BIT(13) +/* Tx buffer flush done */ +#define KVASER_PCIEFD_KCAN_IRQ_TFD BIT(14) +/* Tx FIFO overflow */ +#define KVASER_PCIEFD_KCAN_IRQ_TOF BIT(15) +/* Tx FIFO empty */ +#define KVASER_PCIEFD_KCAN_IRQ_TE BIT(16) +/* Transmitter unaligned */ +#define KVASER_PCIEFD_KCAN_IRQ_TAL BIT(17) + +#define KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT 16 + +#define KVASER_PCIEFD_KCAN_STAT_SEQNO_SHIFT 24 +/* Abort request */ +#define KVASER_PCIEFD_KCAN_STAT_AR BIT(7) +/* Idle state. Controller in reset mode and no abort or flush pending */ +#define KVASER_PCIEFD_KCAN_STAT_IDLE BIT(10) +/* Bus off */ +#define KVASER_PCIEFD_KCAN_STAT_BOFF BIT(11) +/* Reset mode request */ +#define KVASER_PCIEFD_KCAN_STAT_RMR BIT(14) +/* Controller in reset mode */ +#define KVASER_PCIEFD_KCAN_STAT_IRM BIT(15) +/* Controller got one-shot capability */ +#define KVASER_PCIEFD_KCAN_STAT_CAP BIT(16) +/* Controller got CAN FD capability */ +#define KVASER_PCIEFD_KCAN_STAT_FD BIT(19) +#define KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MSK (KVASER_PCIEFD_KCAN_STAT_AR | \ + KVASER_PCIEFD_KCAN_STAT_BOFF | KVASER_PCIEFD_KCAN_STAT_RMR | \ + KVASER_PCIEFD_KCAN_STAT_IRM) + +/* Reset mode */ +#define KVASER_PCIEFD_KCAN_MODE_RM BIT(8) +/* Listen only mode */ +#define KVASER_PCIEFD_KCAN_MODE_LOM BIT(9) +/* Error packet enable */ +#define KVASER_PCIEFD_KCAN_MODE_EPEN BIT(12) +/* CAN FD non-ISO */ +#define KVASER_PCIEFD_KCAN_MODE_NIFDEN BIT(15) +/* Acknowledgment packet type */ +#define KVASER_PCIEFD_KCAN_MODE_APT BIT(20) +/* Active error flag enable. Clear to force error passive */ +#define KVASER_PCIEFD_KCAN_MODE_EEN BIT(23) +/* Classic CAN mode */ +#define KVASER_PCIEFD_KCAN_MODE_CCM BIT(31) + +#define KVASER_PCIEFD_KCAN_BTRN_SJW_SHIFT 13 +#define KVASER_PCIEFD_KCAN_BTRN_TSEG1_SHIFT 17 +#define KVASER_PCIEFD_KCAN_BTRN_TSEG2_SHIFT 26 + +#define KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT 16 + +/* Kvaser KCAN packet types */ +#define KVASER_PCIEFD_PACK_TYPE_DATA 0 +#define KVASER_PCIEFD_PACK_TYPE_ACK 1 +#define KVASER_PCIEFD_PACK_TYPE_TXRQ 2 +#define KVASER_PCIEFD_PACK_TYPE_ERROR 3 +#define KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK 4 +#define KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK 5 +#define KVASER_PCIEFD_PACK_TYPE_ACK_DATA 6 +#define KVASER_PCIEFD_PACK_TYPE_STATUS 8 +#define KVASER_PCIEFD_PACK_TYPE_BUS_LOAD 9 + +/* Kvaser KCAN packet common definitions */ +#define KVASER_PCIEFD_PACKET_SEQ_MSK 0xff +#define KVASER_PCIEFD_PACKET_CHID_SHIFT 25 +#define KVASER_PCIEFD_PACKET_TYPE_SHIFT 28 + +/* Kvaser KCAN TDATA and RDATA first word */ +#define KVASER_PCIEFD_RPACKET_IDE BIT(30) +#define KVASER_PCIEFD_RPACKET_RTR BIT(29) +/* Kvaser KCAN TDATA and RDATA second word */ +#define KVASER_PCIEFD_RPACKET_ESI BIT(13) +#define KVASER_PCIEFD_RPACKET_BRS BIT(14) +#define KVASER_PCIEFD_RPACKET_FDF BIT(15) +#define KVASER_PCIEFD_RPACKET_DLC_SHIFT 8 +/* Kvaser KCAN TDATA second word */ +#define KVASER_PCIEFD_TPACKET_SMS BIT(16) +#define KVASER_PCIEFD_TPACKET_AREQ BIT(31) + +/* Kvaser KCAN APACKET */ +#define KVASER_PCIEFD_APACKET_FLU BIT(8) +#define KVASER_PCIEFD_APACKET_CT BIT(9) +#define KVASER_PCIEFD_APACKET_ABL BIT(10) +#define KVASER_PCIEFD_APACKET_NACK BIT(11) + +/* Kvaser KCAN SPACK first word */ +#define KVASER_PCIEFD_SPACK_RXERR_SHIFT 8 +#define KVASER_PCIEFD_SPACK_BOFF BIT(16) +#define KVASER_PCIEFD_SPACK_IDET BIT(20) +#define KVASER_PCIEFD_SPACK_IRM BIT(21) +#define KVASER_PCIEFD_SPACK_RMCD BIT(22) +/* Kvaser KCAN SPACK second word 
*/ +#define KVASER_PCIEFD_SPACK_AUTO BIT(21) +#define KVASER_PCIEFD_SPACK_EWLR BIT(23) +#define KVASER_PCIEFD_SPACK_EPLR BIT(24) + +struct kvaser_pciefd; + +struct kvaser_pciefd_can { + struct can_priv can; + struct kvaser_pciefd *kv_pcie; + void __iomem *reg_base; + struct can_berr_counter bec; + u8 cmd_seq; + int err_rep_cnt; + int echo_idx; + spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */ + spinlock_t echo_lock; /* Locks the message echo buffer */ + struct timer_list bec_poll_timer; + struct completion start_comp, flush_comp; +}; + +struct kvaser_pciefd { + struct pci_dev *pci; + void __iomem *reg_base; + struct kvaser_pciefd_can *can[KVASER_PCIEFD_MAX_CAN_CHANNELS]; + void *dma_data[KVASER_PCIEFD_DMA_COUNT]; + u8 nr_channels; + u32 bus_freq; + u32 freq; + u32 freq_to_ticks_div; +}; + +struct kvaser_pciefd_rx_packet { + u32 header[2]; + u64 timestamp; +}; + +struct kvaser_pciefd_tx_packet { + u32 header[2]; + u8 data[64]; +}; + +static const struct can_bittiming_const kvaser_pciefd_bittiming_const = { + .name = KVASER_PCIEFD_DRV_NAME, + .tseg1_min = 1, + .tseg1_max = 255, + .tseg2_min = 1, + .tseg2_max = 32, + .sjw_max = 16, + .brp_min = 1, + .brp_max = 4096, + .brp_inc = 1, +}; + +struct kvaser_pciefd_cfg_param { + __le32 magic; + __le32 nr; + __le32 len; + u8 data[KVASER_PCIEFD_CFG_PARAM_MAX_SZ]; +}; + +struct kvaser_pciefd_cfg_img { + __le32 version; + __le32 magic; + __le32 crc; + struct kvaser_pciefd_cfg_param params[KVASER_PCIEFD_CFG_MAX_PARAMS]; +}; + +static struct pci_device_id kvaser_pciefd_id_table[] = { + { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4HS_ID), }, + { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2HS_ID), }, + { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_HS_ID), }, + { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_HS_ID), }, + { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2HS_ID), }, + { 0,}, +}; +MODULE_DEVICE_TABLE(pci, kvaser_pciefd_id_table); + +/* Onboard flash memory functions */ +static int kvaser_pciefd_spi_wait_loop(struct kvaser_pciefd *pcie, int msk) +{ + u32 res; + int ret; + + ret = readl_poll_timeout(pcie->reg_base + KVASER_PCIEFD_SPI_STATUS_REG, + res, res & msk, 0, 10); + + return ret; +} + +static int kvaser_pciefd_spi_cmd(struct kvaser_pciefd *pcie, const u8 *tx, + u32 tx_len, u8 *rx, u32 rx_len) +{ + int c; + + iowrite32(BIT(0), pcie->reg_base + KVASER_PCIEFD_SPI_SSEL_REG); + iowrite32(BIT(10), pcie->reg_base + KVASER_PCIEFD_SPI_CTRL_REG); + ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG); + + c = tx_len; + while (c--) { + if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TRDY)) + return -EIO; + + iowrite32(*tx++, pcie->reg_base + KVASER_PCIEFD_SPI_TX_REG); + + if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_RRDY)) + return -EIO; + + ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG); + } + + c = rx_len; + while (c-- > 0) { + if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TRDY)) + return -EIO; + + iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SPI_TX_REG); + + if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_RRDY)) + return -EIO; + + *rx++ = ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG); + } + + if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TMT)) + return -EIO; + + iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SPI_CTRL_REG); + + if (c != -1) { + dev_err(&pcie->pci->dev, "Flash SPI transfer failed\n"); + return -EIO; + } + + return 0; +} + +static int kvaser_pciefd_cfg_read_and_verify(struct kvaser_pciefd *pcie, + struct kvaser_pciefd_cfg_img *img) +{ + 
int offset = KVASER_PCIEFD_CFG_IMG_OFFSET; + int res, crc; + u8 *crc_buff; + + u8 cmd[] = { + KVASER_PCIEFD_FLASH_READ_CMD, + (u8)((offset >> 16) & 0xff), + (u8)((offset >> 8) & 0xff), + (u8)(offset & 0xff) + }; + + res = kvaser_pciefd_spi_cmd(pcie, cmd, ARRAY_SIZE(cmd), (u8 *)img, + KVASER_PCIEFD_CFG_IMG_SZ); + if (res) + return res; + + crc_buff = (u8 *)img->params; + + if (le32_to_cpu(img->version) != KVASER_PCIEFD_CFG_SYS_VER) { + dev_err(&pcie->pci->dev, + "Config flash corrupted, version number is wrong\n"); + return -ENODEV; + } + + if (le32_to_cpu(img->magic) != KVASER_PCIEFD_CFG_MAGIC) { + dev_err(&pcie->pci->dev, + "Config flash corrupted, magic number is wrong\n"); + return -ENODEV; + } + + crc = ~crc32_be(0xffffffff, crc_buff, sizeof(img->params)); + if (le32_to_cpu(img->crc) != crc) { + dev_err(&pcie->pci->dev, + "Stored CRC does not match flash image contents\n"); + return -EIO; + } + + return 0; +} + +static void kvaser_pciefd_cfg_read_params(struct kvaser_pciefd *pcie, + struct kvaser_pciefd_cfg_img *img) +{ + struct kvaser_pciefd_cfg_param *param; + + param = &img->params[KVASER_PCIEFD_CFG_PARAM_NR_CHAN]; + memcpy(&pcie->nr_channels, param->data, le32_to_cpu(param->len)); +} + +static int kvaser_pciefd_read_cfg(struct kvaser_pciefd *pcie) +{ + int res; + struct kvaser_pciefd_cfg_img *img; + + /* Read electronic signature */ + u8 cmd[] = {KVASER_PCIEFD_FLASH_RES_CMD, 0, 0, 0}; + + res = kvaser_pciefd_spi_cmd(pcie, cmd, ARRAY_SIZE(cmd), cmd, 1); + if (res) + return -EIO; + + img = kmalloc(KVASER_PCIEFD_CFG_IMG_SZ, GFP_KERNEL); + if (!img) + return -ENOMEM; + + if (cmd[0] != KVASER_PCIEFD_FLASH_ID_EPCS16) { + dev_err(&pcie->pci->dev, + "Flash id is 0x%x instead of expected EPCS16 (0x%x)\n", + cmd[0], KVASER_PCIEFD_FLASH_ID_EPCS16); + + res = -ENODEV; + goto image_free; + } + + cmd[0] = KVASER_PCIEFD_FLASH_STATUS_CMD; + res = kvaser_pciefd_spi_cmd(pcie, cmd, 1, cmd, 1); + if (res) { + goto image_free; + } else if (cmd[0] & 1) { + res = -EIO; + /* No write is ever done, the WIP should never be set */ + dev_err(&pcie->pci->dev, "Unexpected WIP bit set in flash\n"); + goto image_free; + } + + res = kvaser_pciefd_cfg_read_and_verify(pcie, img); + if (res) { + res = -EIO; + goto image_free; + } + + kvaser_pciefd_cfg_read_params(pcie, img); + +image_free: + kfree(img); + return res; +} + +static void kvaser_pciefd_request_status(struct kvaser_pciefd_can *can) +{ + u32 cmd; + + cmd = KVASER_PCIEFD_KCAN_CMD_SRQ; + cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT; + iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG); +} + +static void kvaser_pciefd_enable_err_gen(struct kvaser_pciefd_can *can) +{ + u32 mode; + unsigned long irq; + + spin_lock_irqsave(&can->lock, irq); + mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); + if (!(mode & KVASER_PCIEFD_KCAN_MODE_EPEN)) { + mode |= KVASER_PCIEFD_KCAN_MODE_EPEN; + iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); + } + spin_unlock_irqrestore(&can->lock, irq); +} + +static void kvaser_pciefd_disable_err_gen(struct kvaser_pciefd_can *can) +{ + u32 mode; + unsigned long irq; + + spin_lock_irqsave(&can->lock, irq); + mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); + mode &= ~KVASER_PCIEFD_KCAN_MODE_EPEN; + iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); + spin_unlock_irqrestore(&can->lock, irq); +} + +static int kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can) +{ + u32 msk; + + msk = KVASER_PCIEFD_KCAN_IRQ_TE | KVASER_PCIEFD_KCAN_IRQ_ROF | + KVASER_PCIEFD_KCAN_IRQ_TOF | 
KVASER_PCIEFD_KCAN_IRQ_ABD | + KVASER_PCIEFD_KCAN_IRQ_TAE | KVASER_PCIEFD_KCAN_IRQ_TAL | + KVASER_PCIEFD_KCAN_IRQ_FDIC | KVASER_PCIEFD_KCAN_IRQ_BPP | + KVASER_PCIEFD_KCAN_IRQ_TAR | KVASER_PCIEFD_KCAN_IRQ_TFD; + + iowrite32(msk, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); + + return 0; +} + +static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can) +{ + u32 mode; + unsigned long irq; + + spin_lock_irqsave(&can->lock, irq); + + mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); + if (can->can.ctrlmode & CAN_CTRLMODE_FD) { + mode &= ~KVASER_PCIEFD_KCAN_MODE_CCM; + if (can->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) + mode |= KVASER_PCIEFD_KCAN_MODE_NIFDEN; + else + mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN; + } else { + mode |= KVASER_PCIEFD_KCAN_MODE_CCM; + mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN; + } + + if (can->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) + mode |= KVASER_PCIEFD_KCAN_MODE_LOM; + + mode |= KVASER_PCIEFD_KCAN_MODE_EEN; + mode |= KVASER_PCIEFD_KCAN_MODE_EPEN; + /* Use ACK packet type */ + mode &= ~KVASER_PCIEFD_KCAN_MODE_APT; + mode &= ~KVASER_PCIEFD_KCAN_MODE_RM; + iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); + + spin_unlock_irqrestore(&can->lock, irq); +} + +static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can) +{ + u32 status; + unsigned long irq; + + spin_lock_irqsave(&can->lock, irq); + iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); + iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD, + can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); + + status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG); + if (status & KVASER_PCIEFD_KCAN_STAT_IDLE) { + u32 cmd; + + /* If controller is already idle, run abort, flush and reset */ + cmd = KVASER_PCIEFD_KCAN_CMD_AT; + cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT; + iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG); + } else if (!(status & KVASER_PCIEFD_KCAN_STAT_RMR)) { + u32 mode; + + /* Put controller in reset mode */ + mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); + mode |= KVASER_PCIEFD_KCAN_MODE_RM; + iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); + } + + spin_unlock_irqrestore(&can->lock, irq); +} + +static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can) +{ + u32 mode; + unsigned long irq; + + del_timer(&can->bec_poll_timer); + + if (!completion_done(&can->flush_comp)) + kvaser_pciefd_start_controller_flush(can); + + if (!wait_for_completion_timeout(&can->flush_comp, + KVASER_PCIEFD_WAIT_TIMEOUT)) { + netdev_err(can->can.dev, "Timeout during bus on flush\n"); + return -ETIMEDOUT; + } + + spin_lock_irqsave(&can->lock, irq); + iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); + iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); + + iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD, + can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); + + mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); + mode &= ~KVASER_PCIEFD_KCAN_MODE_RM; + iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); + spin_unlock_irqrestore(&can->lock, irq); + + if (!wait_for_completion_timeout(&can->start_comp, + KVASER_PCIEFD_WAIT_TIMEOUT)) { + netdev_err(can->can.dev, "Timeout during bus on reset\n"); + return -ETIMEDOUT; + } + /* Reset interrupt handling */ + iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); + iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); + + kvaser_pciefd_set_tx_irq(can); + kvaser_pciefd_setup_controller(can); + + can->can.state 
= CAN_STATE_ERROR_ACTIVE; + netif_wake_queue(can->can.dev); + can->bec.txerr = 0; + can->bec.rxerr = 0; + can->err_rep_cnt = 0; + + return 0; +} + +static void kvaser_pciefd_pwm_stop(struct kvaser_pciefd_can *can) +{ + u8 top; + u32 pwm_ctrl; + unsigned long irq; + + spin_lock_irqsave(&can->lock, irq); + pwm_ctrl = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG); + top = (pwm_ctrl >> KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT) & 0xff; + + /* Set duty cycle to zero */ + pwm_ctrl |= top; + iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG); + spin_unlock_irqrestore(&can->lock, irq); +} + +static void kvaser_pciefd_pwm_start(struct kvaser_pciefd_can *can) +{ + int top, trigger; + u32 pwm_ctrl; + unsigned long irq; + + kvaser_pciefd_pwm_stop(can); + spin_lock_irqsave(&can->lock, irq); + + /* Set frequency to 500 KHz*/ + top = can->kv_pcie->bus_freq / (2 * 500000) - 1; + + pwm_ctrl = top & 0xff; + pwm_ctrl |= (top & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT; + iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG); + + /* Set duty cycle to 95 */ + trigger = (100 * top - 95 * (top + 1) + 50) / 100; + pwm_ctrl = trigger & 0xff; + pwm_ctrl |= (top & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT; + iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG); + spin_unlock_irqrestore(&can->lock, irq); +} + +static int kvaser_pciefd_open(struct net_device *netdev) +{ + int err; + struct kvaser_pciefd_can *can = netdev_priv(netdev); + + err = open_candev(netdev); + if (err) + return err; + + err = kvaser_pciefd_bus_on(can); + if (err) + return err; + + return 0; +} + +static int kvaser_pciefd_stop(struct net_device *netdev) +{ + struct kvaser_pciefd_can *can = netdev_priv(netdev); + int ret = 0; + + /* Don't interrupt ongoing flush */ + if (!completion_done(&can->flush_comp)) + kvaser_pciefd_start_controller_flush(can); + + if (!wait_for_completion_timeout(&can->flush_comp, + KVASER_PCIEFD_WAIT_TIMEOUT)) { + netdev_err(can->can.dev, "Timeout during stop\n"); + ret = -ETIMEDOUT; + } else { + iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); + del_timer(&can->bec_poll_timer); + } + close_candev(netdev); + + return ret; +} + +static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p, + struct kvaser_pciefd_can *can, + struct sk_buff *skb) +{ + struct canfd_frame *cf = (struct canfd_frame *)skb->data; + int packet_size; + int seq = can->echo_idx; + + memset(p, 0, sizeof(*p)); + + if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) + p->header[1] |= KVASER_PCIEFD_TPACKET_SMS; + + if (cf->can_id & CAN_RTR_FLAG) + p->header[0] |= KVASER_PCIEFD_RPACKET_RTR; + + if (cf->can_id & CAN_EFF_FLAG) + p->header[0] |= KVASER_PCIEFD_RPACKET_IDE; + + p->header[0] |= cf->can_id & CAN_EFF_MASK; + p->header[1] |= can_len2dlc(cf->len) << KVASER_PCIEFD_RPACKET_DLC_SHIFT; + p->header[1] |= KVASER_PCIEFD_TPACKET_AREQ; + + if (can_is_canfd_skb(skb)) { + p->header[1] |= KVASER_PCIEFD_RPACKET_FDF; + if (cf->flags & CANFD_BRS) + p->header[1] |= KVASER_PCIEFD_RPACKET_BRS; + if (cf->flags & CANFD_ESI) + p->header[1] |= KVASER_PCIEFD_RPACKET_ESI; + } + + p->header[1] |= seq & KVASER_PCIEFD_PACKET_SEQ_MSK; + + packet_size = cf->len; + memcpy(p->data, cf->data, packet_size); + + return DIV_ROUND_UP(packet_size, 4); +} + +static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb, + struct net_device *netdev) +{ + struct kvaser_pciefd_can *can = netdev_priv(netdev); + unsigned long irq_flags; + struct kvaser_pciefd_tx_packet packet; + int nwords; + u8 count; + + if 
(can_dropped_invalid_skb(netdev, skb)) + return NETDEV_TX_OK; + + nwords = kvaser_pciefd_prepare_tx_packet(&packet, can, skb); + + spin_lock_irqsave(&can->echo_lock, irq_flags); + + /* Prepare and save echo skb in internal slot */ + can_put_echo_skb(skb, netdev, can->echo_idx); + + /* Move echo index to the next slot */ + can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max; + + /* Write header to fifo */ + iowrite32(packet.header[0], + can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG); + iowrite32(packet.header[1], + can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG); + + if (nwords) { + u32 data_last = ((u32 *)packet.data)[nwords - 1]; + + /* Write data to fifo, except last word */ + iowrite32_rep(can->reg_base + + KVASER_PCIEFD_KCAN_FIFO_REG, packet.data, + nwords - 1); + /* Write last word to end of fifo */ + __raw_writel(data_last, can->reg_base + + KVASER_PCIEFD_KCAN_FIFO_LAST_REG); + } else { + /* Complete write to fifo */ + __raw_writel(0, can->reg_base + + KVASER_PCIEFD_KCAN_FIFO_LAST_REG); + } + + count = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NPACKETS_REG); + /* No room for a new message, stop the queue until at least one + * successful transmit + */ + if (count >= KVASER_PCIEFD_CAN_TX_MAX_COUNT || + can->can.echo_skb[can->echo_idx]) + netif_stop_queue(netdev); + + spin_unlock_irqrestore(&can->echo_lock, irq_flags); + + return NETDEV_TX_OK; +} + +static int kvaser_pciefd_set_bittiming(struct kvaser_pciefd_can *can, bool data) +{ + u32 mode, test, btrn; + unsigned long irq_flags; + int ret; + struct can_bittiming *bt; + + if (data) + bt = &can->can.data_bittiming; + else + bt = &can->can.bittiming; + + btrn = ((bt->phase_seg2 - 1) & 0x1f) << + KVASER_PCIEFD_KCAN_BTRN_TSEG2_SHIFT | + (((bt->prop_seg + bt->phase_seg1) - 1) & 0x1ff) << + KVASER_PCIEFD_KCAN_BTRN_TSEG1_SHIFT | + ((bt->sjw - 1) & 0xf) << KVASER_PCIEFD_KCAN_BTRN_SJW_SHIFT | + ((bt->brp - 1) & 0x1fff); + + spin_lock_irqsave(&can->lock, irq_flags); + mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); + + /* Put the circuit in reset mode */ + iowrite32(mode | KVASER_PCIEFD_KCAN_MODE_RM, + can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); + + /* Can only set bittiming if in reset mode */ + ret = readl_poll_timeout(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG, + test, test & KVASER_PCIEFD_KCAN_MODE_RM, + 0, 10); + + if (ret) { + spin_unlock_irqrestore(&can->lock, irq_flags); + return -EBUSY; + } + + if (data) + iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRD_REG); + else + iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRN_REG); + + /* Restore previous reset mode status */ + iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); + + spin_unlock_irqrestore(&can->lock, irq_flags); + return 0; +} + +static int kvaser_pciefd_set_nominal_bittiming(struct net_device *ndev) +{ + return kvaser_pciefd_set_bittiming(netdev_priv(ndev), false); +} + +static int kvaser_pciefd_set_data_bittiming(struct net_device *ndev) +{ + return kvaser_pciefd_set_bittiming(netdev_priv(ndev), true); +} + +static int kvaser_pciefd_set_mode(struct net_device *ndev, enum can_mode mode) +{ + struct kvaser_pciefd_can *can = netdev_priv(ndev); + int ret = 0; + + switch (mode) { + case CAN_MODE_START: + if (!can->can.restart_ms) + ret = kvaser_pciefd_bus_on(can); + break; + default: + return -EOPNOTSUPP; + } + + return ret; +} + +static int kvaser_pciefd_get_berr_counter(const struct net_device *ndev, + struct can_berr_counter *bec) +{ + struct kvaser_pciefd_can *can = netdev_priv(ndev); + + bec->rxerr = can->bec.rxerr; + 
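kvaser_pciefd_set_bittiming() above packs all four timing fields, each stored minus one, into a single BTRN/BTRD word. The packing in isolation, with example (not device-derived) timing values:

#include <stdio.h>
#include <stdint.h>

#define BTRN_SJW_SHIFT   13
#define BTRN_TSEG1_SHIFT 17
#define BTRN_TSEG2_SHIFT 26

/* Same field layout as the btrn expression in
 * kvaser_pciefd_set_bittiming(); every field is written minus one. */
static uint32_t pack_btrn(uint32_t brp, uint32_t prop_seg,
			  uint32_t phase_seg1, uint32_t phase_seg2,
			  uint32_t sjw)
{
	return ((phase_seg2 - 1) & 0x1f) << BTRN_TSEG2_SHIFT |
	       ((prop_seg + phase_seg1 - 1) & 0x1ff) << BTRN_TSEG1_SHIFT |
	       ((sjw - 1) & 0xf) << BTRN_SJW_SHIFT |
	       ((brp - 1) & 0x1fff);
}

int main(void)
{
	/* prints 0x04180001 */
	printf("0x%08x\n", pack_btrn(2, 7, 6, 2, 1));
	return 0;
}

The surrounding function holds the controller in reset mode (polled via KVASER_PCIEFD_KCAN_MODE_RM) while the word is written, since the hardware only accepts bit-timing updates in reset.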
bec->txerr = can->bec.txerr; + return 0; +} + +static void kvaser_pciefd_bec_poll_timer(struct timer_list *data) +{ + struct kvaser_pciefd_can *can = from_timer(can, data, bec_poll_timer); + + kvaser_pciefd_enable_err_gen(can); + kvaser_pciefd_request_status(can); + can->err_rep_cnt = 0; +} + +static const struct net_device_ops kvaser_pciefd_netdev_ops = { + .ndo_open = kvaser_pciefd_open, + .ndo_stop = kvaser_pciefd_stop, + .ndo_start_xmit = kvaser_pciefd_start_xmit, + .ndo_change_mtu = can_change_mtu, +}; + +static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie) +{ + int i; + + for (i = 0; i < pcie->nr_channels; i++) { + struct net_device *netdev; + struct kvaser_pciefd_can *can; + u32 status, tx_npackets; + + netdev = alloc_candev(sizeof(struct kvaser_pciefd_can), + KVASER_PCIEFD_CAN_TX_MAX_COUNT); + if (!netdev) + return -ENOMEM; + + can = netdev_priv(netdev); + netdev->netdev_ops = &kvaser_pciefd_netdev_ops; + can->reg_base = pcie->reg_base + KVASER_PCIEFD_KCAN0_BASE + + i * KVASER_PCIEFD_KCAN_BASE_OFFSET; + + can->kv_pcie = pcie; + can->cmd_seq = 0; + can->err_rep_cnt = 0; + can->bec.txerr = 0; + can->bec.rxerr = 0; + + init_completion(&can->start_comp); + init_completion(&can->flush_comp); + timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer, + 0); + + tx_npackets = ioread32(can->reg_base + + KVASER_PCIEFD_KCAN_TX_NPACKETS_REG); + if (((tx_npackets >> KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT) & + 0xff) < KVASER_PCIEFD_CAN_TX_MAX_COUNT) { + dev_err(&pcie->pci->dev, + "Max Tx count is smaller than expected\n"); + + free_candev(netdev); + return -ENODEV; + } + + can->can.clock.freq = pcie->freq; + can->can.echo_skb_max = KVASER_PCIEFD_CAN_TX_MAX_COUNT; + can->echo_idx = 0; + spin_lock_init(&can->echo_lock); + spin_lock_init(&can->lock); + can->can.bittiming_const = &kvaser_pciefd_bittiming_const; + can->can.data_bittiming_const = &kvaser_pciefd_bittiming_const; + + can->can.do_set_bittiming = kvaser_pciefd_set_nominal_bittiming; + can->can.do_set_data_bittiming = + kvaser_pciefd_set_data_bittiming; + + can->can.do_set_mode = kvaser_pciefd_set_mode; + can->can.do_get_berr_counter = kvaser_pciefd_get_berr_counter; + + can->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY | + CAN_CTRLMODE_FD | + CAN_CTRLMODE_FD_NON_ISO; + + status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG); + if (!(status & KVASER_PCIEFD_KCAN_STAT_FD)) { + dev_err(&pcie->pci->dev, + "CAN FD not supported as expected %d\n", i); + + free_candev(netdev); + return -ENODEV; + } + + if (status & KVASER_PCIEFD_KCAN_STAT_CAP) + can->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT; + + netdev->flags |= IFF_ECHO; + + SET_NETDEV_DEV(netdev, &pcie->pci->dev); + + iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); + iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | + KVASER_PCIEFD_KCAN_IRQ_TFD, + can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); + + pcie->can[i] = can; + kvaser_pciefd_pwm_start(can); + } + + return 0; +} + +static int kvaser_pciefd_reg_candev(struct kvaser_pciefd *pcie) +{ + int i; + + for (i = 0; i < pcie->nr_channels; i++) { + int err = register_candev(pcie->can[i]->can.dev); + + if (err) { + int j; + + /* Unregister all successfully registered devices. 
*/ + for (j = 0; j < i; j++) + unregister_candev(pcie->can[j]->can.dev); + return err; + } + } + + return 0; +} + +static void kvaser_pciefd_write_dma_map(struct kvaser_pciefd *pcie, + dma_addr_t addr, int offset) +{ + u32 word1, word2; + +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + word1 = addr | KVASER_PCIEFD_64BIT_DMA_BIT; + word2 = addr >> 32; +#else + word1 = addr; + word2 = 0; +#endif + iowrite32(word1, pcie->reg_base + offset); + iowrite32(word2, pcie->reg_base + offset + 4); +} + +static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie) +{ + int i; + u32 srb_status; + dma_addr_t dma_addr[KVASER_PCIEFD_DMA_COUNT]; + + /* Disable the DMA */ + iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG); + for (i = 0; i < KVASER_PCIEFD_DMA_COUNT; i++) { + unsigned int offset = KVASER_PCIEFD_DMA_MAP_BASE + 8 * i; + + pcie->dma_data[i] = + dmam_alloc_coherent(&pcie->pci->dev, + KVASER_PCIEFD_DMA_SIZE, + &dma_addr[i], + GFP_KERNEL); + + if (!pcie->dma_data[i] || !dma_addr[i]) { + dev_err(&pcie->pci->dev, "Rx dma_alloc(%u) failure\n", + KVASER_PCIEFD_DMA_SIZE); + return -ENOMEM; + } + + kvaser_pciefd_write_dma_map(pcie, dma_addr[i], offset); + } + + /* Reset Rx FIFO, and both DMA buffers */ + iowrite32(KVASER_PCIEFD_SRB_CMD_FOR | KVASER_PCIEFD_SRB_CMD_RDB0 | + KVASER_PCIEFD_SRB_CMD_RDB1, + pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG); + + srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG); + if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DI)) { + dev_err(&pcie->pci->dev, "DMA not idle before enabling\n"); + return -EIO; + } + + /* Enable the DMA */ + iowrite32(KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE, + pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG); + + return 0; +} + +static int kvaser_pciefd_setup_board(struct kvaser_pciefd *pcie) +{ + u32 sysid, srb_status, build; + u8 sysid_nr_chan; + int ret; + + ret = kvaser_pciefd_read_cfg(pcie); + if (ret) + return ret; + + sysid = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_VERSION_REG); + sysid_nr_chan = (sysid >> KVASER_PCIEFD_SYSID_NRCHAN_SHIFT) & 0xff; + if (pcie->nr_channels != sysid_nr_chan) { + dev_err(&pcie->pci->dev, + "Number of channels does not match: %u vs %u\n", + pcie->nr_channels, + sysid_nr_chan); + return -ENODEV; + } + + if (pcie->nr_channels > KVASER_PCIEFD_MAX_CAN_CHANNELS) + pcie->nr_channels = KVASER_PCIEFD_MAX_CAN_CHANNELS; + + build = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_BUILD_REG); + dev_dbg(&pcie->pci->dev, "Version %u.%u.%u\n", + (sysid >> KVASER_PCIEFD_SYSID_MAJOR_VER_SHIFT) & 0xff, + sysid & 0xff, + (build >> KVASER_PCIEFD_SYSID_BUILD_VER_SHIFT) & 0x7fff); + + srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG); + if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DMA)) { + dev_err(&pcie->pci->dev, + "Hardware without DMA is not supported\n"); + return -ENODEV; + } + + pcie->bus_freq = ioread32(pcie->reg_base + + KVASER_PCIEFD_SYSID_BUSFREQ_REG); + pcie->freq = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_CANFREQ_REG); + pcie->freq_to_ticks_div = pcie->freq / 1000000; + if (pcie->freq_to_ticks_div == 0) + pcie->freq_to_ticks_div = 1; + + /* Turn off all loopback functionality */ + iowrite32(0, pcie->reg_base + KVASER_PCIEFD_LOOP_REG); + return ret; +} + +static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie, + struct kvaser_pciefd_rx_packet *p, + __le32 *data) +{ + struct sk_buff *skb; + struct canfd_frame *cf; + struct can_priv *priv; + struct net_device_stats *stats; + struct skb_shared_hwtstamps *shhwtstamps; + u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7; + 
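Each kvaser_pciefd_handle_*_packet() helper begins like the line above: the channel id sits in bits 27:25 of header[1], with the packet type in the bits above it. A stand-alone decode of those common fields (the 4-bit type mask is an assumption for illustration; only the shifts are given in the driver):

#include <stdio.h>
#include <stdint.h>

#define PACKET_CHID_SHIFT 25	/* KVASER_PCIEFD_PACKET_CHID_SHIFT */
#define PACKET_TYPE_SHIFT 28	/* KVASER_PCIEFD_PACKET_TYPE_SHIFT */

static void decode_header(uint32_t header1)
{
	uint8_t ch_id = (header1 >> PACKET_CHID_SHIFT) & 0x7;
	uint8_t type = (header1 >> PACKET_TYPE_SHIFT) & 0xf;

	printf("channel %u, packet type %u\n", ch_id, type);
}

int main(void)
{
	/* example word: type 8 (STATUS) on channel 2 */
	decode_header(0x84000000u);
	return 0;
}

The ch_id bound check against pcie->nr_channels that follows every such decode is what keeps a malformed packet from indexing past the pcie->can[] array.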
+ if (ch_id >= pcie->nr_channels) + return -EIO; + + priv = &pcie->can[ch_id]->can; + stats = &priv->dev->stats; + + if (p->header[1] & KVASER_PCIEFD_RPACKET_FDF) { + skb = alloc_canfd_skb(priv->dev, &cf); + if (!skb) { + stats->rx_dropped++; + return -ENOMEM; + } + + if (p->header[1] & KVASER_PCIEFD_RPACKET_BRS) + cf->flags |= CANFD_BRS; + + if (p->header[1] & KVASER_PCIEFD_RPACKET_ESI) + cf->flags |= CANFD_ESI; + } else { + skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf); + if (!skb) { + stats->rx_dropped++; + return -ENOMEM; + } + } + + cf->can_id = p->header[0] & CAN_EFF_MASK; + if (p->header[0] & KVASER_PCIEFD_RPACKET_IDE) + cf->can_id |= CAN_EFF_FLAG; + + cf->len = can_dlc2len(p->header[1] >> KVASER_PCIEFD_RPACKET_DLC_SHIFT); + + if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR) + cf->can_id |= CAN_RTR_FLAG; + else + memcpy(cf->data, data, cf->len); + + shhwtstamps = skb_hwtstamps(skb); + + shhwtstamps->hwtstamp = + ns_to_ktime(div_u64(p->timestamp * 1000, + pcie->freq_to_ticks_div)); + + stats->rx_bytes += cf->len; + stats->rx_packets++; + + return netif_rx(skb); +} + +static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can, + struct can_frame *cf, + enum can_state new_state, + enum can_state tx_state, + enum can_state rx_state) +{ + can_change_state(can->can.dev, cf, tx_state, rx_state); + + if (new_state == CAN_STATE_BUS_OFF) { + struct net_device *ndev = can->can.dev; + unsigned long irq_flags; + + spin_lock_irqsave(&can->lock, irq_flags); + netif_stop_queue(can->can.dev); + spin_unlock_irqrestore(&can->lock, irq_flags); + + /* Prevent CAN controller from auto recover from bus off */ + if (!can->can.restart_ms) { + kvaser_pciefd_start_controller_flush(can); + can_bus_off(ndev); + } + } +} + +static void kvaser_pciefd_packet_to_state(struct kvaser_pciefd_rx_packet *p, + struct can_berr_counter *bec, + enum can_state *new_state, + enum can_state *tx_state, + enum can_state *rx_state) +{ + if (p->header[0] & KVASER_PCIEFD_SPACK_BOFF || + p->header[0] & KVASER_PCIEFD_SPACK_IRM) + *new_state = CAN_STATE_BUS_OFF; + else if (bec->txerr >= 255 || bec->rxerr >= 255) + *new_state = CAN_STATE_BUS_OFF; + else if (p->header[1] & KVASER_PCIEFD_SPACK_EPLR) + *new_state = CAN_STATE_ERROR_PASSIVE; + else if (bec->txerr >= 128 || bec->rxerr >= 128) + *new_state = CAN_STATE_ERROR_PASSIVE; + else if (p->header[1] & KVASER_PCIEFD_SPACK_EWLR) + *new_state = CAN_STATE_ERROR_WARNING; + else if (bec->txerr >= 96 || bec->rxerr >= 96) + *new_state = CAN_STATE_ERROR_WARNING; + else + *new_state = CAN_STATE_ERROR_ACTIVE; + + *tx_state = bec->txerr >= bec->rxerr ? *new_state : 0; + *rx_state = bec->txerr <= bec->rxerr ? 
*new_state : 0; +} + +static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can, + struct kvaser_pciefd_rx_packet *p) +{ + struct can_berr_counter bec; + enum can_state old_state, new_state, tx_state, rx_state; + struct net_device *ndev = can->can.dev; + struct sk_buff *skb; + struct can_frame *cf = NULL; + struct skb_shared_hwtstamps *shhwtstamps; + struct net_device_stats *stats = &ndev->stats; + + old_state = can->can.state; + + bec.txerr = p->header[0] & 0xff; + bec.rxerr = (p->header[0] >> KVASER_PCIEFD_SPACK_RXERR_SHIFT) & 0xff; + + kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state, + &rx_state); + + skb = alloc_can_err_skb(ndev, &cf); + + if (new_state != old_state) { + kvaser_pciefd_change_state(can, cf, new_state, tx_state, + rx_state); + + if (old_state == CAN_STATE_BUS_OFF && + new_state == CAN_STATE_ERROR_ACTIVE && + can->can.restart_ms) { + can->can.can_stats.restarts++; + if (skb) + cf->can_id |= CAN_ERR_RESTARTED; + } + } + + can->err_rep_cnt++; + can->can.can_stats.bus_error++; + stats->rx_errors++; + + can->bec.txerr = bec.txerr; + can->bec.rxerr = bec.rxerr; + + if (!skb) { + stats->rx_dropped++; + return -ENOMEM; + } + + shhwtstamps = skb_hwtstamps(skb); + shhwtstamps->hwtstamp = + ns_to_ktime(div_u64(p->timestamp * 1000, + can->kv_pcie->freq_to_ticks_div)); + cf->can_id |= CAN_ERR_BUSERROR; + + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + + netif_rx(skb); + return 0; +} + +static int kvaser_pciefd_handle_error_packet(struct kvaser_pciefd *pcie, + struct kvaser_pciefd_rx_packet *p) +{ + struct kvaser_pciefd_can *can; + u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7; + + if (ch_id >= pcie->nr_channels) + return -EIO; + + can = pcie->can[ch_id]; + + kvaser_pciefd_rx_error_frame(can, p); + if (can->err_rep_cnt >= KVASER_PCIEFD_MAX_ERR_REP) + /* Do not report more errors, until bec_poll_timer expires */ + kvaser_pciefd_disable_err_gen(can); + /* Start polling the error counters */ + mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ); + return 0; +} + +static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can, + struct kvaser_pciefd_rx_packet *p) +{ + struct can_berr_counter bec; + enum can_state old_state, new_state, tx_state, rx_state; + + old_state = can->can.state; + + bec.txerr = p->header[0] & 0xff; + bec.rxerr = (p->header[0] >> KVASER_PCIEFD_SPACK_RXERR_SHIFT) & 0xff; + + kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state, + &rx_state); + + if (new_state != old_state) { + struct net_device *ndev = can->can.dev; + struct sk_buff *skb; + struct can_frame *cf; + struct skb_shared_hwtstamps *shhwtstamps; + + skb = alloc_can_err_skb(ndev, &cf); + if (!skb) { + struct net_device_stats *stats = &ndev->stats; + + stats->rx_dropped++; + return -ENOMEM; + } + + kvaser_pciefd_change_state(can, cf, new_state, tx_state, + rx_state); + + if (old_state == CAN_STATE_BUS_OFF && + new_state == CAN_STATE_ERROR_ACTIVE && + can->can.restart_ms) { + can->can.can_stats.restarts++; + cf->can_id |= CAN_ERR_RESTARTED; + } + + shhwtstamps = skb_hwtstamps(skb); + shhwtstamps->hwtstamp = + ns_to_ktime(div_u64(p->timestamp * 1000, + can->kv_pcie->freq_to_ticks_div)); + + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + + netif_rx(skb); + } + can->bec.txerr = bec.txerr; + can->bec.rxerr = bec.rxerr; + /* Check if we need to poll the error counters */ + if (bec.txerr || bec.rxerr) + mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ); + + 
return 0;
+}
+
+static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie,
+					      struct kvaser_pciefd_rx_packet *p)
+{
+	struct kvaser_pciefd_can *can;
+	u8 cmdseq;
+	u32 status;
+	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
+
+	if (ch_id >= pcie->nr_channels)
+		return -EIO;
+
+	can = pcie->can[ch_id];
+
+	status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
+	cmdseq = (status >> KVASER_PCIEFD_KCAN_STAT_SEQNO_SHIFT) & 0xff;
+
+	/* Reset done, start abort and flush */
+	if (p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
+	    p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
+	    p->header[1] & KVASER_PCIEFD_SPACK_AUTO &&
+	    cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
+	    status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
+		u32 cmd;
+
+		iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
+			  can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
+		cmd = KVASER_PCIEFD_KCAN_CMD_AT;
+		cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
+		iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
+
+		iowrite32(KVASER_PCIEFD_KCAN_IRQ_TFD,
+			  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+	} else if (p->header[0] & KVASER_PCIEFD_SPACK_IDET &&
+		   p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
+		   cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
+		   status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
+		/* Reset detected, send end of flush if no packets are in FIFO */
+		u8 count = ioread32(can->reg_base +
+				    KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
+
+		if (!count)
+			iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
+				  can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
+	} else if (!(p->header[1] & KVASER_PCIEFD_SPACK_AUTO) &&
+		   cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK)) {
+		/* Response to status request received */
+		kvaser_pciefd_handle_status_resp(can, p);
+		if (can->can.state != CAN_STATE_BUS_OFF &&
+		    can->can.state != CAN_STATE_ERROR_ACTIVE) {
+			mod_timer(&can->bec_poll_timer,
+				  KVASER_PCIEFD_BEC_POLL_FREQ);
+		}
+	} else if (p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
+		   !(status & KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MSK)) {
+		/* Reset to bus on detected */
+		if (!completion_done(&can->start_comp))
+			complete(&can->start_comp);
+	}
+
+	return 0;
+}
+
+static int kvaser_pciefd_handle_eack_packet(struct kvaser_pciefd *pcie,
+					    struct kvaser_pciefd_rx_packet *p)
+{
+	struct kvaser_pciefd_can *can;
+	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
+
+	if (ch_id >= pcie->nr_channels)
+		return -EIO;
+
+	can = pcie->can[ch_id];
+
+	/* If this is the last flushed packet, send end of flush */
+	if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
+		u8 count = ioread32(can->reg_base +
+				    KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
+
+		if (count == 0)
+			iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
+				  can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
+	} else {
+		int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
+		int dlc = can_get_echo_skb(can->can.dev, echo_idx);
+		struct net_device_stats *stats = &can->can.dev->stats;
+
+		stats->tx_bytes += dlc;
+		stats->tx_packets++;
+
+		if (netif_queue_stopped(can->can.dev))
+			netif_wake_queue(can->can.dev);
+	}
+
+	return 0;
+}
+
+static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can,
+					     struct kvaser_pciefd_rx_packet *p)
+{
+	struct sk_buff *skb;
+	struct net_device_stats *stats = &can->can.dev->stats;
+	struct can_frame *cf;
+
+	skb = alloc_can_err_skb(can->can.dev, &cf);
+
+	stats->tx_errors++;
+	if (p->header[0] & KVASER_PCIEFD_APACKET_ABL) {
+		if (skb)
+			cf->can_id |= CAN_ERR_LOSTARB;
+		can->can.can_stats.arbitration_lost++;
+	} else 
if (skb) { + cf->can_id |= CAN_ERR_ACK; + } + + if (skb) { + cf->can_id |= CAN_ERR_BUSERROR; + stats->rx_bytes += cf->can_dlc; + stats->rx_packets++; + netif_rx(skb); + } else { + stats->rx_dropped++; + netdev_warn(can->can.dev, "No memory left for err_skb\n"); + } +} + +static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie, + struct kvaser_pciefd_rx_packet *p) +{ + struct kvaser_pciefd_can *can; + bool one_shot_fail = false; + u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7; + + if (ch_id >= pcie->nr_channels) + return -EIO; + + can = pcie->can[ch_id]; + /* Ignore control packet ACK */ + if (p->header[0] & KVASER_PCIEFD_APACKET_CT) + return 0; + + if (p->header[0] & KVASER_PCIEFD_APACKET_NACK) { + kvaser_pciefd_handle_nack_packet(can, p); + one_shot_fail = true; + } + + if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) { + netdev_dbg(can->can.dev, "Packet was flushed\n"); + } else { + int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK; + int dlc = can_get_echo_skb(can->can.dev, echo_idx); + u8 count = ioread32(can->reg_base + + KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff; + + if (count < KVASER_PCIEFD_CAN_TX_MAX_COUNT && + netif_queue_stopped(can->can.dev)) + netif_wake_queue(can->can.dev); + + if (!one_shot_fail) { + struct net_device_stats *stats = &can->can.dev->stats; + + stats->tx_bytes += dlc; + stats->tx_packets++; + } + } + + return 0; +} + +static int kvaser_pciefd_handle_eflush_packet(struct kvaser_pciefd *pcie, + struct kvaser_pciefd_rx_packet *p) +{ + struct kvaser_pciefd_can *can; + u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7; + + if (ch_id >= pcie->nr_channels) + return -EIO; + + can = pcie->can[ch_id]; + + if (!completion_done(&can->flush_comp)) + complete(&can->flush_comp); + + return 0; +} + +static int kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos, + int dma_buf) +{ + __le32 *buffer = pcie->dma_data[dma_buf]; + __le64 timestamp; + struct kvaser_pciefd_rx_packet packet; + struct kvaser_pciefd_rx_packet *p = &packet; + u8 type; + int pos = *start_pos; + int size; + int ret = 0; + + size = le32_to_cpu(buffer[pos++]); + if (!size) { + *start_pos = 0; + return 0; + } + + p->header[0] = le32_to_cpu(buffer[pos++]); + p->header[1] = le32_to_cpu(buffer[pos++]); + + /* Read 64-bit timestamp */ + memcpy(×tamp, &buffer[pos], sizeof(__le64)); + pos += 2; + p->timestamp = le64_to_cpu(timestamp); + + type = (p->header[1] >> KVASER_PCIEFD_PACKET_TYPE_SHIFT) & 0xf; + switch (type) { + case KVASER_PCIEFD_PACK_TYPE_DATA: + ret = kvaser_pciefd_handle_data_packet(pcie, p, &buffer[pos]); + if (!(p->header[0] & KVASER_PCIEFD_RPACKET_RTR)) { + u8 data_len; + + data_len = can_dlc2len(p->header[1] >> + KVASER_PCIEFD_RPACKET_DLC_SHIFT); + pos += DIV_ROUND_UP(data_len, 4); + } + break; + + case KVASER_PCIEFD_PACK_TYPE_ACK: + ret = kvaser_pciefd_handle_ack_packet(pcie, p); + break; + + case KVASER_PCIEFD_PACK_TYPE_STATUS: + ret = kvaser_pciefd_handle_status_packet(pcie, p); + break; + + case KVASER_PCIEFD_PACK_TYPE_ERROR: + ret = kvaser_pciefd_handle_error_packet(pcie, p); + break; + + case KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK: + ret = kvaser_pciefd_handle_eack_packet(pcie, p); + break; + + case KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK: + ret = kvaser_pciefd_handle_eflush_packet(pcie, p); + break; + + case KVASER_PCIEFD_PACK_TYPE_ACK_DATA: + case KVASER_PCIEFD_PACK_TYPE_BUS_LOAD: + case KVASER_PCIEFD_PACK_TYPE_TXRQ: + dev_info(&pcie->pci->dev, + "Received unexpected packet type 0x%08X\n", type); + break; + + default: + 
dev_err(&pcie->pci->dev, "Unknown packet type 0x%08X\n", type); + ret = -EIO; + break; + } + + if (ret) + return ret; + + /* Position does not point to the end of the package, + * corrupted packet size? + */ + if ((*start_pos + size) != pos) + return -EIO; + + /* Point to the next packet header, if any */ + *start_pos = pos; + + return ret; +} + +static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf) +{ + int pos = 0; + int res = 0; + + do { + res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf); + } while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE); + + return res; +} + +static int kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie) +{ + u32 irq; + + irq = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG); + if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) { + kvaser_pciefd_read_buffer(pcie, 0); + /* Reset DMA buffer 0 */ + iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, + pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG); + } + + if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) { + kvaser_pciefd_read_buffer(pcie, 1); + /* Reset DMA buffer 1 */ + iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, + pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG); + } + + if (irq & KVASER_PCIEFD_SRB_IRQ_DOF0 || + irq & KVASER_PCIEFD_SRB_IRQ_DOF1 || + irq & KVASER_PCIEFD_SRB_IRQ_DUF0 || + irq & KVASER_PCIEFD_SRB_IRQ_DUF1) + dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq); + + iowrite32(irq, pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG); + return 0; +} + +static int kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can) +{ + u32 irq = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); + + if (irq & KVASER_PCIEFD_KCAN_IRQ_TOF) + netdev_err(can->can.dev, "Tx FIFO overflow\n"); + + if (irq & KVASER_PCIEFD_KCAN_IRQ_TFD) { + u8 count = ioread32(can->reg_base + + KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff; + + if (count == 0) + iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH, + can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG); + } + + if (irq & KVASER_PCIEFD_KCAN_IRQ_BPP) + netdev_err(can->can.dev, + "Fail to change bittiming, when not in reset mode\n"); + + if (irq & KVASER_PCIEFD_KCAN_IRQ_FDIC) + netdev_err(can->can.dev, "CAN FD frame in CAN mode\n"); + + if (irq & KVASER_PCIEFD_KCAN_IRQ_ROF) + netdev_err(can->can.dev, "Rx FIFO overflow\n"); + + iowrite32(irq, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); + return 0; +} + +static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev) +{ + struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev; + u32 board_irq; + int i; + + board_irq = ioread32(pcie->reg_base + KVASER_PCIEFD_IRQ_REG); + + if (!(board_irq & KVASER_PCIEFD_IRQ_ALL_MSK)) + return IRQ_NONE; + + if (board_irq & KVASER_PCIEFD_IRQ_SRB) + kvaser_pciefd_receive_irq(pcie); + + for (i = 0; i < pcie->nr_channels; i++) { + if (!pcie->can[i]) { + dev_err(&pcie->pci->dev, + "IRQ mask points to unallocated controller\n"); + break; + } + + /* Check that mask matches channel (i) IRQ mask */ + if (board_irq & (1 << i)) + kvaser_pciefd_transmit_irq(pcie->can[i]); + } + + iowrite32(board_irq, pcie->reg_base + KVASER_PCIEFD_IRQ_REG); + return IRQ_HANDLED; +} + +static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie) +{ + int i; + struct kvaser_pciefd_can *can; + + for (i = 0; i < pcie->nr_channels; i++) { + can = pcie->can[i]; + if (can) { + iowrite32(0, + can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); + kvaser_pciefd_pwm_stop(can); + free_candev(can->can.dev); + } + } +} + +static int kvaser_pciefd_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + int err; + struct kvaser_pciefd *pcie; + + pcie = 
devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL); + if (!pcie) + return -ENOMEM; + + pci_set_drvdata(pdev, pcie); + pcie->pci = pdev; + + err = pci_enable_device(pdev); + if (err) + return err; + + err = pci_request_regions(pdev, KVASER_PCIEFD_DRV_NAME); + if (err) + goto err_disable_pci; + + pcie->reg_base = pci_iomap(pdev, 0, 0); + if (!pcie->reg_base) { + err = -ENOMEM; + goto err_release_regions; + } + + err = kvaser_pciefd_setup_board(pcie); + if (err) + goto err_pci_iounmap; + + err = kvaser_pciefd_setup_dma(pcie); + if (err) + goto err_pci_iounmap; + + pci_set_master(pdev); + + err = kvaser_pciefd_setup_can_ctrls(pcie); + if (err) + goto err_teardown_can_ctrls; + + iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1, + pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG); + + iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1 | + KVASER_PCIEFD_SRB_IRQ_DOF0 | KVASER_PCIEFD_SRB_IRQ_DOF1 | + KVASER_PCIEFD_SRB_IRQ_DUF0 | KVASER_PCIEFD_SRB_IRQ_DUF1, + pcie->reg_base + KVASER_PCIEFD_SRB_IEN_REG); + + /* Reset IRQ handling, expected to be off before */ + iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK, + pcie->reg_base + KVASER_PCIEFD_IRQ_REG); + iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK, + pcie->reg_base + KVASER_PCIEFD_IEN_REG); + + /* Ready the DMA buffers */ + iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, + pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG); + iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, + pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG); + + err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler, + IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie); + if (err) + goto err_teardown_can_ctrls; + + err = kvaser_pciefd_reg_candev(pcie); + if (err) + goto err_free_irq; + + return 0; + +err_free_irq: + free_irq(pcie->pci->irq, pcie); + +err_teardown_can_ctrls: + kvaser_pciefd_teardown_can_ctrls(pcie); + iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG); + pci_clear_master(pdev); + +err_pci_iounmap: + pci_iounmap(pdev, pcie->reg_base); + +err_release_regions: + pci_release_regions(pdev); + +err_disable_pci: + pci_disable_device(pdev); + + return err; +} + +static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie) +{ + struct kvaser_pciefd_can *can; + int i; + + for (i = 0; i < pcie->nr_channels; i++) { + can = pcie->can[i]; + if (can) { + iowrite32(0, + can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); + unregister_candev(can->can.dev); + del_timer(&can->bec_poll_timer); + kvaser_pciefd_pwm_stop(can); + free_candev(can->can.dev); + } + } +} + +static void kvaser_pciefd_remove(struct pci_dev *pdev) +{ + struct kvaser_pciefd *pcie = pci_get_drvdata(pdev); + + kvaser_pciefd_remove_all_ctrls(pcie); + + /* Turn off IRQ generation */ + iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG); + iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK, + pcie->reg_base + KVASER_PCIEFD_IRQ_REG); + iowrite32(0, pcie->reg_base + KVASER_PCIEFD_IEN_REG); + + free_irq(pcie->pci->irq, pcie); + + pci_clear_master(pdev); + pci_iounmap(pdev, pcie->reg_base); + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +static struct pci_driver kvaser_pciefd = { + .name = KVASER_PCIEFD_DRV_NAME, + .id_table = kvaser_pciefd_id_table, + .probe = kvaser_pciefd_probe, + .remove = kvaser_pciefd_remove, +}; + +module_pci_driver(kvaser_pciefd) diff --git a/drivers/net/can/m_can/Kconfig b/drivers/net/can/m_can/Kconfig index ec4b2e117f66..1ff0b7fe81d6 100644 --- a/drivers/net/can/m_can/Kconfig +++ b/drivers/net/can/m_can/Kconfig @@ -1,6 +1,24 @@ # SPDX-License-Identifier: GPL-2.0-only config CAN_M_CAN + tristate "Bosch M_CAN 
support" + ---help--- + Say Y here if you want support for Bosch M_CAN controller framework. + This is common support for devices that embed the Bosch M_CAN IP. + +config CAN_M_CAN_PLATFORM + tristate "Bosch M_CAN support for io-mapped devices" depends on HAS_IOMEM - tristate "Bosch M_CAN devices" + depends on CAN_M_CAN + ---help--- + Say Y here if you want support for IO Mapped Bosch M_CAN controller. + This support is for devices that have the Bosch M_CAN controller + IP embedded into the device and the IP is IO Mapped to the processor. + +config CAN_M_CAN_TCAN4X5X + depends on CAN_M_CAN + depends on REGMAP_SPI + tristate "TCAN4X5X M_CAN device" ---help--- - Say Y here if you want to support for Bosch M_CAN controller. + Say Y here if you want support for Texas Instruments TCAN4x5x + M_CAN controller. This device is a peripherial device that uses the + SPI bus for communication. diff --git a/drivers/net/can/m_can/Makefile b/drivers/net/can/m_can/Makefile index 599ae69cb4a1..52a4a6fbe527 100644 --- a/drivers/net/can/m_can/Makefile +++ b/drivers/net/can/m_can/Makefile @@ -4,3 +4,5 @@ # obj-$(CONFIG_CAN_M_CAN) += m_can.o +obj-$(CONFIG_CAN_M_CAN_PLATFORM) += m_can_platform.o +obj-$(CONFIG_CAN_M_CAN_TCAN4X5X) += tcan4x5x.o diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index deb274a19ba0..562c8317e3aa 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -1,20 +1,14 @@ -/* - * CAN bus driver for Bosch M_CAN controller - * - * Copyright (C) 2014 Freescale Semiconductor, Inc. - * Dong Aisheng <b29396@freescale.com> - * - * Bosch M_CAN user manual can be obtained from: +// SPDX-License-Identifier: GPL-2.0 +// CAN bus driver for Bosch M_CAN controller +// Copyright (C) 2014 Freescale Semiconductor, Inc. +// Dong Aisheng <b29396@freescale.com> +// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/ + +/* Bosch M_CAN user manual can be obtained from: * http://www.bosch-semiconductors.de/media/pdf_1/ipmodules_1/m_can/ * mcan_users_manual_v302.pdf - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. 
*/ -#include <linux/clk.h> -#include <linux/delay.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> @@ -28,11 +22,7 @@ #include <linux/can/dev.h> #include <linux/pinctrl/consumer.h> -/* napi related */ -#define M_CAN_NAPI_WEIGHT 64 - -/* message ram configuration data length */ -#define MRAM_CFG_LEN 8 +#include "m_can.h" /* registers definition */ enum m_can_reg { @@ -86,28 +76,11 @@ enum m_can_reg { M_CAN_TXEFA = 0xf8, }; -/* m_can lec values */ -enum m_can_lec_type { - LEC_NO_ERROR = 0, - LEC_STUFF_ERROR, - LEC_FORM_ERROR, - LEC_ACK_ERROR, - LEC_BIT1_ERROR, - LEC_BIT0_ERROR, - LEC_CRC_ERROR, - LEC_UNUSED, -}; +/* napi related */ +#define M_CAN_NAPI_WEIGHT 64 -enum m_can_mram_cfg { - MRAM_SIDF = 0, - MRAM_XIDF, - MRAM_RXF0, - MRAM_RXF1, - MRAM_RXB, - MRAM_TXE, - MRAM_TXB, - MRAM_CFG_NUM, -}; +/* message ram configuration data length */ +#define MRAM_CFG_LEN 8 /* Core Release Register (CREL) */ #define CREL_REL_SHIFT 28 @@ -347,90 +320,85 @@ enum m_can_mram_cfg { #define TX_EVENT_MM_SHIFT TX_BUF_MM_SHIFT #define TX_EVENT_MM_MASK (0xff << TX_EVENT_MM_SHIFT) -/* address offset and element number for each FIFO/Buffer in the Message RAM */ -struct mram_cfg { - u16 off; - u8 num; -}; - -/* m_can private data structure */ -struct m_can_priv { - struct can_priv can; /* must be the first member */ - struct napi_struct napi; - struct net_device *dev; - struct device *device; - struct clk *hclk; - struct clk *cclk; - void __iomem *base; - u32 irqstatus; - int version; - - /* message ram configuration */ - void __iomem *mram_base; - struct mram_cfg mcfg[MRAM_CFG_NUM]; -}; +static inline u32 m_can_read(struct m_can_classdev *cdev, enum m_can_reg reg) +{ + return cdev->ops->read_reg(cdev, reg); +} -static inline u32 m_can_read(const struct m_can_priv *priv, enum m_can_reg reg) +static inline void m_can_write(struct m_can_classdev *cdev, enum m_can_reg reg, + u32 val) { - return readl(priv->base + reg); + cdev->ops->write_reg(cdev, reg, val); } -static inline void m_can_write(const struct m_can_priv *priv, - enum m_can_reg reg, u32 val) +static u32 m_can_fifo_read(struct m_can_classdev *cdev, + u32 fgi, unsigned int offset) { - writel(val, priv->base + reg); + u32 addr_offset = cdev->mcfg[MRAM_RXF0].off + fgi * RXF0_ELEMENT_SIZE + + offset; + + return cdev->ops->read_fifo(cdev, addr_offset); } -static inline u32 m_can_fifo_read(const struct m_can_priv *priv, - u32 fgi, unsigned int offset) +static void m_can_fifo_write(struct m_can_classdev *cdev, + u32 fpi, unsigned int offset, u32 val) { - return readl(priv->mram_base + priv->mcfg[MRAM_RXF0].off + - fgi * RXF0_ELEMENT_SIZE + offset); + u32 addr_offset = cdev->mcfg[MRAM_TXB].off + fpi * TXB_ELEMENT_SIZE + + offset; + + cdev->ops->write_fifo(cdev, addr_offset, val); } -static inline void m_can_fifo_write(const struct m_can_priv *priv, - u32 fpi, unsigned int offset, u32 val) +static inline void m_can_fifo_write_no_off(struct m_can_classdev *cdev, + u32 fpi, u32 val) { - writel(val, priv->mram_base + priv->mcfg[MRAM_TXB].off + - fpi * TXB_ELEMENT_SIZE + offset); + cdev->ops->write_fifo(cdev, fpi, val); } -static inline u32 m_can_txe_fifo_read(const struct m_can_priv *priv, - u32 fgi, - u32 offset) { - return readl(priv->mram_base + priv->mcfg[MRAM_TXE].off + - fgi * TXE_ELEMENT_SIZE + offset); +static u32 m_can_txe_fifo_read(struct m_can_classdev *cdev, u32 fgi, u32 offset) +{ + u32 addr_offset = cdev->mcfg[MRAM_TXE].off + fgi * TXE_ELEMENT_SIZE + + offset; + + return cdev->ops->read_fifo(cdev, addr_offset); } -static inline 
bool m_can_tx_fifo_full(const struct m_can_priv *priv) +static inline bool m_can_tx_fifo_full(struct m_can_classdev *cdev) { - return !!(m_can_read(priv, M_CAN_TXFQS) & TXFQS_TFQF); + return !!(m_can_read(cdev, M_CAN_TXFQS) & TXFQS_TFQF); } -static inline void m_can_config_endisable(const struct m_can_priv *priv, - bool enable) +void m_can_config_endisable(struct m_can_classdev *cdev, bool enable) { - u32 cccr = m_can_read(priv, M_CAN_CCCR); + u32 cccr = m_can_read(cdev, M_CAN_CCCR); u32 timeout = 10; u32 val = 0; + /* Clear the Clock stop request if it was set */ + if (cccr & CCCR_CSR) + cccr &= ~CCCR_CSR; + if (enable) { + /* Clear the Clock stop request if it was set */ + if (cccr & CCCR_CSR) + cccr &= ~CCCR_CSR; + /* enable m_can configuration */ - m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT); + m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT); udelay(5); /* CCCR.CCE can only be set/reset while CCCR.INIT = '1' */ - m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE); + m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE); } else { - m_can_write(priv, M_CAN_CCCR, cccr & ~(CCCR_INIT | CCCR_CCE)); + m_can_write(cdev, M_CAN_CCCR, cccr & ~(CCCR_INIT | CCCR_CCE)); } /* there's a delay for module initialization */ if (enable) val = CCCR_INIT | CCCR_CCE; - while ((m_can_read(priv, M_CAN_CCCR) & (CCCR_INIT | CCCR_CCE)) != val) { + while ((m_can_read(cdev, M_CAN_CCCR) & (CCCR_INIT | CCCR_CCE)) != val) { if (timeout == 0) { - netdev_warn(priv->dev, "Failed to init module\n"); + netdev_warn(cdev->net, "Failed to init module\n"); return; } timeout--; @@ -438,21 +406,38 @@ static inline void m_can_config_endisable(const struct m_can_priv *priv, } } -static inline void m_can_enable_all_interrupts(const struct m_can_priv *priv) +static inline void m_can_enable_all_interrupts(struct m_can_classdev *cdev) { /* Only interrupt line 0 is used in this driver */ - m_can_write(priv, M_CAN_ILE, ILE_EINT0); + m_can_write(cdev, M_CAN_ILE, ILE_EINT0); } -static inline void m_can_disable_all_interrupts(const struct m_can_priv *priv) +static inline void m_can_disable_all_interrupts(struct m_can_classdev *cdev) { - m_can_write(priv, M_CAN_ILE, 0x0); + m_can_write(cdev, M_CAN_ILE, 0x0); +} + +static void m_can_clean(struct net_device *net) +{ + struct m_can_classdev *cdev = netdev_priv(net); + + if (cdev->tx_skb) { + int putidx = 0; + + net->stats.tx_errors++; + if (cdev->version > 30) + putidx = ((m_can_read(cdev, M_CAN_TXFQS) & + TXFQS_TFQPI_MASK) >> TXFQS_TFQPI_SHIFT); + + can_free_echo_skb(cdev->net, putidx); + cdev->tx_skb = NULL; + } } static void m_can_read_fifo(struct net_device *dev, u32 rxfs) { struct net_device_stats *stats = &dev->stats; - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); struct canfd_frame *cf; struct sk_buff *skb; u32 id, fgi, dlc; @@ -460,7 +445,7 @@ static void m_can_read_fifo(struct net_device *dev, u32 rxfs) /* calculate the fifo get index for where to read data */ fgi = (rxfs & RXFS_FGI_MASK) >> RXFS_FGI_SHIFT; - dlc = m_can_fifo_read(priv, fgi, M_CAN_FIFO_DLC); + dlc = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_DLC); if (dlc & RX_BUF_FDF) skb = alloc_canfd_skb(dev, &cf); else @@ -475,7 +460,7 @@ static void m_can_read_fifo(struct net_device *dev, u32 rxfs) else cf->len = get_can_dlc((dlc >> 16) & 0x0F); - id = m_can_fifo_read(priv, fgi, M_CAN_FIFO_ID); + id = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_ID); if (id & RX_BUF_XTD) cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG; else @@ -494,12 +479,12 @@ static void 
m_can_read_fifo(struct net_device *dev, u32 rxfs) for (i = 0; i < cf->len; i += 4) *(u32 *)(cf->data + i) = - m_can_fifo_read(priv, fgi, + m_can_fifo_read(cdev, fgi, M_CAN_FIFO_DATA(i / 4)); } /* acknowledge rx fifo 0 */ - m_can_write(priv, M_CAN_RXF0A, fgi); + m_can_write(cdev, M_CAN_RXF0A, fgi); stats->rx_packets++; stats->rx_bytes += cf->len; @@ -509,11 +494,11 @@ static void m_can_read_fifo(struct net_device *dev, u32 rxfs) static int m_can_do_rx_poll(struct net_device *dev, int quota) { - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); u32 pkts = 0; u32 rxfs; - rxfs = m_can_read(priv, M_CAN_RXF0S); + rxfs = m_can_read(cdev, M_CAN_RXF0S); if (!(rxfs & RXFS_FFL_MASK)) { netdev_dbg(dev, "no messages in fifo0\n"); return 0; @@ -527,7 +512,7 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota) quota--; pkts++; - rxfs = m_can_read(priv, M_CAN_RXF0S); + rxfs = m_can_read(cdev, M_CAN_RXF0S); } if (pkts) @@ -562,12 +547,12 @@ static int m_can_handle_lost_msg(struct net_device *dev) static int m_can_handle_lec_err(struct net_device *dev, enum m_can_lec_type lec_type) { - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; struct can_frame *cf; struct sk_buff *skb; - priv->can.can_stats.bus_error++; + cdev->can.can_stats.bus_error++; stats->rx_errors++; /* propagate the error condition to the CAN stack */ @@ -619,47 +604,51 @@ static int m_can_handle_lec_err(struct net_device *dev, static int __m_can_get_berr_counter(const struct net_device *dev, struct can_berr_counter *bec) { - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); unsigned int ecr; - ecr = m_can_read(priv, M_CAN_ECR); + ecr = m_can_read(cdev, M_CAN_ECR); bec->rxerr = (ecr & ECR_REC_MASK) >> ECR_REC_SHIFT; bec->txerr = (ecr & ECR_TEC_MASK) >> ECR_TEC_SHIFT; return 0; } -static int m_can_clk_start(struct m_can_priv *priv) +static int m_can_clk_start(struct m_can_classdev *cdev) { int err; - err = pm_runtime_get_sync(priv->device); + if (cdev->pm_clock_support == 0) + return 0; + + err = pm_runtime_get_sync(cdev->dev); if (err < 0) { - pm_runtime_put_noidle(priv->device); + pm_runtime_put_noidle(cdev->dev); return err; } return 0; } -static void m_can_clk_stop(struct m_can_priv *priv) +static void m_can_clk_stop(struct m_can_classdev *cdev) { - pm_runtime_put_sync(priv->device); + if (cdev->pm_clock_support) + pm_runtime_put_sync(cdev->dev); } static int m_can_get_berr_counter(const struct net_device *dev, struct can_berr_counter *bec) { - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); int err; - err = m_can_clk_start(priv); + err = m_can_clk_start(cdev); if (err) return err; __m_can_get_berr_counter(dev, bec); - m_can_clk_stop(priv); + m_can_clk_stop(cdev); return 0; } @@ -667,7 +656,7 @@ static int m_can_get_berr_counter(const struct net_device *dev, static int m_can_handle_state_change(struct net_device *dev, enum can_state new_state) { - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; struct can_frame *cf; struct sk_buff *skb; @@ -677,19 +666,19 @@ static int m_can_handle_state_change(struct net_device *dev, switch (new_state) { case CAN_STATE_ERROR_ACTIVE: /* error warning state */ - priv->can.can_stats.error_warning++; - priv->can.state = CAN_STATE_ERROR_WARNING; + cdev->can.can_stats.error_warning++; + 
cdev->can.state = CAN_STATE_ERROR_WARNING; break; case CAN_STATE_ERROR_PASSIVE: /* error passive state */ - priv->can.can_stats.error_passive++; - priv->can.state = CAN_STATE_ERROR_PASSIVE; + cdev->can.can_stats.error_passive++; + cdev->can.state = CAN_STATE_ERROR_PASSIVE; break; case CAN_STATE_BUS_OFF: /* bus-off state */ - priv->can.state = CAN_STATE_BUS_OFF; - m_can_disable_all_interrupts(priv); - priv->can.can_stats.bus_off++; + cdev->can.state = CAN_STATE_BUS_OFF; + m_can_disable_all_interrupts(cdev); + cdev->can.can_stats.bus_off++; can_bus_off(dev); break; default: @@ -716,7 +705,7 @@ static int m_can_handle_state_change(struct net_device *dev, case CAN_STATE_ERROR_PASSIVE: /* error passive state */ cf->can_id |= CAN_ERR_CRTL; - ecr = m_can_read(priv, M_CAN_ECR); + ecr = m_can_read(cdev, M_CAN_ECR); if (ecr & ECR_RP) cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE; if (bec.txerr > 127) @@ -741,25 +730,22 @@ static int m_can_handle_state_change(struct net_device *dev, static int m_can_handle_state_errors(struct net_device *dev, u32 psr) { - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); int work_done = 0; - if ((psr & PSR_EW) && - (priv->can.state != CAN_STATE_ERROR_WARNING)) { + if (psr & PSR_EW && cdev->can.state != CAN_STATE_ERROR_WARNING) { netdev_dbg(dev, "entered error warning state\n"); work_done += m_can_handle_state_change(dev, CAN_STATE_ERROR_WARNING); } - if ((psr & PSR_EP) && - (priv->can.state != CAN_STATE_ERROR_PASSIVE)) { + if (psr & PSR_EP && cdev->can.state != CAN_STATE_ERROR_PASSIVE) { netdev_dbg(dev, "entered error passive state\n"); work_done += m_can_handle_state_change(dev, CAN_STATE_ERROR_PASSIVE); } - if ((psr & PSR_BO) && - (priv->can.state != CAN_STATE_BUS_OFF)) { + if (psr & PSR_BO && cdev->can.state != CAN_STATE_BUS_OFF) { netdev_dbg(dev, "entered error bus off state\n"); work_done += m_can_handle_state_change(dev, CAN_STATE_BUS_OFF); @@ -794,14 +780,14 @@ static inline bool is_lec_err(u32 psr) static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus, u32 psr) { - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); int work_done = 0; if (irqstatus & IR_RF0L) work_done += m_can_handle_lost_msg(dev); /* handle lec errors on the bus */ - if ((priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) && + if ((cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) && is_lec_err(psr)) work_done += m_can_handle_lec_err(dev, psr & LEC_UNUSED); @@ -811,14 +797,13 @@ static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus, return work_done; } -static int m_can_poll(struct napi_struct *napi, int quota) +static int m_can_rx_handler(struct net_device *dev, int quota) { - struct net_device *dev = napi->dev; - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); int work_done = 0; u32 irqstatus, psr; - irqstatus = priv->irqstatus | m_can_read(priv, M_CAN_IR); + irqstatus = cdev->irqstatus | m_can_read(cdev, M_CAN_IR); if (!irqstatus) goto end; @@ -832,18 +817,19 @@ static int m_can_poll(struct napi_struct *napi, int quota) * whether MCAN_ECR.RP = ’1’ and MCAN_ECR.REC = 127. * In this case, reset MCAN_IR.MRAF. No further action is required. 
*/ - if ((priv->version <= 31) && (irqstatus & IR_MRAF) && - (m_can_read(priv, M_CAN_ECR) & ECR_RP)) { + if (cdev->version <= 31 && irqstatus & IR_MRAF && + m_can_read(cdev, M_CAN_ECR) & ECR_RP) { struct can_berr_counter bec; __m_can_get_berr_counter(dev, &bec); if (bec.rxerr == 127) { - m_can_write(priv, M_CAN_IR, IR_MRAF); + m_can_write(cdev, M_CAN_IR, IR_MRAF); irqstatus &= ~IR_MRAF; } } - psr = m_can_read(priv, M_CAN_PSR); + psr = m_can_read(cdev, M_CAN_PSR); + if (irqstatus & IR_ERR_STATE) work_done += m_can_handle_state_errors(dev, psr); @@ -852,13 +838,33 @@ static int m_can_poll(struct napi_struct *napi, int quota) if (irqstatus & IR_RF0N) work_done += m_can_do_rx_poll(dev, (quota - work_done)); +end: + return work_done; +} + +static int m_can_rx_peripheral(struct net_device *dev) +{ + struct m_can_classdev *cdev = netdev_priv(dev); + + m_can_rx_handler(dev, 1); + + m_can_enable_all_interrupts(cdev); + + return 0; +} +static int m_can_poll(struct napi_struct *napi, int quota) +{ + struct net_device *dev = napi->dev; + struct m_can_classdev *cdev = netdev_priv(dev); + int work_done; + + work_done = m_can_rx_handler(dev, quota); if (work_done < quota) { napi_complete_done(napi, work_done); - m_can_enable_all_interrupts(priv); + m_can_enable_all_interrupts(cdev); } -end: return work_done; } @@ -870,11 +876,11 @@ static void m_can_echo_tx_event(struct net_device *dev) int i = 0; unsigned int msg_mark; - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; /* read tx event fifo status */ - m_can_txefs = m_can_read(priv, M_CAN_TXEFS); + m_can_txefs = m_can_read(cdev, M_CAN_TXEFS); /* Get Tx Event fifo element count */ txe_count = (m_can_txefs & TXEFS_EFFL_MASK) @@ -883,15 +889,15 @@ static void m_can_echo_tx_event(struct net_device *dev) /* Get and process all sent elements */ for (i = 0; i < txe_count; i++) { /* retrieve get index */ - fgi = (m_can_read(priv, M_CAN_TXEFS) & TXEFS_EFGI_MASK) + fgi = (m_can_read(cdev, M_CAN_TXEFS) & TXEFS_EFGI_MASK) >> TXEFS_EFGI_SHIFT; /* get message marker */ - msg_mark = (m_can_txe_fifo_read(priv, fgi, 4) & + msg_mark = (m_can_txe_fifo_read(cdev, fgi, 4) & TX_EVENT_MM_MASK) >> TX_EVENT_MM_SHIFT; /* ack txe element */ - m_can_write(priv, M_CAN_TXEFA, (TXEFA_EFAI_MASK & + m_can_write(cdev, M_CAN_TXEFA, (TXEFA_EFAI_MASK & (fgi << TXEFA_EFAI_SHIFT))); /* update stats */ @@ -903,17 +909,20 @@ static void m_can_echo_tx_event(struct net_device *dev) static irqreturn_t m_can_isr(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; u32 ir; - ir = m_can_read(priv, M_CAN_IR); + ir = m_can_read(cdev, M_CAN_IR); if (!ir) return IRQ_NONE; /* ACK all irqs */ if (ir & IR_ALL_INT) - m_can_write(priv, M_CAN_IR, ir); + m_can_write(cdev, M_CAN_IR, ir); + + if (cdev->ops->clear_interrupts) + cdev->ops->clear_interrupts(cdev); /* schedule NAPI in case of * - rx IRQ @@ -921,12 +930,15 @@ static irqreturn_t m_can_isr(int irq, void *dev_id) * - bus error IRQ and bus error reporting */ if ((ir & IR_RF0N) || (ir & IR_ERR_ALL_30X)) { - priv->irqstatus = ir; - m_can_disable_all_interrupts(priv); - napi_schedule(&priv->napi); + cdev->irqstatus = ir; + m_can_disable_all_interrupts(cdev); + if (!cdev->is_peripheral) + napi_schedule(&cdev->napi); + else + m_can_rx_peripheral(dev); } - if (priv->version == 30) { + if (cdev->version == 
30) { if (ir & IR_TC) { /* Transmission Complete Interrupt*/ stats->tx_bytes += can_get_echo_skb(dev, 0); @@ -940,7 +952,7 @@ static irqreturn_t m_can_isr(int irq, void *dev_id) m_can_echo_tx_event(dev); can_led_event(dev, CAN_LED_EVENT_TX); if (netif_queue_stopped(dev) && - !m_can_tx_fifo_full(priv)) + !m_can_tx_fifo_full(cdev)) netif_wake_queue(dev); } } @@ -998,9 +1010,9 @@ static const struct can_bittiming_const m_can_data_bittiming_const_31X = { static int m_can_set_bittiming(struct net_device *dev) { - struct m_can_priv *priv = netdev_priv(dev); - const struct can_bittiming *bt = &priv->can.bittiming; - const struct can_bittiming *dbt = &priv->can.data_bittiming; + struct m_can_classdev *cdev = netdev_priv(dev); + const struct can_bittiming *bt = &cdev->can.bittiming; + const struct can_bittiming *dbt = &cdev->can.data_bittiming; u16 brp, sjw, tseg1, tseg2; u32 reg_btp; @@ -1010,9 +1022,9 @@ static int m_can_set_bittiming(struct net_device *dev) tseg2 = bt->phase_seg2 - 1; reg_btp = (brp << NBTP_NBRP_SHIFT) | (sjw << NBTP_NSJW_SHIFT) | (tseg1 << NBTP_NTSEG1_SHIFT) | (tseg2 << NBTP_NTSEG2_SHIFT); - m_can_write(priv, M_CAN_NBTP, reg_btp); + m_can_write(cdev, M_CAN_NBTP, reg_btp); - if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { + if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) { reg_btp = 0; brp = dbt->brp - 1; sjw = dbt->sjw - 1; @@ -1034,7 +1046,7 @@ static int m_can_set_bittiming(struct net_device *dev) /* Equation based on Bosch's M_CAN User Manual's * Transmitter Delay Compensation Section */ - tdco = (priv->can.clock.freq / 1000) * + tdco = (cdev->can.clock.freq / 1000) * ssp / dbt->bitrate; /* Max valid TDCO value is 127 */ @@ -1045,7 +1057,7 @@ static int m_can_set_bittiming(struct net_device *dev) } reg_btp |= DBTP_TDC; - m_can_write(priv, M_CAN_TDCR, + m_can_write(cdev, M_CAN_TDCR, tdco << TDCR_TDCO_SHIFT); } @@ -1054,7 +1066,7 @@ static int m_can_set_bittiming(struct net_device *dev) (tseg1 << DBTP_DTSEG1_SHIFT) | (tseg2 << DBTP_DTSEG2_SHIFT); - m_can_write(priv, M_CAN_DBTP, reg_btp); + m_can_write(cdev, M_CAN_DBTP, reg_btp); } return 0; @@ -1071,63 +1083,63 @@ static int m_can_set_bittiming(struct net_device *dev) */ static void m_can_chip_config(struct net_device *dev) { - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); u32 cccr, test; - m_can_config_endisable(priv, true); + m_can_config_endisable(cdev, true); /* RX Buffer/FIFO Element Size 64 bytes data field */ - m_can_write(priv, M_CAN_RXESC, M_CAN_RXESC_64BYTES); + m_can_write(cdev, M_CAN_RXESC, M_CAN_RXESC_64BYTES); /* Accept Non-matching Frames Into FIFO 0 */ - m_can_write(priv, M_CAN_GFC, 0x0); + m_can_write(cdev, M_CAN_GFC, 0x0); - if (priv->version == 30) { + if (cdev->version == 30) { /* only support one Tx Buffer currently */ - m_can_write(priv, M_CAN_TXBC, (1 << TXBC_NDTB_SHIFT) | - priv->mcfg[MRAM_TXB].off); + m_can_write(cdev, M_CAN_TXBC, (1 << TXBC_NDTB_SHIFT) | + cdev->mcfg[MRAM_TXB].off); } else { /* TX FIFO is used for newer IP Core versions */ - m_can_write(priv, M_CAN_TXBC, - (priv->mcfg[MRAM_TXB].num << TXBC_TFQS_SHIFT) | - (priv->mcfg[MRAM_TXB].off)); + m_can_write(cdev, M_CAN_TXBC, + (cdev->mcfg[MRAM_TXB].num << TXBC_TFQS_SHIFT) | + (cdev->mcfg[MRAM_TXB].off)); } /* support 64 bytes payload */ - m_can_write(priv, M_CAN_TXESC, TXESC_TBDS_64BYTES); + m_can_write(cdev, M_CAN_TXESC, TXESC_TBDS_64BYTES); /* TX Event FIFO */ - if (priv->version == 30) { - m_can_write(priv, M_CAN_TXEFC, (1 << TXEFC_EFS_SHIFT) | - priv->mcfg[MRAM_TXE].off); + if (cdev->version == 30) { + 
m_can_write(cdev, M_CAN_TXEFC, (1 << TXEFC_EFS_SHIFT) | + cdev->mcfg[MRAM_TXE].off); } else { /* Full TX Event FIFO is used */ - m_can_write(priv, M_CAN_TXEFC, - ((priv->mcfg[MRAM_TXE].num << TXEFC_EFS_SHIFT) + m_can_write(cdev, M_CAN_TXEFC, + ((cdev->mcfg[MRAM_TXE].num << TXEFC_EFS_SHIFT) & TXEFC_EFS_MASK) | - priv->mcfg[MRAM_TXE].off); + cdev->mcfg[MRAM_TXE].off); } /* rx fifo configuration, blocking mode, fifo size 1 */ - m_can_write(priv, M_CAN_RXF0C, - (priv->mcfg[MRAM_RXF0].num << RXFC_FS_SHIFT) | - priv->mcfg[MRAM_RXF0].off); + m_can_write(cdev, M_CAN_RXF0C, + (cdev->mcfg[MRAM_RXF0].num << RXFC_FS_SHIFT) | + cdev->mcfg[MRAM_RXF0].off); - m_can_write(priv, M_CAN_RXF1C, - (priv->mcfg[MRAM_RXF1].num << RXFC_FS_SHIFT) | - priv->mcfg[MRAM_RXF1].off); + m_can_write(cdev, M_CAN_RXF1C, + (cdev->mcfg[MRAM_RXF1].num << RXFC_FS_SHIFT) | + cdev->mcfg[MRAM_RXF1].off); - cccr = m_can_read(priv, M_CAN_CCCR); - test = m_can_read(priv, M_CAN_TEST); + cccr = m_can_read(cdev, M_CAN_CCCR); + test = m_can_read(cdev, M_CAN_TEST); test &= ~TEST_LBCK; - if (priv->version == 30) { + if (cdev->version == 30) { /* Version 3.0.x */ cccr &= ~(CCCR_TEST | CCCR_MON | (CCCR_CMR_MASK << CCCR_CMR_SHIFT) | (CCCR_CME_MASK << CCCR_CME_SHIFT)); - if (priv->can.ctrlmode & CAN_CTRLMODE_FD) + if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) cccr |= CCCR_CME_CANFD_BRS << CCCR_CME_SHIFT; } else { @@ -1136,64 +1148,68 @@ static void m_can_chip_config(struct net_device *dev) CCCR_NISO); /* Only 3.2.x has NISO Bit implemented */ - if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) + if (cdev->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) cccr |= CCCR_NISO; - if (priv->can.ctrlmode & CAN_CTRLMODE_FD) + if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) cccr |= (CCCR_BRSE | CCCR_FDOE); } /* Loopback Mode */ - if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) { + if (cdev->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) { cccr |= CCCR_TEST | CCCR_MON; test |= TEST_LBCK; } /* Enable Monitoring (all versions) */ - if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) + if (cdev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) cccr |= CCCR_MON; /* Write config */ - m_can_write(priv, M_CAN_CCCR, cccr); - m_can_write(priv, M_CAN_TEST, test); + m_can_write(cdev, M_CAN_CCCR, cccr); + m_can_write(cdev, M_CAN_TEST, test); /* Enable interrupts */ - m_can_write(priv, M_CAN_IR, IR_ALL_INT); - if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) - if (priv->version == 30) - m_can_write(priv, M_CAN_IE, IR_ALL_INT & + m_can_write(cdev, M_CAN_IR, IR_ALL_INT); + if (!(cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) + if (cdev->version == 30) + m_can_write(cdev, M_CAN_IE, IR_ALL_INT & ~(IR_ERR_LEC_30X)); else - m_can_write(priv, M_CAN_IE, IR_ALL_INT & + m_can_write(cdev, M_CAN_IE, IR_ALL_INT & ~(IR_ERR_LEC_31X)); else - m_can_write(priv, M_CAN_IE, IR_ALL_INT); + m_can_write(cdev, M_CAN_IE, IR_ALL_INT); /* route all interrupts to INT0 */ - m_can_write(priv, M_CAN_ILS, ILS_ALL_INT0); + m_can_write(cdev, M_CAN_ILS, ILS_ALL_INT0); /* set bittiming params */ m_can_set_bittiming(dev); - m_can_config_endisable(priv, false); + m_can_config_endisable(cdev, false); + + if (cdev->ops->init) + cdev->ops->init(cdev); } static void m_can_start(struct net_device *dev) { - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); /* basic m_can configuration */ m_can_chip_config(dev); - priv->can.state = CAN_STATE_ERROR_ACTIVE; + cdev->can.state = CAN_STATE_ERROR_ACTIVE; - m_can_enable_all_interrupts(priv); + m_can_enable_all_interrupts(cdev); } static int 
m_can_set_mode(struct net_device *dev, enum can_mode mode) { switch (mode) { case CAN_MODE_START: + m_can_clean(dev); m_can_start(dev); netif_wake_queue(dev); break; @@ -1209,20 +1225,17 @@ static int m_can_set_mode(struct net_device *dev, enum can_mode mode) * else it returns the release and step coded as: * return value = 10 * <release> + 1 * <step> */ -static int m_can_check_core_release(void __iomem *m_can_base) +static int m_can_check_core_release(struct m_can_classdev *cdev) { u32 crel_reg; u8 rel; u8 step; int res; - struct m_can_priv temp_priv = { - .base = m_can_base - }; /* Read Core Release Version and split into version number * Example: Version 3.2.1 => rel = 3; step = 2; substep = 1; */ - crel_reg = m_can_read(&temp_priv, M_CAN_CREL); + crel_reg = m_can_read(cdev, M_CAN_CREL); rel = (u8)((crel_reg & CREL_REL_MASK) >> CREL_REL_SHIFT); step = (u8)((crel_reg & CREL_STEP_MASK) >> CREL_STEP_SHIFT); @@ -1240,152 +1253,142 @@ static int m_can_check_core_release(void __iomem *m_can_base) /* Selectable Non ISO support only in version 3.2.x * This function checks if the bit is writable. */ -static bool m_can_niso_supported(const struct m_can_priv *priv) +static bool m_can_niso_supported(struct m_can_classdev *cdev) { - u32 cccr_reg, cccr_poll; - int niso_timeout; + u32 cccr_reg, cccr_poll = 0; + int niso_timeout = -ETIMEDOUT; + int i; - m_can_config_endisable(priv, true); - cccr_reg = m_can_read(priv, M_CAN_CCCR); + m_can_config_endisable(cdev, true); + cccr_reg = m_can_read(cdev, M_CAN_CCCR); cccr_reg |= CCCR_NISO; - m_can_write(priv, M_CAN_CCCR, cccr_reg); + m_can_write(cdev, M_CAN_CCCR, cccr_reg); - niso_timeout = readl_poll_timeout((priv->base + M_CAN_CCCR), cccr_poll, - (cccr_poll == cccr_reg), 0, 10); + for (i = 0; i <= 10; i++) { + cccr_poll = m_can_read(cdev, M_CAN_CCCR); + if (cccr_poll == cccr_reg) { + niso_timeout = 0; + break; + } + + usleep_range(1, 5); + } /* Clear NISO */ cccr_reg &= ~(CCCR_NISO); - m_can_write(priv, M_CAN_CCCR, cccr_reg); + m_can_write(cdev, M_CAN_CCCR, cccr_reg); - m_can_config_endisable(priv, false); + m_can_config_endisable(cdev, false); /* return false if time out (-ETIMEDOUT), else return true */ return !niso_timeout; } -static int m_can_dev_setup(struct platform_device *pdev, struct net_device *dev, - void __iomem *addr) +static int m_can_dev_setup(struct m_can_classdev *m_can_dev) { - struct m_can_priv *priv; + struct net_device *dev = m_can_dev->net; int m_can_version; - m_can_version = m_can_check_core_release(addr); + m_can_version = m_can_check_core_release(m_can_dev); /* return if unsupported version */ if (!m_can_version) { - dev_err(&pdev->dev, "Unsupported version number: %2d", + dev_err(m_can_dev->dev, "Unsupported version number: %2d", m_can_version); return -EINVAL; } - priv = netdev_priv(dev); - netif_napi_add(dev, &priv->napi, m_can_poll, M_CAN_NAPI_WEIGHT); + if (!m_can_dev->is_peripheral) + netif_napi_add(dev, &m_can_dev->napi, + m_can_poll, M_CAN_NAPI_WEIGHT); /* Shared properties of all M_CAN versions */ - priv->version = m_can_version; - priv->dev = dev; - priv->base = addr; - priv->can.do_set_mode = m_can_set_mode; - priv->can.do_get_berr_counter = m_can_get_berr_counter; + m_can_dev->version = m_can_version; + m_can_dev->can.do_set_mode = m_can_set_mode; + m_can_dev->can.do_get_berr_counter = m_can_get_berr_counter; /* Set M_CAN supported operations */ - priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | + m_can_dev->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING | 
CAN_CTRLMODE_FD; /* Set properties depending on M_CAN version */ - switch (priv->version) { + switch (m_can_dev->version) { case 30: /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.x */ can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO); - priv->can.bittiming_const = &m_can_bittiming_const_30X; - priv->can.data_bittiming_const = - &m_can_data_bittiming_const_30X; + m_can_dev->can.bittiming_const = m_can_dev->bit_timing ? + m_can_dev->bit_timing : &m_can_bittiming_const_30X; + + m_can_dev->can.data_bittiming_const = m_can_dev->data_timing ? + m_can_dev->data_timing : + &m_can_data_bittiming_const_30X; break; case 31: /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */ can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO); - priv->can.bittiming_const = &m_can_bittiming_const_31X; - priv->can.data_bittiming_const = - &m_can_data_bittiming_const_31X; + m_can_dev->can.bittiming_const = m_can_dev->bit_timing ? + m_can_dev->bit_timing : &m_can_bittiming_const_31X; + + m_can_dev->can.data_bittiming_const = m_can_dev->data_timing ? + m_can_dev->data_timing : + &m_can_data_bittiming_const_31X; break; case 32: - priv->can.bittiming_const = &m_can_bittiming_const_31X; - priv->can.data_bittiming_const = - &m_can_data_bittiming_const_31X; - priv->can.ctrlmode_supported |= (m_can_niso_supported(priv) + m_can_dev->can.bittiming_const = m_can_dev->bit_timing ? + m_can_dev->bit_timing : &m_can_bittiming_const_31X; + + m_can_dev->can.data_bittiming_const = m_can_dev->data_timing ? + m_can_dev->data_timing : + &m_can_data_bittiming_const_31X; + + m_can_dev->can.ctrlmode_supported |= + (m_can_niso_supported(m_can_dev) ? CAN_CTRLMODE_FD_NON_ISO : 0); break; default: - dev_err(&pdev->dev, "Unsupported version number: %2d", - priv->version); + dev_err(m_can_dev->dev, "Unsupported version number: %2d", + m_can_dev->version); return -EINVAL; } - return 0; -} - -static int m_can_open(struct net_device *dev) -{ - struct m_can_priv *priv = netdev_priv(dev); - int err; - - err = m_can_clk_start(priv); - if (err) - return err; - - /* open the can device */ - err = open_candev(dev); - if (err) { - netdev_err(dev, "failed to open can device\n"); - goto exit_disable_clks; - } - - /* register interrupt handler */ - err = request_irq(dev->irq, m_can_isr, IRQF_SHARED, dev->name, - dev); - if (err < 0) { - netdev_err(dev, "failed to request interrupt\n"); - goto exit_irq_fail; - } - - /* start the m_can controller */ - m_can_start(dev); - - can_led_event(dev, CAN_LED_EVENT_OPEN); - napi_enable(&priv->napi); - netif_start_queue(dev); + if (m_can_dev->ops->init) + m_can_dev->ops->init(m_can_dev); return 0; - -exit_irq_fail: - close_candev(dev); -exit_disable_clks: - m_can_clk_stop(priv); - return err; } static void m_can_stop(struct net_device *dev) { - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); /* disable all interrupts */ - m_can_disable_all_interrupts(priv); + m_can_disable_all_interrupts(cdev); /* set the state as STOPPED */ - priv->can.state = CAN_STATE_STOPPED; + cdev->can.state = CAN_STATE_STOPPED; } static int m_can_close(struct net_device *dev) { - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); netif_stop_queue(dev); - napi_disable(&priv->napi); + + if (!cdev->is_peripheral) + napi_disable(&cdev->napi); + m_can_stop(dev); - m_can_clk_stop(priv); + m_can_clk_stop(cdev); free_irq(dev->irq, dev); + + if (cdev->is_peripheral) { + cdev->tx_skb = NULL; + destroy_workqueue(cdev->tx_wq); + cdev->tx_wq = NULL; + } 
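+
+	/* The irq has been freed and, for peripheral devices, the tx
+	 * workqueue destroyed above, so no handler can run against the
+	 * device once it is closed below.
+	 */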
+ close_candev(dev); can_led_event(dev, CAN_LED_EVENT_STOP); @@ -1394,30 +1397,27 @@ static int m_can_close(struct net_device *dev) static int m_can_next_echo_skb_occupied(struct net_device *dev, int putidx) { - struct m_can_priv *priv = netdev_priv(dev); + struct m_can_classdev *cdev = netdev_priv(dev); /*get wrap around for loopback skb index */ - unsigned int wrap = priv->can.echo_skb_max; + unsigned int wrap = cdev->can.echo_skb_max; int next_idx; /* calculate next index */ next_idx = (++putidx >= wrap ? 0 : putidx); /* check if occupied */ - return !!priv->can.echo_skb[next_idx]; + return !!cdev->can.echo_skb[next_idx]; } -static netdev_tx_t m_can_start_xmit(struct sk_buff *skb, - struct net_device *dev) +static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev) { - struct m_can_priv *priv = netdev_priv(dev); - struct canfd_frame *cf = (struct canfd_frame *)skb->data; + struct canfd_frame *cf = (struct canfd_frame *)cdev->tx_skb->data; + struct net_device *dev = cdev->net; + struct sk_buff *skb = cdev->tx_skb; u32 id, cccr, fdflags; int i; int putidx; - if (can_dropped_invalid_skb(dev, skb)) - return NETDEV_TX_OK; - /* Generate ID field for TX buffer Element */ /* Common to all supported M_CAN versions */ if (cf->can_id & CAN_EFF_FLAG) { @@ -1430,23 +1430,23 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb, if (cf->can_id & CAN_RTR_FLAG) id |= TX_BUF_RTR; - if (priv->version == 30) { + if (cdev->version == 30) { netif_stop_queue(dev); /* message ram configuration */ - m_can_fifo_write(priv, 0, M_CAN_FIFO_ID, id); - m_can_fifo_write(priv, 0, M_CAN_FIFO_DLC, + m_can_fifo_write(cdev, 0, M_CAN_FIFO_ID, id); + m_can_fifo_write(cdev, 0, M_CAN_FIFO_DLC, can_len2dlc(cf->len) << 16); for (i = 0; i < cf->len; i += 4) - m_can_fifo_write(priv, 0, + m_can_fifo_write(cdev, 0, M_CAN_FIFO_DATA(i / 4), *(u32 *)(cf->data + i)); can_put_echo_skb(skb, dev, 0); - if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { - cccr = m_can_read(priv, M_CAN_CCCR); + if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) { + cccr = m_can_read(cdev, M_CAN_CCCR); cccr &= ~(CCCR_CMR_MASK << CCCR_CMR_SHIFT); if (can_is_canfd_skb(skb)) { if (cf->flags & CANFD_BRS) @@ -1458,28 +1458,35 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb, } else { cccr |= CCCR_CMR_CAN << CCCR_CMR_SHIFT; } - m_can_write(priv, M_CAN_CCCR, cccr); + m_can_write(cdev, M_CAN_CCCR, cccr); } - m_can_write(priv, M_CAN_TXBTIE, 0x1); - m_can_write(priv, M_CAN_TXBAR, 0x1); + m_can_write(cdev, M_CAN_TXBTIE, 0x1); + m_can_write(cdev, M_CAN_TXBAR, 0x1); /* End of xmit function for version 3.0.x */ } else { /* Transmit routine for version >= v3.1.x */ /* Check if FIFO full */ - if (m_can_tx_fifo_full(priv)) { + if (m_can_tx_fifo_full(cdev)) { /* This shouldn't happen */ netif_stop_queue(dev); netdev_warn(dev, "TX queue active although FIFO is full."); - return NETDEV_TX_BUSY; + + if (cdev->is_peripheral) { + kfree_skb(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } else { + return NETDEV_TX_BUSY; + } } /* get put index for frame */ - putidx = ((m_can_read(priv, M_CAN_TXFQS) & TXFQS_TFQPI_MASK) + putidx = ((m_can_read(cdev, M_CAN_TXFQS) & TXFQS_TFQPI_MASK) >> TXFQS_TFQPI_SHIFT); /* Write ID Field to FIFO Element */ - m_can_fifo_write(priv, putidx, M_CAN_FIFO_ID, id); + m_can_fifo_write(cdev, putidx, M_CAN_FIFO_ID, id); /* get CAN FD configuration of frame */ fdflags = 0; @@ -1494,14 +1501,14 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb, * it is used in TX interrupt for * sending the correct echo frame */ - m_can_fifo_write(priv, 
putidx, M_CAN_FIFO_DLC, + m_can_fifo_write(cdev, putidx, M_CAN_FIFO_DLC, ((putidx << TX_BUF_MM_SHIFT) & TX_BUF_MM_MASK) | (can_len2dlc(cf->len) << 16) | fdflags | TX_BUF_EFC); for (i = 0; i < cf->len; i += 4) - m_can_fifo_write(priv, putidx, M_CAN_FIFO_DATA(i / 4), + m_can_fifo_write(cdev, putidx, M_CAN_FIFO_DATA(i / 4), *(u32 *)(cf->data + i)); /* Push loopback echo. @@ -1510,17 +1517,123 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb, can_put_echo_skb(skb, dev, putidx); /* Enable TX FIFO element to start transfer */ - m_can_write(priv, M_CAN_TXBAR, (1 << putidx)); + m_can_write(cdev, M_CAN_TXBAR, (1 << putidx)); /* stop network queue if fifo full */ - if (m_can_tx_fifo_full(priv) || - m_can_next_echo_skb_occupied(dev, putidx)) - netif_stop_queue(dev); + if (m_can_tx_fifo_full(cdev) || + m_can_next_echo_skb_occupied(dev, putidx)) + netif_stop_queue(dev); } return NETDEV_TX_OK; } +static void m_can_tx_work_queue(struct work_struct *ws) +{ + struct m_can_classdev *cdev = container_of(ws, struct m_can_classdev, + tx_work); + + m_can_tx_handler(cdev); + cdev->tx_skb = NULL; +} + +static netdev_tx_t m_can_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct m_can_classdev *cdev = netdev_priv(dev); + + if (can_dropped_invalid_skb(dev, skb)) + return NETDEV_TX_OK; + + if (cdev->is_peripheral) { + if (cdev->tx_skb) { + netdev_err(dev, "hard_xmit called while tx busy\n"); + return NETDEV_TX_BUSY; + } + + if (cdev->can.state == CAN_STATE_BUS_OFF) { + m_can_clean(dev); + } else { + /* Need to stop the queue to avoid numerous requests + * from being sent. Suggested improvement is to create + * a queueing mechanism that will queue the skbs and + * process them in order. + */ + cdev->tx_skb = skb; + netif_stop_queue(cdev->net); + queue_work(cdev->tx_wq, &cdev->tx_work); + } + } else { + cdev->tx_skb = skb; + return m_can_tx_handler(cdev); + } + + return NETDEV_TX_OK; +} + +static int m_can_open(struct net_device *dev) +{ + struct m_can_classdev *cdev = netdev_priv(dev); + int err; + + err = m_can_clk_start(cdev); + if (err) + return err; + + /* open the can device */ + err = open_candev(dev); + if (err) { + netdev_err(dev, "failed to open can device\n"); + goto exit_disable_clks; + } + + /* register interrupt handler */ + if (cdev->is_peripheral) { + cdev->tx_skb = NULL; + cdev->tx_wq = alloc_workqueue("mcan_wq", + WQ_FREEZABLE | WQ_MEM_RECLAIM, 0); + if (!cdev->tx_wq) { + err = -ENOMEM; + goto out_wq_fail; + } + + INIT_WORK(&cdev->tx_work, m_can_tx_work_queue); + + err = request_threaded_irq(dev->irq, NULL, m_can_isr, + IRQF_ONESHOT | IRQF_TRIGGER_FALLING, + dev->name, dev); + } else { + err = request_irq(dev->irq, m_can_isr, IRQF_SHARED, dev->name, + dev); + } + + if (err < 0) { + netdev_err(dev, "failed to request interrupt\n"); + goto exit_irq_fail; + } + + /* start the m_can controller */ + m_can_start(dev); + + can_led_event(dev, CAN_LED_EVENT_OPEN); + + if (!cdev->is_peripheral) + napi_enable(&cdev->napi); + + netif_start_queue(dev); + + return 0; + +exit_irq_fail: + if (cdev->is_peripheral) + destroy_workqueue(cdev->tx_wq); +out_wq_fail: + close_candev(dev); +exit_disable_clks: + m_can_clk_stop(cdev); + return err; +} + static const struct net_device_ops m_can_netdev_ops = { .ndo_open = m_can_open, .ndo_stop = m_can_close, @@ -1536,114 +1649,91 @@ static int register_m_can_dev(struct net_device *dev) return register_candev(dev); } -static void m_can_init_ram(struct m_can_priv *priv) -{ - int end, i, start; - - /* initialize the entire Message RAM in use to avoid 
possible - * ECC/parity checksum errors when reading an uninitialized buffer - */ - start = priv->mcfg[MRAM_SIDF].off; - end = priv->mcfg[MRAM_TXB].off + - priv->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE; - for (i = start; i < end; i += 4) - writel(0x0, priv->mram_base + i); -} - -static void m_can_of_parse_mram(struct m_can_priv *priv, +static void m_can_of_parse_mram(struct m_can_classdev *cdev, const u32 *mram_config_vals) { - priv->mcfg[MRAM_SIDF].off = mram_config_vals[0]; - priv->mcfg[MRAM_SIDF].num = mram_config_vals[1]; - priv->mcfg[MRAM_XIDF].off = priv->mcfg[MRAM_SIDF].off + - priv->mcfg[MRAM_SIDF].num * SIDF_ELEMENT_SIZE; - priv->mcfg[MRAM_XIDF].num = mram_config_vals[2]; - priv->mcfg[MRAM_RXF0].off = priv->mcfg[MRAM_XIDF].off + - priv->mcfg[MRAM_XIDF].num * XIDF_ELEMENT_SIZE; - priv->mcfg[MRAM_RXF0].num = mram_config_vals[3] & + cdev->mcfg[MRAM_SIDF].off = mram_config_vals[0]; + cdev->mcfg[MRAM_SIDF].num = mram_config_vals[1]; + cdev->mcfg[MRAM_XIDF].off = cdev->mcfg[MRAM_SIDF].off + + cdev->mcfg[MRAM_SIDF].num * SIDF_ELEMENT_SIZE; + cdev->mcfg[MRAM_XIDF].num = mram_config_vals[2]; + cdev->mcfg[MRAM_RXF0].off = cdev->mcfg[MRAM_XIDF].off + + cdev->mcfg[MRAM_XIDF].num * XIDF_ELEMENT_SIZE; + cdev->mcfg[MRAM_RXF0].num = mram_config_vals[3] & (RXFC_FS_MASK >> RXFC_FS_SHIFT); - priv->mcfg[MRAM_RXF1].off = priv->mcfg[MRAM_RXF0].off + - priv->mcfg[MRAM_RXF0].num * RXF0_ELEMENT_SIZE; - priv->mcfg[MRAM_RXF1].num = mram_config_vals[4] & + cdev->mcfg[MRAM_RXF1].off = cdev->mcfg[MRAM_RXF0].off + + cdev->mcfg[MRAM_RXF0].num * RXF0_ELEMENT_SIZE; + cdev->mcfg[MRAM_RXF1].num = mram_config_vals[4] & (RXFC_FS_MASK >> RXFC_FS_SHIFT); - priv->mcfg[MRAM_RXB].off = priv->mcfg[MRAM_RXF1].off + - priv->mcfg[MRAM_RXF1].num * RXF1_ELEMENT_SIZE; - priv->mcfg[MRAM_RXB].num = mram_config_vals[5]; - priv->mcfg[MRAM_TXE].off = priv->mcfg[MRAM_RXB].off + - priv->mcfg[MRAM_RXB].num * RXB_ELEMENT_SIZE; - priv->mcfg[MRAM_TXE].num = mram_config_vals[6]; - priv->mcfg[MRAM_TXB].off = priv->mcfg[MRAM_TXE].off + - priv->mcfg[MRAM_TXE].num * TXE_ELEMENT_SIZE; - priv->mcfg[MRAM_TXB].num = mram_config_vals[7] & + cdev->mcfg[MRAM_RXB].off = cdev->mcfg[MRAM_RXF1].off + + cdev->mcfg[MRAM_RXF1].num * RXF1_ELEMENT_SIZE; + cdev->mcfg[MRAM_RXB].num = mram_config_vals[5]; + cdev->mcfg[MRAM_TXE].off = cdev->mcfg[MRAM_RXB].off + + cdev->mcfg[MRAM_RXB].num * RXB_ELEMENT_SIZE; + cdev->mcfg[MRAM_TXE].num = mram_config_vals[6]; + cdev->mcfg[MRAM_TXB].off = cdev->mcfg[MRAM_TXE].off + + cdev->mcfg[MRAM_TXE].num * TXE_ELEMENT_SIZE; + cdev->mcfg[MRAM_TXB].num = mram_config_vals[7] & (TXBC_NDTB_MASK >> TXBC_NDTB_SHIFT); - dev_dbg(priv->device, - "mram_base %p sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n", - priv->mram_base, - priv->mcfg[MRAM_SIDF].off, priv->mcfg[MRAM_SIDF].num, - priv->mcfg[MRAM_XIDF].off, priv->mcfg[MRAM_XIDF].num, - priv->mcfg[MRAM_RXF0].off, priv->mcfg[MRAM_RXF0].num, - priv->mcfg[MRAM_RXF1].off, priv->mcfg[MRAM_RXF1].num, - priv->mcfg[MRAM_RXB].off, priv->mcfg[MRAM_RXB].num, - priv->mcfg[MRAM_TXE].off, priv->mcfg[MRAM_TXE].num, - priv->mcfg[MRAM_TXB].off, priv->mcfg[MRAM_TXB].num); - - m_can_init_ram(priv); + dev_dbg(cdev->dev, + "sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n", + cdev->mcfg[MRAM_SIDF].off, cdev->mcfg[MRAM_SIDF].num, + cdev->mcfg[MRAM_XIDF].off, cdev->mcfg[MRAM_XIDF].num, + cdev->mcfg[MRAM_RXF0].off, cdev->mcfg[MRAM_RXF0].num, + cdev->mcfg[MRAM_RXF1].off, cdev->mcfg[MRAM_RXF1].num, + cdev->mcfg[MRAM_RXB].off, 
cdev->mcfg[MRAM_RXB].num, + cdev->mcfg[MRAM_TXE].off, cdev->mcfg[MRAM_TXE].num, + cdev->mcfg[MRAM_TXB].off, cdev->mcfg[MRAM_TXB].num); } -static int m_can_plat_probe(struct platform_device *pdev) +void m_can_init_ram(struct m_can_classdev *cdev) { - struct net_device *dev; - struct m_can_priv *priv; - struct resource *res; - void __iomem *addr; - void __iomem *mram_addr; - struct clk *hclk, *cclk; - int irq, ret; - struct device_node *np; - u32 mram_config_vals[MRAM_CFG_LEN]; - u32 tx_fifo_size; - - np = pdev->dev.of_node; + int end, i, start; - hclk = devm_clk_get(&pdev->dev, "hclk"); - cclk = devm_clk_get(&pdev->dev, "cclk"); + /* initialize the entire Message RAM in use to avoid possible + * ECC/parity checksum errors when reading an uninitialized buffer + */ + start = cdev->mcfg[MRAM_SIDF].off; + end = cdev->mcfg[MRAM_TXB].off + + cdev->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE; - if (IS_ERR(hclk) || IS_ERR(cclk)) { - dev_err(&pdev->dev, "no clock found\n"); - ret = -ENODEV; - goto failed_ret; - } + for (i = start; i < end; i += 4) + m_can_fifo_write_no_off(cdev, i, 0x0); +} +EXPORT_SYMBOL_GPL(m_can_init_ram); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "m_can"); - addr = devm_ioremap_resource(&pdev->dev, res); - irq = platform_get_irq_byname(pdev, "int0"); +int m_can_class_get_clocks(struct m_can_classdev *m_can_dev) +{ + int ret = 0; - if (IS_ERR(addr) || irq < 0) { - ret = -EINVAL; - goto failed_ret; - } + m_can_dev->hclk = devm_clk_get(m_can_dev->dev, "hclk"); + m_can_dev->cclk = devm_clk_get(m_can_dev->dev, "cclk"); - /* message ram could be shared */ - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram"); - if (!res) { + if (IS_ERR(m_can_dev->cclk)) { + dev_err(m_can_dev->dev, "no clock found\n"); ret = -ENODEV; - goto failed_ret; } - mram_addr = devm_ioremap(&pdev->dev, res->start, resource_size(res)); - if (!mram_addr) { - ret = -ENOMEM; - goto failed_ret; - } + return ret; +} +EXPORT_SYMBOL_GPL(m_can_class_get_clocks); - /* get message ram configuration */ - ret = of_property_read_u32_array(np, "bosch,mram-cfg", - mram_config_vals, - sizeof(mram_config_vals) / 4); +struct m_can_classdev *m_can_class_allocate_dev(struct device *dev) +{ + struct m_can_classdev *class_dev = NULL; + u32 mram_config_vals[MRAM_CFG_LEN]; + struct net_device *net_dev; + u32 tx_fifo_size; + int ret; + + ret = fwnode_property_read_u32_array(dev_fwnode(dev), + "bosch,mram-cfg", + mram_config_vals, + sizeof(mram_config_vals) / 4); if (ret) { - dev_err(&pdev->dev, "Could not get Message RAM configuration."); - goto failed_ret; + dev_err(dev, "Could not get Message RAM configuration."); + goto out; } /* Get TX FIFO size @@ -1652,101 +1742,110 @@ static int m_can_plat_probe(struct platform_device *pdev) tx_fifo_size = mram_config_vals[7]; /* allocate the m_can device */ - dev = alloc_candev(sizeof(*priv), tx_fifo_size); - if (!dev) { - ret = -ENOMEM; - goto failed_ret; + net_dev = alloc_candev(sizeof(*class_dev), tx_fifo_size); + if (!net_dev) { + dev_err(dev, "Failed to allocate CAN device"); + goto out; } - priv = netdev_priv(dev); - dev->irq = irq; - priv->device = &pdev->dev; - priv->hclk = hclk; - priv->cclk = cclk; - priv->can.clock.freq = clk_get_rate(cclk); - priv->mram_base = mram_addr; + class_dev = netdev_priv(net_dev); + if (!class_dev) { + dev_err(dev, "Failed to init netdev private data"); + goto out; + } - platform_set_drvdata(pdev, dev); - SET_NETDEV_DEV(dev, &pdev->dev); + class_dev->net = net_dev; + class_dev->dev = dev; + SET_NETDEV_DEV(net_dev, dev); - /* Enable 
clocks. Necessary to read Core Release in order to determine - * M_CAN version - */ - pm_runtime_enable(&pdev->dev); - ret = m_can_clk_start(priv); - if (ret) - goto pm_runtime_fail; + m_can_of_parse_mram(class_dev, mram_config_vals); +out: + return class_dev; +} +EXPORT_SYMBOL_GPL(m_can_class_allocate_dev); + +int m_can_class_register(struct m_can_classdev *m_can_dev) +{ + int ret; - ret = m_can_dev_setup(pdev, dev, addr); + if (m_can_dev->pm_clock_support) { + pm_runtime_enable(m_can_dev->dev); + ret = m_can_clk_start(m_can_dev); + if (ret) + goto pm_runtime_fail; + } + + ret = m_can_dev_setup(m_can_dev); if (ret) goto clk_disable; - ret = register_m_can_dev(dev); + ret = register_m_can_dev(m_can_dev->net); if (ret) { - dev_err(&pdev->dev, "registering %s failed (err=%d)\n", - KBUILD_MODNAME, ret); + dev_err(m_can_dev->dev, "registering %s failed (err=%d)\n", + m_can_dev->net->name, ret); goto clk_disable; } - m_can_of_parse_mram(priv, mram_config_vals); - - devm_can_led_init(dev); + devm_can_led_init(m_can_dev->net); - of_can_transceiver(dev); + of_can_transceiver(m_can_dev->net); - dev_info(&pdev->dev, "%s device registered (irq=%d, version=%d)\n", - KBUILD_MODNAME, dev->irq, priv->version); + dev_info(m_can_dev->dev, "%s device registered (irq=%d, version=%d)\n", + KBUILD_MODNAME, m_can_dev->net->irq, m_can_dev->version); /* Probe finished * Stop clocks. They will be reactivated once the M_CAN device is opened */ clk_disable: - m_can_clk_stop(priv); + m_can_clk_stop(m_can_dev); pm_runtime_fail: if (ret) { - pm_runtime_disable(&pdev->dev); - free_candev(dev); + if (m_can_dev->pm_clock_support) + pm_runtime_disable(m_can_dev->dev); + free_candev(m_can_dev->net); } -failed_ret: + return ret; } +EXPORT_SYMBOL_GPL(m_can_class_register); -static __maybe_unused int m_can_suspend(struct device *dev) +int m_can_class_suspend(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); - struct m_can_priv *priv = netdev_priv(ndev); + struct m_can_classdev *cdev = netdev_priv(ndev); if (netif_running(ndev)) { netif_stop_queue(ndev); netif_device_detach(ndev); m_can_stop(ndev); - m_can_clk_stop(priv); + m_can_clk_stop(cdev); } pinctrl_pm_select_sleep_state(dev); - priv->can.state = CAN_STATE_SLEEPING; + cdev->can.state = CAN_STATE_SLEEPING; return 0; } +EXPORT_SYMBOL_GPL(m_can_class_suspend); -static __maybe_unused int m_can_resume(struct device *dev) +int m_can_class_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); - struct m_can_priv *priv = netdev_priv(ndev); + struct m_can_classdev *cdev = netdev_priv(ndev); pinctrl_pm_select_default_state(dev); - priv->can.state = CAN_STATE_ERROR_ACTIVE; + cdev->can.state = CAN_STATE_ERROR_ACTIVE; if (netif_running(ndev)) { int ret; - ret = m_can_clk_start(priv); + ret = m_can_clk_start(cdev); if (ret) return ret; - m_can_init_ram(priv); + m_can_init_ram(cdev); m_can_start(ndev); netif_device_attach(ndev); netif_start_queue(ndev); @@ -1754,79 +1853,19 @@ static __maybe_unused int m_can_resume(struct device *dev) return 0; } +EXPORT_SYMBOL_GPL(m_can_class_resume); -static void unregister_m_can_dev(struct net_device *dev) +void m_can_class_unregister(struct m_can_classdev *m_can_dev) { - unregister_candev(dev); -} + unregister_candev(m_can_dev->net); -static int m_can_plat_remove(struct platform_device *pdev) -{ - struct net_device *dev = platform_get_drvdata(pdev); + m_can_clk_stop(m_can_dev); - unregister_m_can_dev(dev); - - pm_runtime_disable(&pdev->dev); - - platform_set_drvdata(pdev, NULL); - - free_candev(dev); - - 
return 0; -} - -static int __maybe_unused m_can_runtime_suspend(struct device *dev) -{ - struct net_device *ndev = dev_get_drvdata(dev); - struct m_can_priv *priv = netdev_priv(ndev); - - clk_disable_unprepare(priv->cclk); - clk_disable_unprepare(priv->hclk); - - return 0; -} - -static int __maybe_unused m_can_runtime_resume(struct device *dev) -{ - struct net_device *ndev = dev_get_drvdata(dev); - struct m_can_priv *priv = netdev_priv(ndev); - int err; - - err = clk_prepare_enable(priv->hclk); - if (err) - return err; - - err = clk_prepare_enable(priv->cclk); - if (err) - clk_disable_unprepare(priv->hclk); - - return err; + free_candev(m_can_dev->net); } - -static const struct dev_pm_ops m_can_pmops = { - SET_RUNTIME_PM_OPS(m_can_runtime_suspend, - m_can_runtime_resume, NULL) - SET_SYSTEM_SLEEP_PM_OPS(m_can_suspend, m_can_resume) -}; - -static const struct of_device_id m_can_of_table[] = { - { .compatible = "bosch,m_can", .data = NULL }, - { /* sentinel */ }, -}; -MODULE_DEVICE_TABLE(of, m_can_of_table); - -static struct platform_driver m_can_plat_driver = { - .driver = { - .name = KBUILD_MODNAME, - .of_match_table = m_can_of_table, - .pm = &m_can_pmops, - }, - .probe = m_can_plat_probe, - .remove = m_can_plat_remove, -}; - -module_platform_driver(m_can_plat_driver); +EXPORT_SYMBOL_GPL(m_can_class_unregister); MODULE_AUTHOR("Dong Aisheng <b29396@freescale.com>"); +MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>"); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("CAN bus driver for Bosch M_CAN controller"); diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h new file mode 100644 index 000000000000..49f42b50627a --- /dev/null +++ b/drivers/net/can/m_can/m_can.h @@ -0,0 +1,110 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* CAN bus driver for Bosch M_CAN controller + * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/ + */ + +#ifndef _CAN_M_CAN_H_ +#define _CAN_M_CAN_H_ + +#include <linux/can/core.h> +#include <linux/can/led.h> +#include <linux/completion.h> +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/freezer.h> +#include <linux/slab.h> +#include <linux/uaccess.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/pm_runtime.h> +#include <linux/iopoll.h> +#include <linux/can/dev.h> +#include <linux/pinctrl/consumer.h> + +/* m_can lec values */ +enum m_can_lec_type { + LEC_NO_ERROR = 0, + LEC_STUFF_ERROR, + LEC_FORM_ERROR, + LEC_ACK_ERROR, + LEC_BIT1_ERROR, + LEC_BIT0_ERROR, + LEC_CRC_ERROR, + LEC_UNUSED, +}; + +enum m_can_mram_cfg { + MRAM_SIDF = 0, + MRAM_XIDF, + MRAM_RXF0, + MRAM_RXF1, + MRAM_RXB, + MRAM_TXE, + MRAM_TXB, + MRAM_CFG_NUM, +}; + +/* address offset and element number for each FIFO/Buffer in the Message RAM */ +struct mram_cfg { + u16 off; + u8 num; +}; + +struct m_can_classdev; +struct m_can_ops { + /* Device specific call backs */ + int (*clear_interrupts)(struct m_can_classdev *cdev); + u32 (*read_reg)(struct m_can_classdev *cdev, int reg); + int (*write_reg)(struct m_can_classdev *cdev, int reg, int val); + u32 (*read_fifo)(struct m_can_classdev *cdev, int addr_offset); + int (*write_fifo)(struct m_can_classdev *cdev, int addr_offset, + int val); + int (*init)(struct m_can_classdev *cdev); +}; + +struct m_can_classdev { + struct can_priv can; + struct napi_struct napi; + struct net_device *net; + struct 
device *dev; + struct clk *hclk; + struct clk *cclk; + + struct workqueue_struct *tx_wq; + struct work_struct tx_work; + struct sk_buff *tx_skb; + + struct can_bittiming_const *bit_timing; + struct can_bittiming_const *data_timing; + + struct m_can_ops *ops; + + void *device_data; + + int version; + int freq; + u32 irqstatus; + + int pm_clock_support; + int is_peripheral; + + struct mram_cfg mcfg[MRAM_CFG_NUM]; +}; + +struct m_can_classdev *m_can_class_allocate_dev(struct device *dev); +int m_can_class_register(struct m_can_classdev *cdev); +void m_can_class_unregister(struct m_can_classdev *cdev); +int m_can_class_get_clocks(struct m_can_classdev *cdev); +void m_can_init_ram(struct m_can_classdev *priv); +void m_can_config_endisable(struct m_can_classdev *priv, bool enable); + +int m_can_class_suspend(struct device *dev); +int m_can_class_resume(struct device *dev); +#endif /* _CAN_M_CAN_H_ */ diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c new file mode 100644 index 000000000000..6ac4c35f247a --- /dev/null +++ b/drivers/net/can/m_can/m_can_platform.c @@ -0,0 +1,201 @@ +// SPDX-License-Identifier: GPL-2.0 +// IOMapped CAN bus driver for Bosch M_CAN controller +// Copyright (C) 2014 Freescale Semiconductor, Inc. +// Dong Aisheng <b29396@freescale.com> +// +// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/ + +#include <linux/platform_device.h> + +#include "m_can.h" + +struct m_can_plat_priv { + void __iomem *base; + void __iomem *mram_base; +}; + +static u32 iomap_read_reg(struct m_can_classdev *cdev, int reg) +{ + struct m_can_plat_priv *priv = cdev->device_data; + + return readl(priv->base + reg); +} + +static u32 iomap_read_fifo(struct m_can_classdev *cdev, int offset) +{ + struct m_can_plat_priv *priv = cdev->device_data; + + return readl(priv->mram_base + offset); +} + +static int iomap_write_reg(struct m_can_classdev *cdev, int reg, int val) +{ + struct m_can_plat_priv *priv = cdev->device_data; + + writel(val, priv->base + reg); + + return 0; +} + +static int iomap_write_fifo(struct m_can_classdev *cdev, int offset, int val) +{ + struct m_can_plat_priv *priv = cdev->device_data; + + writel(val, priv->mram_base + offset); + + return 0; +} + +static struct m_can_ops m_can_plat_ops = { + .read_reg = iomap_read_reg, + .write_reg = iomap_write_reg, + .write_fifo = iomap_write_fifo, + .read_fifo = iomap_read_fifo, +}; + +static int m_can_plat_probe(struct platform_device *pdev) +{ + struct m_can_classdev *mcan_class; + struct m_can_plat_priv *priv; + struct resource *res; + void __iomem *addr; + void __iomem *mram_addr; + int irq, ret = 0; + + mcan_class = m_can_class_allocate_dev(&pdev->dev); + if (!mcan_class) + return -ENOMEM; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + mcan_class->device_data = priv; + + m_can_class_get_clocks(mcan_class); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "m_can"); + addr = devm_ioremap_resource(&pdev->dev, res); + irq = platform_get_irq_byname(pdev, "int0"); + if (IS_ERR(addr) || irq < 0) { + ret = -EINVAL; + goto failed_ret; + } + + /* message ram could be shared */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram"); + if (!res) { + ret = -ENODEV; + goto failed_ret; + } + + mram_addr = devm_ioremap(&pdev->dev, res->start, resource_size(res)); + if (!mram_addr) { + ret = -ENOMEM; + goto failed_ret; + } + + priv->base = addr; + priv->mram_base = mram_addr; + + mcan_class->net->irq = irq; + 
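/* The two fields just below steer the shared m_can core: setting
+ * pm_clock_support makes m_can_class_register() enable runtime PM and
+ * start hclk/cclk, while is_peripheral = false keeps the NAPI poll path
+ * and the direct TX path, since readl()/writel() in this back-end never
+ * sleep. A sketch of the opposite configuration, as the SPI-attached
+ * tcan4x5x driver below wires it up ("my_class" is illustrative, not a
+ * name from this patch):
+ *
+ *	my_class->pm_clock_support = 0;	(clocks stay under bus control)
+ *	my_class->is_peripheral = true;	(TX is deferred to cdev->tx_wq)
+ */
+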
mcan_class->pm_clock_support = 1; + mcan_class->can.clock.freq = clk_get_rate(mcan_class->cclk); + mcan_class->dev = &pdev->dev; + + mcan_class->ops = &m_can_plat_ops; + + mcan_class->is_peripheral = false; + + platform_set_drvdata(pdev, mcan_class->net); + + m_can_init_ram(mcan_class); + + ret = m_can_class_register(mcan_class); + +failed_ret: + return ret; +} + +static __maybe_unused int m_can_suspend(struct device *dev) +{ + return m_can_class_suspend(dev); +} + +static __maybe_unused int m_can_resume(struct device *dev) +{ + return m_can_class_resume(dev); +} + +static int m_can_plat_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct m_can_classdev *mcan_class = netdev_priv(dev); + + m_can_class_unregister(mcan_class); + + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static int __maybe_unused m_can_runtime_suspend(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct m_can_classdev *mcan_class = netdev_priv(ndev); + + m_can_class_suspend(dev); + + clk_disable_unprepare(mcan_class->cclk); + clk_disable_unprepare(mcan_class->hclk); + + return 0; +} + +static int __maybe_unused m_can_runtime_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct m_can_classdev *mcan_class = netdev_priv(ndev); + int err; + + err = clk_prepare_enable(mcan_class->hclk); + if (err) + return err; + + err = clk_prepare_enable(mcan_class->cclk); + if (err) + clk_disable_unprepare(mcan_class->hclk); + + m_can_class_resume(dev); + + return err; +} + +static const struct dev_pm_ops m_can_pmops = { + SET_RUNTIME_PM_OPS(m_can_runtime_suspend, + m_can_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(m_can_suspend, m_can_resume) +}; + +static const struct of_device_id m_can_of_table[] = { + { .compatible = "bosch,m_can", .data = NULL }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, m_can_of_table); + +static struct platform_driver m_can_plat_driver = { + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = m_can_of_table, + .pm = &m_can_pmops, + }, + .probe = m_can_plat_probe, + .remove = m_can_plat_remove, +}; + +module_platform_driver(m_can_plat_driver); + +MODULE_AUTHOR("Dong Aisheng <b29396@freescale.com>"); +MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>"); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("M_CAN driver for IO Mapped Bosch controllers"); diff --git a/drivers/net/can/m_can/tcan4x5x.c b/drivers/net/can/m_can/tcan4x5x.c new file mode 100644 index 000000000000..3db619209fe1 --- /dev/null +++ b/drivers/net/can/m_can/tcan4x5x.c @@ -0,0 +1,505 @@ +// SPDX-License-Identifier: GPL-2.0 +// SPI to CAN driver for the Texas Instruments TCAN4x5x +// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/ + +#include <linux/regmap.h> +#include <linux/spi/spi.h> + +#include <linux/regulator/consumer.h> +#include <linux/gpio/consumer.h> + +#include "m_can.h" + +#define DEVICE_NAME "tcan4x5x" +#define TCAN4X5X_EXT_CLK_DEF 40000000 + +#define TCAN4X5X_DEV_ID0 0x00 +#define TCAN4X5X_DEV_ID1 0x04 +#define TCAN4X5X_REV 0x08 +#define TCAN4X5X_STATUS 0x0C +#define TCAN4X5X_ERROR_STATUS 0x10 +#define TCAN4X5X_CONTROL 0x14 + +#define TCAN4X5X_CONFIG 0x800 +#define TCAN4X5X_TS_PRESCALE 0x804 +#define TCAN4X5X_TEST_REG 0x808 +#define TCAN4X5X_INT_FLAGS 0x820 +#define TCAN4X5X_MCAN_INT_REG 0x824 +#define TCAN4X5X_INT_EN 0x830 + +/* Interrupt bits */ +#define TCAN4X5X_CANBUSTERMOPEN_INT_EN BIT(30) +#define TCAN4X5X_CANHCANL_INT_EN BIT(29) +#define TCAN4X5X_CANHBAT_INT_EN BIT(28) 
+#define TCAN4X5X_CANLGND_INT_EN BIT(27) +#define TCAN4X5X_CANBUSOPEN_INT_EN BIT(26) +#define TCAN4X5X_CANBUSGND_INT_EN BIT(25) +#define TCAN4X5X_CANBUSBAT_INT_EN BIT(24) +#define TCAN4X5X_UVSUP_INT_EN BIT(22) +#define TCAN4X5X_UVIO_INT_EN BIT(21) +#define TCAN4X5X_TSD_INT_EN BIT(19) +#define TCAN4X5X_ECCERR_INT_EN BIT(16) +#define TCAN4X5X_CANINT_INT_EN BIT(15) +#define TCAN4X5X_LWU_INT_EN BIT(14) +#define TCAN4X5X_CANSLNT_INT_EN BIT(10) +#define TCAN4X5X_CANDOM_INT_EN BIT(8) +#define TCAN4X5X_CANBUS_ERR_INT_EN BIT(5) +#define TCAN4X5X_BUS_FAULT BIT(4) +#define TCAN4X5X_MCAN_INT BIT(1) +#define TCAN4X5X_ENABLE_TCAN_INT \ + (TCAN4X5X_MCAN_INT | TCAN4X5X_BUS_FAULT | \ + TCAN4X5X_CANBUS_ERR_INT_EN | TCAN4X5X_CANINT_INT_EN) + +/* MCAN Interrupt bits */ +#define TCAN4X5X_MCAN_IR_ARA BIT(29) +#define TCAN4X5X_MCAN_IR_PED BIT(28) +#define TCAN4X5X_MCAN_IR_PEA BIT(27) +#define TCAN4X5X_MCAN_IR_WD BIT(26) +#define TCAN4X5X_MCAN_IR_BO BIT(25) +#define TCAN4X5X_MCAN_IR_EW BIT(24) +#define TCAN4X5X_MCAN_IR_EP BIT(23) +#define TCAN4X5X_MCAN_IR_ELO BIT(22) +#define TCAN4X5X_MCAN_IR_BEU BIT(21) +#define TCAN4X5X_MCAN_IR_BEC BIT(20) +#define TCAN4X5X_MCAN_IR_DRX BIT(19) +#define TCAN4X5X_MCAN_IR_TOO BIT(18) +#define TCAN4X5X_MCAN_IR_MRAF BIT(17) +#define TCAN4X5X_MCAN_IR_TSW BIT(16) +#define TCAN4X5X_MCAN_IR_TEFL BIT(15) +#define TCAN4X5X_MCAN_IR_TEFF BIT(14) +#define TCAN4X5X_MCAN_IR_TEFW BIT(13) +#define TCAN4X5X_MCAN_IR_TEFN BIT(12) +#define TCAN4X5X_MCAN_IR_TFE BIT(11) +#define TCAN4X5X_MCAN_IR_TCF BIT(10) +#define TCAN4X5X_MCAN_IR_TC BIT(9) +#define TCAN4X5X_MCAN_IR_HPM BIT(8) +#define TCAN4X5X_MCAN_IR_RF1L BIT(7) +#define TCAN4X5X_MCAN_IR_RF1F BIT(6) +#define TCAN4X5X_MCAN_IR_RF1W BIT(5) +#define TCAN4X5X_MCAN_IR_RF1N BIT(4) +#define TCAN4X5X_MCAN_IR_RF0L BIT(3) +#define TCAN4X5X_MCAN_IR_RF0F BIT(2) +#define TCAN4X5X_MCAN_IR_RF0W BIT(1) +#define TCAN4X5X_MCAN_IR_RF0N BIT(0) +#define TCAN4X5X_ENABLE_MCAN_INT \ + (TCAN4X5X_MCAN_IR_TC | TCAN4X5X_MCAN_IR_RF0N | \ + TCAN4X5X_MCAN_IR_RF1N | TCAN4X5X_MCAN_IR_RF0F | \ + TCAN4X5X_MCAN_IR_RF1F) + +#define TCAN4X5X_MRAM_START 0x8000 +#define TCAN4X5X_MCAN_OFFSET 0x1000 +#define TCAN4X5X_MAX_REGISTER 0x8fff + +#define TCAN4X5X_CLEAR_ALL_INT 0xffffffff +#define TCAN4X5X_SET_ALL_INT 0xffffffff + +#define TCAN4X5X_WRITE_CMD (0x61 << 24) +#define TCAN4X5X_READ_CMD (0x41 << 24) + +#define TCAN4X5X_MODE_SEL_MASK (BIT(7) | BIT(6)) +#define TCAN4X5X_MODE_SLEEP 0x00 +#define TCAN4X5X_MODE_STANDBY BIT(6) +#define TCAN4X5X_MODE_NORMAL BIT(7) + +#define TCAN4X5X_SW_RESET BIT(2) + +#define TCAN4X5X_MCAN_CONFIGURED BIT(5) +#define TCAN4X5X_WATCHDOG_EN BIT(3) +#define TCAN4X5X_WD_60_MS_TIMER 0 +#define TCAN4X5X_WD_600_MS_TIMER BIT(28) +#define TCAN4X5X_WD_3_S_TIMER BIT(29) +#define TCAN4X5X_WD_6_S_TIMER (BIT(28) | BIT(29)) + +struct tcan4x5x_priv { + struct regmap *regmap; + struct spi_device *spi; + + struct m_can_classdev *mcan_dev; + + struct gpio_desc *reset_gpio; + struct gpio_desc *device_wake_gpio; + struct gpio_desc *device_state_gpio; + struct regulator *power; + + /* Register based ip */ + int mram_start; + int reg_offset; +}; + +static struct can_bittiming_const tcan4x5x_bittiming_const = { + .name = DEVICE_NAME, + .tseg1_min = 2, + .tseg1_max = 31, + .tseg2_min = 2, + .tseg2_max = 16, + .sjw_max = 16, + .brp_min = 1, + .brp_max = 32, + .brp_inc = 1, +}; + +static struct can_bittiming_const tcan4x5x_data_bittiming_const = { + .name = DEVICE_NAME, + .tseg1_min = 1, + .tseg1_max = 32, + .tseg2_min = 1, + .tseg2_max = 16, + .sjw_max = 16, + .brp_min = 1, + .brp_max = 
32, + .brp_inc = 1, +}; + +static void tcan4x5x_check_wake(struct tcan4x5x_priv *priv) +{ + int wake_state = 0; + + if (priv->device_state_gpio) + wake_state = gpiod_get_value(priv->device_state_gpio); + + if (priv->device_wake_gpio && wake_state) { + gpiod_set_value(priv->device_wake_gpio, 0); + usleep_range(5, 50); + gpiod_set_value(priv->device_wake_gpio, 1); + } +} + +static int regmap_spi_gather_write(void *context, const void *reg, + size_t reg_len, const void *val, + size_t val_len) +{ + struct device *dev = context; + struct spi_device *spi = to_spi_device(dev); + struct spi_message m; + u32 addr; + struct spi_transfer t[2] = { + { .tx_buf = &addr, .len = reg_len, .cs_change = 0,}, + { .tx_buf = val, .len = val_len, }, + }; + + addr = TCAN4X5X_WRITE_CMD | (*((u16 *)reg) << 8) | val_len >> 2; + + spi_message_init(&m); + spi_message_add_tail(&t[0], &m); + spi_message_add_tail(&t[1], &m); + + return spi_sync(spi, &m); +} + +static int tcan4x5x_regmap_write(void *context, const void *data, size_t count) +{ + u16 *reg = (u16 *)(data); + const u32 *val = data + 4; + + return regmap_spi_gather_write(context, reg, 4, val, count - 4); +} + +static int regmap_spi_async_write(void *context, + const void *reg, size_t reg_len, + const void *val, size_t val_len, + struct regmap_async *a) +{ + return -ENOTSUPP; +} + +static struct regmap_async *regmap_spi_async_alloc(void) +{ + return NULL; +} + +static int tcan4x5x_regmap_read(void *context, + const void *reg, size_t reg_size, + void *val, size_t val_size) +{ + struct device *dev = context; + struct spi_device *spi = to_spi_device(dev); + + u32 addr = TCAN4X5X_READ_CMD | (*((u16 *)reg) << 8) | val_size >> 2; + + return spi_write_then_read(spi, &addr, reg_size, (u32 *)val, val_size); +} + +static struct regmap_bus tcan4x5x_bus = { + .write = tcan4x5x_regmap_write, + .gather_write = regmap_spi_gather_write, + .async_write = regmap_spi_async_write, + .async_alloc = regmap_spi_async_alloc, + .read = tcan4x5x_regmap_read, + .read_flag_mask = 0x00, + .reg_format_endian_default = REGMAP_ENDIAN_NATIVE, + .val_format_endian_default = REGMAP_ENDIAN_NATIVE, +}; + +static u32 tcan4x5x_read_reg(struct m_can_classdev *cdev, int reg) +{ + struct tcan4x5x_priv *priv = cdev->device_data; + u32 val; + + regmap_read(priv->regmap, priv->reg_offset + reg, &val); + + return val; +} + +static u32 tcan4x5x_read_fifo(struct m_can_classdev *cdev, int addr_offset) +{ + struct tcan4x5x_priv *priv = cdev->device_data; + u32 val; + + regmap_read(priv->regmap, priv->mram_start + addr_offset, &val); + + return val; +} + +static int tcan4x5x_write_reg(struct m_can_classdev *cdev, int reg, int val) +{ + struct tcan4x5x_priv *priv = cdev->device_data; + + return regmap_write(priv->regmap, priv->reg_offset + reg, val); +} + +static int tcan4x5x_write_fifo(struct m_can_classdev *cdev, + int addr_offset, int val) +{ + struct tcan4x5x_priv *priv = cdev->device_data; + + return regmap_write(priv->regmap, priv->mram_start + addr_offset, val); +} + +static int tcan4x5x_power_enable(struct regulator *reg, int enable) +{ + if (IS_ERR_OR_NULL(reg)) + return 0; + + if (enable) + return regulator_enable(reg); + else + return regulator_disable(reg); +} + +static int tcan4x5x_write_tcan_reg(struct m_can_classdev *cdev, + int reg, int val) +{ + struct tcan4x5x_priv *priv = cdev->device_data; + + return regmap_write(priv->regmap, reg, val); +} + +static int tcan4x5x_clear_interrupts(struct m_can_classdev *cdev) +{ + int ret; + + ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_STATUS, + 
TCAN4X5X_CLEAR_ALL_INT); + if (ret) + return ret; + + ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_MCAN_INT_REG, + TCAN4X5X_ENABLE_MCAN_INT); + if (ret) + return ret; + + ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_INT_FLAGS, + TCAN4X5X_CLEAR_ALL_INT); + if (ret) + return ret; + + ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_ERROR_STATUS, + TCAN4X5X_CLEAR_ALL_INT); + if (ret) + return ret; + + return ret; +} + +static int tcan4x5x_init(struct m_can_classdev *cdev) +{ + struct tcan4x5x_priv *tcan4x5x = cdev->device_data; + int ret; + + tcan4x5x_check_wake(tcan4x5x); + + ret = tcan4x5x_clear_interrupts(cdev); + if (ret) + return ret; + + ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_INT_EN, + TCAN4X5X_ENABLE_TCAN_INT); + if (ret) + return ret; + + ret = regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG, + TCAN4X5X_MODE_SEL_MASK, TCAN4X5X_MODE_NORMAL); + if (ret) + return ret; + + /* Zero out the MCAN buffers */ + m_can_init_ram(cdev); + + return ret; +} + +static int tcan4x5x_parse_config(struct m_can_classdev *cdev) +{ + struct tcan4x5x_priv *tcan4x5x = cdev->device_data; + + tcan4x5x->device_wake_gpio = devm_gpiod_get(cdev->dev, "device-wake", + GPIOD_OUT_HIGH); + if (IS_ERR(tcan4x5x->device_wake_gpio)) { + dev_err(cdev->dev, "device-wake gpio not defined\n"); + return -EINVAL; + } + + tcan4x5x->reset_gpio = devm_gpiod_get_optional(cdev->dev, "reset", + GPIOD_OUT_LOW); + if (IS_ERR(tcan4x5x->reset_gpio)) + tcan4x5x->reset_gpio = NULL; + + tcan4x5x->device_state_gpio = devm_gpiod_get_optional(cdev->dev, + "device-state", + GPIOD_IN); + if (IS_ERR(tcan4x5x->device_state_gpio)) + tcan4x5x->device_state_gpio = NULL; + + tcan4x5x->power = devm_regulator_get_optional(cdev->dev, + "vsup"); + if (PTR_ERR(tcan4x5x->power) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + return 0; +} + +static const struct regmap_config tcan4x5x_regmap = { + .reg_bits = 32, + .val_bits = 32, + .cache_type = REGCACHE_NONE, + .max_register = TCAN4X5X_MAX_REGISTER, +}; + +static struct m_can_ops tcan4x5x_ops = { + .init = tcan4x5x_init, + .read_reg = tcan4x5x_read_reg, + .write_reg = tcan4x5x_write_reg, + .write_fifo = tcan4x5x_write_fifo, + .read_fifo = tcan4x5x_read_fifo, + .clear_interrupts = tcan4x5x_clear_interrupts, +}; + +static int tcan4x5x_can_probe(struct spi_device *spi) +{ + struct tcan4x5x_priv *priv; + struct m_can_classdev *mcan_class; + int freq, ret; + + mcan_class = m_can_class_allocate_dev(&spi->dev); + if (!mcan_class) + return -ENOMEM; + + priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + mcan_class->device_data = priv; + + m_can_class_get_clocks(mcan_class); + if (IS_ERR(mcan_class->cclk)) { + dev_err(&spi->dev, "no CAN clock source defined\n"); + freq = TCAN4X5X_EXT_CLK_DEF; + } else { + freq = clk_get_rate(mcan_class->cclk); + } + + /* Sanity check */ + if (freq < 20000000 || freq > TCAN4X5X_EXT_CLK_DEF) + return -ERANGE; + + priv->reg_offset = TCAN4X5X_MCAN_OFFSET; + priv->mram_start = TCAN4X5X_MRAM_START; + priv->spi = spi; + priv->mcan_dev = mcan_class; + + mcan_class->pm_clock_support = 0; + mcan_class->can.clock.freq = freq; + mcan_class->dev = &spi->dev; + mcan_class->ops = &tcan4x5x_ops; + mcan_class->is_peripheral = true; + mcan_class->bit_timing = &tcan4x5x_bittiming_const; + mcan_class->data_timing = &tcan4x5x_data_bittiming_const; + mcan_class->net->irq = spi->irq; + + spi_set_drvdata(spi, priv); + + ret = tcan4x5x_parse_config(mcan_class); + if (ret) + goto out_clk; + + /* Configure the SPI bus */ + spi->bits_per_word = 32; + ret = 
spi_setup(spi); + if (ret) + goto out_clk; + + priv->regmap = devm_regmap_init(&spi->dev, &tcan4x5x_bus, + &spi->dev, &tcan4x5x_regmap); + + tcan4x5x_power_enable(priv->power, 1); + + ret = m_can_class_register(mcan_class); + if (ret) + goto out_power; + + netdev_info(mcan_class->net, "TCAN4X5X successfully initialized.\n"); + return 0; + +out_power: + tcan4x5x_power_enable(priv->power, 0); +out_clk: + if (!IS_ERR(mcan_class->cclk)) { + clk_disable_unprepare(mcan_class->cclk); + clk_disable_unprepare(mcan_class->hclk); + } + + dev_err(&spi->dev, "Probe failed, err=%d\n", ret); + return ret; +} + +static int tcan4x5x_can_remove(struct spi_device *spi) +{ + struct tcan4x5x_priv *priv = spi_get_drvdata(spi); + + tcan4x5x_power_enable(priv->power, 0); + + m_can_class_unregister(priv->mcan_dev); + + return 0; +} + +static const struct of_device_id tcan4x5x_of_match[] = { + { .compatible = "ti,tcan4x5x", }, + { } +}; +MODULE_DEVICE_TABLE(of, tcan4x5x_of_match); + +static const struct spi_device_id tcan4x5x_id_table[] = { + { + .name = "tcan4x5x", + .driver_data = 0, + }, + { } +}; +MODULE_DEVICE_TABLE(spi, tcan4x5x_id_table); + +static struct spi_driver tcan4x5x_can_driver = { + .driver = { + .name = DEVICE_NAME, + .of_match_table = tcan4x5x_of_match, + .pm = NULL, + }, + .id_table = tcan4x5x_id_table, + .probe = tcan4x5x_can_probe, + .remove = tcan4x5x_can_remove, +}; +module_spi_driver(tcan4x5x_can_driver); + +MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>"); +MODULE_DESCRIPTION("Texas Instruments TCAN4x5x CAN driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c index 13e66297b65f..bf5adea9c0a3 100644 --- a/drivers/net/can/rcar/rcar_can.c +++ b/drivers/net/can/rcar/rcar_can.c @@ -15,11 +15,17 @@ #include <linux/can/led.h> #include <linux/can/dev.h> #include <linux/clk.h> -#include <linux/can/platform/rcar_can.h> #include <linux/of.h> #define RCAR_CAN_DRV_NAME "rcar_can" +/* Clock Select Register settings */ +enum CLKR { + CLKR_CLKP1 = 0, /* Peripheral clock (clkp1) */ + CLKR_CLKP2 = 1, /* Peripheral clock (clkp2) */ + CLKR_CLKEXT = 3, /* Externally input clock */ +}; + #define RCAR_SUPPORTED_CLOCKS (BIT(CLKR_CLKP1) | BIT(CLKR_CLKP2) | \ BIT(CLKR_CLKEXT)) @@ -736,7 +742,6 @@ static const char * const clock_names[] = { static int rcar_can_probe(struct platform_device *pdev) { - struct rcar_can_platform_data *pdata; struct rcar_can_priv *priv; struct net_device *ndev; struct resource *mem; @@ -745,21 +750,11 @@ static int rcar_can_probe(struct platform_device *pdev) int err = -ENODEV; int irq; - if (pdev->dev.of_node) { - of_property_read_u32(pdev->dev.of_node, - "renesas,can-clock-select", &clock_select); - } else { - pdata = dev_get_platdata(&pdev->dev); - if (!pdata) { - dev_err(&pdev->dev, "No platform data provided!\n"); - goto fail; - } - clock_select = pdata->clock_select; - } + of_property_read_u32(pdev->dev.of_node, "renesas,can-clock-select", + &clock_select); irq = platform_get_irq(pdev, 0); if (irq < 0) { - dev_err(&pdev->dev, "No IRQ resource\n"); err = irq; goto fail; } diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c index de34a4b82d4a..edaa1ca972c1 100644 --- a/drivers/net/can/rcar/rcar_canfd.c +++ b/drivers/net/can/rcar/rcar_canfd.c @@ -1652,14 +1652,12 @@ static int rcar_canfd_probe(struct platform_device *pdev) ch_irq = platform_get_irq(pdev, 0); if (ch_irq < 0) { - dev_err(&pdev->dev, "no Channel IRQ resource\n"); err = ch_irq; goto fail_dev; } g_irq = platform_get_irq(pdev, 1); if 
(g_irq < 0) { - dev_err(&pdev->dev, "no Global IRQ resource\n"); err = g_irq; goto fail_dev; } diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig index 6b72da2f18a6..32d242dc0d9f 100644 --- a/drivers/net/can/sja1000/Kconfig +++ b/drivers/net/can/sja1000/Kconfig @@ -1,26 +1,18 @@ # SPDX-License-Identifier: GPL-2.0-only + menuconfig CAN_SJA1000 tristate "Philips/NXP SJA1000 devices" depends on HAS_IOMEM if CAN_SJA1000 -config CAN_SJA1000_ISA - tristate "ISA Bus based legacy SJA1000 driver" - ---help--- - This driver adds legacy support for SJA1000 chips connected to - the ISA bus using I/O port, memory mapped or indirect access. - -config CAN_SJA1000_PLATFORM - tristate "Generic Platform Bus based SJA1000 driver" +config CAN_EMS_PCI + tristate "EMS CPC-PCI, CPC-PCIe and CPC-104P Card" + depends on PCI ---help--- - This driver adds support for the SJA1000 chips connected to - the "platform bus" (Linux abstraction for directly to the - processor attached devices). Which can be found on various - boards from Phytec (http://www.phytec.de) like the PCM027, - PCM038. It also provides the OpenFirmware "platform bus" found - on embedded systems with OpenFirmware bindings, e.g. if you - have a PowerPC based system you may want to enable this option. + This driver is for the one, two or four channel CPC-PCI, + CPC-PCIe and CPC-104P cards from EMS Dr. Thomas Wuensche + (http://www.ems-wuensche.de). config CAN_EMS_PCMCIA tristate "EMS CPC-CARD Card" @@ -29,23 +21,22 @@ config CAN_EMS_PCMCIA This driver is for the one or two channel CPC-CARD cards from EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de). -config CAN_EMS_PCI - tristate "EMS CPC-PCI, CPC-PCIe and CPC-104P Card" +config CAN_F81601 + tristate "Fintek F81601 PCIE to 2 CAN Controller" depends on PCI - ---help--- - This driver is for the one, two or four channel CPC-PCI, - CPC-PCIe and CPC-104P cards from EMS Dr. Thomas Wuensche - (http://www.ems-wuensche.de). + help + This driver adds support for Fintek F81601 PCIE to 2 CAN + Controller. It had internal 24MHz clock source, but it can + be changed by manufacturer. Use modinfo to get usage for + parameters. Visit http://www.fintek.com.tw to get more + information. -config CAN_PEAK_PCMCIA - tristate "PEAK PCAN-PC Card" - depends on PCMCIA - depends on HAS_IOPORT_MAP +config CAN_KVASER_PCI + tristate "Kvaser PCIcanx and Kvaser PCIcan PCI Cards" + depends on PCI ---help--- - This driver is for the PCAN-PC Card PCMCIA adapter (1 or 2 channels) - from PEAK-System (http://www.peak-system.com). To compile this - driver as a module, choose M here: the module will be called - peak_pcmcia. + This driver is for the PCIcanx and PCIcan cards (1, 2 or + 4 channel) from Kvaser (http://www.kvaser.com). config CAN_PEAK_PCI tristate "PEAK PCAN-PCI/PCIe/miniPCI Cards" @@ -66,12 +57,15 @@ config CAN_PEAK_PCIEC Technik. This will also automatically select I2C and I2C_ALGO configuration options. -config CAN_KVASER_PCI - tristate "Kvaser PCIcanx and Kvaser PCIcan PCI Cards" - depends on PCI +config CAN_PEAK_PCMCIA + tristate "PEAK PCAN-PC Card" + depends on PCMCIA + depends on HAS_IOPORT_MAP ---help--- - This driver is for the PCIcanx and PCIcan cards (1, 2 or - 4 channel) from Kvaser (http://www.kvaser.com). + This driver is for the PCAN-PC Card PCMCIA adapter (1 or 2 channels) + from PEAK-System (http://www.peak-system.com). To compile this + driver as a module, choose M here: the module will be called + peak_pcmcia. 
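 # As a hypothetical usage example for the CAN_F81601 clock parameters
 # described above (values are illustrative; the parameters are defined
 # in drivers/net/can/sja1000/f81601.c), a board strapped for an external
 # 12 MHz oscillator would load the module with:
 #
 #   modprobe f81601 internal_clk=0 external_clk=12000000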
config CAN_PLX_PCI tristate "PLX90xx PCI-bridge based Cards" @@ -91,6 +85,23 @@ config CAN_PLX_PCI - Connect Tech Inc. CANpro/104-Plus Opto (CRG001) card (http://www.connecttech.com) - ASEM CAN raw - 2 isolated CAN channels (www.asem.it) +config CAN_SJA1000_ISA + tristate "ISA Bus based legacy SJA1000 driver" + ---help--- + This driver adds legacy support for SJA1000 chips connected to + the ISA bus using I/O port, memory mapped or indirect access. + +config CAN_SJA1000_PLATFORM + tristate "Generic Platform Bus based SJA1000 driver" + ---help--- + This driver adds support for the SJA1000 chips connected to + the "platform bus" (Linux abstraction for directly to the + processor attached devices). Which can be found on various + boards from Phytec (http://www.phytec.de) like the PCM027, + PCM038. It also provides the OpenFirmware "platform bus" found + on embedded systems with OpenFirmware bindings, e.g. if you + have a PowerPC based system you may want to enable this option. + config CAN_TSCAN1 tristate "TS-CAN1 PC104 boards" depends on ISA diff --git a/drivers/net/can/sja1000/Makefile b/drivers/net/can/sja1000/Makefile index 9253aaf9e739..500ce1dddaec 100644 --- a/drivers/net/can/sja1000/Makefile +++ b/drivers/net/can/sja1000/Makefile @@ -3,13 +3,14 @@ # Makefile for the SJA1000 CAN controller drivers. # -obj-$(CONFIG_CAN_SJA1000) += sja1000.o -obj-$(CONFIG_CAN_SJA1000_ISA) += sja1000_isa.o -obj-$(CONFIG_CAN_SJA1000_PLATFORM) += sja1000_platform.o -obj-$(CONFIG_CAN_EMS_PCMCIA) += ems_pcmcia.o obj-$(CONFIG_CAN_EMS_PCI) += ems_pci.o +obj-$(CONFIG_CAN_EMS_PCMCIA) += ems_pcmcia.o +obj-$(CONFIG_CAN_F81601) += f81601.o obj-$(CONFIG_CAN_KVASER_PCI) += kvaser_pci.o -obj-$(CONFIG_CAN_PEAK_PCMCIA) += peak_pcmcia.o obj-$(CONFIG_CAN_PEAK_PCI) += peak_pci.o +obj-$(CONFIG_CAN_PEAK_PCMCIA) += peak_pcmcia.o obj-$(CONFIG_CAN_PLX_PCI) += plx_pci.o +obj-$(CONFIG_CAN_SJA1000) += sja1000.o +obj-$(CONFIG_CAN_SJA1000_ISA) += sja1000_isa.o +obj-$(CONFIG_CAN_SJA1000_PLATFORM) += sja1000_platform.o obj-$(CONFIG_CAN_TSCAN1) += tscan1.o diff --git a/drivers/net/can/sja1000/f81601.c b/drivers/net/can/sja1000/f81601.c new file mode 100644 index 000000000000..8f25e95814ef --- /dev/null +++ b/drivers/net/can/sja1000/f81601.c @@ -0,0 +1,211 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Fintek F81601 PCIE to 2 CAN controller driver + * + * Copyright (C) 2019 Peter Hong <peter_hong@fintek.com.tw> + * Copyright (C) 2019 Linux Foundation + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/netdevice.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/pci.h> +#include <linux/can/dev.h> +#include <linux/io.h> + +#include "sja1000.h" + +#define F81601_PCI_MAX_CHAN 2 + +#define F81601_DECODE_REG 0x209 +#define F81601_IO_MODE BIT(7) +#define F81601_MEM_MODE BIT(6) +#define F81601_CFG_MODE BIT(5) +#define F81601_CAN2_INTERNAL_CLK BIT(3) +#define F81601_CAN1_INTERNAL_CLK BIT(2) +#define F81601_CAN2_EN BIT(1) +#define F81601_CAN1_EN BIT(0) + +#define F81601_TRAP_REG 0x20a +#define F81601_CAN2_HAS_EN BIT(4) + +struct f81601_pci_card { + void __iomem *addr; + spinlock_t lock; /* use this spin lock only for write access */ + struct pci_dev *dev; + struct net_device *net_dev[F81601_PCI_MAX_CHAN]; +}; + +static const struct pci_device_id f81601_pci_tbl[] = { + { PCI_DEVICE(0x1c29, 0x1703) }, + { /* sentinel */ }, +}; + +MODULE_DEVICE_TABLE(pci, f81601_pci_tbl); + +static bool internal_clk = true; +module_param(internal_clk, bool, 0444); +MODULE_PARM_DESC(internal_clk, 
"Use internal clock, default true (24MHz)"); + +static unsigned int external_clk; +module_param(external_clk, uint, 0444); +MODULE_PARM_DESC(external_clk, "External clock when internal_clk disabled"); + +static u8 f81601_pci_read_reg(const struct sja1000_priv *priv, int port) +{ + return readb(priv->reg_base + port); +} + +static void f81601_pci_write_reg(const struct sja1000_priv *priv, int port, + u8 val) +{ + struct f81601_pci_card *card = priv->priv; + unsigned long flags; + + spin_lock_irqsave(&card->lock, flags); + writeb(val, priv->reg_base + port); + readb(priv->reg_base); + spin_unlock_irqrestore(&card->lock, flags); +} + +static void f81601_pci_remove(struct pci_dev *pdev) +{ + struct f81601_pci_card *card = pci_get_drvdata(pdev); + struct net_device *dev; + int i; + + for (i = 0; i < ARRAY_SIZE(card->net_dev); i++) { + dev = card->net_dev[i]; + if (!dev) + continue; + + dev_info(&pdev->dev, "%s: Removing %s\n", __func__, dev->name); + + unregister_sja1000dev(dev); + free_sja1000dev(dev); + } +} + +/* Probe F81601 based device for the SJA1000 chips and register each + * available CAN channel to SJA1000 Socket-CAN subsystem. + */ +static int f81601_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct sja1000_priv *priv; + struct net_device *dev; + struct f81601_pci_card *card; + int err, i, count; + u8 tmp; + + if (pcim_enable_device(pdev) < 0) { + dev_err(&pdev->dev, "Failed to enable PCI device\n"); + return -ENODEV; + } + + dev_info(&pdev->dev, "Detected card at slot #%i\n", + PCI_SLOT(pdev->devfn)); + + card = devm_kzalloc(&pdev->dev, sizeof(*card), GFP_KERNEL); + if (!card) + return -ENOMEM; + + card->dev = pdev; + spin_lock_init(&card->lock); + + pci_set_drvdata(pdev, card); + + tmp = F81601_IO_MODE | F81601_MEM_MODE | F81601_CFG_MODE | + F81601_CAN2_EN | F81601_CAN1_EN; + + if (internal_clk) { + tmp |= F81601_CAN2_INTERNAL_CLK | F81601_CAN1_INTERNAL_CLK; + + dev_info(&pdev->dev, + "F81601 running with internal clock: 24Mhz\n"); + } else { + dev_info(&pdev->dev, + "F81601 running with external clock: %dMhz\n", + external_clk / 1000000); + } + + pci_write_config_byte(pdev, F81601_DECODE_REG, tmp); + + card->addr = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0)); + + if (!card->addr) { + err = -ENOMEM; + dev_err(&pdev->dev, "%s: Failed to remap BAR\n", __func__); + goto failure_cleanup; + } + + /* read CAN2_HW_EN strap pin to detect how many CANBUS do we have */ + count = ARRAY_SIZE(card->net_dev); + pci_read_config_byte(pdev, F81601_TRAP_REG, &tmp); + if (!(tmp & F81601_CAN2_HAS_EN)) + count = 1; + + for (i = 0; i < count; i++) { + dev = alloc_sja1000dev(0); + if (!dev) { + err = -ENOMEM; + goto failure_cleanup; + } + + priv = netdev_priv(dev); + priv->priv = card; + priv->irq_flags = IRQF_SHARED; + priv->reg_base = card->addr + 0x80 * i; + priv->read_reg = f81601_pci_read_reg; + priv->write_reg = f81601_pci_write_reg; + + if (internal_clk) + priv->can.clock.freq = 24000000 / 2; + else + priv->can.clock.freq = external_clk / 2; + + priv->ocr = OCR_TX0_PUSHPULL | OCR_TX1_PUSHPULL; + priv->cdr = CDR_CBP; + + SET_NETDEV_DEV(dev, &pdev->dev); + dev->dev_id = i; + dev->irq = pdev->irq; + + /* Register SJA1000 device */ + err = register_sja1000dev(dev); + if (err) { + dev_err(&pdev->dev, + "%s: Registering device failed: %x\n", __func__, + err); + free_sja1000dev(dev); + goto failure_cleanup; + } + + card->net_dev[i] = dev; + dev_info(&pdev->dev, "Channel #%d, %s at 0x%p, irq %d\n", i, + dev->name, priv->reg_base, dev->irq); + } + + return 0; + + 
failure_cleanup: + dev_err(&pdev->dev, "%s: failed: %d. Cleaning Up.\n", __func__, err); + f81601_pci_remove(pdev); + + return err; +} + +static struct pci_driver f81601_pci_driver = { + .name = "f81601", + .id_table = f81601_pci_tbl, + .probe = f81601_pci_probe, + .remove = f81601_pci_remove, +}; + +MODULE_DESCRIPTION("Fintek F81601 PCIE to 2 CANBUS adaptor driver"); +MODULE_AUTHOR("Peter Hong <peter_hong@fintek.com.tw>"); +MODULE_LICENSE("GPL v2"); + +module_pci_driver(f81601_pci_driver); diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c index 68366d57916c..8c0244f51059 100644 --- a/drivers/net/can/sja1000/peak_pci.c +++ b/drivers/net/can/sja1000/peak_pci.c @@ -417,7 +417,7 @@ static void peak_pciec_write_reg(const struct sja1000_priv *priv, peak_pci_write_reg(priv, port, val); } -static struct i2c_algo_bit_data peak_pciec_i2c_bit_ops = { +static const struct i2c_algo_bit_data peak_pciec_i2c_bit_ops = { .setsda = pita_setsda, .setscl = pita_setscl, .getsda = pita_getsda, diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index aa97dbc797b6..bb6032211043 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c @@ -55,6 +55,7 @@ #include <linux/workqueue.h> #include <linux/can.h> #include <linux/can/skb.h> +#include <linux/can/can-ml.h> MODULE_ALIAS_LDISC(N_SLCAN); MODULE_DESCRIPTION("serial line CAN interface"); @@ -514,6 +515,7 @@ static struct slcan *slc_alloc(void) char name[IFNAMSIZ]; struct net_device *dev = NULL; struct slcan *sl; + int size; for (i = 0; i < maxdev; i++) { dev = slcan_devs[i]; @@ -527,12 +529,14 @@ static struct slcan *slc_alloc(void) return NULL; sprintf(name, "slcan%d", i); - dev = alloc_netdev(sizeof(*sl), name, NET_NAME_UNKNOWN, slc_setup); + size = ALIGN(sizeof(*sl), NETDEV_ALIGN) + sizeof(struct can_ml_priv); + dev = alloc_netdev(size, name, NET_NAME_UNKNOWN, slc_setup); if (!dev) return NULL; dev->base_addr = i; sl = netdev_priv(dev); + dev->ml_priv = (void *)sl + ALIGN(sizeof(*sl), NETDEV_ALIGN); /* Initialize channel control data */ sl->magic = SLCAN_MAGIC; diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c index 03a711c3221b..73d48c3b8ded 100644 --- a/drivers/net/can/spi/hi311x.c +++ b/drivers/net/can/spi/hi311x.c @@ -21,7 +21,6 @@ #include <linux/completion.h> #include <linux/delay.h> #include <linux/device.h> -#include <linux/dma-mapping.h> #include <linux/freezer.h> #include <linux/interrupt.h> #include <linux/io.h> @@ -126,10 +125,6 @@ #define DEVICE_NAME "hi3110" -static int hi3110_enable_dma = 1; /* Enable SPI DMA. Default: 1 (On) */ -module_param(hi3110_enable_dma, int, 0444); -MODULE_PARM_DESC(hi3110_enable_dma, "Enable SPI DMA. 
Default: 1 (On)"); - static const struct can_bittiming_const hi3110_bittiming_const = { .name = DEVICE_NAME, .tseg1_min = 2, @@ -156,8 +151,6 @@ struct hi3110_priv { u8 *spi_tx_buf; u8 *spi_rx_buf; - dma_addr_t spi_tx_dma; - dma_addr_t spi_rx_dma; struct sk_buff *tx_skb; int tx_len; @@ -184,8 +177,7 @@ static void hi3110_clean(struct net_device *net) if (priv->tx_skb || priv->tx_len) net->stats.tx_errors++; - if (priv->tx_skb) - dev_kfree_skb(priv->tx_skb); + dev_kfree_skb(priv->tx_skb); if (priv->tx_len) can_free_echo_skb(priv->net, 0); priv->tx_skb = NULL; @@ -217,13 +209,6 @@ static int hi3110_spi_trans(struct spi_device *spi, int len) int ret; spi_message_init(&m); - - if (hi3110_enable_dma) { - t.tx_dma = priv->spi_tx_dma; - t.rx_dma = priv->spi_rx_dma; - m.is_dma_mapped = 1; - } - spi_message_add_tail(&t, &m); ret = spi_sync(spi, &m); @@ -915,43 +900,18 @@ static int hi3110_can_probe(struct spi_device *spi) priv->spi = spi; mutex_init(&priv->hi3110_lock); - /* If requested, allocate DMA buffers */ - if (hi3110_enable_dma) { - spi->dev.coherent_dma_mask = ~0; - - /* Minimum coherent DMA allocation is PAGE_SIZE, so allocate - * that much and share it between Tx and Rx DMA buffers. - */ - priv->spi_tx_buf = dmam_alloc_coherent(&spi->dev, - PAGE_SIZE, - &priv->spi_tx_dma, - GFP_DMA); - - if (priv->spi_tx_buf) { - priv->spi_rx_buf = (priv->spi_tx_buf + (PAGE_SIZE / 2)); - priv->spi_rx_dma = (dma_addr_t)(priv->spi_tx_dma + - (PAGE_SIZE / 2)); - } else { - /* Fall back to non-DMA */ - hi3110_enable_dma = 0; - } + priv->spi_tx_buf = devm_kzalloc(&spi->dev, HI3110_RX_BUF_LEN, + GFP_KERNEL); + if (!priv->spi_tx_buf) { + ret = -ENOMEM; + goto error_probe; } + priv->spi_rx_buf = devm_kzalloc(&spi->dev, HI3110_RX_BUF_LEN, + GFP_KERNEL); - /* Allocate non-DMA buffers */ - if (!hi3110_enable_dma) { - priv->spi_tx_buf = devm_kzalloc(&spi->dev, HI3110_RX_BUF_LEN, - GFP_KERNEL); - if (!priv->spi_tx_buf) { - ret = -ENOMEM; - goto error_probe; - } - priv->spi_rx_buf = devm_kzalloc(&spi->dev, HI3110_RX_BUF_LEN, - GFP_KERNEL); - - if (!priv->spi_rx_buf) { - ret = -ENOMEM; - goto error_probe; - } + if (!priv->spi_rx_buf) { + ret = -ENOMEM; + goto error_probe; } SET_NETDEV_DEV(net, &spi->dev); diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c index 12358f06d194..bee9f7b8dad6 100644 --- a/drivers/net/can/spi/mcp251x.c +++ b/drivers/net/can/spi/mcp251x.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* - * CAN bus driver for Microchip 251x/25625 CAN Controller with SPI Interface +/* CAN bus driver for Microchip 251x/25625 CAN Controller with SPI Interface * * MCP2510 support and bug fixes by Christian Pellegrin * <chripell@evolware.org> @@ -18,26 +17,6 @@ * - Sascha Hauer, Marc Kleine-Budde, Pengutronix * - Simon Kallweit, intefo AG * Copyright 2007 - * - * Your platform definition file should specify something like: - * - * static struct mcp251x_platform_data mcp251x_info = { - * .oscillator_frequency = 8000000, - * }; - * - * static struct spi_board_info spi_board_info[] = { - * { - * .modalias = "mcp2510", - * // "mcp2515" or "mcp25625" depending on your controller - * .platform_data = &mcp251x_info, - * .irq = IRQ_EINT13, - * .max_speed_hz = 2*1000*1000, - * .chip_select = 2, - * }, - * }; - * - * Please see mcp251x.h for a description of the fields in - * struct mcp251x_platform_data. 
*/ #include <linux/can/core.h> @@ -48,15 +27,13 @@ #include <linux/completion.h> #include <linux/delay.h> #include <linux/device.h> -#include <linux/dma-mapping.h> #include <linux/freezer.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> -#include <linux/of.h> -#include <linux/of_device.h> +#include <linux/property.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/spi/spi.h> @@ -75,7 +52,6 @@ #define RTS_TXB2 0x04 #define INSTRUCTION_RTS(n) (0x80 | ((n) & 0x07)) - /* MPC251x registers */ #define CANSTAT 0x0e #define CANCTRL 0x0f @@ -191,8 +167,7 @@ #define SET_BYTE(val, byte) \ (((val) & 0xff) << ((byte) * 8)) -/* - * Buffer size required for the largest SPI transfer (i.e., reading a +/* Buffer size required for the largest SPI transfer (i.e., reading a * frame) */ #define CAN_FRAME_MAX_DATA_LEN 8 @@ -205,10 +180,6 @@ #define DEVICE_NAME "mcp251x" -static int mcp251x_enable_dma; /* Enable SPI DMA. Default: 0 (Off) */ -module_param(mcp251x_enable_dma, int, 0444); -MODULE_PARM_DESC(mcp251x_enable_dma, "Enable SPI DMA. Default: 0 (Off)"); - static const struct can_bittiming_const mcp251x_bittiming_const = { .name = DEVICE_NAME, .tseg1_min = 3, @@ -237,8 +208,6 @@ struct mcp251x_priv { u8 *spi_tx_buf; u8 *spi_rx_buf; - dma_addr_t spi_tx_dma; - dma_addr_t spi_rx_dma; struct sk_buff *tx_skb; int tx_len; @@ -274,16 +243,14 @@ static void mcp251x_clean(struct net_device *net) if (priv->tx_skb || priv->tx_len) net->stats.tx_errors++; - if (priv->tx_skb) - dev_kfree_skb(priv->tx_skb); + dev_kfree_skb(priv->tx_skb); if (priv->tx_len) can_free_echo_skb(priv->net, 0); priv->tx_skb = NULL; priv->tx_len = 0; } -/* - * Note about handling of error return of mcp251x_spi_trans: accessing +/* Note about handling of error return of mcp251x_spi_trans: accessing * registers via SPI is not really different conceptually than using * normal I/O assembler instructions, although it's much more * complicated from a practical POV. 
So it's not advisable to always @@ -308,13 +275,6 @@ static int mcp251x_spi_trans(struct spi_device *spi, int len) int ret; spi_message_init(&m); - - if (mcp251x_enable_dma) { - t.tx_dma = priv->spi_tx_dma; - t.rx_dma = priv->spi_rx_dma; - m.is_dma_mapped = 1; - } - spi_message_add_tail(&t, &m); ret = spi_sync(spi, &m); @@ -323,7 +283,7 @@ static int mcp251x_spi_trans(struct spi_device *spi, int len) return ret; } -static u8 mcp251x_read_reg(struct spi_device *spi, uint8_t reg) +static u8 mcp251x_read_reg(struct spi_device *spi, u8 reg) { struct mcp251x_priv *priv = spi_get_drvdata(spi); u8 val = 0; @@ -337,8 +297,7 @@ static u8 mcp251x_read_reg(struct spi_device *spi, uint8_t reg) return val; } -static void mcp251x_read_2regs(struct spi_device *spi, uint8_t reg, - uint8_t *v1, uint8_t *v2) +static void mcp251x_read_2regs(struct spi_device *spi, u8 reg, u8 *v1, u8 *v2) { struct mcp251x_priv *priv = spi_get_drvdata(spi); @@ -351,7 +310,7 @@ static void mcp251x_read_2regs(struct spi_device *spi, uint8_t reg, *v2 = priv->spi_rx_buf[3]; } -static void mcp251x_write_reg(struct spi_device *spi, u8 reg, uint8_t val) +static void mcp251x_write_reg(struct spi_device *spi, u8 reg, u8 val) { struct mcp251x_priv *priv = spi_get_drvdata(spi); @@ -363,7 +322,7 @@ static void mcp251x_write_reg(struct spi_device *spi, u8 reg, uint8_t val) } static void mcp251x_write_bits(struct spi_device *spi, u8 reg, - u8 mask, uint8_t val) + u8 mask, u8 val) { struct mcp251x_priv *priv = spi_get_drvdata(spi); @@ -565,8 +524,7 @@ static int mcp251x_set_normal_mode(struct spi_device *spi) while (mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK) { schedule(); if (time_after(jiffies, timeout)) { - dev_err(&spi->dev, "MCP251x didn't" - " enter in normal mode\n"); + dev_err(&spi->dev, "MCP251x didn't enter in normal mode\n"); return -EBUSY; } } @@ -612,7 +570,7 @@ static int mcp251x_setup(struct net_device *net, struct spi_device *spi) static int mcp251x_hw_reset(struct spi_device *spi) { struct mcp251x_priv *priv = spi_get_drvdata(spi); - u8 reg; + unsigned long timeout; int ret; /* Wait for oscillator startup timer after power up */ @@ -626,10 +584,19 @@ static int mcp251x_hw_reset(struct spi_device *spi) /* Wait for oscillator startup timer after reset */ mdelay(MCP251X_OST_DELAY_MS); - reg = mcp251x_read_reg(spi, CANSTAT); - if ((reg & CANCTRL_REQOP_MASK) != CANCTRL_REQOP_CONF) - return -ENODEV; - + /* Wait for reset to finish */ + timeout = jiffies + HZ; + while ((mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK) != + CANCTRL_REQOP_CONF) { + usleep_range(MCP251X_OST_DELAY_MS * 1000, + MCP251X_OST_DELAY_MS * 1000 * 2); + + if (time_after(jiffies, timeout)) { + dev_err(&spi->dev, + "MCP251x didn't enter in conf mode after reset\n"); + return -EBUSY; + } + } return 0; } @@ -799,7 +766,8 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id) * (The MCP2515/25625 does this automatically.) 
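 * On the MCP2510 the host has to clear CANINTF.RX0IF itself before
 * receive buffer 0 is handed back to the controller, which is what the
 * mcp251x_write_bits() call below does.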
*/ if (mcp251x_is_2510(spi)) - mcp251x_write_bits(spi, CANINTF, CANINTF_RX0IF, 0x00); + mcp251x_write_bits(spi, CANINTF, + CANINTF_RX0IF, 0x00); } /* receive buffer 1 */ @@ -900,7 +868,6 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id) } netif_wake_queue(net); } - } mutex_unlock(&priv->mcp_lock); return IRQ_HANDLED; @@ -910,7 +877,7 @@ static int mcp251x_open(struct net_device *net) { struct mcp251x_priv *priv = netdev_priv(net); struct spi_device *spi = priv->spi; - unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_FALLING; + unsigned long flags = 0; int ret; ret = open_candev(net); @@ -926,8 +893,12 @@ static int mcp251x_open(struct net_device *net) priv->tx_skb = NULL; priv->tx_len = 0; + if (!dev_fwnode(&spi->dev)) + flags = IRQF_TRIGGER_FALLING; + ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist, - flags | IRQF_ONESHOT, DEVICE_NAME, priv); + flags | IRQF_ONESHOT, dev_name(&spi->dev), + priv); if (ret) { dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq); goto out_close; @@ -1014,23 +985,20 @@ MODULE_DEVICE_TABLE(spi, mcp251x_id_table); static int mcp251x_can_probe(struct spi_device *spi) { - const struct of_device_id *of_id = of_match_device(mcp251x_of_match, - &spi->dev); + const void *match = device_get_match_data(&spi->dev); struct mcp251x_platform_data *pdata = dev_get_platdata(&spi->dev); struct net_device *net; struct mcp251x_priv *priv; struct clk *clk; int freq, ret; - clk = devm_clk_get(&spi->dev, NULL); - if (IS_ERR(clk)) { - if (pdata) - freq = pdata->oscillator_frequency; - else - return PTR_ERR(clk); - } else { - freq = clk_get_rate(clk); - } + clk = devm_clk_get_optional(&spi->dev, NULL); + if (IS_ERR(clk)) + return PTR_ERR(clk); + + freq = clk_get_rate(clk); + if (freq == 0 && pdata) + freq = pdata->oscillator_frequency; /* Sanity check */ if (freq < 1000000 || freq > 25000000) @@ -1041,11 +1009,9 @@ static int mcp251x_can_probe(struct spi_device *spi) if (!net) return -ENOMEM; - if (!IS_ERR(clk)) { - ret = clk_prepare_enable(clk); - if (ret) - goto out_free; - } + ret = clk_prepare_enable(clk); + if (ret) + goto out_free; net->netdev_ops = &mcp251x_netdev_ops; net->flags |= IFF_ECHO; @@ -1056,8 +1022,8 @@ static int mcp251x_can_probe(struct spi_device *spi) priv->can.clock.freq = freq / 2; priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY; - if (of_id) - priv->model = (enum mcp251x_model)of_id->data; + if (match) + priv->model = (enum mcp251x_model)match; else priv->model = spi_get_device_id(spi)->driver_data; priv->net = net; @@ -1090,43 +1056,18 @@ static int mcp251x_can_probe(struct spi_device *spi) priv->spi = spi; mutex_init(&priv->mcp_lock); - /* If requested, allocate DMA buffers */ - if (mcp251x_enable_dma) { - spi->dev.coherent_dma_mask = ~0; - - /* - * Minimum coherent DMA allocation is PAGE_SIZE, so allocate - * that much and share it between Tx and Rx DMA buffers. 
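The probe rework above leans on two clk API guarantees: devm_clk_get_optional() returns NULL rather than an error when the clock is merely absent, and a NULL clk acts as a dummy, so clk_get_rate(NULL) yields 0 and clk_prepare_enable(NULL) succeeds. The same property is what lets the later error and remove paths call clk_disable_unprepare() unconditionally. A sketch of the fallback pattern, with osc_from_pdata() as a hypothetical helper for the platform-data frequency:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

int osc_from_pdata(struct device *dev);         /* hypothetical fallback */

static int example_get_freq(struct device *dev)
{
        struct clk *clk;
        int freq;

        clk = devm_clk_get_optional(dev, NULL);
        if (IS_ERR(clk))
                return PTR_ERR(clk);    /* real error, e.g. -EPROBE_DEFER */

        freq = clk_get_rate(clk);       /* 0 when clk is the NULL dummy */
        if (!freq)
                freq = osc_from_pdata(dev);

        return freq;
}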
- */ - priv->spi_tx_buf = dmam_alloc_coherent(&spi->dev, - PAGE_SIZE, - &priv->spi_tx_dma, - GFP_DMA); - - if (priv->spi_tx_buf) { - priv->spi_rx_buf = (priv->spi_tx_buf + (PAGE_SIZE / 2)); - priv->spi_rx_dma = (dma_addr_t)(priv->spi_tx_dma + - (PAGE_SIZE / 2)); - } else { - /* Fall back to non-DMA */ - mcp251x_enable_dma = 0; - } + priv->spi_tx_buf = devm_kzalloc(&spi->dev, SPI_TRANSFER_BUF_LEN, + GFP_KERNEL); + if (!priv->spi_tx_buf) { + ret = -ENOMEM; + goto error_probe; } - /* Allocate non-DMA buffers */ - if (!mcp251x_enable_dma) { - priv->spi_tx_buf = devm_kzalloc(&spi->dev, SPI_TRANSFER_BUF_LEN, - GFP_KERNEL); - if (!priv->spi_tx_buf) { - ret = -ENOMEM; - goto error_probe; - } - priv->spi_rx_buf = devm_kzalloc(&spi->dev, SPI_TRANSFER_BUF_LEN, - GFP_KERNEL); - if (!priv->spi_rx_buf) { - ret = -ENOMEM; - goto error_probe; - } + priv->spi_rx_buf = devm_kzalloc(&spi->dev, SPI_TRANSFER_BUF_LEN, + GFP_KERNEL); + if (!priv->spi_rx_buf) { + ret = -ENOMEM; + goto error_probe; } SET_NETDEV_DEV(net, &spi->dev); @@ -1135,7 +1076,8 @@ static int mcp251x_can_probe(struct spi_device *spi) ret = mcp251x_hw_probe(spi); if (ret) { if (ret == -ENODEV) - dev_err(&spi->dev, "Cannot initialize MCP%x. Wrong wiring?\n", priv->model); + dev_err(&spi->dev, "Cannot initialize MCP%x. Wrong wiring?\n", + priv->model); goto error_probe; } @@ -1154,8 +1096,7 @@ error_probe: mcp251x_power_enable(priv->power, 0); out_clk: - if (!IS_ERR(clk)) - clk_disable_unprepare(clk); + clk_disable_unprepare(clk); out_free: free_candev(net); @@ -1173,8 +1114,7 @@ static int mcp251x_can_remove(struct spi_device *spi) mcp251x_power_enable(priv->power, 0); - if (!IS_ERR(priv->clk)) - clk_disable_unprepare(priv->clk); + clk_disable_unprepare(priv->clk); free_candev(net); @@ -1189,8 +1129,7 @@ static int __maybe_unused mcp251x_can_suspend(struct device *dev) priv->force_quit = 1; disable_irq(spi->irq); - /* - * Note: at this point neither IST nor workqueues are running. + /* Note: at this point neither IST nor workqueues are running. 
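In the suspend hunk just below, the open-coded IS_ERR_OR_NULL check around regulator_disable() is replaced by an unconditional mcp251x_power_enable() call. That is only correct if the helper screens its argument itself, since regulator_enable()/regulator_disable() must never see a NULL or ERR_PTR supply. The assumed shape of such a tolerant wrapper, as a sketch rather than the verbatim driver code:

#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int example_power_enable(struct regulator *reg, int enable)
{
        if (IS_ERR_OR_NULL(reg))
                return 0;       /* optional supply absent: nothing to do */

        return enable ? regulator_enable(reg) : regulator_disable(reg);
}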
* open/stop cannot be called anyway so locking is not needed */ if (netif_running(net)) { @@ -1203,10 +1142,8 @@ static int __maybe_unused mcp251x_can_suspend(struct device *dev) priv->after_suspend = AFTER_SUSPEND_DOWN; } - if (!IS_ERR_OR_NULL(priv->power)) { - regulator_disable(priv->power); - priv->after_suspend |= AFTER_SUSPEND_POWER; - } + mcp251x_power_enable(priv->power, 0); + priv->after_suspend |= AFTER_SUSPEND_POWER; return 0; } diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c index 093fc9a529f0..f4cd88196404 100644 --- a/drivers/net/can/sun4i_can.c +++ b/drivers/net/can/sun4i_can.c @@ -787,7 +787,6 @@ static int sun4ican_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) { - dev_err(&pdev->dev, "could not get a valid irq\n"); err = -ENODEV; goto exit; } diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index db6ea936dc3f..f8b19eef5d26 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c @@ -5,6 +5,7 @@ * specs for the same is available at <http://www.ti.com> * * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2019 Jeroen Hofstee <jhofstee@victronenergy.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as @@ -34,6 +35,7 @@ #include <linux/can/dev.h> #include <linux/can/error.h> #include <linux/can/led.h> +#include <linux/can/rx-offload.h> #define DRV_NAME "ti_hecc" #define HECC_MODULE_VERSION "0.7" @@ -44,8 +46,7 @@ MODULE_VERSION(HECC_MODULE_VERSION); #define HECC_MAX_MAILBOXES 32 /* hardware mailboxes - do not change */ #define MAX_TX_PRIO 0x3F /* hardware value - do not change */ -/* - * Important Note: TX mailbox configuration +/* Important Note: TX mailbox configuration * TX mailboxes should be restricted to the number of SKB buffers to avoid * maintaining SKB buffers separately. TX mailboxes should be a power of 2 * for the mailbox logic to work. Top mailbox numbers are reserved for RX @@ -63,29 +64,15 @@ MODULE_VERSION(HECC_MODULE_VERSION); #define HECC_TX_PRIO_MASK (MAX_TX_PRIO << HECC_MB_TX_SHIFT) #define HECC_TX_MB_MASK (HECC_MAX_TX_MBOX - 1) #define HECC_TX_MASK ((HECC_MAX_TX_MBOX - 1) | HECC_TX_PRIO_MASK) -#define HECC_TX_MBOX_MASK (~(BIT(HECC_MAX_TX_MBOX) - 1)) -#define HECC_DEF_NAPI_WEIGHT HECC_MAX_RX_MBOX -/* - * Important Note: RX mailbox configuration - * RX mailboxes are further logically split into two - main and buffer - * mailboxes. The goal is to get all packets into main mailboxes as - * driven by mailbox number and receive priority (higher to lower) and - * buffer mailboxes are used to receive pkts while main mailboxes are being - * processed. This ensures in-order packet reception. +/* RX mailbox configuration * - * Here are the recommended values for buffer mailbox. Note that RX mailboxes - * start after TX mailboxes: - * - * HECC_MAX_RX_MBOX HECC_RX_BUFFER_MBOX No of buffer mailboxes - * 28 12 8 - * 16 20 4 + * The remaining mailboxes are used for reception and are delivered + * based on their timestamp, to avoid a hardware race when CANME is + * changed while CAN-bus traffic is being received. 
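The ti_hecc rx-offload conversion above delivers frames ordered by the per-mailbox hardware timestamp instead of by mailbox number, which closes the CANME race described in the new comment. Ordering free-running 32-bit timestamps needs a wraparound-safe comparison; the standard idiom, shown stand-alone:

#include <linux/types.h>

static inline bool stamp_before(u32 a, u32 b)
{
        return (s32)(a - b) < 0;        /* true if a is older, modulo 2^32 */
}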
*/ - #define HECC_MAX_RX_MBOX (HECC_MAX_MAILBOXES - HECC_MAX_TX_MBOX) -#define HECC_RX_BUFFER_MBOX 12 /* as per table above */ #define HECC_RX_FIRST_MBOX (HECC_MAX_MAILBOXES - 1) -#define HECC_RX_HIGH_MBOX_MASK (~(BIT(HECC_RX_BUFFER_MBOX) - 1)) /* TI HECC module registers */ #define HECC_CANME 0x0 /* Mailbox enable */ @@ -117,6 +104,9 @@ MODULE_VERSION(HECC_MODULE_VERSION); #define HECC_CANTIOCE 0x68 /* SCC only:Enhanced TX I/O control */ #define HECC_CANRIOCE 0x6C /* SCC only:Enhanced RX I/O control */ +/* TI HECC RAM registers */ +#define HECC_CANMOTS 0x80 /* Message object time stamp */ + /* Mailbox registers */ #define HECC_CANMID 0x0 #define HECC_CANMCF 0x4 @@ -193,7 +183,7 @@ static const struct can_bittiming_const ti_hecc_bittiming_const = { struct ti_hecc_priv { struct can_priv can; /* MUST be first member/field */ - struct napi_struct napi; + struct can_rx_offload offload; struct net_device *ndev; struct clk *clk; void __iomem *base; @@ -203,7 +193,6 @@ struct ti_hecc_priv { spinlock_t mbx_lock; /* CANME register needs protection */ u32 tx_head; u32 tx_tail; - u32 rx_next; struct regulator *reg_xceiver; }; @@ -227,8 +216,13 @@ static inline void hecc_write_lam(struct ti_hecc_priv *priv, u32 mbxno, u32 val) __raw_writel(val, priv->hecc_ram + mbxno * 4); } +static inline u32 hecc_read_stamp(struct ti_hecc_priv *priv, u32 mbxno) +{ + return __raw_readl(priv->hecc_ram + HECC_CANMOTS + mbxno * 4); +} + static inline void hecc_write_mbx(struct ti_hecc_priv *priv, u32 mbxno, - u32 reg, u32 val) + u32 reg, u32 val) { __raw_writel(val, priv->mbx + mbxno * 0x10 + reg); } @@ -249,13 +243,13 @@ static inline u32 hecc_read(struct ti_hecc_priv *priv, int reg) } static inline void hecc_set_bit(struct ti_hecc_priv *priv, int reg, - u32 bit_mask) + u32 bit_mask) { hecc_write(priv, reg, hecc_read(priv, reg) | bit_mask); } static inline void hecc_clear_bit(struct ti_hecc_priv *priv, int reg, - u32 bit_mask) + u32 bit_mask) { hecc_write(priv, reg, hecc_read(priv, reg) & ~bit_mask); } @@ -277,8 +271,8 @@ static int ti_hecc_set_btc(struct ti_hecc_priv *priv) if (bit_timing->brp > 4) can_btc |= HECC_CANBTC_SAM; else - netdev_warn(priv->ndev, "WARN: Triple" - "sampling not set due to h/w limitations"); + netdev_warn(priv->ndev, + "WARN: Triple sampling not set due to h/w limitations"); } can_btc |= ((bit_timing->sjw - 1) & 0x3) << 8; can_btc |= ((bit_timing->brp - 1) & 0xFF) << 16; @@ -314,8 +308,7 @@ static void ti_hecc_reset(struct net_device *ndev) /* Set change control request and wait till enabled */ hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_CCR); - /* - * INFO: It has been observed that at times CCE bit may not be + /* INFO: It has been observed that at times CCE bit may not be * set and hw seems to be ok even if this bit is not set so * timing out with a timing of 1ms to respect the specs */ @@ -325,8 +318,7 @@ static void ti_hecc_reset(struct net_device *ndev) udelay(10); } - /* - * Note: On HECC, BTC can be programmed only in initialization mode, so + /* Note: On HECC, BTC can be programmed only in initialization mode, so * it is expected that the can bittiming parameters are set via ip * utility before the device is opened */ @@ -335,13 +327,11 @@ static void ti_hecc_reset(struct net_device *ndev) /* Clear CCR (and CANMC register) and wait for CCE = 0 enable */ hecc_write(priv, HECC_CANMC, 0); - /* - * INFO: CAN net stack handles bus off and hence disabling auto-bus-on + /* INFO: CAN net stack handles bus off and hence disabling auto-bus-on * hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_ABO); */ - 
/* - * INFO: It has been observed that at times CCE bit may not be + /* INFO: It has been observed that at times CCE bit may not be * set and hw seems to be ok even if this bit is not set so */ cnt = HECC_CCE_WAIT_COUNT; @@ -374,8 +364,8 @@ static void ti_hecc_start(struct net_device *ndev) /* put HECC in initialization mode and set btc */ ti_hecc_reset(ndev); - priv->tx_head = priv->tx_tail = HECC_TX_MASK; - priv->rx_next = HECC_RX_FIRST_MBOX; + priv->tx_head = HECC_TX_MASK; + priv->tx_tail = HECC_TX_MASK; /* Enable local and global acceptance mask registers */ hecc_write(priv, HECC_CANGAM, HECC_SET_REG); @@ -401,7 +391,7 @@ static void ti_hecc_start(struct net_device *ndev) } else { hecc_write(priv, HECC_CANMIL, 0); hecc_write(priv, HECC_CANGIM, - HECC_CANGIM_DEF_MASK | HECC_CANGIM_I0EN); + HECC_CANGIM_DEF_MASK | HECC_CANGIM_I0EN); } priv->can.state = CAN_STATE_ERROR_ACTIVE; } @@ -435,7 +425,7 @@ static int ti_hecc_do_set_mode(struct net_device *ndev, enum can_mode mode) } static int ti_hecc_get_berr_counter(const struct net_device *ndev, - struct can_berr_counter *bec) + struct can_berr_counter *bec) { struct ti_hecc_priv *priv = netdev_priv(ndev); @@ -445,8 +435,7 @@ static int ti_hecc_get_berr_counter(const struct net_device *ndev, return 0; } -/* - * ti_hecc_xmit: HECC Transmit +/* ti_hecc_xmit: HECC Transmit * * The transmit mailboxes start from 0 to HECC_MAX_TX_MBOX. In HECC the * priority of the mailbox for tranmission is dependent upon priority setting @@ -484,8 +473,8 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev) spin_unlock_irqrestore(&priv->mbx_lock, flags); netif_stop_queue(ndev); netdev_err(priv->ndev, - "BUG: TX mbx not ready tx_head=%08X, tx_tail=%08X\n", - priv->tx_head, priv->tx_tail); + "BUG: TX mbx not ready tx_head=%08X, tx_tail=%08X\n", + priv->tx_head, priv->tx_tail); return NETDEV_TX_BUSY; } spin_unlock_irqrestore(&priv->mbx_lock, flags); @@ -502,10 +491,10 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev) data = (cf->can_id & CAN_SFF_MASK) << 18; hecc_write_mbx(priv, mbxno, HECC_CANMID, data); hecc_write_mbx(priv, mbxno, HECC_CANMDL, - be32_to_cpu(*(__be32 *)(cf->data))); + be32_to_cpu(*(__be32 *)(cf->data))); if (cf->can_dlc > 4) hecc_write_mbx(priv, mbxno, HECC_CANMDH, - be32_to_cpu(*(__be32 *)(cf->data + 4))); + be32_to_cpu(*(__be32 *)(cf->data + 4))); else *(u32 *)(cf->data + 4) = 0; can_put_echo_skb(skb, ndev, mbxno); @@ -513,7 +502,7 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev) spin_lock_irqsave(&priv->mbx_lock, flags); --priv->tx_head; if ((hecc_read(priv, HECC_CANME) & BIT(get_tx_head_mb(priv))) || - (priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK) { + (priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK) { netif_stop_queue(ndev); } hecc_set_bit(priv, HECC_CANME, mbx_mask); @@ -526,139 +515,57 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev) return NETDEV_TX_OK; } -static int ti_hecc_rx_pkt(struct ti_hecc_priv *priv, int mbxno) +static inline +struct ti_hecc_priv *rx_offload_to_priv(struct can_rx_offload *offload) { - struct net_device_stats *stats = &priv->ndev->stats; - struct can_frame *cf; - struct sk_buff *skb; - u32 data, mbx_mask; - unsigned long flags; + return container_of(offload, struct ti_hecc_priv, offload); +} - skb = alloc_can_skb(priv->ndev, &cf); - if (!skb) { - if (printk_ratelimit()) - netdev_err(priv->ndev, - "ti_hecc_rx_pkt: alloc_can_skb() failed\n"); - return -ENOMEM; - } +static unsigned int 
ti_hecc_mailbox_read(struct can_rx_offload *offload, + struct can_frame *cf, + u32 *timestamp, unsigned int mbxno) +{ + struct ti_hecc_priv *priv = rx_offload_to_priv(offload); + u32 data; - mbx_mask = BIT(mbxno); data = hecc_read_mbx(priv, mbxno, HECC_CANMID); if (data & HECC_CANMID_IDE) cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG; else cf->can_id = (data >> 18) & CAN_SFF_MASK; + data = hecc_read_mbx(priv, mbxno, HECC_CANMCF); if (data & HECC_CANMCF_RTR) cf->can_id |= CAN_RTR_FLAG; cf->can_dlc = get_can_dlc(data & 0xF); + data = hecc_read_mbx(priv, mbxno, HECC_CANMDL); *(__be32 *)(cf->data) = cpu_to_be32(data); if (cf->can_dlc > 4) { data = hecc_read_mbx(priv, mbxno, HECC_CANMDH); *(__be32 *)(cf->data + 4) = cpu_to_be32(data); } - spin_lock_irqsave(&priv->mbx_lock, flags); - hecc_clear_bit(priv, HECC_CANME, mbx_mask); - hecc_write(priv, HECC_CANRMP, mbx_mask); - /* enable mailbox only if it is part of rx buffer mailboxes */ - if (priv->rx_next < HECC_RX_BUFFER_MBOX) - hecc_set_bit(priv, HECC_CANME, mbx_mask); - spin_unlock_irqrestore(&priv->mbx_lock, flags); - stats->rx_bytes += cf->can_dlc; - can_led_event(priv->ndev, CAN_LED_EVENT_RX); - netif_receive_skb(skb); - stats->rx_packets++; + *timestamp = hecc_read_stamp(priv, mbxno); - return 0; -} - -/* - * ti_hecc_rx_poll - HECC receive pkts - * - * The receive mailboxes start from highest numbered mailbox till last xmit - * mailbox. On CAN frame reception the hardware places the data into highest - * numbered mailbox that matches the CAN ID filter. Since all receive mailboxes - * have same filtering (ALL CAN frames) packets will arrive in the highest - * available RX mailbox and we need to ensure in-order packet reception. - * - * To ensure the packets are received in the right order we logically divide - * the RX mailboxes into main and buffer mailboxes. Packets are received as per - * mailbox priotity (higher to lower) in the main bank and once it is full we - * disable further reception into main mailboxes. While the main mailboxes are - * processed in NAPI, further packets are received in buffer mailboxes. - * - * We maintain a RX next mailbox counter to process packets and once all main - * mailboxe packets are passed to the upper stack we enable all of them but - * continue to process packets received in buffer mailboxes. With each packet - * received from buffer mailbox we enable it immediately so as to handle the - * overflow from higher mailboxes. 
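With can_rx_offload, the driver's per-mailbox RX work shrinks to a mailbox_read() callback like ti_hecc_mailbox_read() above: fill in the frame and its timestamp and return nonzero when a frame was consumed. A minimal skeleton under that contract; example_priv and the hw_read_*() accessors are hypothetical:

#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>

struct example_priv {
        struct can_rx_offload offload;
};

u32 hw_read_id(struct example_priv *priv, unsigned int mb);     /* hypothetical */
u32 hw_read_dlc(struct example_priv *priv, unsigned int mb);    /* hypothetical */
u32 hw_read_stamp(struct example_priv *priv, unsigned int mb);  /* hypothetical */

static unsigned int example_mailbox_read(struct can_rx_offload *offload,
                                         struct can_frame *cf,
                                         u32 *timestamp, unsigned int mb)
{
        struct example_priv *priv =
                container_of(offload, struct example_priv, offload);

        cf->can_id = hw_read_id(priv, mb);
        cf->can_dlc = get_can_dlc(hw_read_dlc(priv, mb) & 0xf);
        /* payload words would be copied into cf->data here */
        *timestamp = hw_read_stamp(priv, mb);

        return 1;       /* one frame consumed from this mailbox */
}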
- */ -static int ti_hecc_rx_poll(struct napi_struct *napi, int quota) -{ - struct net_device *ndev = napi->dev; - struct ti_hecc_priv *priv = netdev_priv(ndev); - u32 num_pkts = 0; - u32 mbx_mask; - unsigned long pending_pkts, flags; - - if (!netif_running(ndev)) - return 0; - - while ((pending_pkts = hecc_read(priv, HECC_CANRMP)) && - num_pkts < quota) { - mbx_mask = BIT(priv->rx_next); /* next rx mailbox to process */ - if (mbx_mask & pending_pkts) { - if (ti_hecc_rx_pkt(priv, priv->rx_next) < 0) - return num_pkts; - ++num_pkts; - } else if (priv->rx_next > HECC_RX_BUFFER_MBOX) { - break; /* pkt not received yet */ - } - --priv->rx_next; - if (priv->rx_next == HECC_RX_BUFFER_MBOX) { - /* enable high bank mailboxes */ - spin_lock_irqsave(&priv->mbx_lock, flags); - mbx_mask = hecc_read(priv, HECC_CANME); - mbx_mask |= HECC_RX_HIGH_MBOX_MASK; - hecc_write(priv, HECC_CANME, mbx_mask); - spin_unlock_irqrestore(&priv->mbx_lock, flags); - } else if (priv->rx_next == HECC_MAX_TX_MBOX - 1) { - priv->rx_next = HECC_RX_FIRST_MBOX; - break; - } - } - - /* Enable packet interrupt if all pkts are handled */ - if (hecc_read(priv, HECC_CANRMP) == 0) { - napi_complete(napi); - /* Re-enable RX mailbox interrupts */ - mbx_mask = hecc_read(priv, HECC_CANMIM); - mbx_mask |= HECC_TX_MBOX_MASK; - hecc_write(priv, HECC_CANMIM, mbx_mask); - } else { - /* repoll is done only if whole budget is used */ - num_pkts = quota; - } - - return num_pkts; + return 1; } static int ti_hecc_error(struct net_device *ndev, int int_status, - int err_status) + int err_status) { struct ti_hecc_priv *priv = netdev_priv(ndev); - struct net_device_stats *stats = &ndev->stats; struct can_frame *cf; struct sk_buff *skb; + u32 timestamp; /* propagate the error condition to the can stack */ skb = alloc_can_err_skb(ndev, &cf); if (!skb) { if (printk_ratelimit()) netdev_err(priv->ndev, - "ti_hecc_error: alloc_can_err_skb() failed\n"); + "%s: alloc_can_err_skb() failed\n", + __func__); return -ENOMEM; } @@ -692,8 +599,7 @@ static int ti_hecc_error(struct net_device *ndev, int int_status, hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR); } - /* - * Need to check busoff condition in error status register too to + /* Need to check busoff condition in error status register too to * ensure warning interrupts don't hog the system */ if ((int_status & HECC_CANGIF_BOIF) || (err_status & HECC_CANES_BO)) { @@ -732,9 +638,8 @@ static int ti_hecc_error(struct net_device *ndev, int int_status, } } - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; - netif_rx(skb); + timestamp = hecc_read(priv, HECC_CANLNT); + can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); return 0; } @@ -744,19 +649,20 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id) struct net_device *ndev = (struct net_device *)dev_id; struct ti_hecc_priv *priv = netdev_priv(ndev); struct net_device_stats *stats = &ndev->stats; - u32 mbxno, mbx_mask, int_status, err_status; - unsigned long ack, flags; + u32 mbxno, mbx_mask, int_status, err_status, stamp; + unsigned long flags, rx_pending; int_status = hecc_read(priv, - (priv->use_hecc1int) ? HECC_CANGIF1 : HECC_CANGIF0); + priv->use_hecc1int ? 
+ HECC_CANGIF1 : HECC_CANGIF0); if (!int_status) return IRQ_NONE; err_status = hecc_read(priv, HECC_CANES); if (err_status & (HECC_BUS_ERROR | HECC_CANES_BO | - HECC_CANES_EP | HECC_CANES_EW)) - ti_hecc_error(ndev, int_status, err_status); + HECC_CANES_EP | HECC_CANES_EW)) + ti_hecc_error(ndev, int_status, err_status); if (int_status & HECC_CANGIF_GMIF) { while (priv->tx_tail - priv->tx_head > 0) { @@ -769,27 +675,27 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id) spin_lock_irqsave(&priv->mbx_lock, flags); hecc_clear_bit(priv, HECC_CANME, mbx_mask); spin_unlock_irqrestore(&priv->mbx_lock, flags); - stats->tx_bytes += hecc_read_mbx(priv, mbxno, - HECC_CANMCF) & 0xF; + stamp = hecc_read_stamp(priv, mbxno); + stats->tx_bytes += + can_rx_offload_get_echo_skb(&priv->offload, + mbxno, stamp); stats->tx_packets++; can_led_event(ndev, CAN_LED_EVENT_TX); - can_get_echo_skb(ndev, mbxno); --priv->tx_tail; } /* restart queue if wrap-up or if queue stalled on last pkt */ - if (((priv->tx_head == priv->tx_tail) && - ((priv->tx_head & HECC_TX_MASK) != HECC_TX_MASK)) || - (((priv->tx_tail & HECC_TX_MASK) == HECC_TX_MASK) && - ((priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK))) + if ((priv->tx_head == priv->tx_tail && + ((priv->tx_head & HECC_TX_MASK) != HECC_TX_MASK)) || + (((priv->tx_tail & HECC_TX_MASK) == HECC_TX_MASK) && + ((priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK))) netif_wake_queue(ndev); - /* Disable RX mailbox interrupts and let NAPI reenable them */ - if (hecc_read(priv, HECC_CANRMP)) { - ack = hecc_read(priv, HECC_CANMIM); - ack &= BIT(HECC_MAX_TX_MBOX) - 1; - hecc_write(priv, HECC_CANMIM, ack); - napi_schedule(&priv->napi); + /* offload RX mailboxes and let NAPI deliver them */ + while ((rx_pending = hecc_read(priv, HECC_CANRMP))) { + can_rx_offload_irq_offload_timestamp(&priv->offload, + rx_pending); + hecc_write(priv, HECC_CANRMP, rx_pending); } } @@ -811,7 +717,7 @@ static int ti_hecc_open(struct net_device *ndev) int err; err = request_irq(ndev->irq, ti_hecc_interrupt, IRQF_SHARED, - ndev->name, ndev); + ndev->name, ndev); if (err) { netdev_err(ndev, "error requesting interrupt\n"); return err; @@ -831,7 +737,7 @@ static int ti_hecc_open(struct net_device *ndev) can_led_event(ndev, CAN_LED_EVENT_OPEN); ti_hecc_start(ndev); - napi_enable(&priv->napi); + can_rx_offload_enable(&priv->offload); netif_start_queue(ndev); return 0; @@ -842,7 +748,7 @@ static int ti_hecc_close(struct net_device *ndev) struct ti_hecc_priv *priv = netdev_priv(ndev); netif_stop_queue(ndev); - napi_disable(&priv->napi); + can_rx_offload_disable(&priv->offload); ti_hecc_stop(ndev); free_irq(ndev->irq, ndev); close_candev(ndev); @@ -962,8 +868,6 @@ static int ti_hecc_probe(struct platform_device *pdev) goto probe_exit_candev; } priv->can.clock.freq = clk_get_rate(priv->clk); - netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll, - HECC_DEF_NAPI_WEIGHT); err = clk_prepare_enable(priv->clk); if (err) { @@ -971,19 +875,30 @@ static int ti_hecc_probe(struct platform_device *pdev) goto probe_exit_clk; } + priv->offload.mailbox_read = ti_hecc_mailbox_read; + priv->offload.mb_first = HECC_RX_FIRST_MBOX; + priv->offload.mb_last = HECC_MAX_TX_MBOX; + err = can_rx_offload_add_timestamp(ndev, &priv->offload); + if (err) { + dev_err(&pdev->dev, "can_rx_offload_add_timestamp() failed\n"); + goto probe_exit_clk; + } + err = register_candev(ndev); if (err) { dev_err(&pdev->dev, "register_candev() failed\n"); - goto probe_exit_clk; + goto probe_exit_offload; } devm_can_led_init(ndev); dev_info(&pdev->dev, "device 
registered (reg_base=%p, irq=%u)\n", - priv->base, (u32) ndev->irq); + priv->base, (u32)ndev->irq); return 0; +probe_exit_offload: + can_rx_offload_del(&priv->offload); probe_exit_clk: clk_put(priv->clk); probe_exit_candev: @@ -1000,6 +915,7 @@ static int ti_hecc_remove(struct platform_device *pdev) unregister_candev(ndev); clk_disable_unprepare(priv->clk); clk_put(priv->clk); + can_rx_offload_del(&priv->offload); free_candev(ndev); return 0; diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c index c89c7d4900d7..0f1d3e807d63 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c @@ -643,8 +643,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, return err; } - netdev = alloc_candev(sizeof(*priv) + - dev->max_tx_urbs * sizeof(*priv->tx_contexts), + netdev = alloc_candev(struct_size(priv, tx_contexts, dev->max_tx_urbs), dev->max_tx_urbs); if (!netdev) { dev_err(&dev->intf->dev, "Cannot alloc candev\n"); diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c index d200a5b0651c..39ca14b0585d 100644 --- a/drivers/net/can/vcan.c +++ b/drivers/net/can/vcan.c @@ -1,5 +1,4 @@ -/* - * vcan.c - Virtual CAN interface +/* vcan.c - Virtual CAN interface * * Copyright (c) 2002-2017 Volkswagen Group Electronic Research * All rights reserved. @@ -39,12 +38,15 @@ * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/if_ether.h> #include <linux/can.h> +#include <linux/can/can-ml.h> #include <linux/can/dev.h> #include <linux/can/skb.h> #include <linux/slab.h> @@ -57,9 +59,7 @@ MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>"); MODULE_ALIAS_RTNL_LINK(DRV_NAME); - -/* - * CAN test feature: +/* CAN test feature: * Enable the echo on driver level for testing the CAN core echo modes. * See Documentation/networking/can.rst for details. */ @@ -68,7 +68,6 @@ static bool echo; /* echo testing. Default: 0 (Off) */ module_param(echo, bool, 0444); MODULE_PARM_DESC(echo, "Echo sent frames (for testing). 
Default: 0 (Off)"); - static void vcan_rx(struct sk_buff *skb, struct net_device *dev) { struct canfd_frame *cfd = (struct canfd_frame *)skb->data; @@ -101,10 +100,8 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev) if (!echo) { /* no echo handling available inside this driver */ - if (loop) { - /* - * only count the packets here, because the + /* only count the packets here, because the * CAN core already did the echo for us */ stats->rx_packets++; @@ -117,7 +114,6 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev) /* perform standard echo handling for CAN network interfaces */ if (loop) { - skb = can_create_echo_skb(skb); if (!skb) return NETDEV_TX_OK; @@ -157,6 +153,7 @@ static void vcan_setup(struct net_device *dev) dev->addr_len = 0; dev->tx_queue_len = 0; dev->flags = IFF_NOARP; + dev->ml_priv = netdev_priv(dev); /* set flags according to driver capabilities */ if (echo) @@ -167,16 +164,17 @@ static void vcan_setup(struct net_device *dev) } static struct rtnl_link_ops vcan_link_ops __read_mostly = { - .kind = DRV_NAME, - .setup = vcan_setup, + .kind = DRV_NAME, + .priv_size = sizeof(struct can_ml_priv), + .setup = vcan_setup, }; static __init int vcan_init_module(void) { - pr_info("vcan: Virtual CAN interface driver\n"); + pr_info("Virtual CAN interface driver\n"); if (echo) - printk(KERN_INFO "vcan: enabled echo on driver level.\n"); + pr_info("enabled echo on driver level.\n"); return rtnl_link_register(&vcan_link_ops); } diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c index b2106292230e..d6ba9426be4d 100644 --- a/drivers/net/can/vxcan.c +++ b/drivers/net/can/vxcan.c @@ -18,6 +18,7 @@ #include <linux/can/dev.h> #include <linux/can/skb.h> #include <linux/can/vxcan.h> +#include <linux/can/can-ml.h> #include <linux/slab.h> #include <net/rtnetlink.h> @@ -146,6 +147,7 @@ static void vxcan_setup(struct net_device *dev) dev->flags = (IFF_NOARP|IFF_ECHO); dev->netdev_ops = &vxcan_netdev_ops; dev->needs_free_netdev = true; + dev->ml_priv = netdev_priv(dev) + ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN); } /* forward declaration for rtnl_create_link() */ @@ -281,7 +283,7 @@ static struct net *vxcan_get_link_net(const struct net_device *dev) static struct rtnl_link_ops vxcan_link_ops = { .kind = DRV_NAME, - .priv_size = sizeof(struct vxcan_priv), + .priv_size = ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN) + sizeof(struct can_ml_priv), .setup = vxcan_setup, .newlink = vxcan_newlink, .dellink = vxcan_dellink, diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index 63203ff452b5..911b34316c9d 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -50,6 +50,10 @@ enum xcan_reg { XCAN_AFR_OFFSET = 0x60, /* Acceptance Filter */ /* only on CAN FD cores */ + XCAN_F_BRPR_OFFSET = 0x088, /* Data Phase Baud Rate + * Prescalar + */ + XCAN_F_BTR_OFFSET = 0x08C, /* Data Phase Bit Timing */ XCAN_TRR_OFFSET = 0x0090, /* TX Buffer Ready Request */ XCAN_AFR_EXT_OFFSET = 0x00E0, /* Acceptance Filter */ XCAN_FSR_OFFSET = 0x00E8, /* RX FIFO Status */ @@ -62,6 +66,7 @@ enum xcan_reg { #define XCAN_FRAME_DLC_OFFSET(frame_base) ((frame_base) + 0x04) #define XCAN_FRAME_DW1_OFFSET(frame_base) ((frame_base) + 0x08) #define XCAN_FRAME_DW2_OFFSET(frame_base) ((frame_base) + 0x0C) +#define XCANFD_FRAME_DW_OFFSET(frame_base) ((frame_base) + 0x08) #define XCAN_CANFD_FRAME_SIZE 0x48 #define XCAN_TXMSG_FRAME_OFFSET(n) (XCAN_TXMSG_BASE_OFFSET + \ @@ -118,8 +123,12 @@ enum xcan_reg { #define XCAN_IDR_RTR_MASK 
0x00000001 /* Remote TX request */ #define XCAN_DLCR_DLC_MASK 0xF0000000 /* Data length code */ #define XCAN_FSR_FL_MASK 0x00003F00 /* RX Fill Level */ +#define XCAN_2_FSR_FL_MASK 0x00007F00 /* RX Fill Level */ #define XCAN_FSR_IRI_MASK 0x00000080 /* RX Increment Read Index */ #define XCAN_FSR_RI_MASK 0x0000001F /* RX Read Index */ +#define XCAN_2_FSR_RI_MASK 0x0000003F /* RX Read Index */ +#define XCAN_DLCR_EDL_MASK 0x08000000 /* EDL Mask in DLC */ +#define XCAN_DLCR_BRS_MASK 0x04000000 /* BRS Mask in DLC */ /* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */ #define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */ @@ -133,6 +142,7 @@ enum xcan_reg { /* CAN frame length constants */ #define XCAN_FRAME_MAX_DATA_LEN 8 +#define XCANFD_DW_BYTES 4 #define XCAN_TIMEOUT (1 * HZ) /* TX-FIFO-empty interrupt available */ @@ -149,7 +159,15 @@ enum xcan_reg { #define XCAN_FLAG_RX_FIFO_MULTI 0x0010 #define XCAN_FLAG_CANFD_2 0x0020 +enum xcan_ip_type { + XAXI_CAN = 0, + XZYNQ_CANPS, + XAXI_CANFD, + XAXI_CANFD_2_0, +}; + struct xcan_devtype_data { + enum xcan_ip_type cantype; unsigned int flags; const struct can_bittiming_const *bittiming_const; const char *bus_clk_name; @@ -183,7 +201,7 @@ struct xcan_priv { struct napi_struct napi; u32 (*read_reg)(const struct xcan_priv *priv, enum xcan_reg reg); void (*write_reg)(const struct xcan_priv *priv, enum xcan_reg reg, - u32 val); + u32 val); struct device *dev; void __iomem *reg_base; unsigned long irq_flags; @@ -205,6 +223,7 @@ static const struct can_bittiming_const xcan_bittiming_const = { .brp_inc = 1, }; +/* AXI CANFD Arbitration Bittiming constants as per AXI CANFD 1.0 spec */ static const struct can_bittiming_const xcan_bittiming_const_canfd = { .name = DRIVER_NAME, .tseg1_min = 1, @@ -217,6 +236,20 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd = { .brp_inc = 1, }; +/* AXI CANFD Data Bittiming constants as per AXI CANFD 1.0 specs */ +static struct can_bittiming_const xcan_data_bittiming_const_canfd = { + .name = DRIVER_NAME, + .tseg1_min = 1, + .tseg1_max = 16, + .tseg2_min = 1, + .tseg2_max = 8, + .sjw_max = 8, + .brp_min = 1, + .brp_max = 256, + .brp_inc = 1, +}; + +/* AXI CANFD 2.0 Arbitration Bittiming constants as per AXI CANFD 2.0 spec */ static const struct can_bittiming_const xcan_bittiming_const_canfd2 = { .name = DRIVER_NAME, .tseg1_min = 1, @@ -229,6 +262,19 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd2 = { .brp_inc = 1, }; +/* AXI CANFD 2.0 Data Bittiming constants as per AXI CANFD 2.0 spec */ +static struct can_bittiming_const xcan_data_bittiming_const_canfd2 = { + .name = DRIVER_NAME, + .tseg1_min = 1, + .tseg1_max = 32, + .tseg2_min = 1, + .tseg2_max = 16, + .sjw_max = 16, + .brp_min = 1, + .brp_max = 256, + .brp_inc = 1, +}; + /** * xcan_write_reg_le - Write a value to the device register little endian * @priv: Driver private data structure @@ -238,7 +284,7 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd2 = { * Write data to the paricular CAN register */ static void xcan_write_reg_le(const struct xcan_priv *priv, enum xcan_reg reg, - u32 val) + u32 val) { iowrite32(val, priv->reg_base + reg); } @@ -265,7 +311,7 @@ static u32 xcan_read_reg_le(const struct xcan_priv *priv, enum xcan_reg reg) * Write data to the paricular CAN register */ static void xcan_write_reg_be(const struct xcan_priv *priv, enum xcan_reg reg, - u32 val) + u32 val) { iowrite32be(val, priv->reg_base + reg); } @@ -343,6 +389,7 @@ static int xcan_set_bittiming(struct net_device *ndev) { struct 
xcan_priv *priv = netdev_priv(ndev); struct can_bittiming *bt = &priv->can.bittiming; + struct can_bittiming *dbt = &priv->can.data_bittiming; u32 btr0, btr1; u32 is_config_mode; @@ -372,9 +419,27 @@ static int xcan_set_bittiming(struct net_device *ndev) priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0); priv->write_reg(priv, XCAN_BTR_OFFSET, btr1); + if (priv->devtype.cantype == XAXI_CANFD || + priv->devtype.cantype == XAXI_CANFD_2_0) { + /* Setting Baud Rate prescalar value in F_BRPR Register */ + btr0 = dbt->brp - 1; + + /* Setting Time Segment 1 in BTR Register */ + btr1 = dbt->prop_seg + dbt->phase_seg1 - 1; + + /* Setting Time Segment 2 in BTR Register */ + btr1 |= (dbt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift; + + /* Setting Synchronous jump width in BTR Register */ + btr1 |= (dbt->sjw - 1) << priv->devtype.btr_sjw_shift; + + priv->write_reg(priv, XCAN_F_BRPR_OFFSET, btr0); + priv->write_reg(priv, XCAN_F_BTR_OFFSET, btr1); + } + netdev_dbg(ndev, "BRPR=0x%08x, BTR=0x%08x\n", - priv->read_reg(priv, XCAN_BRPR_OFFSET), - priv->read_reg(priv, XCAN_BTR_OFFSET)); + priv->read_reg(priv, XCAN_BRPR_OFFSET), + priv->read_reg(priv, XCAN_BTR_OFFSET)); return 0; } @@ -392,9 +457,8 @@ static int xcan_set_bittiming(struct net_device *ndev) static int xcan_chip_start(struct net_device *ndev) { struct xcan_priv *priv = netdev_priv(ndev); - u32 reg_msr, reg_sr_mask; + u32 reg_msr; int err; - unsigned long timeout; u32 ier; /* Check if it is in reset mode */ @@ -420,10 +484,8 @@ static int xcan_chip_start(struct net_device *ndev) /* Check whether it is loopback mode or normal mode */ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) { reg_msr = XCAN_MSR_LBACK_MASK; - reg_sr_mask = XCAN_SR_LBACK_MASK; } else { reg_msr = 0x0; - reg_sr_mask = XCAN_SR_NORMAL_MASK; } /* enable the first extended filter, if any, as cores with extended @@ -435,16 +497,8 @@ static int xcan_chip_start(struct net_device *ndev) priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr); priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK); - timeout = jiffies + XCAN_TIMEOUT; - while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & reg_sr_mask)) { - if (time_after(jiffies, timeout)) { - netdev_warn(ndev, - "timed out for correct mode\n"); - return -ETIMEDOUT; - } - } netdev_dbg(ndev, "status:#x%08x\n", - priv->read_reg(priv, XCAN_SR_OFFSET)); + priv->read_reg(priv, XCAN_SR_OFFSET)); priv->can.state = CAN_STATE_ERROR_ACTIVE; return 0; @@ -483,6 +537,7 @@ static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode) /** * xcan_write_frame - Write a frame to HW + * @priv: Driver private data structure * @skb: sk_buff pointer that contains data to be Txed * @frame_offset: Register offset to write the frame to */ @@ -490,7 +545,8 @@ static void xcan_write_frame(struct xcan_priv *priv, struct sk_buff *skb, int frame_offset) { u32 id, dlc, data[2] = {0, 0}; - struct can_frame *cf = (struct can_frame *)skb->data; + struct canfd_frame *cf = (struct canfd_frame *)skb->data; + u32 ramoff, dwindex = 0, i; /* Watch carefully on the bit sequence */ if (cf->can_id & CAN_EFF_FLAG) { @@ -498,7 +554,7 @@ static void xcan_write_frame(struct xcan_priv *priv, struct sk_buff *skb, id = ((cf->can_id & CAN_EFF_MASK) << XCAN_IDR_ID2_SHIFT) & XCAN_IDR_ID2_MASK; id |= (((cf->can_id & CAN_EFF_MASK) >> - (CAN_EFF_ID_BITS-CAN_SFF_ID_BITS)) << + (CAN_EFF_ID_BITS - CAN_SFF_ID_BITS)) << XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK; /* The substibute remote TX request bit should be "1" @@ -519,31 +575,51 @@ static void xcan_write_frame(struct xcan_priv *priv, struct 
sk_buff *skb, id |= XCAN_IDR_SRR_MASK; } - dlc = cf->can_dlc << XCAN_DLCR_DLC_SHIFT; - - if (cf->can_dlc > 0) - data[0] = be32_to_cpup((__be32 *)(cf->data + 0)); - if (cf->can_dlc > 4) - data[1] = be32_to_cpup((__be32 *)(cf->data + 4)); + dlc = can_len2dlc(cf->len) << XCAN_DLCR_DLC_SHIFT; + if (can_is_canfd_skb(skb)) { + if (cf->flags & CANFD_BRS) + dlc |= XCAN_DLCR_BRS_MASK; + dlc |= XCAN_DLCR_EDL_MASK; + } priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id); /* If the CAN frame is RTR frame this write triggers transmission * (not on CAN FD) */ priv->write_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_offset), dlc); - if (!(cf->can_id & CAN_RTR_FLAG)) { - priv->write_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_offset), - data[0]); - /* If the CAN frame is Standard/Extended frame this - * write triggers transmission (not on CAN FD) - */ - priv->write_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_offset), - data[1]); + if (priv->devtype.cantype == XAXI_CANFD || + priv->devtype.cantype == XAXI_CANFD_2_0) { + for (i = 0; i < cf->len; i += 4) { + ramoff = XCANFD_FRAME_DW_OFFSET(frame_offset) + + (dwindex * XCANFD_DW_BYTES); + priv->write_reg(priv, ramoff, + be32_to_cpup((__be32 *)(cf->data + i))); + dwindex++; + } + } else { + if (cf->len > 0) + data[0] = be32_to_cpup((__be32 *)(cf->data + 0)); + if (cf->len > 4) + data[1] = be32_to_cpup((__be32 *)(cf->data + 4)); + + if (!(cf->can_id & CAN_RTR_FLAG)) { + priv->write_reg(priv, + XCAN_FRAME_DW1_OFFSET(frame_offset), + data[0]); + /* If the CAN frame is Standard/Extended frame this + * write triggers transmission (not on CAN FD) + */ + priv->write_reg(priv, + XCAN_FRAME_DW2_OFFSET(frame_offset), + data[1]); + } } } /** * xcan_start_xmit_fifo - Starts the transmission (FIFO mode) + * @skb: sk_buff pointer that contains data to be Txed + * @ndev: Pointer to net_device structure * * Return: 0 on success, -ENOSPC if FIFO is full. */ @@ -580,6 +656,8 @@ static int xcan_start_xmit_fifo(struct sk_buff *skb, struct net_device *ndev) /** * xcan_start_xmit_mailbox - Starts the transmission (mailbox mode) + * @skb: sk_buff pointer that contains data to be Txed + * @ndev: Pointer to net_device structure * * Return: 0 on success, -ENOSPC if there is no space */ @@ -712,6 +790,88 @@ static int xcan_rx(struct net_device *ndev, int frame_base) } /** + * xcanfd_rx - Is called from CAN isr to complete the received + * frame processing + * @ndev: Pointer to net_device structure + * @frame_base: Register offset to the frame to be read + * + * This function is invoked from the CAN isr(poll) to process the Rx frames. It + * does minimal processing and invokes "netif_receive_skb" to complete further + * processing. + * Return: 1 on success and 0 on failure. 
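The FD transmit path above assembles the DLC register word from the socketCAN frame: the payload length is converted back to a DLC code, and the EDL/BRS bits are set only for CAN FD skbs. The same construction isolated as a helper, reusing the XCAN_DLCR_* definitions from this file:

#include <linux/can/dev.h>

static u32 example_build_dlc(const struct sk_buff *skb)
{
        const struct canfd_frame *cf = (struct canfd_frame *)skb->data;
        u32 dlc = can_len2dlc(cf->len) << XCAN_DLCR_DLC_SHIFT;

        if (can_is_canfd_skb(skb)) {
                dlc |= XCAN_DLCR_EDL_MASK;      /* extended data length */
                if (cf->flags & CANFD_BRS)
                        dlc |= XCAN_DLCR_BRS_MASK;      /* bit-rate switch */
        }

        return dlc;
}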
+ */ +static int xcanfd_rx(struct net_device *ndev, int frame_base) +{ + struct xcan_priv *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + struct canfd_frame *cf; + struct sk_buff *skb; + u32 id_xcan, dlc, data[2] = {0, 0}, dwindex = 0, i, dw_offset; + + id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base)); + dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base)); + if (dlc & XCAN_DLCR_EDL_MASK) + skb = alloc_canfd_skb(ndev, &cf); + else + skb = alloc_can_skb(ndev, (struct can_frame **)&cf); + + if (unlikely(!skb)) { + stats->rx_dropped++; + return 0; + } + + /* Change Xilinx CANFD data length format to socketCAN data + * format + */ + if (dlc & XCAN_DLCR_EDL_MASK) + cf->len = can_dlc2len((dlc & XCAN_DLCR_DLC_MASK) >> + XCAN_DLCR_DLC_SHIFT); + else + cf->len = get_can_dlc((dlc & XCAN_DLCR_DLC_MASK) >> + XCAN_DLCR_DLC_SHIFT); + + /* Change Xilinx CAN ID format to socketCAN ID format */ + if (id_xcan & XCAN_IDR_IDE_MASK) { + /* The received frame is an Extended format frame */ + cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3; + cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >> + XCAN_IDR_ID2_SHIFT; + cf->can_id |= CAN_EFF_FLAG; + if (id_xcan & XCAN_IDR_RTR_MASK) + cf->can_id |= CAN_RTR_FLAG; + } else { + /* The received frame is a standard format frame */ + cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> + XCAN_IDR_ID1_SHIFT; + if (!(dlc & XCAN_DLCR_EDL_MASK) && (id_xcan & + XCAN_IDR_SRR_MASK)) + cf->can_id |= CAN_RTR_FLAG; + } + + /* Check the frame received is FD or not*/ + if (dlc & XCAN_DLCR_EDL_MASK) { + for (i = 0; i < cf->len; i += 4) { + dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base) + + (dwindex * XCANFD_DW_BYTES); + data[0] = priv->read_reg(priv, dw_offset); + *(__be32 *)(cf->data + i) = cpu_to_be32(data[0]); + dwindex++; + } + } else { + for (i = 0; i < cf->len; i += 4) { + dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base); + data[0] = priv->read_reg(priv, dw_offset + i); + *(__be32 *)(cf->data + i) = cpu_to_be32(data[0]); + } + } + stats->rx_bytes += cf->len; + stats->rx_packets++; + netif_receive_skb(skb); + + return 1; +} + +/** * xcan_current_error_state - Get current error state from HW * @ndev: Pointer to net_device structure * @@ -924,7 +1084,7 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr) cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; } } - priv->can.can_stats.bus_error++; + priv->can.can_stats.bus_error++; } if (skb) { @@ -934,7 +1094,7 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr) } netdev_dbg(ndev, "%s: error status register:0x%x\n", - __func__, priv->read_reg(priv, XCAN_ESR_OFFSET)); + __func__, priv->read_reg(priv, XCAN_ESR_OFFSET)); } /** @@ -960,6 +1120,7 @@ static void xcan_state_interrupt(struct net_device *ndev, u32 isr) /** * xcan_rx_fifo_get_next_frame - Get register offset of next RX frame + * @priv: Driver private data structure * * Return: Register offset of the next frame in RX FIFO. 
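xcanfd_rx() above has to pick the right length decoder per frame: classic CAN clamps the DLC to 8 bytes, while the FD DLC codes 9..15 mean 12, 16, 20, 24, 32, 48 and 64 bytes. A sketch keyed on the EDL bit, again using this file's XCAN_DLCR_* definitions:

#include <linux/can/dev.h>

static u8 example_frame_len(u32 dlc_reg)
{
        u8 dlc = (dlc_reg & XCAN_DLCR_DLC_MASK) >> XCAN_DLCR_DLC_SHIFT;

        if (dlc_reg & XCAN_DLCR_EDL_MASK)
                return can_dlc2len(dlc);        /* FD: up to 64 bytes */

        return get_can_dlc(dlc);                /* classic: clamp to 8 */
}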
*/ @@ -968,7 +1129,7 @@ static int xcan_rx_fifo_get_next_frame(struct xcan_priv *priv) int offset; if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) { - u32 fsr; + u32 fsr, mask; /* clear RXOK before the is-empty check so that any newly * received frame will reassert it without a race @@ -978,13 +1139,20 @@ static int xcan_rx_fifo_get_next_frame(struct xcan_priv *priv) fsr = priv->read_reg(priv, XCAN_FSR_OFFSET); /* check if RX FIFO is empty */ - if (!(fsr & XCAN_FSR_FL_MASK)) + if (priv->devtype.flags & XCAN_FLAG_CANFD_2) + mask = XCAN_2_FSR_FL_MASK; + else + mask = XCAN_FSR_FL_MASK; + + if (!(fsr & mask)) return -ENOENT; if (priv->devtype.flags & XCAN_FLAG_CANFD_2) - offset = XCAN_RXMSG_2_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK); + offset = + XCAN_RXMSG_2_FRAME_OFFSET(fsr & XCAN_2_FSR_RI_MASK); else - offset = XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK); + offset = + XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK); } else { /* check if RX FIFO is empty */ @@ -1019,7 +1187,10 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota) while ((frame_offset = xcan_rx_fifo_get_next_frame(priv)) >= 0 && (work_done < quota)) { - work_done += xcan_rx(ndev, frame_offset); + if (xcan_rx_int_mask(priv) & XCAN_IXR_RXOK_MASK) + work_done += xcanfd_rx(ndev, frame_offset); + else + work_done += xcan_rx(ndev, frame_offset); if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) /* increment read index */ @@ -1094,8 +1265,10 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr) * via TXFEMP handling as we read TXFEMP *after* TXOK * clear to satisfy (1). */ - while ((isr & XCAN_IXR_TXOK_MASK) && !WARN_ON(++retries == 100)) { - priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK); + while ((isr & XCAN_IXR_TXOK_MASK) && + !WARN_ON(++retries == 100)) { + priv->write_reg(priv, XCAN_ICR_OFFSET, + XCAN_IXR_TXOK_MASK); isr = priv->read_reg(priv, XCAN_ISR_OFFSET); } @@ -1208,12 +1381,12 @@ static int xcan_open(struct net_device *ndev) ret = pm_runtime_get_sync(priv->dev); if (ret < 0) { netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", - __func__, ret); + __func__, ret); return ret; } ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags, - ndev->name, ndev); + ndev->name, ndev); if (ret < 0) { netdev_err(ndev, "irq allocation for CAN failed\n"); goto err; @@ -1284,7 +1457,7 @@ static int xcan_close(struct net_device *ndev) * Return: 0 on success and failure value on error */ static int xcan_get_berr_counter(const struct net_device *ndev, - struct can_berr_counter *bec) + struct can_berr_counter *bec) { struct xcan_priv *priv = netdev_priv(ndev); int ret; @@ -1292,7 +1465,7 @@ static int xcan_get_berr_counter(const struct net_device *ndev, ret = pm_runtime_get_sync(priv->dev); if (ret < 0) { netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", - __func__, ret); + __func__, ret); return ret; } @@ -1305,7 +1478,6 @@ static int xcan_get_berr_counter(const struct net_device *ndev, return 0; } - static const struct net_device_ops xcan_netdev_ops = { .ndo_open = xcan_open, .ndo_stop = xcan_close, @@ -1417,6 +1589,8 @@ static const struct dev_pm_ops xcan_dev_pm_ops = { }; static const struct xcan_devtype_data xcan_zynq_data = { + .cantype = XZYNQ_CANPS, + .flags = XCAN_FLAG_TXFEMP, .bittiming_const = &xcan_bittiming_const, .btr_ts2_shift = XCAN_BTR_TS2_SHIFT, .btr_sjw_shift = XCAN_BTR_SJW_SHIFT, @@ -1424,6 +1598,8 @@ static const struct xcan_devtype_data xcan_zynq_data = { }; static const struct xcan_devtype_data xcan_axi_data = { + .cantype = XAXI_CAN, + .flags = XCAN_FLAG_TXFEMP, 
.bittiming_const = &xcan_bittiming_const, .btr_ts2_shift = XCAN_BTR_TS2_SHIFT, .btr_sjw_shift = XCAN_BTR_SJW_SHIFT, @@ -1431,6 +1607,7 @@ static const struct xcan_devtype_data xcan_axi_data = { }; static const struct xcan_devtype_data xcan_canfd_data = { + .cantype = XAXI_CANFD, .flags = XCAN_FLAG_EXT_FILTERS | XCAN_FLAG_RXMNF | XCAN_FLAG_TX_MAILBOXES | @@ -1442,6 +1619,7 @@ static const struct xcan_devtype_data xcan_canfd_data = { }; static const struct xcan_devtype_data xcan_canfd2_data = { + .cantype = XAXI_CANFD_2_0, .flags = XCAN_FLAG_EXT_FILTERS | XCAN_FLAG_RXMNF | XCAN_FLAG_TX_MAILBOXES | @@ -1554,6 +1732,19 @@ static int xcan_probe(struct platform_device *pdev) priv->can.do_get_berr_counter = xcan_get_berr_counter; priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_BERR_REPORTING; + + if (devtype->cantype == XAXI_CANFD) + priv->can.data_bittiming_const = + &xcan_data_bittiming_const_canfd; + + if (devtype->cantype == XAXI_CANFD_2_0) + priv->can.data_bittiming_const = + &xcan_data_bittiming_const_canfd2; + + if (devtype->cantype == XAXI_CANFD || + devtype->cantype == XAXI_CANFD_2_0) + priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD; + priv->reg_base = addr; priv->tx_max = tx_max; priv->devtype = *devtype; @@ -1570,7 +1761,8 @@ static int xcan_probe(struct platform_device *pdev) /* Getting the CAN can_clk info */ priv->can_clk = devm_clk_get(&pdev->dev, "can_clk"); if (IS_ERR(priv->can_clk)) { - dev_err(&pdev->dev, "Device clock not found.\n"); + if (PTR_ERR(priv->can_clk) != -EPROBE_DEFER) + dev_err(&pdev->dev, "Device clock not found.\n"); ret = PTR_ERR(priv->can_clk); goto err_free; } @@ -1589,7 +1781,7 @@ static int xcan_probe(struct platform_device *pdev) ret = pm_runtime_get_sync(&pdev->dev); if (ret < 0) { netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", - __func__, ret); + __func__, ret); goto err_pmdisable; } diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 907af62846ba..526ba2ab66f1 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -342,6 +342,13 @@ static void b53_set_forwarding(struct b53_device *dev, int enable) b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt); mgmt |= B53_MII_DUMB_FWDG_EN; b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt); + + /* Look at B53_UC_FWD_EN and B53_MC_FWD_EN to decide whether + * frames should be flooded or not. 
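b53_br_egress_floods() in the hunk that follows is a plain read-modify-write on the per-port B53_UC_FWD_EN/B53_MC_FWD_EN enable bitmasks. The same shape reduced to a stand-alone helper; read_reg16() and write_reg16() are hypothetical accessors:

#include <linux/bits.h>
#include <linux/types.h>

u16 read_reg16(void *dev, int reg);             /* hypothetical */
void write_reg16(void *dev, int reg, u16 val);  /* hypothetical */

static void example_set_flood(void *dev, int reg, int port, bool enable)
{
        u16 mask = read_reg16(dev, reg);

        if (enable)
                mask |= BIT(port);
        else
                mask &= ~BIT(port);

        write_reg16(dev, reg, mask);
}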
+ */ + b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt); + mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN; + b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt); } static void b53_enable_vlan(struct b53_device *dev, bool enable, @@ -510,10 +517,15 @@ EXPORT_SYMBOL(b53_imp_vlan_setup); int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy) { struct b53_device *dev = ds->priv; - unsigned int cpu_port = ds->ports[port].cpu_dp->index; + unsigned int cpu_port; int ret = 0; u16 pvlan; + if (!dsa_is_user_port(ds, port)) + return 0; + + cpu_port = ds->ports[port].cpu_dp->index; + if (dev->ops->irq_enable) ret = dev->ops->irq_enable(dev, port); if (ret) @@ -1748,6 +1760,31 @@ void b53_br_fast_age(struct dsa_switch *ds, int port) } EXPORT_SYMBOL(b53_br_fast_age); +int b53_br_egress_floods(struct dsa_switch *ds, int port, + bool unicast, bool multicast) +{ + struct b53_device *dev = ds->priv; + u16 uc, mc; + + b53_read16(dev, B53_CTRL_PAGE, B53_UC_FWD_EN, &uc); + if (unicast) + uc |= BIT(port); + else + uc &= ~BIT(port); + b53_write16(dev, B53_CTRL_PAGE, B53_UC_FWD_EN, uc); + + b53_read16(dev, B53_CTRL_PAGE, B53_MC_FWD_EN, &mc); + if (multicast) + mc |= BIT(port); + else + mc &= ~BIT(port); + b53_write16(dev, B53_CTRL_PAGE, B53_MC_FWD_EN, mc); + + return 0; + +} +EXPORT_SYMBOL(b53_br_egress_floods); + static bool b53_possible_cpu_port(struct dsa_switch *ds, int port) { /* Broadcom switches will accept enabling Broadcom tags on the @@ -1948,6 +1985,7 @@ static const struct dsa_switch_ops b53_switch_ops = { .port_bridge_leave = b53_br_leave, .port_stp_state_set = b53_br_set_stp_state, .port_fast_age = b53_br_fast_age, + .port_egress_floods = b53_br_egress_floods, .port_vlan_filtering = b53_vlan_filtering, .port_vlan_prepare = b53_vlan_prepare, .port_vlan_add = b53_vlan_add, diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index f25bc80c4ffc..a7dd8acc281b 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -319,6 +319,8 @@ int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge); void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *bridge); void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state); void b53_br_fast_age(struct dsa_switch *ds, int port); +int b53_br_egress_floods(struct dsa_switch *ds, int port, + bool unicast, bool multicast); void b53_port_event(struct dsa_switch *ds, int port); void b53_phylink_validate(struct dsa_switch *ds, int port, unsigned long *supported, diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c index d9c56a779c08..0a1be5259be0 100644 --- a/drivers/net/dsa/b53/b53_srab.c +++ b/drivers/net/dsa/b53/b53_srab.c @@ -536,7 +536,6 @@ static void b53_srab_mux_init(struct platform_device *pdev) struct b53_device *dev = platform_get_drvdata(pdev); struct b53_srab_priv *priv = dev->priv; struct b53_srab_port_priv *p; - struct resource *r; unsigned int port; u32 reg, off = 0; int ret; @@ -544,8 +543,7 @@ static void b53_srab_mux_init(struct platform_device *pdev) if (dev->pdata && dev->pdata->chip_id != BCM58XX_DEVICE_ID) return; - r = platform_get_resource(pdev, IORESOURCE_MEM, 1); - priv->mux_config = devm_ioremap_resource(&pdev->dev, r); + priv->mux_config = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(priv->mux_config)) return; @@ -593,7 +591,6 @@ static int b53_srab_probe(struct platform_device *pdev) const struct of_device_id *of_id = NULL; struct b53_srab_priv *priv; struct b53_device *dev; - struct 
resource *r; if (dn) of_id = of_match_node(b53_srab_of_match, dn); @@ -610,8 +607,7 @@ static int b53_srab_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->regs = devm_ioremap_resource(&pdev->dev, r); + priv->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->regs)) return -ENOMEM; diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 28c963a21dac..26509fa37a50 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -157,6 +157,9 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port, unsigned int i; u32 reg; + if (!dsa_is_user_port(ds, port)) + return 0; + /* Clear the memory power down */ reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL); reg &= ~P_TXQ_PSM_VDD(port); @@ -1047,7 +1050,6 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) struct b53_device *dev; struct dsa_switch *ds; void __iomem **base; - struct resource *r; unsigned int i; u32 reg, rev; int ret; @@ -1113,8 +1115,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) base = &priv->core; for (i = 0; i < BCM_SF2_REGS_NUM; i++) { - r = platform_get_resource(pdev, IORESOURCE_MEM, i); - *base = devm_ioremap_resource(&pdev->dev, r); + *base = devm_platform_ioremap_resource(pdev, i); if (IS_ERR(*base)) { pr_err("unable to find register: %s\n", reg_names[i]); return PTR_ERR(*base); diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index 7a2063e7737a..bbec86b9418e 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -1079,6 +1079,9 @@ static int lan9303_port_enable(struct dsa_switch *ds, int port, { struct lan9303 *chip = ds->priv; + if (!dsa_is_user_port(ds, port)) + return 0; + return lan9303_enable_processing_port(chip, port); } @@ -1086,6 +1089,9 @@ static void lan9303_port_disable(struct dsa_switch *ds, int port) { struct lan9303 *chip = ds->priv; + if (!dsa_is_user_port(ds, port)) + return; + lan9303_disable_processing_port(chip, port); lan9303_phy_write(ds, chip->phy_addr_base + port, MII_BMCR, BMCR_PDOWN); } diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c index 4e64835deac2..a69c9b9878b7 100644 --- a/drivers/net/dsa/lantiq_gswip.c +++ b/drivers/net/dsa/lantiq_gswip.c @@ -642,6 +642,9 @@ static int gswip_port_enable(struct dsa_switch *ds, int port, struct gswip_priv *priv = ds->priv; int err; + if (!dsa_is_user_port(ds, port)) + return 0; + if (!dsa_is_cpu_port(ds, port)) { err = gswip_add_single_port_br(priv, port, true); if (err) @@ -678,6 +681,9 @@ static void gswip_port_disable(struct dsa_switch *ds, int port) { struct gswip_priv *priv = ds->priv; + if (!dsa_is_user_port(ds, port)) + return; + if (!dsa_is_cpu_port(ds, port)) { gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_DOWN, GSWIP_MDIO_PHY_LINK_MASK, @@ -1822,7 +1828,6 @@ remove_gphy: static int gswip_probe(struct platform_device *pdev) { struct gswip_priv *priv; - struct resource *gswip_res, *mdio_res, *mii_res; struct device_node *mdio_np, *gphy_fw_np; struct device *dev = &pdev->dev; int err; @@ -1833,18 +1838,15 @@ static int gswip_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; - gswip_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->gswip = devm_ioremap_resource(dev, gswip_res); + priv->gswip = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->gswip)) return PTR_ERR(priv->gswip); - mdio_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - priv->mdio = devm_ioremap_resource(dev, mdio_res); + 
priv->mdio = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(priv->mdio)) return PTR_ERR(priv->mdio); - mii_res = platform_get_resource(pdev, IORESOURCE_MEM, 2); - priv->mii = devm_ioremap_resource(dev, mii_res); + priv->mii = devm_platform_ioremap_resource(pdev, 2); if (IS_ERR(priv->mii)) return PTR_ERR(priv->mii); diff --git a/drivers/net/dsa/microchip/Kconfig b/drivers/net/dsa/microchip/Kconfig index fe0a13b79c4b..1d7870c6df3c 100644 --- a/drivers/net/dsa/microchip/Kconfig +++ b/drivers/net/dsa/microchip/Kconfig @@ -5,14 +5,37 @@ config NET_DSA_MICROCHIP_KSZ_COMMON menuconfig NET_DSA_MICROCHIP_KSZ9477 tristate "Microchip KSZ9477 series switch support" depends on NET_DSA - select NET_DSA_TAG_KSZ9477 select NET_DSA_MICROCHIP_KSZ_COMMON help This driver adds support for Microchip KSZ9477 switch chips. +config NET_DSA_MICROCHIP_KSZ9477_I2C + tristate "KSZ9477 series I2C connected switch driver" + depends on NET_DSA_MICROCHIP_KSZ9477 && I2C + select REGMAP_I2C + help + Select to enable support for registering switches configured through I2C. + config NET_DSA_MICROCHIP_KSZ9477_SPI tristate "KSZ9477 series SPI connected switch driver" depends on NET_DSA_MICROCHIP_KSZ9477 && SPI select REGMAP_SPI help Select to enable support for registering switches configured through SPI. + +menuconfig NET_DSA_MICROCHIP_KSZ8795 + tristate "Microchip KSZ8795 series switch support" + depends on NET_DSA + select NET_DSA_MICROCHIP_KSZ_COMMON + help + This driver adds support for Microchip KSZ8795 switch chips. + +config NET_DSA_MICROCHIP_KSZ8795_SPI + tristate "KSZ8795 series SPI connected switch driver" + depends on NET_DSA_MICROCHIP_KSZ8795 && SPI + select REGMAP_SPI + help + This driver accesses KSZ8795 chip through SPI. + + It is required to use the KSZ8795 switch driver as the only access + is through SPI. diff --git a/drivers/net/dsa/microchip/Makefile b/drivers/net/dsa/microchip/Makefile index 68451b02f775..929caa81e782 100644 --- a/drivers/net/dsa/microchip/Makefile +++ b/drivers/net/dsa/microchip/Makefile @@ -1,4 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON) += ksz_common.o obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ9477) += ksz9477.o +obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ9477_I2C) += ksz9477_i2c.o obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ9477_SPI) += ksz9477_spi.o +obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ8795) += ksz8795.o +obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ8795_SPI) += ksz8795_spi.o diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c new file mode 100644 index 000000000000..a23d3ffdf0c4 --- /dev/null +++ b/drivers/net/dsa/microchip/ksz8795.c @@ -0,0 +1,1310 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Microchip KSZ8795 switch driver + * + * Copyright (C) 2017 Microchip Technology Inc. 
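The b53_srab, bcm_sf2 and gswip hunks above all switch to devm_platform_ioremap_resource(), which rolls the previous two-step lookup into a single call; its effect is equivalent to this sketch:

#include <linux/device.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *example_ioremap(struct platform_device *pdev,
                                     unsigned int index)
{
        struct resource *r;

        r = platform_get_resource(pdev, IORESOURCE_MEM, index);
        return devm_ioremap_resource(&pdev->dev, r);
}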
+ * Tristram Ha <Tristram.Ha@microchip.com> + */ + +#include <linux/delay.h> +#include <linux/export.h> +#include <linux/gpio.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_data/microchip-ksz.h> +#include <linux/phy.h> +#include <linux/etherdevice.h> +#include <linux/if_bridge.h> +#include <net/dsa.h> +#include <net/switchdev.h> + +#include "ksz_common.h" +#include "ksz8795_reg.h" + +static const struct { + char string[ETH_GSTRING_LEN]; +} mib_names[TOTAL_SWITCH_COUNTER_NUM] = { + { "rx_hi" }, + { "rx_undersize" }, + { "rx_fragments" }, + { "rx_oversize" }, + { "rx_jabbers" }, + { "rx_symbol_err" }, + { "rx_crc_err" }, + { "rx_align_err" }, + { "rx_mac_ctrl" }, + { "rx_pause" }, + { "rx_bcast" }, + { "rx_mcast" }, + { "rx_ucast" }, + { "rx_64_or_less" }, + { "rx_65_127" }, + { "rx_128_255" }, + { "rx_256_511" }, + { "rx_512_1023" }, + { "rx_1024_1522" }, + { "rx_1523_2000" }, + { "rx_2001" }, + { "tx_hi" }, + { "tx_late_col" }, + { "tx_pause" }, + { "tx_bcast" }, + { "tx_mcast" }, + { "tx_ucast" }, + { "tx_deferred" }, + { "tx_total_col" }, + { "tx_exc_col" }, + { "tx_single_col" }, + { "tx_mult_col" }, + { "rx_total" }, + { "tx_total" }, + { "rx_discards" }, + { "tx_discards" }, +}; + +static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set) +{ + regmap_update_bits(dev->regmap[0], addr, bits, set ? bits : 0); +} + +static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits, + bool set) +{ + regmap_update_bits(dev->regmap[0], PORT_CTRL_ADDR(port, offset), + bits, set ? bits : 0); +} + +static int ksz8795_reset_switch(struct ksz_device *dev) +{ + /* reset switch */ + ksz_write8(dev, REG_POWER_MANAGEMENT_1, + SW_SOFTWARE_POWER_DOWN << SW_POWER_MANAGEMENT_MODE_S); + ksz_write8(dev, REG_POWER_MANAGEMENT_1, 0); + + return 0; +} + +static void ksz8795_set_prio_queue(struct ksz_device *dev, int port, int queue) +{ + u8 hi, lo; + + /* Number of queues can only be 1, 2, or 4. */ + switch (queue) { + case 4: + case 3: + queue = PORT_QUEUE_SPLIT_4; + break; + case 2: + queue = PORT_QUEUE_SPLIT_2; + break; + default: + queue = PORT_QUEUE_SPLIT_1; + } + ksz_pread8(dev, port, REG_PORT_CTRL_0, &lo); + ksz_pread8(dev, port, P_DROP_TAG_CTRL, &hi); + lo &= ~PORT_QUEUE_SPLIT_L; + if (queue & PORT_QUEUE_SPLIT_2) + lo |= PORT_QUEUE_SPLIT_L; + hi &= ~PORT_QUEUE_SPLIT_H; + if (queue & PORT_QUEUE_SPLIT_4) + hi |= PORT_QUEUE_SPLIT_H; + ksz_pwrite8(dev, port, REG_PORT_CTRL_0, lo); + ksz_pwrite8(dev, port, P_DROP_TAG_CTRL, hi); + + /* Default is port based for egress rate limit. */ + if (queue != PORT_QUEUE_SPLIT_1) + ksz_cfg(dev, REG_SW_CTRL_19, SW_OUT_RATE_LIMIT_QUEUE_BASED, + true); +} + +static void ksz8795_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, + u64 *cnt) +{ + u16 ctrl_addr; + u32 data; + u8 check; + int loop; + + ctrl_addr = addr + SWITCH_COUNTER_NUM * port; + ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ); + + mutex_lock(&dev->alu_mutex); + ksz_write16(dev, REG_IND_CTRL_0, ctrl_addr); + + /* It is almost guaranteed to always read the valid bit because of + * slow SPI speed. 
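 * (Editorial note, not part of the commit: one indirect MIB access
 * completes inside the switch much faster than the multi-byte SPI
 * transfer used to poll it, so the two-pass loop below is effectively
 * a single retry rather than a busy-wait.)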
+ */ + for (loop = 2; loop > 0; loop--) { + ksz_read8(dev, REG_IND_MIB_CHECK, &check); + + if (check & MIB_COUNTER_VALID) { + ksz_read32(dev, REG_IND_DATA_LO, &data); + if (check & MIB_COUNTER_OVERFLOW) + *cnt += MIB_COUNTER_VALUE + 1; + *cnt += data & MIB_COUNTER_VALUE; + break; + } + } + mutex_unlock(&dev->alu_mutex); +} + +static void ksz8795_r_mib_pkt(struct ksz_device *dev, int port, u16 addr, + u64 *dropped, u64 *cnt) +{ + u16 ctrl_addr; + u32 data; + u8 check; + int loop; + + addr -= SWITCH_COUNTER_NUM; + ctrl_addr = (KS_MIB_TOTAL_RX_1 - KS_MIB_TOTAL_RX_0) * port; + ctrl_addr += addr + KS_MIB_TOTAL_RX_0; + ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ); + + mutex_lock(&dev->alu_mutex); + ksz_write16(dev, REG_IND_CTRL_0, ctrl_addr); + + /* It is almost guaranteed to always read the valid bit because of + * slow SPI speed. + */ + for (loop = 2; loop > 0; loop--) { + ksz_read8(dev, REG_IND_MIB_CHECK, &check); + + if (check & MIB_COUNTER_VALID) { + ksz_read32(dev, REG_IND_DATA_LO, &data); + if (addr < 2) { + u64 total; + + total = check & MIB_TOTAL_BYTES_H; + total <<= 32; + *cnt += total; + *cnt += data; + if (check & MIB_COUNTER_OVERFLOW) { + total = MIB_TOTAL_BYTES_H + 1; + total <<= 32; + *cnt += total; + } + } else { + if (check & MIB_COUNTER_OVERFLOW) + *cnt += MIB_PACKET_DROPPED + 1; + *cnt += data & MIB_PACKET_DROPPED; + } + break; + } + } + mutex_unlock(&dev->alu_mutex); +} + +static void ksz8795_freeze_mib(struct ksz_device *dev, int port, bool freeze) +{ + /* enable the port for flush/freeze function */ + if (freeze) + ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), true); + ksz_cfg(dev, REG_SW_CTRL_6, SW_MIB_COUNTER_FREEZE, freeze); + + /* disable the port after freeze is done */ + if (!freeze) + ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), false); +} + +static void ksz8795_port_init_cnt(struct ksz_device *dev, int port) +{ + struct ksz_port_mib *mib = &dev->ports[port].mib; + + /* flush all enabled port MIB counters */ + ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), true); + ksz_cfg(dev, REG_SW_CTRL_6, SW_MIB_COUNTER_FLUSH, true); + ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), false); + + mib->cnt_ptr = 0; + + /* Some ports may not have MIB counters before SWITCH_COUNTER_NUM. */ + while (mib->cnt_ptr < dev->reg_mib_cnt) { + dev->dev_ops->r_mib_cnt(dev, port, mib->cnt_ptr, + &mib->counters[mib->cnt_ptr]); + ++mib->cnt_ptr; + } + + /* Some ports may not have MIB counters after SWITCH_COUNTER_NUM. 
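 * (Editorial note: the counters beyond SWITCH_COUNTER_NUM are the
 * per-port total and dropped packet counters, which is why this loop
 * reads them through r_mib_pkt() rather than r_mib_cnt().)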
*/ + while (mib->cnt_ptr < dev->mib_cnt) { + dev->dev_ops->r_mib_pkt(dev, port, mib->cnt_ptr, + NULL, &mib->counters[mib->cnt_ptr]); + ++mib->cnt_ptr; + } + mib->cnt_ptr = 0; + memset(mib->counters, 0, dev->mib_cnt * sizeof(u64)); +} + +static void ksz8795_r_table(struct ksz_device *dev, int table, u16 addr, + u64 *data) +{ + u16 ctrl_addr; + + ctrl_addr = IND_ACC_TABLE(table | TABLE_READ) | addr; + + mutex_lock(&dev->alu_mutex); + ksz_write16(dev, REG_IND_CTRL_0, ctrl_addr); + ksz_read64(dev, REG_IND_DATA_HI, data); + mutex_unlock(&dev->alu_mutex); +} + +static void ksz8795_w_table(struct ksz_device *dev, int table, u16 addr, + u64 data) +{ + u16 ctrl_addr; + + ctrl_addr = IND_ACC_TABLE(table) | addr; + + mutex_lock(&dev->alu_mutex); + ksz_write64(dev, REG_IND_DATA_HI, data); + ksz_write16(dev, REG_IND_CTRL_0, ctrl_addr); + mutex_unlock(&dev->alu_mutex); +} + +static int ksz8795_valid_dyn_entry(struct ksz_device *dev, u8 *data) +{ + int timeout = 100; + + do { + ksz_read8(dev, REG_IND_DATA_CHECK, data); + timeout--; + } while ((*data & DYNAMIC_MAC_TABLE_NOT_READY) && timeout); + + /* Entry is not ready for accessing. */ + if (*data & DYNAMIC_MAC_TABLE_NOT_READY) { + return -EAGAIN; + /* Entry is ready for accessing. */ + } else { + ksz_read8(dev, REG_IND_DATA_8, data); + + /* There is no valid entry in the table. */ + if (*data & DYNAMIC_MAC_TABLE_MAC_EMPTY) + return -ENXIO; + } + return 0; +} + +static int ksz8795_r_dyn_mac_table(struct ksz_device *dev, u16 addr, + u8 *mac_addr, u8 *fid, u8 *src_port, + u8 *timestamp, u16 *entries) +{ + u32 data_hi, data_lo; + u16 ctrl_addr; + u8 data; + int rc; + + ctrl_addr = IND_ACC_TABLE(TABLE_DYNAMIC_MAC | TABLE_READ) | addr; + + mutex_lock(&dev->alu_mutex); + ksz_write16(dev, REG_IND_CTRL_0, ctrl_addr); + + rc = ksz8795_valid_dyn_entry(dev, &data); + if (rc == -EAGAIN) { + if (addr == 0) + *entries = 0; + } else if (rc == -ENXIO) { + *entries = 0; + /* At least one valid entry in the table. */ + } else { + u64 buf = 0; + int cnt; + + ksz_read64(dev, REG_IND_DATA_HI, &buf); + data_hi = (u32)(buf >> 32); + data_lo = (u32)buf; + + /* Check out how many valid entry in the table. */ + cnt = data & DYNAMIC_MAC_TABLE_ENTRIES_H; + cnt <<= DYNAMIC_MAC_ENTRIES_H_S; + cnt |= (data_hi & DYNAMIC_MAC_TABLE_ENTRIES) >> + DYNAMIC_MAC_ENTRIES_S; + *entries = cnt + 1; + + *fid = (data_hi & DYNAMIC_MAC_TABLE_FID) >> + DYNAMIC_MAC_FID_S; + *src_port = (data_hi & DYNAMIC_MAC_TABLE_SRC_PORT) >> + DYNAMIC_MAC_SRC_PORT_S; + *timestamp = (data_hi & DYNAMIC_MAC_TABLE_TIMESTAMP) >> + DYNAMIC_MAC_TIMESTAMP_S; + + mac_addr[5] = (u8)data_lo; + mac_addr[4] = (u8)(data_lo >> 8); + mac_addr[3] = (u8)(data_lo >> 16); + mac_addr[2] = (u8)(data_lo >> 24); + + mac_addr[1] = (u8)data_hi; + mac_addr[0] = (u8)(data_hi >> 8); + rc = 0; + } + mutex_unlock(&dev->alu_mutex); + + return rc; +} + +static int ksz8795_r_sta_mac_table(struct ksz_device *dev, u16 addr, + struct alu_struct *alu) +{ + u32 data_hi, data_lo; + u64 data; + + ksz8795_r_table(dev, TABLE_STATIC_MAC, addr, &data); + data_hi = data >> 32; + data_lo = (u32)data; + if (data_hi & (STATIC_MAC_TABLE_VALID | STATIC_MAC_TABLE_OVERRIDE)) { + alu->mac[5] = (u8)data_lo; + alu->mac[4] = (u8)(data_lo >> 8); + alu->mac[3] = (u8)(data_lo >> 16); + alu->mac[2] = (u8)(data_lo >> 24); + alu->mac[1] = (u8)data_hi; + alu->mac[0] = (u8)(data_hi >> 8); + alu->port_forward = (data_hi & STATIC_MAC_TABLE_FWD_PORTS) >> + STATIC_MAC_FWD_PORTS_S; + alu->is_override = + (data_hi & STATIC_MAC_TABLE_OVERRIDE) ? 
1 : 0; + data_hi >>= 1; + alu->is_use_fid = (data_hi & STATIC_MAC_TABLE_USE_FID) ? 1 : 0; + alu->fid = (data_hi & STATIC_MAC_TABLE_FID) >> + STATIC_MAC_FID_S; + return 0; + } + return -ENXIO; +} + +static void ksz8795_w_sta_mac_table(struct ksz_device *dev, u16 addr, + struct alu_struct *alu) +{ + u32 data_hi, data_lo; + u64 data; + + data_lo = ((u32)alu->mac[2] << 24) | + ((u32)alu->mac[3] << 16) | + ((u32)alu->mac[4] << 8) | alu->mac[5]; + data_hi = ((u32)alu->mac[0] << 8) | alu->mac[1]; + data_hi |= (u32)alu->port_forward << STATIC_MAC_FWD_PORTS_S; + + if (alu->is_override) + data_hi |= STATIC_MAC_TABLE_OVERRIDE; + if (alu->is_use_fid) { + data_hi |= STATIC_MAC_TABLE_USE_FID; + data_hi |= (u32)alu->fid << STATIC_MAC_FID_S; + } + if (alu->is_static) + data_hi |= STATIC_MAC_TABLE_VALID; + else + data_hi &= ~STATIC_MAC_TABLE_OVERRIDE; + + data = (u64)data_hi << 32 | data_lo; + ksz8795_w_table(dev, TABLE_STATIC_MAC, addr, data); +} + +static void ksz8795_from_vlan(u16 vlan, u8 *fid, u8 *member, u8 *valid) +{ + *fid = vlan & VLAN_TABLE_FID; + *member = (vlan & VLAN_TABLE_MEMBERSHIP) >> VLAN_TABLE_MEMBERSHIP_S; + *valid = !!(vlan & VLAN_TABLE_VALID); +} + +static void ksz8795_to_vlan(u8 fid, u8 member, u8 valid, u16 *vlan) +{ + *vlan = fid; + *vlan |= (u16)member << VLAN_TABLE_MEMBERSHIP_S; + if (valid) + *vlan |= VLAN_TABLE_VALID; +} + +static void ksz8795_r_vlan_entries(struct ksz_device *dev, u16 addr) +{ + u64 data; + int i; + + ksz8795_r_table(dev, TABLE_VLAN, addr, &data); + addr *= 4; + for (i = 0; i < 4; i++) { + dev->vlan_cache[addr + i].table[0] = (u16)data; + data >>= VLAN_TABLE_S; + } +} + +static void ksz8795_r_vlan_table(struct ksz_device *dev, u16 vid, u16 *vlan) +{ + int index; + u16 *data; + u16 addr; + u64 buf; + + data = (u16 *)&buf; + addr = vid / 4; + index = vid & 3; + ksz8795_r_table(dev, TABLE_VLAN, addr, &buf); + *vlan = data[index]; +} + +static void ksz8795_w_vlan_table(struct ksz_device *dev, u16 vid, u16 vlan) +{ + int index; + u16 *data; + u16 addr; + u64 buf; + + data = (u16 *)&buf; + addr = vid / 4; + index = vid & 3; + ksz8795_r_table(dev, TABLE_VLAN, addr, &buf); + data[index] = vlan; + dev->vlan_cache[vid].table[0] = vlan; + ksz8795_w_table(dev, TABLE_VLAN, addr, buf); +} + +static void ksz8795_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val) +{ + u8 restart, speed, ctrl, link; + int processed = true; + u16 data = 0; + u8 p = phy; + + switch (reg) { + case PHY_REG_CTRL: + ksz_pread8(dev, p, P_NEG_RESTART_CTRL, &restart); + ksz_pread8(dev, p, P_SPEED_STATUS, &speed); + ksz_pread8(dev, p, P_FORCE_CTRL, &ctrl); + if (restart & PORT_PHY_LOOPBACK) + data |= PHY_LOOPBACK; + if (ctrl & PORT_FORCE_100_MBIT) + data |= PHY_SPEED_100MBIT; + if (!(ctrl & PORT_AUTO_NEG_DISABLE)) + data |= PHY_AUTO_NEG_ENABLE; + if (restart & PORT_POWER_DOWN) + data |= PHY_POWER_DOWN; + if (restart & PORT_AUTO_NEG_RESTART) + data |= PHY_AUTO_NEG_RESTART; + if (ctrl & PORT_FORCE_FULL_DUPLEX) + data |= PHY_FULL_DUPLEX; + if (speed & PORT_HP_MDIX) + data |= PHY_HP_MDIX; + if (restart & PORT_FORCE_MDIX) + data |= PHY_FORCE_MDIX; + if (restart & PORT_AUTO_MDIX_DISABLE) + data |= PHY_AUTO_MDIX_DISABLE; + if (restart & PORT_TX_DISABLE) + data |= PHY_TRANSMIT_DISABLE; + if (restart & PORT_LED_OFF) + data |= PHY_LED_DISABLE; + break; + case PHY_REG_STATUS: + ksz_pread8(dev, p, P_LINK_STATUS, &link); + data = PHY_100BTX_FD_CAPABLE | + PHY_100BTX_CAPABLE | + PHY_10BT_FD_CAPABLE | + PHY_10BT_CAPABLE | + PHY_AUTO_NEG_CAPABLE; + if (link & PORT_AUTO_NEG_COMPLETE) + data |= 
PHY_AUTO_NEG_ACKNOWLEDGE; + if (link & PORT_STAT_LINK_GOOD) + data |= PHY_LINK_STATUS; + break; + case PHY_REG_ID_1: + data = KSZ8795_ID_HI; + break; + case PHY_REG_ID_2: + data = KSZ8795_ID_LO; + break; + case PHY_REG_AUTO_NEGOTIATION: + ksz_pread8(dev, p, P_LOCAL_CTRL, &ctrl); + data = PHY_AUTO_NEG_802_3; + if (ctrl & PORT_AUTO_NEG_SYM_PAUSE) + data |= PHY_AUTO_NEG_SYM_PAUSE; + if (ctrl & PORT_AUTO_NEG_100BTX_FD) + data |= PHY_AUTO_NEG_100BTX_FD; + if (ctrl & PORT_AUTO_NEG_100BTX) + data |= PHY_AUTO_NEG_100BTX; + if (ctrl & PORT_AUTO_NEG_10BT_FD) + data |= PHY_AUTO_NEG_10BT_FD; + if (ctrl & PORT_AUTO_NEG_10BT) + data |= PHY_AUTO_NEG_10BT; + break; + case PHY_REG_REMOTE_CAPABILITY: + ksz_pread8(dev, p, P_REMOTE_STATUS, &link); + data = PHY_AUTO_NEG_802_3; + if (link & PORT_REMOTE_SYM_PAUSE) + data |= PHY_AUTO_NEG_SYM_PAUSE; + if (link & PORT_REMOTE_100BTX_FD) + data |= PHY_AUTO_NEG_100BTX_FD; + if (link & PORT_REMOTE_100BTX) + data |= PHY_AUTO_NEG_100BTX; + if (link & PORT_REMOTE_10BT_FD) + data |= PHY_AUTO_NEG_10BT_FD; + if (link & PORT_REMOTE_10BT) + data |= PHY_AUTO_NEG_10BT; + if (data & ~PHY_AUTO_NEG_802_3) + data |= PHY_REMOTE_ACKNOWLEDGE_NOT; + break; + default: + processed = false; + break; + } + if (processed) + *val = data; +} + +static void ksz8795_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val) +{ + u8 p = phy; + u8 restart, speed, ctrl, data; + + switch (reg) { + case PHY_REG_CTRL: + + /* Do not support PHY reset function. */ + if (val & PHY_RESET) + break; + ksz_pread8(dev, p, P_SPEED_STATUS, &speed); + data = speed; + if (val & PHY_HP_MDIX) + data |= PORT_HP_MDIX; + else + data &= ~PORT_HP_MDIX; + if (data != speed) + ksz_pwrite8(dev, p, P_SPEED_STATUS, data); + ksz_pread8(dev, p, P_FORCE_CTRL, &ctrl); + data = ctrl; + if (!(val & PHY_AUTO_NEG_ENABLE)) + data |= PORT_AUTO_NEG_DISABLE; + else + data &= ~PORT_AUTO_NEG_DISABLE; + + /* Fiber port does not support auto-negotiation. 
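 * (Editorial note: for ports strapped as fiber the driver keeps
 * PORT_AUTO_NEG_DISABLE set no matter what the MII write requested.)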
*/ + if (dev->ports[p].fiber) + data |= PORT_AUTO_NEG_DISABLE; + if (val & PHY_SPEED_100MBIT) + data |= PORT_FORCE_100_MBIT; + else + data &= ~PORT_FORCE_100_MBIT; + if (val & PHY_FULL_DUPLEX) + data |= PORT_FORCE_FULL_DUPLEX; + else + data &= ~PORT_FORCE_FULL_DUPLEX; + if (data != ctrl) + ksz_pwrite8(dev, p, P_FORCE_CTRL, data); + ksz_pread8(dev, p, P_NEG_RESTART_CTRL, &restart); + data = restart; + if (val & PHY_LED_DISABLE) + data |= PORT_LED_OFF; + else + data &= ~PORT_LED_OFF; + if (val & PHY_TRANSMIT_DISABLE) + data |= PORT_TX_DISABLE; + else + data &= ~PORT_TX_DISABLE; + if (val & PHY_AUTO_NEG_RESTART) + data |= PORT_AUTO_NEG_RESTART; + else + data &= ~(PORT_AUTO_NEG_RESTART); + if (val & PHY_POWER_DOWN) + data |= PORT_POWER_DOWN; + else + data &= ~PORT_POWER_DOWN; + if (val & PHY_AUTO_MDIX_DISABLE) + data |= PORT_AUTO_MDIX_DISABLE; + else + data &= ~PORT_AUTO_MDIX_DISABLE; + if (val & PHY_FORCE_MDIX) + data |= PORT_FORCE_MDIX; + else + data &= ~PORT_FORCE_MDIX; + if (val & PHY_LOOPBACK) + data |= PORT_PHY_LOOPBACK; + else + data &= ~PORT_PHY_LOOPBACK; + if (data != restart) + ksz_pwrite8(dev, p, P_NEG_RESTART_CTRL, data); + break; + case PHY_REG_AUTO_NEGOTIATION: + ksz_pread8(dev, p, P_LOCAL_CTRL, &ctrl); + data = ctrl; + data &= ~(PORT_AUTO_NEG_SYM_PAUSE | + PORT_AUTO_NEG_100BTX_FD | + PORT_AUTO_NEG_100BTX | + PORT_AUTO_NEG_10BT_FD | + PORT_AUTO_NEG_10BT); + if (val & PHY_AUTO_NEG_SYM_PAUSE) + data |= PORT_AUTO_NEG_SYM_PAUSE; + if (val & PHY_AUTO_NEG_100BTX_FD) + data |= PORT_AUTO_NEG_100BTX_FD; + if (val & PHY_AUTO_NEG_100BTX) + data |= PORT_AUTO_NEG_100BTX; + if (val & PHY_AUTO_NEG_10BT_FD) + data |= PORT_AUTO_NEG_10BT_FD; + if (val & PHY_AUTO_NEG_10BT) + data |= PORT_AUTO_NEG_10BT; + if (data != ctrl) + ksz_pwrite8(dev, p, P_LOCAL_CTRL, data); + break; + default: + break; + } +} + +static enum dsa_tag_protocol ksz8795_get_tag_protocol(struct dsa_switch *ds, + int port) +{ + return DSA_TAG_PROTO_KSZ8795; +} + +static void ksz8795_get_strings(struct dsa_switch *ds, int port, + u32 stringset, uint8_t *buf) +{ + int i; + + for (i = 0; i < TOTAL_SWITCH_COUNTER_NUM; i++) { + memcpy(buf + i * ETH_GSTRING_LEN, mib_names[i].string, + ETH_GSTRING_LEN); + } +} + +static void ksz8795_cfg_port_member(struct ksz_device *dev, int port, + u8 member) +{ + u8 data; + + ksz_pread8(dev, port, P_MIRROR_CTRL, &data); + data &= ~PORT_VLAN_MEMBERSHIP; + data |= (member & dev->port_mask); + ksz_pwrite8(dev, port, P_MIRROR_CTRL, data); + dev->ports[port].member = member; +} + +static void ksz8795_port_stp_state_set(struct dsa_switch *ds, int port, + u8 state) +{ + struct ksz_device *dev = ds->priv; + int forward = dev->member; + struct ksz_port *p; + int member = -1; + u8 data; + + p = &dev->ports[port]; + + ksz_pread8(dev, port, P_STP_CTRL, &data); + data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE); + + switch (state) { + case BR_STATE_DISABLED: + data |= PORT_LEARN_DISABLE; + if (port < SWITCH_PORT_NUM) + member = 0; + break; + case BR_STATE_LISTENING: + data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE); + if (port < SWITCH_PORT_NUM && + p->stp_state == BR_STATE_DISABLED) + member = dev->host_mask | p->vid_member; + break; + case BR_STATE_LEARNING: + data |= PORT_RX_ENABLE; + break; + case BR_STATE_FORWARDING: + data |= (PORT_TX_ENABLE | PORT_RX_ENABLE); + + /* This function is also used internally. */ + if (port == dev->cpu_port) + break; + + /* Port is a member of a bridge. 
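 * (Editorial note: a bridged port forwards within the accumulated
 * membership in dev->member; a standalone port reaches only the host
 * port plus its own VID membership.)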
*/ + if (dev->br_member & BIT(port)) { + dev->member |= BIT(port); + member = dev->member; + } else { + member = dev->host_mask | p->vid_member; + } + break; + case BR_STATE_BLOCKING: + data |= PORT_LEARN_DISABLE; + if (port < SWITCH_PORT_NUM && + p->stp_state == BR_STATE_DISABLED) + member = dev->host_mask | p->vid_member; + break; + default: + dev_err(ds->dev, "invalid STP state: %d\n", state); + return; + } + + ksz_pwrite8(dev, port, P_STP_CTRL, data); + p->stp_state = state; + if (data & PORT_RX_ENABLE) + dev->rx_ports |= BIT(port); + else + dev->rx_ports &= ~BIT(port); + if (data & PORT_TX_ENABLE) + dev->tx_ports |= BIT(port); + else + dev->tx_ports &= ~BIT(port); + + /* Port membership may share register with STP state. */ + if (member >= 0 && member != p->member) + ksz8795_cfg_port_member(dev, port, (u8)member); + + /* Check if forwarding needs to be updated. */ + if (state != BR_STATE_FORWARDING) { + if (dev->br_member & BIT(port)) + dev->member &= ~BIT(port); + } + + /* When topology has changed the function ksz_update_port_member + * should be called to modify port forwarding behavior. + */ + if (forward != dev->member) + ksz_update_port_member(dev, port); +} + +static void ksz8795_flush_dyn_mac_table(struct ksz_device *dev, int port) +{ + u8 learn[TOTAL_PORT_NUM]; + int first, index, cnt; + struct ksz_port *p; + + if ((uint)port < TOTAL_PORT_NUM) { + first = port; + cnt = port + 1; + } else { + /* Flush all ports. */ + first = 0; + cnt = dev->mib_port_cnt; + } + for (index = first; index < cnt; index++) { + p = &dev->ports[index]; + if (!p->on) + continue; + ksz_pread8(dev, index, P_STP_CTRL, &learn[index]); + if (!(learn[index] & PORT_LEARN_DISABLE)) + ksz_pwrite8(dev, index, P_STP_CTRL, + learn[index] | PORT_LEARN_DISABLE); + } + ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_DYN_MAC_TABLE, true); + for (index = first; index < cnt; index++) { + p = &dev->ports[index]; + if (!p->on) + continue; + if (!(learn[index] & PORT_LEARN_DISABLE)) + ksz_pwrite8(dev, index, P_STP_CTRL, learn[index]); + } +} + +static int ksz8795_port_vlan_filtering(struct dsa_switch *ds, int port, + bool flag) +{ + struct ksz_device *dev = ds->priv; + + ksz_cfg(dev, S_MIRROR_CTRL, SW_VLAN_ENABLE, flag); + + return 0; +} + +static void ksz8795_port_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + struct ksz_device *dev = ds->priv; + u16 data, vid, new_pvid = 0; + u8 fid, member, valid; + + ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged); + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { + ksz8795_r_vlan_table(dev, vid, &data); + ksz8795_from_vlan(data, &fid, &member, &valid); + + /* First time to setup the VLAN entry. */ + if (!valid) { + /* Need to find a way to map VID to FID. 
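 * (Editorial note: as an interim measure every new entry gets FID 1,
 * so address learning is not yet isolated per VLAN.)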
*/ + fid = 1; + valid = 1; + } + member |= BIT(port); + + ksz8795_to_vlan(fid, member, valid, &data); + ksz8795_w_vlan_table(dev, vid, data); + + /* change PVID */ + if (vlan->flags & BRIDGE_VLAN_INFO_PVID) + new_pvid = vid; + } + + if (new_pvid) { + ksz_pread16(dev, port, REG_PORT_CTRL_VID, &vid); + vid &= 0xfff; + vid |= new_pvid; + ksz_pwrite16(dev, port, REG_PORT_CTRL_VID, vid); + } +} + +static int ksz8795_port_vlan_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + struct ksz_device *dev = ds->priv; + u16 data, vid, pvid, new_pvid = 0; + u8 fid, member, valid; + + ksz_pread16(dev, port, REG_PORT_CTRL_VID, &pvid); + pvid = pvid & 0xFFF; + + ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged); + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { + ksz8795_r_vlan_table(dev, vid, &data); + ksz8795_from_vlan(data, &fid, &member, &valid); + + member &= ~BIT(port); + + /* Invalidate the entry if no more member. */ + if (!member) { + fid = 0; + valid = 0; + } + + if (pvid == vid) + new_pvid = 1; + + ksz8795_to_vlan(fid, member, valid, &data); + ksz8795_w_vlan_table(dev, vid, data); + } + + if (new_pvid != pvid) + ksz_pwrite16(dev, port, REG_PORT_CTRL_VID, pvid); + + return 0; +} + +static int ksz8795_port_mirror_add(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror, + bool ingress) +{ + struct ksz_device *dev = ds->priv; + + if (ingress) { + ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true); + dev->mirror_rx |= BIT(port); + } else { + ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, true); + dev->mirror_tx |= BIT(port); + } + + ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_SNIFFER, false); + + /* configure mirror port */ + if (dev->mirror_rx || dev->mirror_tx) + ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL, + PORT_MIRROR_SNIFFER, true); + + return 0; +} + +static void ksz8795_port_mirror_del(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror) +{ + struct ksz_device *dev = ds->priv; + u8 data; + + if (mirror->ingress) { + ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, false); + dev->mirror_rx &= ~BIT(port); + } else { + ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, false); + dev->mirror_tx &= ~BIT(port); + } + + ksz_pread8(dev, port, P_MIRROR_CTRL, &data); + + if (!dev->mirror_rx && !dev->mirror_tx) + ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL, + PORT_MIRROR_SNIFFER, false); +} + +static void ksz8795_port_setup(struct ksz_device *dev, int port, bool cpu_port) +{ + struct ksz_port *p = &dev->ports[port]; + u8 data8, member; + + /* enable broadcast storm limit */ + ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true); + + ksz8795_set_prio_queue(dev, port, 4); + + /* disable DiffServ priority */ + ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_DIFFSERV_ENABLE, false); + + /* replace priority */ + ksz_port_cfg(dev, port, P_802_1P_CTRL, PORT_802_1P_REMAPPING, false); + + /* enable 802.1p priority */ + ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_ENABLE, true); + + if (cpu_port) { + /* Configure MII interface for proper network communication. 
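 * (Editorial note: the interface type, the RGMII delay bits and the
 * 1 Gbps mode bit are all derived from dev->interface in the switch
 * statement that follows.)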
*/ + ksz_read8(dev, REG_PORT_5_CTRL_6, &data8); + data8 &= ~PORT_INTERFACE_TYPE; + data8 &= ~PORT_GMII_1GPS_MODE; + switch (dev->interface) { + case PHY_INTERFACE_MODE_MII: + p->phydev.speed = SPEED_100; + break; + case PHY_INTERFACE_MODE_RMII: + data8 |= PORT_INTERFACE_RMII; + p->phydev.speed = SPEED_100; + break; + case PHY_INTERFACE_MODE_GMII: + data8 |= PORT_GMII_1GPS_MODE; + data8 |= PORT_INTERFACE_GMII; + p->phydev.speed = SPEED_1000; + break; + default: + data8 &= ~PORT_RGMII_ID_IN_ENABLE; + data8 &= ~PORT_RGMII_ID_OUT_ENABLE; + if (dev->interface == PHY_INTERFACE_MODE_RGMII_ID || + dev->interface == PHY_INTERFACE_MODE_RGMII_RXID) + data8 |= PORT_RGMII_ID_IN_ENABLE; + if (dev->interface == PHY_INTERFACE_MODE_RGMII_ID || + dev->interface == PHY_INTERFACE_MODE_RGMII_TXID) + data8 |= PORT_RGMII_ID_OUT_ENABLE; + data8 |= PORT_GMII_1GPS_MODE; + data8 |= PORT_INTERFACE_RGMII; + p->phydev.speed = SPEED_1000; + break; + } + ksz_write8(dev, REG_PORT_5_CTRL_6, data8); + p->phydev.duplex = 1; + + member = dev->port_mask; + dev->on_ports = dev->host_mask; + dev->live_ports = dev->host_mask; + } else { + member = dev->host_mask | p->vid_member; + dev->on_ports |= BIT(port); + + /* Link was detected before port is enabled. */ + if (p->phydev.link) + dev->live_ports |= BIT(port); + } + ksz8795_cfg_port_member(dev, port, member); +} + +static void ksz8795_config_cpu_port(struct dsa_switch *ds) +{ + struct ksz_device *dev = ds->priv; + struct ksz_port *p; + u8 remote; + int i; + + ds->num_ports = dev->port_cnt + 1; + + /* Switch marks the maximum frame with extra byte as oversize. */ + ksz_cfg(dev, REG_SW_CTRL_2, SW_LEGAL_PACKET_DISABLE, true); + ksz_cfg(dev, S_TAIL_TAG_CTRL, SW_TAIL_TAG_ENABLE, true); + + p = &dev->ports[dev->cpu_port]; + p->vid_member = dev->port_mask; + p->on = 1; + + ksz8795_port_setup(dev, dev->cpu_port, true); + dev->member = dev->host_mask; + + for (i = 0; i < SWITCH_PORT_NUM; i++) { + p = &dev->ports[i]; + + /* Initialize to non-zero so that ksz_cfg_port_member() will + * be called. + */ + p->vid_member = BIT(i); + p->member = dev->port_mask; + ksz8795_port_stp_state_set(ds, i, BR_STATE_DISABLED); + + /* Last port may be disabled. */ + if (i == dev->port_cnt) + break; + p->on = 1; + p->phy = 1; + } + for (i = 0; i < dev->phy_port_cnt; i++) { + p = &dev->ports[i]; + if (!p->on) + continue; + ksz_pread8(dev, i, P_REMOTE_STATUS, &remote); + if (remote & PORT_FIBER_MODE) + p->fiber = 1; + if (p->fiber) + ksz_port_cfg(dev, i, P_STP_CTRL, PORT_FORCE_FLOW_CTRL, + true); + else + ksz_port_cfg(dev, i, P_STP_CTRL, PORT_FORCE_FLOW_CTRL, + false); + } +} + +static int ksz8795_setup(struct dsa_switch *ds) +{ + struct ksz_device *dev = ds->priv; + struct alu_struct alu; + int i, ret = 0; + + dev->vlan_cache = devm_kcalloc(dev->dev, sizeof(struct vlan_table), + dev->num_vlans, GFP_KERNEL); + if (!dev->vlan_cache) + return -ENOMEM; + + ret = ksz8795_reset_switch(dev); + if (ret) { + dev_err(ds->dev, "failed to reset switch\n"); + return ret; + } + + ksz_cfg(dev, S_REPLACE_VID_CTRL, SW_FLOW_CTRL, true); + + /* Enable automatic fast aging when link changed detected. */ + ksz_cfg(dev, S_LINK_AGING_CTRL, SW_LINK_AUTO_AGING, true); + + /* Enable aggressive back off algorithm in half duplex mode. */ + regmap_update_bits(dev->regmap[0], REG_SW_CTRL_1, + SW_AGGR_BACKOFF, SW_AGGR_BACKOFF); + + /* + * Make sure unicast VLAN boundary is set as default and + * enable no excessive collision drop. 
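 * (Editorial note: that is, keep unicast traffic confined to its
 * VLAN and do not drop frames after excessive half-duplex collisions;
 * both bits are set in the regmap update that follows.)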
+ */ + regmap_update_bits(dev->regmap[0], REG_SW_CTRL_2, + UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP, + UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP); + + ksz8795_config_cpu_port(ds); + + ksz_cfg(dev, REG_SW_CTRL_2, MULTICAST_STORM_DISABLE, true); + + ksz_cfg(dev, S_REPLACE_VID_CTRL, SW_REPLACE_VID, false); + + ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false); + + /* set broadcast storm protection 10% rate */ + regmap_update_bits(dev->regmap[1], S_REPLACE_VID_CTRL, + BROADCAST_STORM_RATE, + (BROADCAST_STORM_VALUE * + BROADCAST_STORM_PROT_RATE) / 100); + + for (i = 0; i < VLAN_TABLE_ENTRIES; i++) + ksz8795_r_vlan_entries(dev, i); + + /* Setup STP address for STP operation. */ + memset(&alu, 0, sizeof(alu)); + ether_addr_copy(alu.mac, eth_stp_addr); + alu.is_static = true; + alu.is_override = true; + alu.port_forward = dev->host_mask; + + ksz8795_w_sta_mac_table(dev, 0, &alu); + + ksz_init_mib_timer(dev); + + return 0; +} + +static const struct dsa_switch_ops ksz8795_switch_ops = { + .get_tag_protocol = ksz8795_get_tag_protocol, + .setup = ksz8795_setup, + .phy_read = ksz_phy_read16, + .phy_write = ksz_phy_write16, + .adjust_link = ksz_adjust_link, + .port_enable = ksz_enable_port, + .port_disable = ksz_disable_port, + .get_strings = ksz8795_get_strings, + .get_ethtool_stats = ksz_get_ethtool_stats, + .get_sset_count = ksz_sset_count, + .port_bridge_join = ksz_port_bridge_join, + .port_bridge_leave = ksz_port_bridge_leave, + .port_stp_state_set = ksz8795_port_stp_state_set, + .port_fast_age = ksz_port_fast_age, + .port_vlan_filtering = ksz8795_port_vlan_filtering, + .port_vlan_prepare = ksz_port_vlan_prepare, + .port_vlan_add = ksz8795_port_vlan_add, + .port_vlan_del = ksz8795_port_vlan_del, + .port_fdb_dump = ksz_port_fdb_dump, + .port_mdb_prepare = ksz_port_mdb_prepare, + .port_mdb_add = ksz_port_mdb_add, + .port_mdb_del = ksz_port_mdb_del, + .port_mirror_add = ksz8795_port_mirror_add, + .port_mirror_del = ksz8795_port_mirror_del, +}; + +static u32 ksz8795_get_port_addr(int port, int offset) +{ + return PORT_CTRL_ADDR(port, offset); +} + +static int ksz8795_switch_detect(struct ksz_device *dev) +{ + u8 id1, id2; + u16 id16; + int ret; + + /* read chip id */ + ret = ksz_read16(dev, REG_CHIP_ID0, &id16); + if (ret) + return ret; + + id1 = id16 >> 8; + id2 = id16 & SW_CHIP_ID_M; + if (id1 != FAMILY_ID || + (id2 != CHIP_ID_94 && id2 != CHIP_ID_95)) + return -ENODEV; + + dev->mib_port_cnt = TOTAL_PORT_NUM; + dev->phy_port_cnt = SWITCH_PORT_NUM; + dev->port_cnt = SWITCH_PORT_NUM; + + if (id2 == CHIP_ID_95) { + u8 val; + + id2 = 0x95; + ksz_read8(dev, REG_PORT_1_STATUS_0, &val); + if (val & PORT_FIBER_MODE) + id2 = 0x65; + } else if (id2 == CHIP_ID_94) { + dev->port_cnt--; + dev->last_port = dev->port_cnt; + id2 = 0x94; + } + id16 &= ~0xff; + id16 |= id2; + dev->chip_id = id16; + + dev->cpu_port = dev->mib_port_cnt - 1; + dev->host_mask = BIT(dev->cpu_port); + + return 0; +} + +struct ksz_chip_data { + u16 chip_id; + const char *dev_name; + int num_vlans; + int num_alus; + int num_statics; + int cpu_ports; + int port_cnt; +}; + +static const struct ksz_chip_data ksz8795_switch_chips[] = { + { + .chip_id = 0x8795, + .dev_name = "KSZ8795", + .num_vlans = 4096, + .num_alus = 0, + .num_statics = 8, + .cpu_ports = 0x10, /* can be configured as cpu port */ + .port_cnt = 4, /* total physical port count */ + }, + { + .chip_id = 0x8794, + .dev_name = "KSZ8794", + .num_vlans = 4096, + .num_alus = 0, + .num_statics = 8, + .cpu_ports = 0x10, /* can be configured as cpu port */ + .port_cnt = 3, /* 
total physical port count */ + }, + { + .chip_id = 0x8765, + .dev_name = "KSZ8765", + .num_vlans = 4096, + .num_alus = 0, + .num_statics = 8, + .cpu_ports = 0x10, /* can be configured as cpu port */ + .port_cnt = 4, /* total physical port count */ + }, +}; + +static int ksz8795_switch_init(struct ksz_device *dev) +{ + int i; + + mutex_init(&dev->stats_mutex); + mutex_init(&dev->alu_mutex); + mutex_init(&dev->vlan_mutex); + + dev->ds->ops = &ksz8795_switch_ops; + + for (i = 0; i < ARRAY_SIZE(ksz8795_switch_chips); i++) { + const struct ksz_chip_data *chip = &ksz8795_switch_chips[i]; + + if (dev->chip_id == chip->chip_id) { + dev->name = chip->dev_name; + dev->num_vlans = chip->num_vlans; + dev->num_alus = chip->num_alus; + dev->num_statics = chip->num_statics; + dev->port_cnt = chip->port_cnt; + dev->cpu_ports = chip->cpu_ports; + + break; + } + } + + /* no switch found */ + if (!dev->cpu_ports) + return -ENODEV; + + dev->port_mask = BIT(dev->port_cnt) - 1; + dev->port_mask |= dev->host_mask; + + dev->reg_mib_cnt = SWITCH_COUNTER_NUM; + dev->mib_cnt = TOTAL_SWITCH_COUNTER_NUM; + + i = dev->mib_port_cnt; + dev->ports = devm_kzalloc(dev->dev, sizeof(struct ksz_port) * i, + GFP_KERNEL); + if (!dev->ports) + return -ENOMEM; + for (i = 0; i < dev->mib_port_cnt; i++) { + mutex_init(&dev->ports[i].mib.cnt_mutex); + dev->ports[i].mib.counters = + devm_kzalloc(dev->dev, + sizeof(u64) * + (TOTAL_SWITCH_COUNTER_NUM + 1), + GFP_KERNEL); + if (!dev->ports[i].mib.counters) + return -ENOMEM; + } + + return 0; +} + +static void ksz8795_switch_exit(struct ksz_device *dev) +{ + ksz8795_reset_switch(dev); +} + +static const struct ksz_dev_ops ksz8795_dev_ops = { + .get_port_addr = ksz8795_get_port_addr, + .cfg_port_member = ksz8795_cfg_port_member, + .flush_dyn_mac_table = ksz8795_flush_dyn_mac_table, + .port_setup = ksz8795_port_setup, + .r_phy = ksz8795_r_phy, + .w_phy = ksz8795_w_phy, + .r_dyn_mac_table = ksz8795_r_dyn_mac_table, + .r_sta_mac_table = ksz8795_r_sta_mac_table, + .w_sta_mac_table = ksz8795_w_sta_mac_table, + .r_mib_cnt = ksz8795_r_mib_cnt, + .r_mib_pkt = ksz8795_r_mib_pkt, + .freeze_mib = ksz8795_freeze_mib, + .port_init_cnt = ksz8795_port_init_cnt, + .shutdown = ksz8795_reset_switch, + .detect = ksz8795_switch_detect, + .init = ksz8795_switch_init, + .exit = ksz8795_switch_exit, +}; + +int ksz8795_switch_register(struct ksz_device *dev) +{ + return ksz_switch_register(dev, &ksz8795_dev_ops); +} +EXPORT_SYMBOL(ksz8795_switch_register); + +MODULE_AUTHOR("Tristram Ha <Tristram.Ha@microchip.com>"); +MODULE_DESCRIPTION("Microchip KSZ8795 Series Switch DSA Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8795_reg.h new file mode 100644 index 000000000000..3a50462df8fa --- /dev/null +++ b/drivers/net/dsa/microchip/ksz8795_reg.h @@ -0,0 +1,1004 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Microchip KSZ8795 register definitions + * + * Copyright (c) 2017 Microchip Technology Inc. 
+ * Tristram Ha <Tristram.Ha@microchip.com> + */ + +#ifndef __KSZ8795_REG_H +#define __KSZ8795_REG_H + +#define KS_PORT_M 0x1F + +#define KS_PRIO_M 0x3 +#define KS_PRIO_S 2 + +#define REG_CHIP_ID0 0x00 + +#define FAMILY_ID 0x87 + +#define REG_CHIP_ID1 0x01 + +#define SW_CHIP_ID_M 0xF0 +#define SW_CHIP_ID_S 4 +#define SW_REVISION_M 0x0E +#define SW_REVISION_S 1 +#define SW_START 0x01 + +#define CHIP_ID_94 0x60 +#define CHIP_ID_95 0x90 + +#define REG_SW_CTRL_0 0x02 + +#define SW_NEW_BACKOFF BIT(7) +#define SW_GLOBAL_RESET BIT(6) +#define SW_FLUSH_DYN_MAC_TABLE BIT(5) +#define SW_FLUSH_STA_MAC_TABLE BIT(4) +#define SW_LINK_AUTO_AGING BIT(0) + +#define REG_SW_CTRL_1 0x03 + +#define SW_HUGE_PACKET BIT(6) +#define SW_TX_FLOW_CTRL_DISABLE BIT(5) +#define SW_RX_FLOW_CTRL_DISABLE BIT(4) +#define SW_CHECK_LENGTH BIT(3) +#define SW_AGING_ENABLE BIT(2) +#define SW_FAST_AGING BIT(1) +#define SW_AGGR_BACKOFF BIT(0) + +#define REG_SW_CTRL_2 0x04 + +#define UNICAST_VLAN_BOUNDARY BIT(7) +#define MULTICAST_STORM_DISABLE BIT(6) +#define SW_BACK_PRESSURE BIT(5) +#define FAIR_FLOW_CTRL BIT(4) +#define NO_EXC_COLLISION_DROP BIT(3) +#define SW_LEGAL_PACKET_DISABLE BIT(1) + +#define REG_SW_CTRL_3 0x05 + #define WEIGHTED_FAIR_QUEUE_ENABLE BIT(3) + +#define SW_VLAN_ENABLE BIT(7) +#define SW_IGMP_SNOOP BIT(6) +#define SW_MIRROR_RX_TX BIT(0) + +#define REG_SW_CTRL_4 0x06 + +#define SW_HALF_DUPLEX_FLOW_CTRL BIT(7) +#define SW_HALF_DUPLEX BIT(6) +#define SW_FLOW_CTRL BIT(5) +#define SW_10_MBIT BIT(4) +#define SW_REPLACE_VID BIT(3) +#define BROADCAST_STORM_RATE_HI 0x07 + +#define REG_SW_CTRL_5 0x07 + +#define BROADCAST_STORM_RATE_LO 0xFF +#define BROADCAST_STORM_RATE 0x07FF + +#define REG_SW_CTRL_6 0x08 + +#define SW_MIB_COUNTER_FLUSH BIT(7) +#define SW_MIB_COUNTER_FREEZE BIT(6) +#define SW_MIB_COUNTER_CTRL_ENABLE KS_PORT_M + +#define REG_SW_CTRL_9 0x0B + +#define SPI_CLK_125_MHZ 0x80 +#define SPI_CLK_62_5_MHZ 0x40 +#define SPI_CLK_31_25_MHZ 0x00 + +#define SW_LED_MODE_M 0x3 +#define SW_LED_MODE_S 4 +#define SW_LED_LINK_ACT_SPEED 0 +#define SW_LED_LINK_ACT 1 +#define SW_LED_LINK_ACT_DUPLEX 2 +#define SW_LED_LINK_DUPLEX 3 + +#define REG_SW_CTRL_10 0x0C + +#define SW_TAIL_TAG_ENABLE BIT(1) +#define SW_PASS_PAUSE BIT(0) + +#define REG_SW_CTRL_11 0x0D + +#define REG_POWER_MANAGEMENT_1 0x0E + +#define SW_PLL_POWER_DOWN BIT(5) +#define SW_POWER_MANAGEMENT_MODE_M 0x3 +#define SW_POWER_MANAGEMENT_MODE_S 3 +#define SW_POWER_NORMAL 0 +#define SW_ENERGY_DETECTION 1 +#define SW_SOFTWARE_POWER_DOWN 2 + +#define REG_POWER_MANAGEMENT_2 0x0F + +#define REG_PORT_1_CTRL_0 0x10 +#define REG_PORT_2_CTRL_0 0x20 +#define REG_PORT_3_CTRL_0 0x30 +#define REG_PORT_4_CTRL_0 0x40 +#define REG_PORT_5_CTRL_0 0x50 + +#define PORT_BROADCAST_STORM BIT(7) +#define PORT_DIFFSERV_ENABLE BIT(6) +#define PORT_802_1P_ENABLE BIT(5) +#define PORT_BASED_PRIO_S 3 +#define PORT_BASED_PRIO_M KS_PRIO_M +#define PORT_BASED_PRIO_0 0 +#define PORT_BASED_PRIO_1 1 +#define PORT_BASED_PRIO_2 2 +#define PORT_BASED_PRIO_3 3 +#define PORT_INSERT_TAG BIT(2) +#define PORT_REMOVE_TAG BIT(1) +#define PORT_QUEUE_SPLIT_L BIT(0) + +#define REG_PORT_1_CTRL_1 0x11 +#define REG_PORT_2_CTRL_1 0x21 +#define REG_PORT_3_CTRL_1 0x31 +#define REG_PORT_4_CTRL_1 0x41 +#define REG_PORT_5_CTRL_1 0x51 + +#define PORT_MIRROR_SNIFFER BIT(7) +#define PORT_MIRROR_RX BIT(6) +#define PORT_MIRROR_TX BIT(5) +#define PORT_VLAN_MEMBERSHIP KS_PORT_M + +#define REG_PORT_1_CTRL_2 0x12 +#define REG_PORT_2_CTRL_2 0x22 +#define REG_PORT_3_CTRL_2 0x32 +#define REG_PORT_4_CTRL_2 0x42 +#define REG_PORT_5_CTRL_2 
0x52 + +#define PORT_802_1P_REMAPPING BIT(7) +#define PORT_INGRESS_FILTER BIT(6) +#define PORT_DISCARD_NON_VID BIT(5) +#define PORT_FORCE_FLOW_CTRL BIT(4) +#define PORT_BACK_PRESSURE BIT(3) +#define PORT_TX_ENABLE BIT(2) +#define PORT_RX_ENABLE BIT(1) +#define PORT_LEARN_DISABLE BIT(0) + +#define REG_PORT_1_CTRL_3 0x13 +#define REG_PORT_2_CTRL_3 0x23 +#define REG_PORT_3_CTRL_3 0x33 +#define REG_PORT_4_CTRL_3 0x43 +#define REG_PORT_5_CTRL_3 0x53 +#define REG_PORT_1_CTRL_4 0x14 +#define REG_PORT_2_CTRL_4 0x24 +#define REG_PORT_3_CTRL_4 0x34 +#define REG_PORT_4_CTRL_4 0x44 +#define REG_PORT_5_CTRL_4 0x54 + +#define PORT_DEFAULT_VID 0x0001 + +#define REG_PORT_1_CTRL_5 0x15 +#define REG_PORT_2_CTRL_5 0x25 +#define REG_PORT_3_CTRL_5 0x35 +#define REG_PORT_4_CTRL_5 0x45 +#define REG_PORT_5_CTRL_5 0x55 + +#define PORT_ACL_ENABLE BIT(2) +#define PORT_AUTHEN_MODE 0x3 +#define PORT_AUTHEN_PASS 0 +#define PORT_AUTHEN_BLOCK 1 +#define PORT_AUTHEN_TRAP 2 + +#define REG_PORT_5_CTRL_6 0x56 + +#define PORT_MII_INTERNAL_CLOCK BIT(7) +#define PORT_GMII_1GPS_MODE BIT(6) +#define PORT_RGMII_ID_IN_ENABLE BIT(4) +#define PORT_RGMII_ID_OUT_ENABLE BIT(3) +#define PORT_GMII_MAC_MODE BIT(2) +#define PORT_INTERFACE_TYPE 0x3 +#define PORT_INTERFACE_MII 0 +#define PORT_INTERFACE_RMII 1 +#define PORT_INTERFACE_GMII 2 +#define PORT_INTERFACE_RGMII 3 + +#define REG_PORT_1_CTRL_7 0x17 +#define REG_PORT_2_CTRL_7 0x27 +#define REG_PORT_3_CTRL_7 0x37 +#define REG_PORT_4_CTRL_7 0x47 + +#define PORT_AUTO_NEG_ASYM_PAUSE BIT(5) +#define PORT_AUTO_NEG_SYM_PAUSE BIT(4) +#define PORT_AUTO_NEG_100BTX_FD BIT(3) +#define PORT_AUTO_NEG_100BTX BIT(2) +#define PORT_AUTO_NEG_10BT_FD BIT(1) +#define PORT_AUTO_NEG_10BT BIT(0) + +#define REG_PORT_1_STATUS_0 0x18 +#define REG_PORT_2_STATUS_0 0x28 +#define REG_PORT_3_STATUS_0 0x38 +#define REG_PORT_4_STATUS_0 0x48 + +/* For KSZ8765. 
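 * (Editorial note: this fiber-mode status bit is also what
 * ksz8795_switch_detect() reads from port 1 to tell a KSZ8765 apart
 * from a KSZ8795.)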
*/ +#define PORT_FIBER_MODE BIT(7) + +#define PORT_REMOTE_ASYM_PAUSE BIT(5) +#define PORT_REMOTE_SYM_PAUSE BIT(4) +#define PORT_REMOTE_100BTX_FD BIT(3) +#define PORT_REMOTE_100BTX BIT(2) +#define PORT_REMOTE_10BT_FD BIT(1) +#define PORT_REMOTE_10BT BIT(0) + +#define REG_PORT_1_STATUS_1 0x19 +#define REG_PORT_2_STATUS_1 0x29 +#define REG_PORT_3_STATUS_1 0x39 +#define REG_PORT_4_STATUS_1 0x49 + +#define PORT_HP_MDIX BIT(7) +#define PORT_REVERSED_POLARITY BIT(5) +#define PORT_TX_FLOW_CTRL BIT(4) +#define PORT_RX_FLOW_CTRL BIT(3) +#define PORT_STAT_SPEED_100MBIT BIT(2) +#define PORT_STAT_FULL_DUPLEX BIT(1) + +#define PORT_REMOTE_FAULT BIT(0) + +#define REG_PORT_1_LINK_MD_CTRL 0x1A +#define REG_PORT_2_LINK_MD_CTRL 0x2A +#define REG_PORT_3_LINK_MD_CTRL 0x3A +#define REG_PORT_4_LINK_MD_CTRL 0x4A + +#define PORT_CABLE_10M_SHORT BIT(7) +#define PORT_CABLE_DIAG_RESULT_M 0x3 +#define PORT_CABLE_DIAG_RESULT_S 5 +#define PORT_CABLE_STAT_NORMAL 0 +#define PORT_CABLE_STAT_OPEN 1 +#define PORT_CABLE_STAT_SHORT 2 +#define PORT_CABLE_STAT_FAILED 3 +#define PORT_START_CABLE_DIAG BIT(4) +#define PORT_FORCE_LINK BIT(3) +#define PORT_POWER_SAVING BIT(2) +#define PORT_PHY_REMOTE_LOOPBACK BIT(1) +#define PORT_CABLE_FAULT_COUNTER_H 0x01 + +#define REG_PORT_1_LINK_MD_RESULT 0x1B +#define REG_PORT_2_LINK_MD_RESULT 0x2B +#define REG_PORT_3_LINK_MD_RESULT 0x3B +#define REG_PORT_4_LINK_MD_RESULT 0x4B + +#define PORT_CABLE_FAULT_COUNTER_L 0xFF +#define PORT_CABLE_FAULT_COUNTER 0x1FF + +#define REG_PORT_1_CTRL_9 0x1C +#define REG_PORT_2_CTRL_9 0x2C +#define REG_PORT_3_CTRL_9 0x3C +#define REG_PORT_4_CTRL_9 0x4C + +#define PORT_AUTO_NEG_DISABLE BIT(7) +#define PORT_FORCE_100_MBIT BIT(6) +#define PORT_FORCE_FULL_DUPLEX BIT(5) + +#define REG_PORT_1_CTRL_10 0x1D +#define REG_PORT_2_CTRL_10 0x2D +#define REG_PORT_3_CTRL_10 0x3D +#define REG_PORT_4_CTRL_10 0x4D + +#define PORT_LED_OFF BIT(7) +#define PORT_TX_DISABLE BIT(6) +#define PORT_AUTO_NEG_RESTART BIT(5) +#define PORT_POWER_DOWN BIT(3) +#define PORT_AUTO_MDIX_DISABLE BIT(2) +#define PORT_FORCE_MDIX BIT(1) +#define PORT_MAC_LOOPBACK BIT(0) + +#define REG_PORT_1_STATUS_2 0x1E +#define REG_PORT_2_STATUS_2 0x2E +#define REG_PORT_3_STATUS_2 0x3E +#define REG_PORT_4_STATUS_2 0x4E + +#define PORT_MDIX_STATUS BIT(7) +#define PORT_AUTO_NEG_COMPLETE BIT(6) +#define PORT_STAT_LINK_GOOD BIT(5) + +#define REG_PORT_1_STATUS_3 0x1F +#define REG_PORT_2_STATUS_3 0x2F +#define REG_PORT_3_STATUS_3 0x3F +#define REG_PORT_4_STATUS_3 0x4F + +#define PORT_PHY_LOOPBACK BIT(7) +#define PORT_PHY_ISOLATE BIT(5) +#define PORT_PHY_SOFT_RESET BIT(4) +#define PORT_PHY_FORCE_LINK BIT(3) +#define PORT_PHY_MODE_M 0x7 +#define PHY_MODE_IN_AUTO_NEG 1 +#define PHY_MODE_10BT_HALF 2 +#define PHY_MODE_100BT_HALF 3 +#define PHY_MODE_10BT_FULL 5 +#define PHY_MODE_100BT_FULL 6 +#define PHY_MODE_ISOLDATE 7 + +#define REG_PORT_CTRL_0 0x00 +#define REG_PORT_CTRL_1 0x01 +#define REG_PORT_CTRL_2 0x02 +#define REG_PORT_CTRL_VID 0x03 + +#define REG_PORT_CTRL_5 0x05 + +#define REG_PORT_CTRL_7 0x07 +#define REG_PORT_STATUS_0 0x08 +#define REG_PORT_STATUS_1 0x09 +#define REG_PORT_LINK_MD_CTRL 0x0A +#define REG_PORT_LINK_MD_RESULT 0x0B +#define REG_PORT_CTRL_9 0x0C +#define REG_PORT_CTRL_10 0x0D +#define REG_PORT_STATUS_2 0x0E +#define REG_PORT_STATUS_3 0x0F + +#define REG_PORT_CTRL_12 0xA0 +#define REG_PORT_CTRL_13 0xA1 +#define REG_PORT_RATE_CTRL_3 0xA2 +#define REG_PORT_RATE_CTRL_2 0xA3 +#define REG_PORT_RATE_CTRL_1 0xA4 +#define REG_PORT_RATE_CTRL_0 0xA5 +#define REG_PORT_RATE_LIMIT 0xA6 +#define REG_PORT_IN_RATE_0 0xA7 
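/* Editorial aside (not part of the commit): the per-port register banks
 * repeat at a fixed 0x10 stride, which is what the PORT_CTRL_ADDR() macro
 * defined a little further down encodes. A hedged sketch of the address
 * arithmetic, with ksz8795_port_addr() as a hypothetical name:
 */
static inline unsigned int ksz8795_port_addr(int port, int offset)
{
	/* Port 1 registers start at 0x10 and each port spans 0x10 bytes,
	 * so generic offset 0xA7 (REG_PORT_IN_RATE_0) becomes 0xB7 for
	 * port index 0, 0xC7 for port index 1, and so on.
	 */
	return 0x10 + port * 0x10 + offset;
}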
+#define REG_PORT_IN_RATE_1 0xA8 +#define REG_PORT_IN_RATE_2 0xA9 +#define REG_PORT_IN_RATE_3 0xAA +#define REG_PORT_OUT_RATE_0 0xAB +#define REG_PORT_OUT_RATE_1 0xAC +#define REG_PORT_OUT_RATE_2 0xAD +#define REG_PORT_OUT_RATE_3 0xAE + +#define PORT_CTRL_ADDR(port, addr) \ + ((addr) + REG_PORT_1_CTRL_0 + (port) * \ + (REG_PORT_2_CTRL_0 - REG_PORT_1_CTRL_0)) + +#define REG_SW_MAC_ADDR_0 0x68 +#define REG_SW_MAC_ADDR_1 0x69 +#define REG_SW_MAC_ADDR_2 0x6A +#define REG_SW_MAC_ADDR_3 0x6B +#define REG_SW_MAC_ADDR_4 0x6C +#define REG_SW_MAC_ADDR_5 0x6D + +#define REG_IND_CTRL_0 0x6E + +#define TABLE_EXT_SELECT_S 5 +#define TABLE_EEE_V 1 +#define TABLE_ACL_V 2 +#define TABLE_PME_V 4 +#define TABLE_LINK_MD_V 5 +#define TABLE_EEE (TABLE_EEE_V << TABLE_EXT_SELECT_S) +#define TABLE_ACL (TABLE_ACL_V << TABLE_EXT_SELECT_S) +#define TABLE_PME (TABLE_PME_V << TABLE_EXT_SELECT_S) +#define TABLE_LINK_MD (TABLE_LINK_MD << TABLE_EXT_SELECT_S) +#define TABLE_READ BIT(4) +#define TABLE_SELECT_S 2 +#define TABLE_STATIC_MAC_V 0 +#define TABLE_VLAN_V 1 +#define TABLE_DYNAMIC_MAC_V 2 +#define TABLE_MIB_V 3 +#define TABLE_STATIC_MAC (TABLE_STATIC_MAC_V << TABLE_SELECT_S) +#define TABLE_VLAN (TABLE_VLAN_V << TABLE_SELECT_S) +#define TABLE_DYNAMIC_MAC (TABLE_DYNAMIC_MAC_V << TABLE_SELECT_S) +#define TABLE_MIB (TABLE_MIB_V << TABLE_SELECT_S) + +#define REG_IND_CTRL_1 0x6F + +#define TABLE_ENTRY_MASK 0x03FF +#define TABLE_EXT_ENTRY_MASK 0x0FFF + +#define REG_IND_DATA_8 0x70 +#define REG_IND_DATA_7 0x71 +#define REG_IND_DATA_6 0x72 +#define REG_IND_DATA_5 0x73 +#define REG_IND_DATA_4 0x74 +#define REG_IND_DATA_3 0x75 +#define REG_IND_DATA_2 0x76 +#define REG_IND_DATA_1 0x77 +#define REG_IND_DATA_0 0x78 + +#define REG_IND_DATA_PME_EEE_ACL 0xA0 + +#define REG_IND_DATA_CHECK REG_IND_DATA_6 +#define REG_IND_MIB_CHECK REG_IND_DATA_4 +#define REG_IND_DATA_HI REG_IND_DATA_7 +#define REG_IND_DATA_LO REG_IND_DATA_3 + +#define REG_INT_STATUS 0x7C +#define REG_INT_ENABLE 0x7D + +#define INT_PME BIT(4) + +#define REG_ACL_INT_STATUS 0x7E +#define REG_ACL_INT_ENABLE 0x7F + +#define INT_PORT_5 BIT(4) +#define INT_PORT_4 BIT(3) +#define INT_PORT_3 BIT(2) +#define INT_PORT_2 BIT(1) +#define INT_PORT_1 BIT(0) + +#define INT_PORT_ALL \ + (INT_PORT_5 | INT_PORT_4 | INT_PORT_3 | INT_PORT_2 | INT_PORT_1) + +#define REG_SW_CTRL_12 0x80 +#define REG_SW_CTRL_13 0x81 + +#define SWITCH_802_1P_MASK 3 +#define SWITCH_802_1P_BASE 3 +#define SWITCH_802_1P_SHIFT 2 + +#define SW_802_1P_MAP_M KS_PRIO_M +#define SW_802_1P_MAP_S KS_PRIO_S + +#define REG_SWITCH_CTRL_14 0x82 + +#define SW_PRIO_MAPPING_M KS_PRIO_M +#define SW_PRIO_MAPPING_S 6 +#define SW_PRIO_MAP_3_HI 0 +#define SW_PRIO_MAP_2_HI 2 +#define SW_PRIO_MAP_0_LO 3 + +#define REG_SW_CTRL_15 0x83 +#define REG_SW_CTRL_16 0x84 +#define REG_SW_CTRL_17 0x85 +#define REG_SW_CTRL_18 0x86 + +#define SW_SELF_ADDR_FILTER_ENABLE BIT(6) + +#define REG_SW_UNK_UCAST_CTRL 0x83 +#define REG_SW_UNK_MCAST_CTRL 0x84 +#define REG_SW_UNK_VID_CTRL 0x85 +#define REG_SW_UNK_IP_MCAST_CTRL 0x86 + +#define SW_UNK_FWD_ENABLE BIT(5) +#define SW_UNK_FWD_MAP KS_PORT_M + +#define REG_SW_CTRL_19 0x87 + +#define SW_IN_RATE_LIMIT_PERIOD_M 0x3 +#define SW_IN_RATE_LIMIT_PERIOD_S 4 +#define SW_IN_RATE_LIMIT_16_MS 0 +#define SW_IN_RATE_LIMIT_64_MS 1 +#define SW_IN_RATE_LIMIT_256_MS 2 +#define SW_OUT_RATE_LIMIT_QUEUE_BASED BIT(3) +#define SW_INS_TAG_ENABLE BIT(2) + +#define REG_TOS_PRIO_CTRL_0 0x90 +#define REG_TOS_PRIO_CTRL_1 0x91 +#define REG_TOS_PRIO_CTRL_2 0x92 +#define REG_TOS_PRIO_CTRL_3 0x93 +#define REG_TOS_PRIO_CTRL_4 0x94 
+#define REG_TOS_PRIO_CTRL_5 0x95 +#define REG_TOS_PRIO_CTRL_6 0x96 +#define REG_TOS_PRIO_CTRL_7 0x97 +#define REG_TOS_PRIO_CTRL_8 0x98 +#define REG_TOS_PRIO_CTRL_9 0x99 +#define REG_TOS_PRIO_CTRL_10 0x9A +#define REG_TOS_PRIO_CTRL_11 0x9B +#define REG_TOS_PRIO_CTRL_12 0x9C +#define REG_TOS_PRIO_CTRL_13 0x9D +#define REG_TOS_PRIO_CTRL_14 0x9E +#define REG_TOS_PRIO_CTRL_15 0x9F + +#define TOS_PRIO_M KS_PRIO_M +#define TOS_PRIO_S KS_PRIO_S + +#define REG_SW_CTRL_20 0xA3 + +#define SW_GMII_DRIVE_STRENGTH_S 4 +#define SW_DRIVE_STRENGTH_M 0x7 +#define SW_DRIVE_STRENGTH_2MA 0 +#define SW_DRIVE_STRENGTH_4MA 1 +#define SW_DRIVE_STRENGTH_8MA 2 +#define SW_DRIVE_STRENGTH_12MA 3 +#define SW_DRIVE_STRENGTH_16MA 4 +#define SW_DRIVE_STRENGTH_20MA 5 +#define SW_DRIVE_STRENGTH_24MA 6 +#define SW_DRIVE_STRENGTH_28MA 7 +#define SW_MII_DRIVE_STRENGTH_S 0 + +#define REG_SW_CTRL_21 0xA4 + +#define SW_IPV6_MLD_OPTION BIT(3) +#define SW_IPV6_MLD_SNOOP BIT(2) + +#define REG_PORT_1_CTRL_12 0xB0 +#define REG_PORT_2_CTRL_12 0xC0 +#define REG_PORT_3_CTRL_12 0xD0 +#define REG_PORT_4_CTRL_12 0xE0 +#define REG_PORT_5_CTRL_12 0xF0 + +#define PORT_PASS_ALL BIT(6) +#define PORT_INS_TAG_FOR_PORT_5_S 3 +#define PORT_INS_TAG_FOR_PORT_5 BIT(3) +#define PORT_INS_TAG_FOR_PORT_4 BIT(2) +#define PORT_INS_TAG_FOR_PORT_3 BIT(1) +#define PORT_INS_TAG_FOR_PORT_2 BIT(0) + +#define REG_PORT_1_CTRL_13 0xB1 +#define REG_PORT_2_CTRL_13 0xC1 +#define REG_PORT_3_CTRL_13 0xD1 +#define REG_PORT_4_CTRL_13 0xE1 +#define REG_PORT_5_CTRL_13 0xF1 + +#define PORT_QUEUE_SPLIT_H BIT(1) +#define PORT_QUEUE_SPLIT_1 0 +#define PORT_QUEUE_SPLIT_2 1 +#define PORT_QUEUE_SPLIT_4 2 +#define PORT_DROP_TAG BIT(0) + +#define REG_PORT_1_CTRL_14 0xB2 +#define REG_PORT_2_CTRL_14 0xC2 +#define REG_PORT_3_CTRL_14 0xD2 +#define REG_PORT_4_CTRL_14 0xE2 +#define REG_PORT_5_CTRL_14 0xF2 +#define REG_PORT_1_CTRL_15 0xB3 +#define REG_PORT_2_CTRL_15 0xC3 +#define REG_PORT_3_CTRL_15 0xD3 +#define REG_PORT_4_CTRL_15 0xE3 +#define REG_PORT_5_CTRL_15 0xF3 +#define REG_PORT_1_CTRL_16 0xB4 +#define REG_PORT_2_CTRL_16 0xC4 +#define REG_PORT_3_CTRL_16 0xD4 +#define REG_PORT_4_CTRL_16 0xE4 +#define REG_PORT_5_CTRL_16 0xF4 +#define REG_PORT_1_CTRL_17 0xB5 +#define REG_PORT_2_CTRL_17 0xC5 +#define REG_PORT_3_CTRL_17 0xD5 +#define REG_PORT_4_CTRL_17 0xE5 +#define REG_PORT_5_CTRL_17 0xF5 + +#define REG_PORT_1_RATE_CTRL_3 0xB2 +#define REG_PORT_1_RATE_CTRL_2 0xB3 +#define REG_PORT_1_RATE_CTRL_1 0xB4 +#define REG_PORT_1_RATE_CTRL_0 0xB5 +#define REG_PORT_2_RATE_CTRL_3 0xC2 +#define REG_PORT_2_RATE_CTRL_2 0xC3 +#define REG_PORT_2_RATE_CTRL_1 0xC4 +#define REG_PORT_2_RATE_CTRL_0 0xC5 +#define REG_PORT_3_RATE_CTRL_3 0xD2 +#define REG_PORT_3_RATE_CTRL_2 0xD3 +#define REG_PORT_3_RATE_CTRL_1 0xD4 +#define REG_PORT_3_RATE_CTRL_0 0xD5 +#define REG_PORT_4_RATE_CTRL_3 0xE2 +#define REG_PORT_4_RATE_CTRL_2 0xE3 +#define REG_PORT_4_RATE_CTRL_1 0xE4 +#define REG_PORT_4_RATE_CTRL_0 0xE5 +#define REG_PORT_5_RATE_CTRL_3 0xF2 +#define REG_PORT_5_RATE_CTRL_2 0xF3 +#define REG_PORT_5_RATE_CTRL_1 0xF4 +#define REG_PORT_5_RATE_CTRL_0 0xF5 + +#define RATE_CTRL_ENABLE BIT(7) +#define RATE_RATIO_M (BIT(7) - 1) + +#define PORT_OUT_RATE_ENABLE BIT(7) + +#define REG_PORT_1_RATE_LIMIT 0xB6 +#define REG_PORT_2_RATE_LIMIT 0xC6 +#define REG_PORT_3_RATE_LIMIT 0xD6 +#define REG_PORT_4_RATE_LIMIT 0xE6 +#define REG_PORT_5_RATE_LIMIT 0xF6 + +#define PORT_IN_PORT_BASED_S 6 +#define PORT_RATE_PACKET_BASED_S 5 +#define PORT_IN_FLOW_CTRL_S 4 +#define PORT_IN_LIMIT_MODE_M 0x3 +#define PORT_IN_LIMIT_MODE_S 2 +#define 
PORT_COUNT_IFG_S 1 +#define PORT_COUNT_PREAMBLE_S 0 +#define PORT_IN_PORT_BASED BIT(PORT_IN_PORT_BASED_S) +#define PORT_RATE_PACKET_BASED BIT(PORT_RATE_PACKET_BASED_S) +#define PORT_IN_FLOW_CTRL BIT(PORT_IN_FLOW_CTRL_S) +#define PORT_IN_ALL 0 +#define PORT_IN_UNICAST 1 +#define PORT_IN_MULTICAST 2 +#define PORT_IN_BROADCAST 3 +#define PORT_COUNT_IFG BIT(PORT_COUNT_IFG_S) +#define PORT_COUNT_PREAMBLE BIT(PORT_COUNT_PREAMBLE_S) + +#define REG_PORT_1_IN_RATE_0 0xB7 +#define REG_PORT_2_IN_RATE_0 0xC7 +#define REG_PORT_3_IN_RATE_0 0xD7 +#define REG_PORT_4_IN_RATE_0 0xE7 +#define REG_PORT_5_IN_RATE_0 0xF7 +#define REG_PORT_1_IN_RATE_1 0xB8 +#define REG_PORT_2_IN_RATE_1 0xC8 +#define REG_PORT_3_IN_RATE_1 0xD8 +#define REG_PORT_4_IN_RATE_1 0xE8 +#define REG_PORT_5_IN_RATE_1 0xF8 +#define REG_PORT_1_IN_RATE_2 0xB9 +#define REG_PORT_2_IN_RATE_2 0xC9 +#define REG_PORT_3_IN_RATE_2 0xD9 +#define REG_PORT_4_IN_RATE_2 0xE9 +#define REG_PORT_5_IN_RATE_2 0xF9 +#define REG_PORT_1_IN_RATE_3 0xBA +#define REG_PORT_2_IN_RATE_3 0xCA +#define REG_PORT_3_IN_RATE_3 0xDA +#define REG_PORT_4_IN_RATE_3 0xEA +#define REG_PORT_5_IN_RATE_3 0xFA + +#define PORT_IN_RATE_ENABLE BIT(7) +#define PORT_RATE_LIMIT_M (BIT(7) - 1) + +#define REG_PORT_1_OUT_RATE_0 0xBB +#define REG_PORT_2_OUT_RATE_0 0xCB +#define REG_PORT_3_OUT_RATE_0 0xDB +#define REG_PORT_4_OUT_RATE_0 0xEB +#define REG_PORT_5_OUT_RATE_0 0xFB +#define REG_PORT_1_OUT_RATE_1 0xBC +#define REG_PORT_2_OUT_RATE_1 0xCC +#define REG_PORT_3_OUT_RATE_1 0xDC +#define REG_PORT_4_OUT_RATE_1 0xEC +#define REG_PORT_5_OUT_RATE_1 0xFC +#define REG_PORT_1_OUT_RATE_2 0xBD +#define REG_PORT_2_OUT_RATE_2 0xCD +#define REG_PORT_3_OUT_RATE_2 0xDD +#define REG_PORT_4_OUT_RATE_2 0xED +#define REG_PORT_5_OUT_RATE_2 0xFD +#define REG_PORT_1_OUT_RATE_3 0xBE +#define REG_PORT_2_OUT_RATE_3 0xCE +#define REG_PORT_3_OUT_RATE_3 0xDE +#define REG_PORT_4_OUT_RATE_3 0xEE +#define REG_PORT_5_OUT_RATE_3 0xFE + +/* PME */ + +#define SW_PME_OUTPUT_ENABLE BIT(1) +#define SW_PME_ACTIVE_HIGH BIT(0) + +#define PORT_MAGIC_PACKET_DETECT BIT(2) +#define PORT_LINK_UP_DETECT BIT(1) +#define PORT_ENERGY_DETECT BIT(0) + +/* ACL */ + +#define ACL_FIRST_RULE_M 0xF + +#define ACL_MODE_M 0x3 +#define ACL_MODE_S 4 +#define ACL_MODE_DISABLE 0 +#define ACL_MODE_LAYER_2 1 +#define ACL_MODE_LAYER_3 2 +#define ACL_MODE_LAYER_4 3 +#define ACL_ENABLE_M 0x3 +#define ACL_ENABLE_S 2 +#define ACL_ENABLE_2_COUNT 0 +#define ACL_ENABLE_2_TYPE 1 +#define ACL_ENABLE_2_MAC 2 +#define ACL_ENABLE_2_BOTH 3 +#define ACL_ENABLE_3_IP 1 +#define ACL_ENABLE_3_SRC_DST_COMP 2 +#define ACL_ENABLE_4_PROTOCOL 0 +#define ACL_ENABLE_4_TCP_PORT_COMP 1 +#define ACL_ENABLE_4_UDP_PORT_COMP 2 +#define ACL_ENABLE_4_TCP_SEQN_COMP 3 +#define ACL_SRC BIT(1) +#define ACL_EQUAL BIT(0) + +#define ACL_MAX_PORT 0xFFFF + +#define ACL_MIN_PORT 0xFFFF +#define ACL_IP_ADDR 0xFFFFFFFF +#define ACL_TCP_SEQNUM 0xFFFFFFFF + +#define ACL_RESERVED 0xF8 +#define ACL_PORT_MODE_M 0x3 +#define ACL_PORT_MODE_S 1 +#define ACL_PORT_MODE_DISABLE 0 +#define ACL_PORT_MODE_EITHER 1 +#define ACL_PORT_MODE_IN_RANGE 2 +#define ACL_PORT_MODE_OUT_OF_RANGE 3 + +#define ACL_TCP_FLAG_ENABLE BIT(0) + +#define ACL_TCP_FLAG_M 0xFF + +#define ACL_TCP_FLAG 0xFF +#define ACL_ETH_TYPE 0xFFFF +#define ACL_IP_M 0xFFFFFFFF + +#define ACL_PRIO_MODE_M 0x3 +#define ACL_PRIO_MODE_S 6 +#define ACL_PRIO_MODE_DISABLE 0 +#define ACL_PRIO_MODE_HIGHER 1 +#define ACL_PRIO_MODE_LOWER 2 +#define ACL_PRIO_MODE_REPLACE 3 +#define ACL_PRIO_M 0x7 +#define ACL_PRIO_S 3 +#define ACL_VLAN_PRIO_REPLACE BIT(2) +#define 
ACL_VLAN_PRIO_M 0x7 +#define ACL_VLAN_PRIO_HI_M 0x3 + +#define ACL_VLAN_PRIO_LO_M 0x8 +#define ACL_VLAN_PRIO_S 7 +#define ACL_MAP_MODE_M 0x3 +#define ACL_MAP_MODE_S 5 +#define ACL_MAP_MODE_DISABLE 0 +#define ACL_MAP_MODE_OR 1 +#define ACL_MAP_MODE_AND 2 +#define ACL_MAP_MODE_REPLACE 3 +#define ACL_MAP_PORT_M 0x1F + +#define ACL_CNT_M (BIT(11) - 1) +#define ACL_CNT_S 5 +#define ACL_MSEC_UNIT BIT(4) +#define ACL_INTR_MODE BIT(3) + +#define REG_PORT_ACL_BYTE_EN_MSB 0x10 + +#define ACL_BYTE_EN_MSB_M 0x3F + +#define REG_PORT_ACL_BYTE_EN_LSB 0x11 + +#define ACL_ACTION_START 0xA +#define ACL_ACTION_LEN 2 +#define ACL_INTR_CNT_START 0xB +#define ACL_RULESET_START 0xC +#define ACL_RULESET_LEN 2 +#define ACL_TABLE_LEN 14 + +#define ACL_ACTION_ENABLE 0x000C +#define ACL_MATCH_ENABLE 0x1FF0 +#define ACL_RULESET_ENABLE 0x2003 +#define ACL_BYTE_ENABLE ((ACL_BYTE_EN_MSB_M << 8) | 0xFF) +#define ACL_MODE_ENABLE (0x10 << 8) + +#define REG_PORT_ACL_CTRL_0 0x12 + +#define PORT_ACL_WRITE_DONE BIT(6) +#define PORT_ACL_READ_DONE BIT(5) +#define PORT_ACL_WRITE BIT(4) +#define PORT_ACL_INDEX_M 0xF + +#define REG_PORT_ACL_CTRL_1 0x13 + +#define PORT_ACL_FORCE_DLR_MISS BIT(0) + +#ifndef PHY_REG_CTRL +#define PHY_REG_CTRL 0 + +#define PHY_RESET BIT(15) +#define PHY_LOOPBACK BIT(14) +#define PHY_SPEED_100MBIT BIT(13) +#define PHY_AUTO_NEG_ENABLE BIT(12) +#define PHY_POWER_DOWN BIT(11) +#define PHY_MII_DISABLE BIT(10) +#define PHY_AUTO_NEG_RESTART BIT(9) +#define PHY_FULL_DUPLEX BIT(8) +#define PHY_COLLISION_TEST_NOT BIT(7) +#define PHY_HP_MDIX BIT(5) +#define PHY_FORCE_MDIX BIT(4) +#define PHY_AUTO_MDIX_DISABLE BIT(3) +#define PHY_REMOTE_FAULT_DISABLE BIT(2) +#define PHY_TRANSMIT_DISABLE BIT(1) +#define PHY_LED_DISABLE BIT(0) + +#define PHY_REG_STATUS 1 + +#define PHY_100BT4_CAPABLE BIT(15) +#define PHY_100BTX_FD_CAPABLE BIT(14) +#define PHY_100BTX_CAPABLE BIT(13) +#define PHY_10BT_FD_CAPABLE BIT(12) +#define PHY_10BT_CAPABLE BIT(11) +#define PHY_MII_SUPPRESS_CAPABLE_NOT BIT(6) +#define PHY_AUTO_NEG_ACKNOWLEDGE BIT(5) +#define PHY_REMOTE_FAULT BIT(4) +#define PHY_AUTO_NEG_CAPABLE BIT(3) +#define PHY_LINK_STATUS BIT(2) +#define PHY_JABBER_DETECT_NOT BIT(1) +#define PHY_EXTENDED_CAPABILITY BIT(0) + +#define PHY_REG_ID_1 2 +#define PHY_REG_ID_2 3 + +#define PHY_REG_AUTO_NEGOTIATION 4 + +#define PHY_AUTO_NEG_NEXT_PAGE_NOT BIT(15) +#define PHY_AUTO_NEG_REMOTE_FAULT_NOT BIT(13) +#define PHY_AUTO_NEG_SYM_PAUSE BIT(10) +#define PHY_AUTO_NEG_100BT4 BIT(9) +#define PHY_AUTO_NEG_100BTX_FD BIT(8) +#define PHY_AUTO_NEG_100BTX BIT(7) +#define PHY_AUTO_NEG_10BT_FD BIT(6) +#define PHY_AUTO_NEG_10BT BIT(5) +#define PHY_AUTO_NEG_SELECTOR 0x001F +#define PHY_AUTO_NEG_802_3 0x0001 + +#define PHY_REG_REMOTE_CAPABILITY 5 + +#define PHY_REMOTE_NEXT_PAGE_NOT BIT(15) +#define PHY_REMOTE_ACKNOWLEDGE_NOT BIT(14) +#define PHY_REMOTE_REMOTE_FAULT_NOT BIT(13) +#define PHY_REMOTE_SYM_PAUSE BIT(10) +#define PHY_REMOTE_100BTX_FD BIT(8) +#define PHY_REMOTE_100BTX BIT(7) +#define PHY_REMOTE_10BT_FD BIT(6) +#define PHY_REMOTE_10BT BIT(5) +#endif + +#define KSZ8795_ID_HI 0x0022 +#define KSZ8795_ID_LO 0x1550 + +#define KSZ8795_SW_ID 0x8795 + +#define PHY_REG_LINK_MD 0x1D + +#define PHY_START_CABLE_DIAG BIT(15) +#define PHY_CABLE_DIAG_RESULT 0x6000 +#define PHY_CABLE_STAT_NORMAL 0x0000 +#define PHY_CABLE_STAT_OPEN 0x2000 +#define PHY_CABLE_STAT_SHORT 0x4000 +#define PHY_CABLE_STAT_FAILED 0x6000 +#define PHY_CABLE_10M_SHORT BIT(12) +#define PHY_CABLE_FAULT_COUNTER 0x01FF + +#define PHY_REG_PHY_CTRL 0x1F + +#define PHY_MODE_M 0x7 +#define PHY_MODE_S 8 
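/* Editorial aside (not part of the commit): the PHY_REG_* indices guarded
 * above are standard MII register numbers that ksz8795_r_phy() and
 * ksz8795_w_phy() emulate on top of the per-port control registers. A
 * hedged usage sketch:
 */
static void ksz8795_show_phy_id(struct ksz_device *dev)
{
	u16 id_hi, id_lo;

	/* MII registers 2 and 3 are synthesized from fixed constants, so
	 * any port returns KSZ8795_ID_HI/KSZ8795_ID_LO (0x0022/0x1550)
	 * without touching the hardware.
	 */
	ksz8795_r_phy(dev, 0, PHY_REG_ID_1, &id_hi);
	ksz8795_r_phy(dev, 0, PHY_REG_ID_2, &id_lo);
}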
+#define PHY_STAT_REVERSED_POLARITY BIT(5) +#define PHY_STAT_MDIX BIT(4) +#define PHY_FORCE_LINK BIT(3) +#define PHY_POWER_SAVING_ENABLE BIT(2) +#define PHY_REMOTE_LOOPBACK BIT(1) + +/* Chip resource */ + +#define PRIO_QUEUES 4 + +#define KS_PRIO_IN_REG 4 + +#define TOTAL_PORT_NUM 5 + +/* The host port must be the last port. */ +#define SWITCH_PORT_NUM (TOTAL_PORT_NUM - 1) + +#define KSZ8795_COUNTER_NUM 0x20 +#define TOTAL_KSZ8795_COUNTER_NUM (KSZ8795_COUNTER_NUM + 4) + +#define SWITCH_COUNTER_NUM KSZ8795_COUNTER_NUM +#define TOTAL_SWITCH_COUNTER_NUM TOTAL_KSZ8795_COUNTER_NUM + +/* Common names used by other drivers */ + +#define P_BCAST_STORM_CTRL REG_PORT_CTRL_0 +#define P_PRIO_CTRL REG_PORT_CTRL_0 +#define P_TAG_CTRL REG_PORT_CTRL_0 +#define P_MIRROR_CTRL REG_PORT_CTRL_1 +#define P_802_1P_CTRL REG_PORT_CTRL_2 +#define P_STP_CTRL REG_PORT_CTRL_2 +#define P_LOCAL_CTRL REG_PORT_CTRL_7 +#define P_REMOTE_STATUS REG_PORT_STATUS_0 +#define P_FORCE_CTRL REG_PORT_CTRL_9 +#define P_NEG_RESTART_CTRL REG_PORT_CTRL_10 +#define P_SPEED_STATUS REG_PORT_STATUS_1 +#define P_LINK_STATUS REG_PORT_STATUS_2 +#define P_PASS_ALL_CTRL REG_PORT_CTRL_12 +#define P_INS_SRC_PVID_CTRL REG_PORT_CTRL_12 +#define P_DROP_TAG_CTRL REG_PORT_CTRL_13 +#define P_RATE_LIMIT_CTRL REG_PORT_RATE_LIMIT + +#define S_UNKNOWN_DA_CTRL REG_SWITCH_CTRL_12 +#define S_FORWARD_INVALID_VID_CTRL REG_FORWARD_INVALID_VID + +#define S_FLUSH_TABLE_CTRL REG_SW_CTRL_0 +#define S_LINK_AGING_CTRL REG_SW_CTRL_0 +#define S_HUGE_PACKET_CTRL REG_SW_CTRL_1 +#define S_MIRROR_CTRL REG_SW_CTRL_3 +#define S_REPLACE_VID_CTRL REG_SW_CTRL_4 +#define S_PASS_PAUSE_CTRL REG_SW_CTRL_10 +#define S_TAIL_TAG_CTRL REG_SW_CTRL_10 +#define S_802_1P_PRIO_CTRL REG_SW_CTRL_12 +#define S_TOS_PRIO_CTRL REG_TOS_PRIO_CTRL_0 +#define S_IPV6_MLD_CTRL REG_SW_CTRL_21 + +#define IND_ACC_TABLE(table) ((table) << 8) + +/* The driver sets switch broadcast storm protection at a 10% rate.
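+ * Editor's note (not in the original patch): 148,800 frames/s is the usual 100 Mbit/s line rate for minimum-size 64-byte frames, so one 67 ms counting window holds 148800 * 67 / 1000 = 9969 frames; the driver presumably programs 10% of that, 9969 * 10 / 100 = 996, into the rate-limit register.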
*/ +#define BROADCAST_STORM_PROT_RATE 10 + +/* 148,800 frames * 67 ms / 100 */ +#define BROADCAST_STORM_VALUE 9969 + +/** + * STATIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF + * STATIC_MAC_TABLE_FWD_PORTS 00-001F0000-00000000 + * STATIC_MAC_TABLE_VALID 00-00200000-00000000 + * STATIC_MAC_TABLE_OVERRIDE 00-00400000-00000000 + * STATIC_MAC_TABLE_USE_FID 00-00800000-00000000 + * STATIC_MAC_TABLE_FID 00-7F000000-00000000 + */ + +#define STATIC_MAC_TABLE_ADDR 0x0000FFFF +#define STATIC_MAC_TABLE_FWD_PORTS 0x001F0000 +#define STATIC_MAC_TABLE_VALID 0x00200000 +#define STATIC_MAC_TABLE_OVERRIDE 0x00400000 +#define STATIC_MAC_TABLE_USE_FID 0x00800000 +#define STATIC_MAC_TABLE_FID 0x7F000000 + +#define STATIC_MAC_FWD_PORTS_S 16 +#define STATIC_MAC_FID_S 24 + +/** + * VLAN_TABLE_FID 00-007F007F-007F007F + * VLAN_TABLE_MEMBERSHIP 00-0F800F80-0F800F80 + * VLAN_TABLE_VALID 00-10001000-10001000 + */ + +#define VLAN_TABLE_FID 0x007F +#define VLAN_TABLE_MEMBERSHIP 0x0F80 +#define VLAN_TABLE_VALID 0x1000 + +#define VLAN_TABLE_MEMBERSHIP_S 7 +#define VLAN_TABLE_S 16 + +/** + * DYNAMIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF + * DYNAMIC_MAC_TABLE_FID 00-007F0000-00000000 + * DYNAMIC_MAC_TABLE_NOT_READY 00-00800000-00000000 + * DYNAMIC_MAC_TABLE_SRC_PORT 00-07000000-00000000 + * DYNAMIC_MAC_TABLE_TIMESTAMP 00-18000000-00000000 + * DYNAMIC_MAC_TABLE_ENTRIES 7F-E0000000-00000000 + * DYNAMIC_MAC_TABLE_MAC_EMPTY 80-00000000-00000000 + */ + +#define DYNAMIC_MAC_TABLE_ADDR 0x0000FFFF +#define DYNAMIC_MAC_TABLE_FID 0x007F0000 +#define DYNAMIC_MAC_TABLE_SRC_PORT 0x07000000 +#define DYNAMIC_MAC_TABLE_TIMESTAMP 0x18000000 +#define DYNAMIC_MAC_TABLE_ENTRIES 0xE0000000 + +#define DYNAMIC_MAC_TABLE_NOT_READY 0x80 + +#define DYNAMIC_MAC_TABLE_ENTRIES_H 0x7F +#define DYNAMIC_MAC_TABLE_MAC_EMPTY 0x80 + +#define DYNAMIC_MAC_FID_S 16 +#define DYNAMIC_MAC_SRC_PORT_S 24 +#define DYNAMIC_MAC_TIMESTAMP_S 27 +#define DYNAMIC_MAC_ENTRIES_S 29 +#define DYNAMIC_MAC_ENTRIES_H_S 3 + +/** + * MIB_COUNTER_VALUE 00-00000000-3FFFFFFF + * MIB_TOTAL_BYTES 00-0000000F-FFFFFFFF + * MIB_PACKET_DROPPED 00-00000000-0000FFFF + * MIB_COUNTER_VALID 00-00000020-00000000 + * MIB_COUNTER_OVERFLOW 00-00000040-00000000 + */ + +#define MIB_COUNTER_OVERFLOW BIT(6) +#define MIB_COUNTER_VALID BIT(5) + +#define MIB_COUNTER_VALUE 0x3FFFFFFF + +#define KS_MIB_TOTAL_RX_0 0x100 +#define KS_MIB_TOTAL_TX_0 0x101 +#define KS_MIB_PACKET_DROPPED_RX_0 0x102 +#define KS_MIB_PACKET_DROPPED_TX_0 0x103 +#define KS_MIB_TOTAL_RX_1 0x104 +#define KS_MIB_TOTAL_TX_1 0x105 +#define KS_MIB_PACKET_DROPPED_TX_1 0x106 +#define KS_MIB_PACKET_DROPPED_RX_1 0x107 +#define KS_MIB_TOTAL_RX_2 0x108 +#define KS_MIB_TOTAL_TX_2 0x109 +#define KS_MIB_PACKET_DROPPED_TX_2 0x10A +#define KS_MIB_PACKET_DROPPED_RX_2 0x10B +#define KS_MIB_TOTAL_RX_3 0x10C +#define KS_MIB_TOTAL_TX_3 0x10D +#define KS_MIB_PACKET_DROPPED_TX_3 0x10E +#define KS_MIB_PACKET_DROPPED_RX_3 0x10F +#define KS_MIB_TOTAL_RX_4 0x110 +#define KS_MIB_TOTAL_TX_4 0x111 +#define KS_MIB_PACKET_DROPPED_TX_4 0x112 +#define KS_MIB_PACKET_DROPPED_RX_4 0x113 + +#define MIB_PACKET_DROPPED 0x0000FFFF + +#define MIB_TOTAL_BYTES_H 0x0000000F + +#define TAIL_TAG_OVERRIDE BIT(6) +#define TAIL_TAG_LOOKUP BIT(7) + +#define VLAN_TABLE_ENTRIES (4096 / 4) +#define FID_ENTRIES 128 + +#endif diff --git a/drivers/net/dsa/microchip/ksz8795_spi.c b/drivers/net/dsa/microchip/ksz8795_spi.c new file mode 100644 index 000000000000..d0f8153e86b7 --- /dev/null +++ b/drivers/net/dsa/microchip/ksz8795_spi.c @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* 
+ * Microchip KSZ8795 series register access through SPI + * + * Copyright (C) 2017 Microchip Technology Inc. + * Tristram Ha <Tristram.Ha@microchip.com> + */ + +#include <asm/unaligned.h> + +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/regmap.h> +#include <linux/spi/spi.h> + +#include "ksz_common.h" + +#define SPI_ADDR_SHIFT 12 +#define SPI_ADDR_ALIGN 3 +#define SPI_TURNAROUND_SHIFT 1 + +KSZ_REGMAP_TABLE(ksz8795, 16, SPI_ADDR_SHIFT, + SPI_TURNAROUND_SHIFT, SPI_ADDR_ALIGN); + +static int ksz8795_spi_probe(struct spi_device *spi) +{ + struct ksz_device *dev; + int i, ret; + + dev = ksz_switch_alloc(&spi->dev, spi); + if (!dev) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(ksz8795_regmap_config); i++) { + dev->regmap[i] = devm_regmap_init_spi(spi, + &ksz8795_regmap_config + [i]); + if (IS_ERR(dev->regmap[i])) { + ret = PTR_ERR(dev->regmap[i]); + dev_err(&spi->dev, + "Failed to initialize regmap%i: %d\n", + ksz8795_regmap_config[i].val_bits, ret); + return ret; + } + } + + if (spi->dev.platform_data) + dev->pdata = spi->dev.platform_data; + + ret = ksz8795_switch_register(dev); + + /* Main DSA driver may not be started yet. */ + if (ret) + return ret; + + spi_set_drvdata(spi, dev); + + return 0; +} + +static int ksz8795_spi_remove(struct spi_device *spi) +{ + struct ksz_device *dev = spi_get_drvdata(spi); + + if (dev) + ksz_switch_remove(dev); + + return 0; +} + +static void ksz8795_spi_shutdown(struct spi_device *spi) +{ + struct ksz_device *dev = spi_get_drvdata(spi); + + if (dev && dev->dev_ops->shutdown) + dev->dev_ops->shutdown(dev); +} + +static const struct of_device_id ksz8795_dt_ids[] = { + { .compatible = "microchip,ksz8765" }, + { .compatible = "microchip,ksz8794" }, + { .compatible = "microchip,ksz8795" }, + {}, +}; +MODULE_DEVICE_TABLE(of, ksz8795_dt_ids); + +static struct spi_driver ksz8795_spi_driver = { + .driver = { + .name = "ksz8795-switch", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(ksz8795_dt_ids), + }, + .probe = ksz8795_spi_probe, + .remove = ksz8795_spi_remove, + .shutdown = ksz8795_spi_shutdown, +}; + +module_spi_driver(ksz8795_spi_driver); + +MODULE_AUTHOR("Tristram Ha <Tristram.Ha@microchip.com>"); +MODULE_DESCRIPTION("Microchip KSZ8795 Series Switch SPI Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index a8c97f7a79b7..50ffc63d6231 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -14,7 +14,6 @@ #include <net/dsa.h> #include <net/switchdev.h> -#include "ksz_priv.h" #include "ksz9477_reg.h" #include "ksz_common.h" @@ -1530,6 +1529,15 @@ static const struct ksz_chip_data ksz9477_switch_chips[] = { .cpu_ports = 0x07, /* can be configured as cpu port */ .port_cnt = 3, /* total port count */ }, + { + .chip_id = 0x00956700, + .dev_name = "KSZ9567", + .num_vlans = 4096, + .num_alus = 4096, + .num_statics = 16, + .cpu_ports = 0x7F, /* can be configured as cpu port */ + .port_cnt = 7, /* total physical port count */ + }, }; static int ksz9477_switch_init(struct ksz_device *dev) diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c new file mode 100644 index 000000000000..0b1e01f0873d --- /dev/null +++ b/drivers/net/dsa/microchip/ksz9477_i2c.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Microchip KSZ9477 series register access through I2C + * + * Copyright (C) 2018-2019 Microchip Technology Inc. 
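+ * + * Editor's note (not part of the original file): the "not_used" argument to KSZ_REGMAP_TABLE() below pastes into swabnot_used(), which ksz_common.h defines as 0, because I2C register access needs no byte-swapped SPI opcode/turnaround mask.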
+ */ + +#include <linux/i2c.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/regmap.h> + +#include "ksz_common.h" + +KSZ_REGMAP_TABLE(ksz9477, not_used, 16, 0, 0); + +static int ksz9477_i2c_probe(struct i2c_client *i2c, + const struct i2c_device_id *i2c_id) +{ + struct ksz_device *dev; + int i, ret; + + dev = ksz_switch_alloc(&i2c->dev, i2c); + if (!dev) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(ksz9477_regmap_config); i++) { + dev->regmap[i] = devm_regmap_init_i2c(i2c, + &ksz9477_regmap_config[i]); + if (IS_ERR(dev->regmap[i])) { + ret = PTR_ERR(dev->regmap[i]); + dev_err(&i2c->dev, + "Failed to initialize regmap%i: %d\n", + ksz9477_regmap_config[i].val_bits, ret); + return ret; + } + } + + if (i2c->dev.platform_data) + dev->pdata = i2c->dev.platform_data; + + ret = ksz9477_switch_register(dev); + + /* Main DSA driver may not be started yet. */ + if (ret) + return ret; + + i2c_set_clientdata(i2c, dev); + + return 0; +} + +static int ksz9477_i2c_remove(struct i2c_client *i2c) +{ + struct ksz_device *dev = i2c_get_clientdata(i2c); + + ksz_switch_remove(dev); + + return 0; +} + +static void ksz9477_i2c_shutdown(struct i2c_client *i2c) +{ + struct ksz_device *dev = i2c_get_clientdata(i2c); + + if (dev && dev->dev_ops->shutdown) + dev->dev_ops->shutdown(dev); +} + +static const struct i2c_device_id ksz9477_i2c_id[] = { + { "ksz9477-switch", 0 }, + {}, +}; + +MODULE_DEVICE_TABLE(i2c, ksz9477_i2c_id); + +static const struct of_device_id ksz9477_dt_ids[] = { + { .compatible = "microchip,ksz9477" }, + { .compatible = "microchip,ksz9897" }, + { .compatible = "microchip,ksz9567" }, + {}, +}; +MODULE_DEVICE_TABLE(of, ksz9477_dt_ids); + +static struct i2c_driver ksz9477_i2c_driver = { + .driver = { + .name = "ksz9477-switch", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(ksz9477_dt_ids), + }, + .probe = ksz9477_i2c_probe, + .remove = ksz9477_i2c_remove, + .shutdown = ksz9477_i2c_shutdown, + .id_table = ksz9477_i2c_id, +}; + +module_i2c_driver(ksz9477_i2c_driver); + +MODULE_AUTHOR("Tristram Ha <Tristram.Ha@microchip.com>"); +MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch I2C access Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c index 098b01e4ed1a..f4198d6f72be 100644 --- a/drivers/net/dsa/microchip/ksz9477_spi.c +++ b/drivers/net/dsa/microchip/ksz9477_spi.c @@ -13,7 +13,6 @@ #include <linux/regmap.h> #include <linux/spi/spi.h> -#include "ksz_priv.h" #include "ksz_common.h" #define SPI_ADDR_SHIFT 24 @@ -82,6 +81,7 @@ static const struct of_device_id ksz9477_dt_ids[] = { { .compatible = "microchip,ksz9893" }, { .compatible = "microchip,ksz9563" }, { .compatible = "microchip,ksz8563" }, + { .compatible = "microchip,ksz9567" }, {}, }; MODULE_DEVICE_TABLE(of, ksz9477_dt_ids); diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index a3d2d67894bd..b0b870f0c252 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -18,17 +18,7 @@ #include <net/dsa.h> #include <net/switchdev.h> -#include "ksz_priv.h" - -void ksz_port_cleanup(struct ksz_device *dev, int port) -{ - /* Common code for port cleanup. 
*/ - mutex_lock(&dev->dev_mutex); - dev->on_ports &= ~(1 << port); - dev->live_ports &= ~(1 << port); - mutex_unlock(&dev->dev_mutex); -} -EXPORT_SYMBOL_GPL(ksz_port_cleanup); +#include "ksz_common.h" void ksz_update_port_member(struct ksz_device *dev, int port) { @@ -371,9 +361,13 @@ int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy) { struct ksz_device *dev = ds->priv; + if (!dsa_is_user_port(ds, port)) + return 0; + /* setup slave port */ dev->dev_ops->port_setup(dev, port, false); - dev->dev_ops->phy_setup(dev, port, phy); + if (dev->dev_ops->phy_setup) + dev->dev_ops->phy_setup(dev, port, phy); /* port_stp_state_set() will be called after to enable the port so * there is no need to do anything. @@ -387,6 +381,9 @@ void ksz_disable_port(struct dsa_switch *ds, int port) { struct ksz_device *dev = ds->priv; + if (!dsa_is_user_port(ds, port)) + return; + dev->on_ports &= ~(1 << port); dev->live_ports &= ~(1 << port); diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index 72ec250b9540..a24d8e61fbe7 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -7,9 +7,152 @@ #ifndef __KSZ_COMMON_H #define __KSZ_COMMON_H +#include <linux/etherdevice.h> +#include <linux/kernel.h> +#include <linux/mutex.h> +#include <linux/phy.h> #include <linux/regmap.h> +#include <net/dsa.h> + +struct vlan_table { + u32 table[3]; +}; + +struct ksz_port_mib { + struct mutex cnt_mutex; /* structure access */ + u8 cnt_ptr; + u64 *counters; +}; + +struct ksz_port { + u16 member; + u16 vid_member; + int stp_state; + struct phy_device phydev; + + u32 on:1; /* port is not disabled by hardware */ + u32 phy:1; /* port has a PHY */ + u32 fiber:1; /* port is fiber */ + u32 sgmii:1; /* port is SGMII */ + u32 force:1; + u32 read:1; /* read MIB counters in background */ + u32 freeze:1; /* MIB counter freeze is enabled */ + + struct ksz_port_mib mib; +}; + +struct ksz_device { + struct dsa_switch *ds; + struct ksz_platform_data *pdata; + const char *name; + + struct mutex dev_mutex; /* device access */ + struct mutex stats_mutex; /* status access */ + struct mutex alu_mutex; /* ALU access */ + struct mutex vlan_mutex; /* vlan access */ + const struct ksz_dev_ops *dev_ops; + + struct device *dev; + struct regmap *regmap[3]; + + void *priv; + + struct gpio_desc *reset_gpio; /* Optional reset GPIO */ + + /* chip specific data */ + u32 chip_id; + int num_vlans; + int num_alus; + int num_statics; + int cpu_port; /* port connected to CPU */ + int cpu_ports; /* port bitmap can be cpu port */ + int phy_port_cnt; + int port_cnt; + int reg_mib_cnt; + int mib_cnt; + int mib_port_cnt; + int last_port; /* ports after that not used */ + phy_interface_t interface; + u32 regs_size; + bool phy_errata_9477; + bool synclko_125; + + struct vlan_table *vlan_cache; + + struct ksz_port *ports; + struct timer_list mib_read_timer; + struct work_struct mib_read; + unsigned long mib_read_interval; + u16 br_member; + u16 member; + u16 live_ports; + u16 on_ports; /* ports enabled by DSA */ + u16 rx_ports; + u16 tx_ports; + u16 mirror_rx; + u16 mirror_tx; + u32 features; /* chip specific features */ + u32 overrides; /* chip functions set by user */ + u16 host_mask; + u16 port_mask; +}; + +struct alu_struct { + /* entry 1 */ + u8 is_static:1; + u8 is_src_filter:1; + u8 is_dst_filter:1; + u8 prio_age:3; + u32 _reserv_0_1:23; + u8 mstp:3; + /* entry 2 */ + u8 is_override:1; + u8 is_use_fid:1; + u32 _reserv_1_1:23; + u8 port_forward:7; + /* entry 3 & 
4 */ + u32 _reserv_2_1:9; + u8 fid:7; + u8 mac[ETH_ALEN]; +}; + +struct ksz_dev_ops { + u32 (*get_port_addr)(int port, int offset); + void (*cfg_port_member)(struct ksz_device *dev, int port, u8 member); + void (*flush_dyn_mac_table)(struct ksz_device *dev, int port); + void (*phy_setup)(struct ksz_device *dev, int port, + struct phy_device *phy); + void (*port_cleanup)(struct ksz_device *dev, int port); + void (*port_setup)(struct ksz_device *dev, int port, bool cpu_port); + void (*r_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 *val); + void (*w_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 val); + int (*r_dyn_mac_table)(struct ksz_device *dev, u16 addr, u8 *mac_addr, + u8 *fid, u8 *src_port, u8 *timestamp, + u16 *entries); + int (*r_sta_mac_table)(struct ksz_device *dev, u16 addr, + struct alu_struct *alu); + void (*w_sta_mac_table)(struct ksz_device *dev, u16 addr, + struct alu_struct *alu); + void (*r_mib_cnt)(struct ksz_device *dev, int port, u16 addr, + u64 *cnt); + void (*r_mib_pkt)(struct ksz_device *dev, int port, u16 addr, + u64 *dropped, u64 *cnt); + void (*freeze_mib)(struct ksz_device *dev, int port, bool freeze); + void (*port_init_cnt)(struct ksz_device *dev, int port); + int (*shutdown)(struct ksz_device *dev); + int (*detect)(struct ksz_device *dev); + int (*init)(struct ksz_device *dev); + void (*exit)(struct ksz_device *dev); +}; + +struct ksz_device *ksz_switch_alloc(struct device *base, void *priv); +int ksz_switch_register(struct ksz_device *dev, + const struct ksz_dev_ops *ops); +void ksz_switch_remove(struct ksz_device *dev); + +int ksz8795_switch_register(struct ksz_device *dev); +int ksz9477_switch_register(struct ksz_device *dev); -void ksz_port_cleanup(struct ksz_device *dev, int port); void ksz_update_port_member(struct ksz_device *dev, int port); void ksz_init_mib_timer(struct ksz_device *dev); @@ -68,6 +211,22 @@ static inline int ksz_read32(struct ksz_device *dev, u32 reg, u32 *val) return ret; } +static inline int ksz_read64(struct ksz_device *dev, u32 reg, u64 *val) +{ + u32 value[2]; + int ret; + + ret = regmap_bulk_read(dev->regmap[2], reg, value, 2); + if (!ret) { + /* Ick! ToDo: Add 64bit R/W to regmap on 32bit systems */ + value[0] = swab32(value[0]); + value[1] = swab32(value[1]); + *val = swab64(*(u64 *)value); + } + + return ret; +} + static inline int ksz_write8(struct ksz_device *dev, u32 reg, u8 value) { return regmap_write(dev->regmap[0], reg, value); @@ -83,6 +242,18 @@ static inline int ksz_write32(struct ksz_device *dev, u32 reg, u32 value) { return regmap_write(dev->regmap[2], reg, value); } +static inline int ksz_write64(struct ksz_device *dev, u32 reg, u64 value) +{ + u32 val[2]; + + /* Ick!
ToDo: Add 64bit R/W to regmap on 32bit systems */ + value = swab64(value); + val[0] = swab32(value & 0xffffffffULL); + val[1] = swab32(value >> 32ULL); + + return regmap_bulk_write(dev->regmap[2], reg, val, 2); +} + static inline void ksz_pread8(struct ksz_device *dev, int port, int offset, u8 *data) { @@ -123,6 +294,8 @@ static inline void ksz_pwrite32(struct ksz_device *dev, int port, int offset, #define KSZ_SPI_OP_RD 3 #define KSZ_SPI_OP_WR 2 +#define swabnot_used(x) 0 + #define KSZ_SPI_OP_FLAG_MASK(opcode, swp, regbits, regpad) \ swab##swp((opcode) << ((regbits) + (regpad))) diff --git a/drivers/net/dsa/microchip/ksz_priv.h b/drivers/net/dsa/microchip/ksz_priv.h deleted file mode 100644 index beacf0e40f42..000000000000 --- a/drivers/net/dsa/microchip/ksz_priv.h +++ /dev/null @@ -1,155 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 - * - * Microchip KSZ series switch common definitions - * - * Copyright (C) 2017-2019 Microchip Technology Inc. - */ - -#ifndef __KSZ_PRIV_H -#define __KSZ_PRIV_H - -#include <linux/kernel.h> -#include <linux/mutex.h> -#include <linux/phy.h> -#include <linux/etherdevice.h> -#include <net/dsa.h> - -struct vlan_table { - u32 table[3]; -}; - -struct ksz_port_mib { - struct mutex cnt_mutex; /* structure access */ - u8 cnt_ptr; - u64 *counters; -}; - -struct ksz_port { - u16 member; - u16 vid_member; - int stp_state; - struct phy_device phydev; - - u32 on:1; /* port is not disabled by hardware */ - u32 phy:1; /* port has a PHY */ - u32 fiber:1; /* port is fiber */ - u32 sgmii:1; /* port is SGMII */ - u32 force:1; - u32 read:1; /* read MIB counters in background */ - u32 freeze:1; /* MIB counter freeze is enabled */ - - struct ksz_port_mib mib; -}; - -struct ksz_device { - struct dsa_switch *ds; - struct ksz_platform_data *pdata; - const char *name; - - struct mutex dev_mutex; /* device access */ - struct mutex stats_mutex; /* status access */ - struct mutex alu_mutex; /* ALU access */ - struct mutex vlan_mutex; /* vlan access */ - const struct ksz_dev_ops *dev_ops; - - struct device *dev; - struct regmap *regmap[3]; - - void *priv; - - struct gpio_desc *reset_gpio; /* Optional reset GPIO */ - - /* chip specific data */ - u32 chip_id; - int num_vlans; - int num_alus; - int num_statics; - int cpu_port; /* port connected to CPU */ - int cpu_ports; /* port bitmap can be cpu port */ - int phy_port_cnt; - int port_cnt; - int reg_mib_cnt; - int mib_cnt; - int mib_port_cnt; - int last_port; /* ports after that not used */ - phy_interface_t interface; - u32 regs_size; - bool phy_errata_9477; - bool synclko_125; - - struct vlan_table *vlan_cache; - - struct ksz_port *ports; - struct timer_list mib_read_timer; - struct work_struct mib_read; - unsigned long mib_read_interval; - u16 br_member; - u16 member; - u16 live_ports; - u16 on_ports; /* ports enabled by DSA */ - u16 rx_ports; - u16 tx_ports; - u16 mirror_rx; - u16 mirror_tx; - u32 features; /* chip specific features */ - u32 overrides; /* chip functions set by user */ - u16 host_mask; - u16 port_mask; -}; - -struct alu_struct { - /* entry 1 */ - u8 is_static:1; - u8 is_src_filter:1; - u8 is_dst_filter:1; - u8 prio_age:3; - u32 _reserv_0_1:23; - u8 mstp:3; - /* entry 2 */ - u8 is_override:1; - u8 is_use_fid:1; - u32 _reserv_1_1:23; - u8 port_forward:7; - /* entry 3 & 4*/ - u32 _reserv_2_1:9; - u8 fid:7; - u8 mac[ETH_ALEN]; -}; - -struct ksz_dev_ops { - u32 (*get_port_addr)(int port, int offset); - void (*cfg_port_member)(struct ksz_device *dev, int port, u8 member); - void (*flush_dyn_mac_table)(struct ksz_device *dev, 
int port); - void (*phy_setup)(struct ksz_device *dev, int port, - struct phy_device *phy); - void (*port_cleanup)(struct ksz_device *dev, int port); - void (*port_setup)(struct ksz_device *dev, int port, bool cpu_port); - void (*r_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 *val); - void (*w_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 val); - int (*r_dyn_mac_table)(struct ksz_device *dev, u16 addr, u8 *mac_addr, - u8 *fid, u8 *src_port, u8 *timestamp, - u16 *entries); - int (*r_sta_mac_table)(struct ksz_device *dev, u16 addr, - struct alu_struct *alu); - void (*w_sta_mac_table)(struct ksz_device *dev, u16 addr, - struct alu_struct *alu); - void (*r_mib_cnt)(struct ksz_device *dev, int port, u16 addr, - u64 *cnt); - void (*r_mib_pkt)(struct ksz_device *dev, int port, u16 addr, - u64 *dropped, u64 *cnt); - void (*freeze_mib)(struct ksz_device *dev, int port, bool freeze); - void (*port_init_cnt)(struct ksz_device *dev, int port); - int (*shutdown)(struct ksz_device *dev); - int (*detect)(struct ksz_device *dev); - int (*init)(struct ksz_device *dev); - void (*exit)(struct ksz_device *dev); -}; - -struct ksz_device *ksz_switch_alloc(struct device *base, void *priv); -int ksz_switch_register(struct ksz_device *dev, - const struct ksz_dev_ops *ops); -void ksz_switch_remove(struct ksz_device *dev); - -int ksz9477_switch_register(struct ksz_device *dev); - -#endif diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 3181e95586d6..1d8d36de4d20 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -13,7 +13,7 @@ #include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/of_platform.h> -#include <linux/phy.h> +#include <linux/phylink.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> #include <linux/reset.h> @@ -633,61 +633,75 @@ mt7530_get_sset_count(struct dsa_switch *ds, int port, int sset) return ARRAY_SIZE(mt7530_mib); } -static void mt7530_adjust_link(struct dsa_switch *ds, int port, - struct phy_device *phydev) +static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface) { struct mt7530_priv *priv = ds->priv; + u8 tx_delay = 0; + int val; - if (phy_is_pseudo_fixed_link(phydev)) { - dev_dbg(priv->dev, "phy-mode for master device = %x\n", - phydev->interface); + mutex_lock(&priv->reg_mutex); - /* Setup TX circuit incluing relevant PAD and driving */ - mt7530_pad_clk_setup(ds, phydev->interface); + val = mt7530_read(priv, MT7530_MHWTRAP); - if (priv->id == ID_MT7530) { - /* Setup RX circuit, relevant PAD and driving on the - * host which must be placed after the setup on the - * device side is all finished. 
- */ - mt7623_pad_clk_setup(ds); - } - } else { - u16 lcl_adv = 0, rmt_adv = 0; - u8 flowctrl; - u32 mcr = PMCR_USERP_LINK | PMCR_FORCE_MODE; + val |= MHWTRAP_MANUAL | MHWTRAP_P5_MAC_SEL | MHWTRAP_P5_DIS; + val &= ~MHWTRAP_P5_RGMII_MODE & ~MHWTRAP_PHY0_SEL; - switch (phydev->speed) { - case SPEED_1000: - mcr |= PMCR_FORCE_SPEED_1000; - break; - case SPEED_100: - mcr |= PMCR_FORCE_SPEED_100; - break; - } + switch (priv->p5_intf_sel) { + case P5_INTF_SEL_PHY_P0: + /* MT7530_P5_MODE_GPHY_P0: 2nd GMAC -> P5 -> P0 */ + val |= MHWTRAP_PHY0_SEL; + /* fall through */ + case P5_INTF_SEL_PHY_P4: + /* MT7530_P5_MODE_GPHY_P4: 2nd GMAC -> P5 -> P4 */ + val &= ~MHWTRAP_P5_MAC_SEL & ~MHWTRAP_P5_DIS; - if (phydev->link) - mcr |= PMCR_FORCE_LNK; + /* Setup the MAC by default for the cpu port */ + mt7530_write(priv, MT7530_PMCR_P(5), 0x56300); + break; + case P5_INTF_SEL_GMAC5: + /* MT7530_P5_MODE_GMAC: P5 -> External phy or 2nd GMAC */ + val &= ~MHWTRAP_P5_DIS; + break; + case P5_DISABLED: + interface = PHY_INTERFACE_MODE_NA; + break; + default: + dev_err(ds->dev, "Unsupported p5_intf_sel %d\n", + priv->p5_intf_sel); + goto unlock_exit; + } - if (phydev->duplex) { - mcr |= PMCR_FORCE_FDX; + /* Setup RGMII settings */ + if (phy_interface_mode_is_rgmii(interface)) { + val |= MHWTRAP_P5_RGMII_MODE; - if (phydev->pause) - rmt_adv = LPA_PAUSE_CAP; - if (phydev->asym_pause) - rmt_adv |= LPA_PAUSE_ASYM; + /* P5 RGMII RX Clock Control: delay setting for 1000M */ + mt7530_write(priv, MT7530_P5RGMIIRXCR, CSR_RGMII_EDGE_ALIGN); - lcl_adv = linkmode_adv_to_lcl_adv_t( - phydev->advertising); - flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); + /* Don't set delay in DSA mode */ + if (!dsa_is_dsa_port(priv->ds, 5) && + (interface == PHY_INTERFACE_MODE_RGMII_TXID || + interface == PHY_INTERFACE_MODE_RGMII_ID)) + tx_delay = 4; /* n * 0.5 ns */ - if (flowctrl & FLOW_CTRL_TX) - mcr |= PMCR_TX_FC_EN; - if (flowctrl & FLOW_CTRL_RX) - mcr |= PMCR_RX_FC_EN; - } - mt7530_write(priv, MT7530_PMCR_P(port), mcr); + /* P5 RGMII TX Clock Control: delay x */ + mt7530_write(priv, MT7530_P5RGMIITXCR, + CSR_RGMII_TXC_CFG(0x10 + tx_delay)); + + /* reduce P5 RGMII Tx driving, 8mA */ + mt7530_write(priv, MT7530_IO_DRV_CR, + P5_IO_CLK_DRV(1) | P5_IO_DATA_DRV(1)); } + + mt7530_write(priv, MT7530_MHWTRAP, val); + + dev_dbg(ds->dev, "Setup P5, HWTRAP=0x%x, intf_sel=%s, phy-mode=%s\n", + val, p5_intf_modes(priv->p5_intf_sel), phy_modes(interface)); + + priv->p5_interface = interface; + +unlock_exit: + mutex_unlock(&priv->reg_mutex); } static int @@ -698,9 +712,6 @@ mt7530_cpu_port_enable(struct mt7530_priv *priv, mt7530_write(priv, MT7530_PVC_P(port), PORT_SPEC_TAG); - /* Setup the MAC by default for the cpu port */ - mt7530_write(priv, MT7530_PMCR_P(port), PMCR_CPUP_LINK); - /* Disable auto learning on the cpu port */ mt7530_set(priv, MT7530_PSC_P(port), SA_DIS); @@ -726,10 +737,10 @@ mt7530_port_enable(struct dsa_switch *ds, int port, { struct mt7530_priv *priv = ds->priv; - mutex_lock(&priv->reg_mutex); + if (!dsa_is_user_port(ds, port)) + return 0; - /* Setup the MAC for the user port */ - mt7530_write(priv, MT7530_PMCR_P(port), PMCR_USERP_LINK); + mutex_lock(&priv->reg_mutex); /* Allow the user port gets connected to the cpu port and also * restore the port matrix if the port is the member of a certain @@ -739,7 +750,7 @@ mt7530_port_enable(struct dsa_switch *ds, int port, priv->ports[port].enable = true; mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK, priv->ports[port].pm); - mt7530_port_set_status(priv, port, 1); + 
mt7530_port_set_status(priv, port, 0); mutex_unlock(&priv->reg_mutex); @@ -751,6 +762,9 @@ mt7530_port_disable(struct dsa_switch *ds, int port) { struct mt7530_priv *priv = ds->priv; + if (!dsa_is_user_port(ds, port)) + return; + mutex_lock(&priv->reg_mutex); /* Clear up all port matrix which could be restored in the next @@ -1226,10 +1240,13 @@ static int mt7530_setup(struct dsa_switch *ds) { struct mt7530_priv *priv = ds->priv; - int ret, i; - u32 id, val; - struct device_node *dn; + struct device_node *phy_node; + struct device_node *mac_np; struct mt7530_dummy_poll p; + phy_interface_t interface; + struct device_node *dn; + u32 id, val; + int ret, i; /* The parent node of master netdev which holds the common system * controller also is the container for two GMACs nodes representing @@ -1299,6 +1316,8 @@ mt7530_setup(struct dsa_switch *ds) val |= MHWTRAP_MANUAL; mt7530_write(priv, MT7530_MHWTRAP, val); + priv->p6_interface = PHY_INTERFACE_MODE_NA; + /* Enable and reset MIB counters */ mt7530_mib_reset(ds); @@ -1315,6 +1334,40 @@ mt7530_setup(struct dsa_switch *ds) mt7530_port_disable(ds, i); } + /* Setup port 5 */ + priv->p5_intf_sel = P5_DISABLED; + interface = PHY_INTERFACE_MODE_NA; + + if (!dsa_is_unused_port(ds, 5)) { + priv->p5_intf_sel = P5_INTF_SEL_GMAC5; + interface = of_get_phy_mode(ds->ports[5].dn); + } else { + /* Scan the ethernet nodes. look for GMAC1, lookup used phy */ + for_each_child_of_node(dn, mac_np) { + if (!of_device_is_compatible(mac_np, + "mediatek,eth-mac")) + continue; + + ret = of_property_read_u32(mac_np, "reg", &id); + if (ret < 0 || id != 1) + continue; + + phy_node = of_parse_phandle(mac_np, "phy-handle", 0); + if (phy_node->parent == priv->dev->of_node->parent) { + interface = of_get_phy_mode(mac_np); + id = of_mdio_parse_addr(ds->dev, phy_node); + if (id == 0) + priv->p5_intf_sel = P5_INTF_SEL_PHY_P0; + if (id == 4) + priv->p5_intf_sel = P5_INTF_SEL_PHY_P4; + } + of_node_put(phy_node); + break; + } + } + + mt7530_setup_port5(ds, interface); + /* Flush the FDB table */ ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL); if (ret < 0) @@ -1323,6 +1376,216 @@ mt7530_setup(struct dsa_switch *ds) return 0; } +static void mt7530_phylink_mac_config(struct dsa_switch *ds, int port, + unsigned int mode, + const struct phylink_link_state *state) +{ + struct mt7530_priv *priv = ds->priv; + u32 mcr_cur, mcr_new; + + switch (port) { + case 0: /* Internal phy */ + case 1: + case 2: + case 3: + case 4: + if (state->interface != PHY_INTERFACE_MODE_GMII) + return; + break; + case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */ + if (priv->p5_interface == state->interface) + break; + if (!phy_interface_mode_is_rgmii(state->interface) && + state->interface != PHY_INTERFACE_MODE_MII && + state->interface != PHY_INTERFACE_MODE_GMII) + return; + + mt7530_setup_port5(ds, state->interface); + break; + case 6: /* 1st cpu port */ + if (priv->p6_interface == state->interface) + break; + + if (state->interface != PHY_INTERFACE_MODE_RGMII && + state->interface != PHY_INTERFACE_MODE_TRGMII) + return; + + /* Setup TX circuit incluing relevant PAD and driving */ + mt7530_pad_clk_setup(ds, state->interface); + + if (priv->id == ID_MT7530) { + /* Setup RX circuit, relevant PAD and driving on the + * host which must be placed after the setup on the + * device side is all finished. 
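+ * Editor's note: this path takes over what the removed mt7530_adjust_link() used to do for the pseudo fixed-link CPU port; the TX/RX pad and clock setup now runs once from phylink's mac_config whenever the port 6 interface changes.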
+ */ + mt7623_pad_clk_setup(ds); + } + + priv->p6_interface = state->interface; + break; + default: + dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port); + return; + } + + if (phylink_autoneg_inband(mode)) { + dev_err(ds->dev, "%s: in-band negotiation unsupported\n", + __func__); + return; + } + + mcr_cur = mt7530_read(priv, MT7530_PMCR_P(port)); + mcr_new = mcr_cur; + mcr_new &= ~(PMCR_FORCE_SPEED_1000 | PMCR_FORCE_SPEED_100 | + PMCR_FORCE_FDX | PMCR_TX_FC_EN | PMCR_RX_FC_EN); + mcr_new |= PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | PMCR_BACKOFF_EN | + PMCR_BACKPR_EN | PMCR_FORCE_MODE | PMCR_FORCE_LNK; + + /* Are we connected to external phy */ + if (port == 5 && dsa_is_user_port(ds, 5)) + mcr_new |= PMCR_EXT_PHY; + + switch (state->speed) { + case SPEED_1000: + mcr_new |= PMCR_FORCE_SPEED_1000; + break; + case SPEED_100: + mcr_new |= PMCR_FORCE_SPEED_100; + break; + } + if (state->duplex == DUPLEX_FULL) { + mcr_new |= PMCR_FORCE_FDX; + if (state->pause & MLO_PAUSE_TX) + mcr_new |= PMCR_TX_FC_EN; + if (state->pause & MLO_PAUSE_RX) + mcr_new |= PMCR_RX_FC_EN; + } + + if (mcr_new != mcr_cur) + mt7530_write(priv, MT7530_PMCR_P(port), mcr_new); +} + +static void mt7530_phylink_mac_link_down(struct dsa_switch *ds, int port, + unsigned int mode, + phy_interface_t interface) +{ + struct mt7530_priv *priv = ds->priv; + + mt7530_port_set_status(priv, port, 0); +} + +static void mt7530_phylink_mac_link_up(struct dsa_switch *ds, int port, + unsigned int mode, + phy_interface_t interface, + struct phy_device *phydev) +{ + struct mt7530_priv *priv = ds->priv; + + mt7530_port_set_status(priv, port, 1); +} + +static void mt7530_phylink_validate(struct dsa_switch *ds, int port, + unsigned long *supported, + struct phylink_link_state *state) +{ + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + + switch (port) { + case 0: /* Internal phy */ + case 1: + case 2: + case 3: + case 4: + if (state->interface != PHY_INTERFACE_MODE_NA && + state->interface != PHY_INTERFACE_MODE_GMII) + goto unsupported; + break; + case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */ + if (state->interface != PHY_INTERFACE_MODE_NA && + !phy_interface_mode_is_rgmii(state->interface) && + state->interface != PHY_INTERFACE_MODE_MII && + state->interface != PHY_INTERFACE_MODE_GMII) + goto unsupported; + break; + case 6: /* 1st cpu port */ + if (state->interface != PHY_INTERFACE_MODE_NA && + state->interface != PHY_INTERFACE_MODE_RGMII && + state->interface != PHY_INTERFACE_MODE_TRGMII) + goto unsupported; + break; + default: + dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port); +unsupported: + linkmode_zero(supported); + return; + } + + phylink_set_port_modes(mask); + phylink_set(mask, Autoneg); + + if (state->interface == PHY_INTERFACE_MODE_TRGMII) { + phylink_set(mask, 1000baseT_Full); + } else { + phylink_set(mask, 10baseT_Half); + phylink_set(mask, 10baseT_Full); + phylink_set(mask, 100baseT_Half); + phylink_set(mask, 100baseT_Full); + + if (state->interface != PHY_INTERFACE_MODE_MII) { + phylink_set(mask, 1000baseT_Half); + phylink_set(mask, 1000baseT_Full); + if (port == 5) + phylink_set(mask, 1000baseX_Full); + } + } + + phylink_set(mask, Pause); + phylink_set(mask, Asym_Pause); + + linkmode_and(supported, supported, mask); + linkmode_and(state->advertising, state->advertising, mask); +} + +static int +mt7530_phylink_mac_link_state(struct dsa_switch *ds, int port, + struct phylink_link_state *state) +{ + struct mt7530_priv *priv = ds->priv; + u32 pmsr; + + if (port < 0 || port >= MT7530_NUM_PORTS) 
+ return -EINVAL; + + pmsr = mt7530_read(priv, MT7530_PMSR_P(port)); + + state->link = (pmsr & PMSR_LINK); + state->an_complete = state->link; + state->duplex = !!(pmsr & PMSR_DPX); + + switch (pmsr & PMSR_SPEED_MASK) { + case PMSR_SPEED_10: + state->speed = SPEED_10; + break; + case PMSR_SPEED_100: + state->speed = SPEED_100; + break; + case PMSR_SPEED_1000: + state->speed = SPEED_1000; + break; + default: + state->speed = SPEED_UNKNOWN; + break; + } + + state->pause &= ~(MLO_PAUSE_RX | MLO_PAUSE_TX); + if (pmsr & PMSR_RX_FC) + state->pause |= MLO_PAUSE_RX; + if (pmsr & PMSR_TX_FC) + state->pause |= MLO_PAUSE_TX; + + return 1; +} + static const struct dsa_switch_ops mt7530_switch_ops = { .get_tag_protocol = mtk_get_tag_protocol, .setup = mt7530_setup, @@ -1331,7 +1594,6 @@ static const struct dsa_switch_ops mt7530_switch_ops = { .phy_write = mt7530_phy_write, .get_ethtool_stats = mt7530_get_ethtool_stats, .get_sset_count = mt7530_get_sset_count, - .adjust_link = mt7530_adjust_link, .port_enable = mt7530_port_enable, .port_disable = mt7530_port_disable, .port_stp_state_set = mt7530_stp_state_set, @@ -1344,6 +1606,11 @@ static const struct dsa_switch_ops mt7530_switch_ops = { .port_vlan_prepare = mt7530_port_vlan_prepare, .port_vlan_add = mt7530_port_vlan_add, .port_vlan_del = mt7530_port_vlan_del, + .phylink_validate = mt7530_phylink_validate, + .phylink_mac_link_state = mt7530_phylink_mac_link_state, + .phylink_mac_config = mt7530_phylink_mac_config, + .phylink_mac_link_down = mt7530_phylink_mac_link_down, + .phylink_mac_link_up = mt7530_phylink_mac_link_up, }; static const struct of_device_id mt7530_of_match[] = { diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h index bfac90f48102..ccb9da8cad0d 100644 --- a/drivers/net/dsa/mt7530.h +++ b/drivers/net/dsa/mt7530.h @@ -186,6 +186,7 @@ enum mt7530_vlan_port_attr { /* Register for port MAC control register */ #define MT7530_PMCR_P(x) (0x3000 + ((x) * 0x100)) #define PMCR_IFG_XMIT(x) (((x) & 0x3) << 18) +#define PMCR_EXT_PHY BIT(17) #define PMCR_MAC_MODE BIT(16) #define PMCR_FORCE_MODE BIT(15) #define PMCR_TX_EN BIT(14) @@ -198,26 +199,20 @@ enum mt7530_vlan_port_attr { #define PMCR_FORCE_SPEED_100 BIT(2) #define PMCR_FORCE_FDX BIT(1) #define PMCR_FORCE_LNK BIT(0) -#define PMCR_COMMON_LINK (PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \ - PMCR_BACKOFF_EN | PMCR_BACKPR_EN | \ - PMCR_TX_EN | PMCR_RX_EN | \ - PMCR_TX_FC_EN | PMCR_RX_FC_EN) -#define PMCR_CPUP_LINK (PMCR_COMMON_LINK | PMCR_FORCE_MODE | \ - PMCR_FORCE_SPEED_1000 | \ - PMCR_FORCE_FDX | \ - PMCR_FORCE_LNK) -#define PMCR_USERP_LINK PMCR_COMMON_LINK -#define PMCR_FIXED_LINK (PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \ - PMCR_FORCE_MODE | PMCR_TX_EN | \ - PMCR_RX_EN | PMCR_BACKPR_EN | \ - PMCR_BACKOFF_EN | \ - PMCR_FORCE_SPEED_1000 | \ - PMCR_FORCE_FDX | \ - PMCR_FORCE_LNK) -#define PMCR_FIXED_LINK_FC (PMCR_FIXED_LINK | \ - PMCR_TX_FC_EN | PMCR_RX_FC_EN) +#define PMCR_SPEED_MASK (PMCR_FORCE_SPEED_100 | \ + PMCR_FORCE_SPEED_1000) #define MT7530_PMSR_P(x) (0x3008 + (x) * 0x100) +#define PMSR_EEE1G BIT(7) +#define PMSR_EEE100M BIT(6) +#define PMSR_RX_FC BIT(5) +#define PMSR_TX_FC BIT(4) +#define PMSR_SPEED_1000 BIT(3) +#define PMSR_SPEED_100 BIT(2) +#define PMSR_SPEED_10 0x00 +#define PMSR_SPEED_MASK (PMSR_SPEED_100 | PMSR_SPEED_1000) +#define PMSR_DPX BIT(1) +#define PMSR_LINK BIT(0) /* Register for MIB */ #define MT7530_PORT_MIB_COUNTER(x) (0x4000 + (x) * 0x100) @@ -251,6 +246,7 @@ enum mt7530_vlan_port_attr { /* Register for hw trap modification */ #define MT7530_MHWTRAP 0x7804 +#define 
MHWTRAP_PHY0_SEL BIT(20) #define MHWTRAP_MANUAL BIT(16) #define MHWTRAP_P5_MAC_SEL BIT(13) #define MHWTRAP_P6_DIS BIT(8) @@ -408,6 +404,30 @@ struct mt7530_port { u16 pvid; }; +/* Port 5 interface select definitions */ +enum p5_interface_select { + P5_DISABLED = 0, + P5_INTF_SEL_PHY_P0, + P5_INTF_SEL_PHY_P4, + P5_INTF_SEL_GMAC5, +}; + +static const char *p5_intf_modes(unsigned int p5_interface) +{ + switch (p5_interface) { + case P5_DISABLED: + return "DISABLED"; + case P5_INTF_SEL_PHY_P0: + return "PHY P0"; + case P5_INTF_SEL_PHY_P4: + return "PHY P4"; + case P5_INTF_SEL_GMAC5: + return "GMAC5"; + default: + return "unknown"; + } +} + /* struct mt7530_priv - This is the main data structure for holding the state * of the driver * @dev: The device pointer @@ -423,6 +443,8 @@ struct mt7530_port { * @ports: Holding the state among ports * @reg_mutex: The lock for protecting among process accessing * registers + * @p6_interface Holding the current port 6 interface + * @p5_intf_sel: Holding the current port 5 interface select */ struct mt7530_priv { struct device *dev; @@ -435,6 +457,9 @@ struct mt7530_priv { struct gpio_desc *reset; unsigned int id; bool mcm; + phy_interface_t p6_interface; + phy_interface_t p5_interface; + unsigned int p5_intf_sel; struct mt7530_port ports[MT7530_NUM_PORTS]; /* protect among processes for registers access*/ diff --git a/drivers/net/dsa/mv88e6xxx/Makefile b/drivers/net/dsa/mv88e6xxx/Makefile index e85755dde90b..aa645ff86f64 100644 --- a/drivers/net/dsa/mv88e6xxx/Makefile +++ b/drivers/net/dsa/mv88e6xxx/Makefile @@ -10,6 +10,7 @@ mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_GLOBAL2) += global2_scratch.o mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_PTP) += hwtstamp.o mv88e6xxx-objs += phy.o mv88e6xxx-objs += port.o +mv88e6xxx-objs += port_hidden.o mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_PTP) += ptp.o mv88e6xxx-objs += serdes.o mv88e6xxx-objs += smi.o diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index d0a97eb73a37..6787d560e9e3 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -10,6 +10,7 @@ * Vivien Didelot <vivien.didelot@savoirfairelinux.com> */ +#include <linux/bitfield.h> #include <linux/delay.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> @@ -80,6 +81,36 @@ int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val) return 0; } +int mv88e6xxx_wait_mask(struct mv88e6xxx_chip *chip, int addr, int reg, + u16 mask, u16 val) +{ + u16 data; + int err; + int i; + + /* There's no bus specific operation to wait for a mask */ + for (i = 0; i < 16; i++) { + err = mv88e6xxx_read(chip, addr, reg, &data); + if (err) + return err; + + if ((data & mask) == val) + return 0; + + usleep_range(1000, 2000); + } + + dev_err(chip->dev, "Timeout while waiting for switch\n"); + return -ETIMEDOUT; +} + +int mv88e6xxx_wait_bit(struct mv88e6xxx_chip *chip, int addr, int reg, + int bit, int val) +{ + return mv88e6xxx_wait_mask(chip, addr, reg, BIT(bit), + val ? 
BIT(bit) : 0x0000); +} + struct mii_bus *mv88e6xxx_default_mdio_bus(struct mv88e6xxx_chip *chip) { struct mv88e6xxx_mdio_bus *mdio_bus; @@ -363,45 +394,6 @@ static void mv88e6xxx_irq_poll_free(struct mv88e6xxx_chip *chip) mv88e6xxx_reg_unlock(chip); } -int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int addr, int reg, u16 mask) -{ - int i; - - for (i = 0; i < 16; i++) { - u16 val; - int err; - - err = mv88e6xxx_read(chip, addr, reg, &val); - if (err) - return err; - - if (!(val & mask)) - return 0; - - usleep_range(1000, 2000); - } - - dev_err(chip->dev, "Timeout while waiting for switch\n"); - return -ETIMEDOUT; -} - -/* Indirect write to single pointer-data register with an Update bit */ -int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg, u16 update) -{ - u16 val; - int err; - - /* Wait until the previous operation is completed */ - err = mv88e6xxx_wait(chip, addr, reg, BIT(15)); - if (err) - return err; - - /* Set the Update bit to trigger a write operation */ - val = BIT(15) | update; - - return mv88e6xxx_write(chip, addr, reg, val); -} - int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port, int link, int speed, int duplex, int pause, phy_interface_t mode) @@ -425,7 +417,9 @@ int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port, int link, */ if (state.link == link && state.speed == speed && - state.duplex == duplex) + state.duplex == duplex && + (state.interface == mode || + state.interface == PHY_INTERFACE_MODE_NA)) return 0; /* Port's MAC control must not be changed unless the link is down */ @@ -1336,9 +1330,7 @@ static int mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_chip *chip, static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid) { DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID); - struct mv88e6xxx_vtu_entry vlan = { - .vid = chip->info->max_vid, - }; + struct mv88e6xxx_vtu_entry vlan; int i, err; bitmap_zero(fid_bitmap, MV88E6XXX_N_FID); @@ -1353,6 +1345,9 @@ static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid) } /* Set every FID bit used by the VLAN entries */ + vlan.vid = chip->info->max_vid; + vlan.valid = false; + do { err = mv88e6xxx_vtu_getnext(chip, &vlan); if (err) @@ -1375,51 +1370,11 @@ static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid) return mv88e6xxx_g1_atu_flush(chip, *fid, true); } -static int mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid, - struct mv88e6xxx_vtu_entry *entry, bool new) -{ - int err; - - if (!vid) - return -EOPNOTSUPP; - - entry->vid = vid - 1; - entry->valid = false; - - err = mv88e6xxx_vtu_getnext(chip, entry); - if (err) - return err; - - if (entry->vid == vid && entry->valid) - return 0; - - if (new) { - int i; - - /* Initialize a fresh VLAN entry */ - memset(entry, 0, sizeof(*entry)); - entry->valid = true; - entry->vid = vid; - - /* Exclude all ports */ - for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) - entry->member[i] = - MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER; - - return mv88e6xxx_atu_new(chip, &entry->fid); - } - - /* switchdev expects -EOPNOTSUPP to honor software VLANs */ - return -EOPNOTSUPP; -} - static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port, u16 vid_begin, u16 vid_end) { struct mv88e6xxx_chip *chip = ds->priv; - struct mv88e6xxx_vtu_entry vlan = { - .vid = vid_begin - 1, - }; + struct mv88e6xxx_vtu_entry vlan; int i, err; /* DSA and CPU ports have to be members of multiple vlans */ @@ -1429,12 +1384,13 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port, if (!vid_begin) return 
-EOPNOTSUPP; - mv88e6xxx_reg_lock(chip); + vlan.vid = vid_begin - 1; + vlan.valid = false; do { err = mv88e6xxx_vtu_getnext(chip, &vlan); if (err) - goto unlock; + return err; if (!vlan.valid) break; @@ -1463,15 +1419,11 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port, dev_err(ds->dev, "p%d: hw VLAN %d already used by port %d in %s\n", port, vlan.vid, i, netdev_name(dsa_to_port(ds, i)->bridge_dev)); - err = -EOPNOTSUPP; - goto unlock; + return -EOPNOTSUPP; } } while (vlan.vid < vid_end); -unlock: - mv88e6xxx_reg_unlock(chip); - - return err; + return 0; } static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port, @@ -1505,59 +1457,281 @@ mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port, /* If the requested port doesn't belong to the same bridge as the VLAN * members, do not support it (yet) and fallback to software VLAN. */ + mv88e6xxx_reg_lock(chip); err = mv88e6xxx_port_check_hw_vlan(ds, port, vlan->vid_begin, vlan->vid_end); - if (err) - return err; + mv88e6xxx_reg_unlock(chip); /* We don't need any dynamic resource from the kernel (yet), * so skip the prepare phase. */ - return 0; + return err; } static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port, const unsigned char *addr, u16 vid, u8 state) { - struct mv88e6xxx_vtu_entry vlan; struct mv88e6xxx_atu_entry entry; + struct mv88e6xxx_vtu_entry vlan; + u16 fid; int err; /* Null VLAN ID corresponds to the port private database */ - if (vid == 0) - err = mv88e6xxx_port_get_fid(chip, port, &vlan.fid); - else - err = mv88e6xxx_vtu_get(chip, vid, &vlan, false); - if (err) - return err; + if (vid == 0) { + err = mv88e6xxx_port_get_fid(chip, port, &fid); + if (err) + return err; + } else { + vlan.vid = vid - 1; + vlan.valid = false; + + err = mv88e6xxx_vtu_getnext(chip, &vlan); + if (err) + return err; + + /* switchdev expects -EOPNOTSUPP to honor software VLANs */ + if (vlan.vid != vid || !vlan.valid) + return -EOPNOTSUPP; + + fid = vlan.fid; + } - entry.state = MV88E6XXX_G1_ATU_DATA_STATE_UNUSED; + entry.state = 0; ether_addr_copy(entry.mac, addr); eth_addr_dec(entry.mac); - err = mv88e6xxx_g1_atu_getnext(chip, vlan.fid, &entry); + err = mv88e6xxx_g1_atu_getnext(chip, fid, &entry); if (err) return err; /* Initialize a fresh ATU entry if it isn't found */ - if (entry.state == MV88E6XXX_G1_ATU_DATA_STATE_UNUSED || - !ether_addr_equal(entry.mac, addr)) { + if (!entry.state || !ether_addr_equal(entry.mac, addr)) { memset(&entry, 0, sizeof(entry)); ether_addr_copy(entry.mac, addr); } /* Purge the ATU entry only if no port is using it anymore */ - if (state == MV88E6XXX_G1_ATU_DATA_STATE_UNUSED) { + if (!state) { entry.portvec &= ~BIT(port); if (!entry.portvec) - entry.state = MV88E6XXX_G1_ATU_DATA_STATE_UNUSED; + entry.state = 0; } else { entry.portvec |= BIT(port); entry.state = state; } - return mv88e6xxx_g1_atu_loadpurge(chip, vlan.fid, &entry); + return mv88e6xxx_g1_atu_loadpurge(chip, fid, &entry); +} + +static int mv88e6xxx_policy_apply(struct mv88e6xxx_chip *chip, int port, + const struct mv88e6xxx_policy *policy) +{ + enum mv88e6xxx_policy_mapping mapping = policy->mapping; + enum mv88e6xxx_policy_action action = policy->action; + const u8 *addr = policy->addr; + u16 vid = policy->vid; + u8 state; + int err; + int id; + + if (!chip->info->ops->port_set_policy) + return -EOPNOTSUPP; + + switch (mapping) { + case MV88E6XXX_POLICY_MAPPING_DA: + case MV88E6XXX_POLICY_MAPPING_SA: + if (action == MV88E6XXX_POLICY_ACTION_NORMAL) + state = 0; /* Dissociate the port and 
address */ + else if (action == MV88E6XXX_POLICY_ACTION_DISCARD && + is_multicast_ether_addr(addr)) + state = MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC_POLICY; + else if (action == MV88E6XXX_POLICY_ACTION_DISCARD && + is_unicast_ether_addr(addr)) + state = MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC_POLICY; + else + return -EOPNOTSUPP; + + err = mv88e6xxx_port_db_load_purge(chip, port, addr, vid, + state); + if (err) + return err; + break; + default: + return -EOPNOTSUPP; + } + + /* Skip the port's policy clearing if the mapping is still in use */ + if (action == MV88E6XXX_POLICY_ACTION_NORMAL) + idr_for_each_entry(&chip->policies, policy, id) + if (policy->port == port && + policy->mapping == mapping && + policy->action != action) + return 0; + + return chip->info->ops->port_set_policy(chip, port, mapping, action); +} + +static int mv88e6xxx_policy_insert(struct mv88e6xxx_chip *chip, int port, + struct ethtool_rx_flow_spec *fs) +{ + struct ethhdr *mac_entry = &fs->h_u.ether_spec; + struct ethhdr *mac_mask = &fs->m_u.ether_spec; + enum mv88e6xxx_policy_mapping mapping; + enum mv88e6xxx_policy_action action; + struct mv88e6xxx_policy *policy; + u16 vid = 0; + u8 *addr; + int err; + int id; + + if (fs->location != RX_CLS_LOC_ANY) + return -EINVAL; + + if (fs->ring_cookie == RX_CLS_FLOW_DISC) + action = MV88E6XXX_POLICY_ACTION_DISCARD; + else + return -EOPNOTSUPP; + + switch (fs->flow_type & ~FLOW_EXT) { + case ETHER_FLOW: + if (!is_zero_ether_addr(mac_mask->h_dest) && + is_zero_ether_addr(mac_mask->h_source)) { + mapping = MV88E6XXX_POLICY_MAPPING_DA; + addr = mac_entry->h_dest; + } else if (is_zero_ether_addr(mac_mask->h_dest) && + !is_zero_ether_addr(mac_mask->h_source)) { + mapping = MV88E6XXX_POLICY_MAPPING_SA; + addr = mac_entry->h_source; + } else { + /* Cannot support DA and SA mapping in the same rule */ + return -EOPNOTSUPP; + } + break; + default: + return -EOPNOTSUPP; + } + + if ((fs->flow_type & FLOW_EXT) && fs->m_ext.vlan_tci) { + if (fs->m_ext.vlan_tci != 0xffff) + return -EOPNOTSUPP; + vid = be16_to_cpu(fs->h_ext.vlan_tci) & VLAN_VID_MASK; + } + + idr_for_each_entry(&chip->policies, policy, id) { + if (policy->port == port && policy->mapping == mapping && + policy->action == action && policy->vid == vid && + ether_addr_equal(policy->addr, addr)) + return -EEXIST; + } + + policy = devm_kzalloc(chip->dev, sizeof(*policy), GFP_KERNEL); + if (!policy) + return -ENOMEM; + + fs->location = 0; + err = idr_alloc_u32(&chip->policies, policy, &fs->location, 0xffffffff, + GFP_KERNEL); + if (err) { + devm_kfree(chip->dev, policy); + return err; + } + + memcpy(&policy->fs, fs, sizeof(*fs)); + ether_addr_copy(policy->addr, addr); + policy->mapping = mapping; + policy->action = action; + policy->port = port; + policy->vid = vid; + + err = mv88e6xxx_policy_apply(chip, port, policy); + if (err) { + idr_remove(&chip->policies, fs->location); + devm_kfree(chip->dev, policy); + return err; + } + + return 0; +} + +static int mv88e6xxx_get_rxnfc(struct dsa_switch *ds, int port, + struct ethtool_rxnfc *rxnfc, u32 *rule_locs) +{ + struct ethtool_rx_flow_spec *fs = &rxnfc->fs; + struct mv88e6xxx_chip *chip = ds->priv; + struct mv88e6xxx_policy *policy; + int err; + int id; + + mv88e6xxx_reg_lock(chip); + + switch (rxnfc->cmd) { + case ETHTOOL_GRXCLSRLCNT: + rxnfc->data = 0; + rxnfc->data |= RX_CLS_LOC_SPECIAL; + rxnfc->rule_cnt = 0; + idr_for_each_entry(&chip->policies, policy, id) + if (policy->port == port) + rxnfc->rule_cnt++; + err = 0; + break; + case ETHTOOL_GRXCLSRULE: + err = -ENOENT; + policy = 
idr_find(&chip->policies, fs->location); + if (policy) { + memcpy(fs, &policy->fs, sizeof(*fs)); + err = 0; + } + break; + case ETHTOOL_GRXCLSRLALL: + rxnfc->data = 0; + rxnfc->rule_cnt = 0; + idr_for_each_entry(&chip->policies, policy, id) + if (policy->port == port) + rule_locs[rxnfc->rule_cnt++] = id; + err = 0; + break; + default: + err = -EOPNOTSUPP; + break; + } + + mv88e6xxx_reg_unlock(chip); + + return err; +} + +static int mv88e6xxx_set_rxnfc(struct dsa_switch *ds, int port, + struct ethtool_rxnfc *rxnfc) +{ + struct ethtool_rx_flow_spec *fs = &rxnfc->fs; + struct mv88e6xxx_chip *chip = ds->priv; + struct mv88e6xxx_policy *policy; + int err; + + mv88e6xxx_reg_lock(chip); + + switch (rxnfc->cmd) { + case ETHTOOL_SRXCLSRLINS: + err = mv88e6xxx_policy_insert(chip, port, fs); + break; + case ETHTOOL_SRXCLSRLDEL: + err = -ENOENT; + policy = idr_remove(&chip->policies, fs->location); + if (policy) { + policy->action = MV88E6XXX_POLICY_ACTION_NORMAL; + err = mv88e6xxx_policy_apply(chip, port, policy); + devm_kfree(chip->dev, policy); + } + break; + default: + err = -EOPNOTSUPP; + break; + } + + mv88e6xxx_reg_unlock(chip); + + return err; } static int mv88e6xxx_port_add_broadcast(struct mv88e6xxx_chip *chip, int port, @@ -1583,23 +1757,58 @@ static int mv88e6xxx_broadcast_setup(struct mv88e6xxx_chip *chip, u16 vid) return 0; } -static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_chip *chip, int port, +static int mv88e6xxx_port_vlan_join(struct mv88e6xxx_chip *chip, int port, u16 vid, u8 member) { + const u8 non_member = MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER; struct mv88e6xxx_vtu_entry vlan; - int err; + int i, err; - err = mv88e6xxx_vtu_get(chip, vid, &vlan, true); - if (err) - return err; + if (!vid) + return -EOPNOTSUPP; - vlan.member[port] = member; + vlan.vid = vid - 1; + vlan.valid = false; - err = mv88e6xxx_vtu_loadpurge(chip, &vlan); + err = mv88e6xxx_vtu_getnext(chip, &vlan); if (err) return err; - return mv88e6xxx_broadcast_setup(chip, vid); + if (vlan.vid != vid || !vlan.valid) { + memset(&vlan, 0, sizeof(vlan)); + + err = mv88e6xxx_atu_new(chip, &vlan.fid); + if (err) + return err; + + for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) + if (i == port) + vlan.member[i] = member; + else + vlan.member[i] = non_member; + + vlan.vid = vid; + vlan.valid = true; + + err = mv88e6xxx_vtu_loadpurge(chip, &vlan); + if (err) + return err; + + err = mv88e6xxx_broadcast_setup(chip, vlan.vid); + if (err) + return err; + } else if (vlan.member[port] != member) { + vlan.member[port] = member; + + err = mv88e6xxx_vtu_loadpurge(chip, &vlan); + if (err) + return err; + } else { + dev_info(chip->dev, "p%d: already a member of VLAN %d\n", + port, vid); + } + + return 0; } static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, @@ -1624,7 +1833,7 @@ static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, mv88e6xxx_reg_lock(chip); for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) - if (_mv88e6xxx_port_vlan_add(chip, port, vid, member)) + if (mv88e6xxx_port_vlan_join(chip, port, vid, member)) dev_err(ds->dev, "p%d: failed to add VLAN %d%c\n", port, vid, untagged ? 
'u' : 't'); @@ -1635,18 +1844,27 @@ static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, mv88e6xxx_reg_unlock(chip); } -static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_chip *chip, - int port, u16 vid) +static int mv88e6xxx_port_vlan_leave(struct mv88e6xxx_chip *chip, + int port, u16 vid) { struct mv88e6xxx_vtu_entry vlan; int i, err; - err = mv88e6xxx_vtu_get(chip, vid, &vlan, false); + if (!vid) + return -EOPNOTSUPP; + + vlan.vid = vid - 1; + vlan.valid = false; + + err = mv88e6xxx_vtu_getnext(chip, &vlan); if (err) return err; - /* Tell switchdev if this VLAN is handled in software */ - if (vlan.member[port] == MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER) + /* If the VLAN doesn't exist in hardware or the port isn't a member, + * tell switchdev that this VLAN is likely handled in software. + */ + if (vlan.vid != vid || !vlan.valid || + vlan.member[port] == MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER) return -EOPNOTSUPP; vlan.member[port] = MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER; @@ -1685,7 +1903,7 @@ static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, goto unlock; for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { - err = _mv88e6xxx_port_vlan_del(chip, port, vid); + err = mv88e6xxx_port_vlan_leave(chip, port, vid); if (err) goto unlock; @@ -1723,8 +1941,7 @@ static int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port, int err; mv88e6xxx_reg_lock(chip); - err = mv88e6xxx_port_db_load_purge(chip, port, addr, vid, - MV88E6XXX_G1_ATU_DATA_STATE_UNUSED); + err = mv88e6xxx_port_db_load_purge(chip, port, addr, vid, 0); mv88e6xxx_reg_unlock(chip); return err; @@ -1738,7 +1955,7 @@ static int mv88e6xxx_port_db_dump_fid(struct mv88e6xxx_chip *chip, bool is_static; int err; - addr.state = MV88E6XXX_G1_ATU_DATA_STATE_UNUSED; + addr.state = 0; eth_broadcast_addr(addr.mac); do { @@ -1746,7 +1963,7 @@ static int mv88e6xxx_port_db_dump_fid(struct mv88e6xxx_chip *chip, if (err) return err; - if (addr.state == MV88E6XXX_G1_ATU_DATA_STATE_UNUSED) + if (!addr.state) break; if (addr.trunk || (addr.portvec & BIT(port)) == 0) @@ -1768,9 +1985,7 @@ static int mv88e6xxx_port_db_dump_fid(struct mv88e6xxx_chip *chip, static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port, dsa_fdb_dump_cb_t *cb, void *data) { - struct mv88e6xxx_vtu_entry vlan = { - .vid = chip->info->max_vid, - }; + struct mv88e6xxx_vtu_entry vlan; u16 fid; int err; @@ -1784,6 +1999,9 @@ static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port, return err; /* Dump VLANs' Filtering Information Databases */ + vlan.vid = chip->info->max_vid; + vlan.valid = false; + do { err = mv88e6xxx_vtu_getnext(chip, &vlan); if (err) @@ -2044,13 +2262,96 @@ static int mv88e6xxx_setup_egress_floods(struct mv88e6xxx_chip *chip, int port) return 0; } +static irqreturn_t mv88e6xxx_serdes_irq_thread_fn(int irq, void *dev_id) +{ + struct mv88e6xxx_port *mvp = dev_id; + struct mv88e6xxx_chip *chip = mvp->chip; + irqreturn_t ret = IRQ_NONE; + int port = mvp->port; + u8 lane; + + mv88e6xxx_reg_lock(chip); + lane = mv88e6xxx_serdes_get_lane(chip, port); + if (lane) + ret = mv88e6xxx_serdes_irq_status(chip, port, lane); + mv88e6xxx_reg_unlock(chip); + + return ret; +} + +static int mv88e6xxx_serdes_irq_request(struct mv88e6xxx_chip *chip, int port, + u8 lane) +{ + struct mv88e6xxx_port *dev_id = &chip->ports[port]; + unsigned int irq; + int err; + + /* Nothing to request if this SERDES port has no IRQ */ + irq = mv88e6xxx_serdes_irq_mapping(chip, port); + if (!irq) + return 0; + + /* 
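Editor's note: the threaded handler above (mv88e6xxx_serdes_irq_thread_fn) takes the same register lock, so keeping it held here could deadlock if the IRQ fired immediately.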
Requesting the IRQ will trigger IRQ callbacks, so release the lock */ + mv88e6xxx_reg_unlock(chip); + err = request_threaded_irq(irq, NULL, mv88e6xxx_serdes_irq_thread_fn, + IRQF_ONESHOT, "mv88e6xxx-serdes", dev_id); + mv88e6xxx_reg_lock(chip); + if (err) + return err; + + dev_id->serdes_irq = irq; + + return mv88e6xxx_serdes_irq_enable(chip, port, lane); +} + +static int mv88e6xxx_serdes_irq_free(struct mv88e6xxx_chip *chip, int port, + u8 lane) +{ + struct mv88e6xxx_port *dev_id = &chip->ports[port]; + unsigned int irq = dev_id->serdes_irq; + int err; + + /* Nothing to free if no IRQ has been requested */ + if (!irq) + return 0; + + err = mv88e6xxx_serdes_irq_disable(chip, port, lane); + + /* Freeing the IRQ will trigger IRQ callbacks, so release the lock */ + mv88e6xxx_reg_unlock(chip); + free_irq(irq, dev_id); + mv88e6xxx_reg_lock(chip); + + dev_id->serdes_irq = 0; + + return err; +} + static int mv88e6xxx_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on) { - if (chip->info->ops->serdes_power) - return chip->info->ops->serdes_power(chip, port, on); + u8 lane; + int err; - return 0; + lane = mv88e6xxx_serdes_get_lane(chip, port); + if (!lane) + return 0; + + if (on) { + err = mv88e6xxx_serdes_power_up(chip, port, lane); + if (err) + return err; + + err = mv88e6xxx_serdes_irq_request(chip, port, lane); + } else { + err = mv88e6xxx_serdes_irq_free(chip, port, lane); + if (err) + return err; + + err = mv88e6xxx_serdes_power_down(chip, port, lane); + } + + return err; } static int mv88e6xxx_setup_upstream_port(struct mv88e6xxx_chip *chip, int port) @@ -2141,16 +2442,6 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port) if (err) return err; - /* Enable the SERDES interface for DSA and CPU ports. Normal - * ports SERDES are enabled when the port is enabled, thus - * saving a bit of power. 
- */ - if ((dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))) { - err = mv88e6xxx_serdes_power(chip, port, true); - if (err) - return err; - } - /* Port Control 2: don't force a good FCS, set the maximum frame size to * 10240 bytes, disable 802.1q tags checking, don't discard tagged or * untagged frames on this port, do a destination address lookup on all @@ -2227,9 +2518,11 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port) return err; } - err = mv88e6xxx_setup_message_port(chip, port); - if (err) - return err; + if (chip->info->ops->port_setup_message_port) { + err = chip->info->ops->port_setup_message_port(chip, port); + if (err) + return err; + } /* Port based VLAN map: give each port the same default address * database, and allow bidirectional communication between the @@ -2256,12 +2549,7 @@ static int mv88e6xxx_port_enable(struct dsa_switch *ds, int port, int err; mv88e6xxx_reg_lock(chip); - err = mv88e6xxx_serdes_power(chip, port, true); - - if (!err && chip->info->ops->serdes_irq_setup) - err = chip->info->ops->serdes_irq_setup(chip, port); - mv88e6xxx_reg_unlock(chip); return err; @@ -2272,16 +2560,8 @@ static void mv88e6xxx_port_disable(struct dsa_switch *ds, int port) struct mv88e6xxx_chip *chip = ds->priv; mv88e6xxx_reg_lock(chip); - - if (mv88e6xxx_port_set_state(chip, port, BR_STATE_DISABLED)) - dev_err(chip->dev, "failed to disable port\n"); - - if (chip->info->ops->serdes_irq_free) - chip->info->ops->serdes_irq_free(chip, port); - if (mv88e6xxx_serdes_power(chip, port, false)) dev_err(chip->dev, "failed to power off SERDES\n"); - mv88e6xxx_reg_unlock(chip); } @@ -2312,58 +2592,6 @@ static int mv88e6xxx_stats_setup(struct mv88e6xxx_chip *chip) return mv88e6xxx_g1_stats_clear(chip); } -/* The mv88e6390 has some hidden registers used for debug and - * development. The errata also makes use of them. - */ -static int mv88e6390_hidden_write(struct mv88e6xxx_chip *chip, int port, - int reg, u16 val) -{ - u16 ctrl; - int err; - - err = mv88e6xxx_port_write(chip, PORT_RESERVED_1A_DATA_PORT, - PORT_RESERVED_1A, val); - if (err) - return err; - - ctrl = PORT_RESERVED_1A_BUSY | PORT_RESERVED_1A_WRITE | - PORT_RESERVED_1A_BLOCK | port << PORT_RESERVED_1A_PORT_SHIFT | - reg; - - return mv88e6xxx_port_write(chip, PORT_RESERVED_1A_CTRL_PORT, - PORT_RESERVED_1A, ctrl); -} - -static int mv88e6390_hidden_wait(struct mv88e6xxx_chip *chip) -{ - return mv88e6xxx_wait(chip, PORT_RESERVED_1A_CTRL_PORT, - PORT_RESERVED_1A, PORT_RESERVED_1A_BUSY); -} - - -static int mv88e6390_hidden_read(struct mv88e6xxx_chip *chip, int port, - int reg, u16 *val) -{ - u16 ctrl; - int err; - - ctrl = PORT_RESERVED_1A_BUSY | PORT_RESERVED_1A_READ | - PORT_RESERVED_1A_BLOCK | port << PORT_RESERVED_1A_PORT_SHIFT | - reg; - - err = mv88e6xxx_port_write(chip, PORT_RESERVED_1A_CTRL_PORT, - PORT_RESERVED_1A, ctrl); - if (err) - return err; - - err = mv88e6390_hidden_wait(chip); - if (err) - return err; - - return mv88e6xxx_port_read(chip, PORT_RESERVED_1A_DATA_PORT, - PORT_RESERVED_1A, val); -} - /* Check if the errata has already been applied. 
*/ static bool mv88e6390_setup_errata_applied(struct mv88e6xxx_chip *chip) { @@ -2372,7 +2600,7 @@ static bool mv88e6390_setup_errata_applied(struct mv88e6xxx_chip *chip) u16 val; for (port = 0; port < mv88e6xxx_num_ports(chip); port++) { - err = mv88e6390_hidden_read(chip, port, 0, &val); + err = mv88e6xxx_port_hidden_read(chip, 0xf, port, 0, &val); if (err) { dev_err(chip->dev, "Error reading hidden register: %d\n", err); @@ -2405,7 +2633,7 @@ static int mv88e6390_setup_errata(struct mv88e6xxx_chip *chip) } for (port = 0; port < mv88e6xxx_num_ports(chip); port++) { - err = mv88e6390_hidden_write(chip, port, 0, 0x01c0); + err = mv88e6xxx_port_hidden_write(chip, 0xf, port, 0, 0x01c0); if (err) return err; } @@ -2444,17 +2672,14 @@ static int mv88e6xxx_setup(struct dsa_switch *ds) /* Setup Switch Port Registers */ for (i = 0; i < mv88e6xxx_num_ports(chip); i++) { - if (dsa_is_unused_port(ds, i)) { - err = mv88e6xxx_port_set_state(chip, i, - BR_STATE_DISABLED); - if (err) - goto unlock; - - err = mv88e6xxx_serdes_power(chip, i, false); - if (err) - goto unlock; - + if (dsa_is_unused_port(ds, i)) continue; + + /* Prevent the use of an invalid port. */ + if (mv88e6xxx_is_invalid_port(chip, i)) { + dev_err(chip->dev, "port %d is invalid\n", i); + err = -EINVAL; + goto unlock; } err = mv88e6xxx_setup_port(chip, i); @@ -2773,6 +2998,7 @@ static const struct mv88e6xxx_ops mv88e6085_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6185_port_get_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6xxx_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -2807,6 +3033,7 @@ static const struct mv88e6xxx_ops mv88e6095_ops = { .port_set_upstream_port = mv88e6095_port_set_upstream_port, .port_link_state = mv88e6185_port_link_state, .port_get_cmode = mv88e6185_port_get_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6xxx_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -2843,6 +3070,7 @@ static const struct mv88e6xxx_ops mv88e6097_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6185_port_get_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6xxx_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -2877,6 +3105,7 @@ static const struct mv88e6xxx_ops mv88e6123_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6185_port_get_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -2914,6 +3143,7 @@ static const struct mv88e6xxx_ops mv88e6131_ops = { .port_set_pause = mv88e6185_port_set_pause, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6185_port_get_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6xxx_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ 
-2958,6 +3188,8 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, + .port_set_cmode = mv88e6341_port_set_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -2971,7 +3203,11 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, - .serdes_power = mv88e6341_serdes_power, + .serdes_power = mv88e6390_serdes_power, + .serdes_get_lane = mv88e6341_serdes_get_lane, + .serdes_irq_mapping = mv88e6390_serdes_irq_mapping, + .serdes_irq_enable = mv88e6390_serdes_irq_enable, + .serdes_irq_status = mv88e6390_serdes_irq_status, .gpio_ops = &mv88e6352_gpio_ops, .phylink_validate = mv88e6341_phylink_validate, }; @@ -2998,6 +3234,7 @@ static const struct mv88e6xxx_ops mv88e6161_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6185_port_get_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6xxx_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -3031,6 +3268,7 @@ static const struct mv88e6xxx_ops mv88e6165_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6185_port_get_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6xxx_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -3072,6 +3310,7 @@ static const struct mv88e6xxx_ops mv88e6171_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -3103,6 +3342,7 @@ static const struct mv88e6xxx_ops mv88e6172_ops = { .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay, .port_set_speed = mv88e6352_port_set_speed, .port_tag_remap = mv88e6095_port_tag_remap, + .port_set_policy = mv88e6352_port_set_policy, .port_set_frame_mode = mv88e6351_port_set_frame_mode, .port_set_egress_floods = mv88e6352_port_set_egress_floods, .port_set_ether_type = mv88e6351_port_set_ether_type, @@ -3113,6 +3353,7 @@ static const struct mv88e6xxx_ops mv88e6172_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -3127,6 +3368,7 @@ static const struct mv88e6xxx_ops mv88e6172_ops = { .rmu_disable = mv88e6352_g1_rmu_disable, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, + .serdes_get_lane = mv88e6352_serdes_get_lane, .serdes_power = 
mv88e6352_serdes_power, .gpio_ops = &mv88e6352_gpio_ops, .phylink_validate = mv88e6352_phylink_validate, @@ -3155,6 +3397,7 @@ static const struct mv88e6xxx_ops mv88e6175_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -3186,6 +3429,7 @@ static const struct mv88e6xxx_ops mv88e6176_ops = { .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay, .port_set_speed = mv88e6352_port_set_speed, .port_tag_remap = mv88e6095_port_tag_remap, + .port_set_policy = mv88e6352_port_set_policy, .port_set_frame_mode = mv88e6351_port_set_frame_mode, .port_set_egress_floods = mv88e6352_port_set_egress_floods, .port_set_ether_type = mv88e6351_port_set_ether_type, @@ -3196,6 +3440,7 @@ static const struct mv88e6xxx_ops mv88e6176_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -3210,9 +3455,11 @@ static const struct mv88e6xxx_ops mv88e6176_ops = { .rmu_disable = mv88e6352_g1_rmu_disable, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, + .serdes_get_lane = mv88e6352_serdes_get_lane, .serdes_power = mv88e6352_serdes_power, - .serdes_irq_setup = mv88e6352_serdes_irq_setup, - .serdes_irq_free = mv88e6352_serdes_irq_free, + .serdes_irq_mapping = mv88e6352_serdes_irq_mapping, + .serdes_irq_enable = mv88e6352_serdes_irq_enable, + .serdes_irq_status = mv88e6352_serdes_irq_status, .gpio_ops = &mv88e6352_gpio_ops, .phylink_validate = mv88e6352_phylink_validate, }; @@ -3234,6 +3481,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = { .port_set_pause = mv88e6185_port_set_pause, .port_link_state = mv88e6185_port_link_state, .port_get_cmode = mv88e6185_port_get_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6xxx_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -3267,6 +3515,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = { .port_set_speed = mv88e6390_port_set_speed, .port_max_speed_mode = mv88e6390_port_max_speed_mode, .port_tag_remap = mv88e6390_port_tag_remap, + .port_set_policy = mv88e6352_port_set_policy, .port_set_frame_mode = mv88e6351_port_set_frame_mode, .port_set_egress_floods = mv88e6352_port_set_egress_floods, .port_set_ether_type = mv88e6351_port_set_ether_type, @@ -3276,6 +3525,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = { .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, .port_set_cmode = mv88e6390_port_set_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3291,8 +3541,10 @@ static const struct mv88e6xxx_ops mv88e6190_ops = { .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, 
- .serdes_irq_setup = mv88e6390_serdes_irq_setup, - .serdes_irq_free = mv88e6390_serdes_irq_free, + .serdes_get_lane = mv88e6390_serdes_get_lane, + .serdes_irq_mapping = mv88e6390_serdes_irq_mapping, + .serdes_irq_enable = mv88e6390_serdes_irq_enable, + .serdes_irq_status = mv88e6390_serdes_irq_status, .gpio_ops = &mv88e6352_gpio_ops, .phylink_validate = mv88e6390_phylink_validate, }; @@ -3312,6 +3564,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = { .port_set_speed = mv88e6390x_port_set_speed, .port_max_speed_mode = mv88e6390x_port_max_speed_mode, .port_tag_remap = mv88e6390_port_tag_remap, + .port_set_policy = mv88e6352_port_set_policy, .port_set_frame_mode = mv88e6351_port_set_frame_mode, .port_set_egress_floods = mv88e6352_port_set_egress_floods, .port_set_ether_type = mv88e6351_port_set_ether_type, @@ -3321,6 +3574,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = { .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, .port_set_cmode = mv88e6390x_port_set_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3335,9 +3589,11 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = { .rmu_disable = mv88e6390_g1_rmu_disable, .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, - .serdes_power = mv88e6390x_serdes_power, - .serdes_irq_setup = mv88e6390x_serdes_irq_setup, - .serdes_irq_free = mv88e6390x_serdes_irq_free, + .serdes_power = mv88e6390_serdes_power, + .serdes_get_lane = mv88e6390x_serdes_get_lane, + .serdes_irq_mapping = mv88e6390_serdes_irq_mapping, + .serdes_irq_enable = mv88e6390_serdes_irq_enable, + .serdes_irq_status = mv88e6390_serdes_irq_status, .gpio_ops = &mv88e6352_gpio_ops, .phylink_validate = mv88e6390x_phylink_validate, }; @@ -3366,6 +3622,7 @@ static const struct mv88e6xxx_ops mv88e6191_ops = { .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, .port_set_cmode = mv88e6390_port_set_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3381,8 +3638,10 @@ static const struct mv88e6xxx_ops mv88e6191_ops = { .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, - .serdes_irq_setup = mv88e6390_serdes_irq_setup, - .serdes_irq_free = mv88e6390_serdes_irq_free, + .serdes_get_lane = mv88e6390_serdes_get_lane, + .serdes_irq_mapping = mv88e6390_serdes_irq_mapping, + .serdes_irq_enable = mv88e6390_serdes_irq_enable, + .serdes_irq_status = mv88e6390_serdes_irq_status, .avb_ops = &mv88e6390_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, .phylink_validate = mv88e6390_phylink_validate, @@ -3403,6 +3662,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = { .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay, .port_set_speed = mv88e6352_port_set_speed, .port_tag_remap = mv88e6095_port_tag_remap, + .port_set_policy = mv88e6352_port_set_policy, .port_set_frame_mode = mv88e6351_port_set_frame_mode, .port_set_egress_floods = mv88e6352_port_set_egress_floods, .port_set_ether_type = mv88e6351_port_set_ether_type, @@ -3413,6 +3673,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, 
.port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -3427,9 +3688,11 @@ static const struct mv88e6xxx_ops mv88e6240_ops = { .rmu_disable = mv88e6352_g1_rmu_disable, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, + .serdes_get_lane = mv88e6352_serdes_get_lane, .serdes_power = mv88e6352_serdes_power, - .serdes_irq_setup = mv88e6352_serdes_irq_setup, - .serdes_irq_free = mv88e6352_serdes_irq_free, + .serdes_irq_mapping = mv88e6352_serdes_irq_mapping, + .serdes_irq_enable = mv88e6352_serdes_irq_enable, + .serdes_irq_status = mv88e6352_serdes_irq_status, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6352_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, @@ -3471,6 +3734,8 @@ static const struct mv88e6xxx_ops mv88e6250_ops = { .reset = mv88e6250_g1_reset, .vtu_getnext = mv88e6250_g1_vtu_getnext, .vtu_loadpurge = mv88e6250_g1_vtu_loadpurge, + .avb_ops = &mv88e6352_avb_ops, + .ptp_ops = &mv88e6250_ptp_ops, .phylink_validate = mv88e6065_phylink_validate, }; @@ -3489,6 +3754,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = { .port_set_speed = mv88e6390_port_set_speed, .port_max_speed_mode = mv88e6390_port_max_speed_mode, .port_tag_remap = mv88e6390_port_tag_remap, + .port_set_policy = mv88e6352_port_set_policy, .port_set_frame_mode = mv88e6351_port_set_frame_mode, .port_set_egress_floods = mv88e6352_port_set_egress_floods, .port_set_ether_type = mv88e6351_port_set_ether_type, @@ -3498,6 +3764,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = { .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, .port_set_cmode = mv88e6390_port_set_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3513,8 +3780,10 @@ static const struct mv88e6xxx_ops mv88e6290_ops = { .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, - .serdes_irq_setup = mv88e6390_serdes_irq_setup, - .serdes_irq_free = mv88e6390_serdes_irq_free, + .serdes_get_lane = mv88e6390_serdes_get_lane, + .serdes_irq_mapping = mv88e6390_serdes_irq_mapping, + .serdes_irq_enable = mv88e6390_serdes_irq_enable, + .serdes_irq_status = mv88e6390_serdes_irq_status, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, @@ -3545,6 +3814,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3588,6 +3858,7 @@ static const struct mv88e6xxx_ops mv88e6321_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = 
mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3631,6 +3902,8 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, + .port_set_cmode = mv88e6341_port_set_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3644,7 +3917,11 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, - .serdes_power = mv88e6341_serdes_power, + .serdes_power = mv88e6390_serdes_power, + .serdes_get_lane = mv88e6341_serdes_get_lane, + .serdes_irq_mapping = mv88e6390_serdes_irq_mapping, + .serdes_irq_enable = mv88e6390_serdes_irq_enable, + .serdes_irq_status = mv88e6390_serdes_irq_status, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, @@ -3674,6 +3951,7 @@ static const struct mv88e6xxx_ops mv88e6350_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -3713,6 +3991,7 @@ static const struct mv88e6xxx_ops mv88e6351_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -3746,6 +4025,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = { .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay, .port_set_speed = mv88e6352_port_set_speed, .port_tag_remap = mv88e6095_port_tag_remap, + .port_set_policy = mv88e6352_port_set_policy, .port_set_frame_mode = mv88e6351_port_set_frame_mode, .port_set_egress_floods = mv88e6352_port_set_egress_floods, .port_set_ether_type = mv88e6351_port_set_ether_type, @@ -3756,6 +4036,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -3770,9 +4051,11 @@ static const struct mv88e6xxx_ops mv88e6352_ops = { .rmu_disable = mv88e6352_g1_rmu_disable, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, + .serdes_get_lane = mv88e6352_serdes_get_lane, .serdes_power = mv88e6352_serdes_power, - .serdes_irq_setup = mv88e6352_serdes_irq_setup, - .serdes_irq_free = mv88e6352_serdes_irq_free, + .serdes_irq_mapping = mv88e6352_serdes_irq_mapping, + .serdes_irq_enable = mv88e6352_serdes_irq_enable, + .serdes_irq_status = mv88e6352_serdes_irq_status, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6352_avb_ops, 
.ptp_ops = &mv88e6352_ptp_ops, @@ -3797,6 +4080,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = { .port_set_speed = mv88e6390_port_set_speed, .port_max_speed_mode = mv88e6390_port_max_speed_mode, .port_tag_remap = mv88e6390_port_tag_remap, + .port_set_policy = mv88e6352_port_set_policy, .port_set_frame_mode = mv88e6351_port_set_frame_mode, .port_set_egress_floods = mv88e6352_port_set_egress_floods, .port_set_ether_type = mv88e6351_port_set_ether_type, @@ -3808,6 +4092,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = { .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, .port_set_cmode = mv88e6390_port_set_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3823,8 +4108,10 @@ static const struct mv88e6xxx_ops mv88e6390_ops = { .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, - .serdes_irq_setup = mv88e6390_serdes_irq_setup, - .serdes_irq_free = mv88e6390_serdes_irq_free, + .serdes_get_lane = mv88e6390_serdes_get_lane, + .serdes_irq_mapping = mv88e6390_serdes_irq_mapping, + .serdes_irq_enable = mv88e6390_serdes_irq_enable, + .serdes_irq_status = mv88e6390_serdes_irq_status, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, @@ -3846,6 +4133,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { .port_set_speed = mv88e6390x_port_set_speed, .port_max_speed_mode = mv88e6390x_port_max_speed_mode, .port_tag_remap = mv88e6390_port_tag_remap, + .port_set_policy = mv88e6352_port_set_policy, .port_set_frame_mode = mv88e6351_port_set_frame_mode, .port_set_egress_floods = mv88e6352_port_set_egress_floods, .port_set_ether_type = mv88e6351_port_set_ether_type, @@ -3857,6 +4145,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, .port_set_cmode = mv88e6390x_port_set_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3871,9 +4160,11 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { .rmu_disable = mv88e6390_g1_rmu_disable, .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, - .serdes_power = mv88e6390x_serdes_power, - .serdes_irq_setup = mv88e6390x_serdes_irq_setup, - .serdes_irq_free = mv88e6390x_serdes_irq_free, + .serdes_power = mv88e6390_serdes_power, + .serdes_get_lane = mv88e6390x_serdes_get_lane, + .serdes_irq_mapping = mv88e6390_serdes_irq_mapping, + .serdes_irq_enable = mv88e6390_serdes_irq_enable, + .serdes_irq_status = mv88e6390_serdes_irq_status, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, @@ -4235,6 +4526,33 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .ops = &mv88e6191_ops, }, + [MV88E6220] = { + .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6220, + .family = MV88E6XXX_FAMILY_6250, + .name = "Marvell 88E6220", + .num_databases = 64, + + /* Ports 2-4 are not routed to pins + * => usable ports 0, 1, 5, 6 + */ + .num_ports = 7, + .num_internal_phys = 2, + .invalid_port_mask = BIT(2) | BIT(3) | BIT(4), + .max_vid = 4095, + .port_base_addr = 0x08, + .phy_base_addr = 0x00, + 
.global1_addr = 0x0f, + .global2_addr = 0x07, + .age_time_coeff = 15000, + .g1_irqs = 9, + .g2_irqs = 10, + .atu_move_port_mask = 0xf, + .dual_chip = true, + .tag_protocol = DSA_TAG_PROTO_DSA, + .ptp_support = true, + .ops = &mv88e6250_ops, + }, + [MV88E6240] = { .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6240, .family = MV88E6XXX_FAMILY_6352, @@ -4277,6 +4595,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .atu_move_port_mask = 0xf, .dual_chip = true, .tag_protocol = DSA_TAG_PROTO_DSA, + .ptp_support = true, .ops = &mv88e6250_ops, }, @@ -4546,6 +4865,7 @@ static struct mv88e6xxx_chip *mv88e6xxx_alloc_chip(struct device *dev) mutex_init(&chip->reg_lock); INIT_LIST_HEAD(&chip->mdios); + idr_init(&chip->policies); return chip; } @@ -4588,8 +4908,7 @@ static int mv88e6xxx_port_mdb_del(struct dsa_switch *ds, int port, int err; mv88e6xxx_reg_lock(chip); - err = mv88e6xxx_port_db_load_purge(chip, port, mdb->addr, mdb->vid, - MV88E6XXX_G1_ATU_DATA_STATE_UNUSED); + err = mv88e6xxx_port_db_load_purge(chip, port, mdb->addr, mdb->vid, 0); mv88e6xxx_reg_unlock(chip); return err; @@ -4631,6 +4950,8 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = { .set_eeprom = mv88e6xxx_set_eeprom, .get_regs_len = mv88e6xxx_get_regs_len, .get_regs = mv88e6xxx_get_regs, + .get_rxnfc = mv88e6xxx_get_rxnfc, + .set_rxnfc = mv88e6xxx_set_rxnfc, .set_ageing_time = mv88e6xxx_set_ageing_time, .port_bridge_join = mv88e6xxx_port_bridge_join, .port_bridge_leave = mv88e6xxx_port_bridge_leave, diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index 4646e46d47f2..e9b1a1ac9a8e 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -8,6 +8,7 @@ #ifndef _MV88E6XXX_CHIP_H #define _MV88E6XXX_CHIP_H +#include <linux/idr.h> #include <linux/if_vlan.h> #include <linux/irq.h> #include <linux/gpio/consumer.h> @@ -57,6 +58,7 @@ enum mv88e6xxx_model { MV88E6190, MV88E6190X, MV88E6191, + MV88E6220, MV88E6240, MV88E6250, MV88E6290, @@ -77,7 +79,7 @@ enum mv88e6xxx_family { MV88E6XXX_FAMILY_6097, /* 6046 6085 6096 6097 */ MV88E6XXX_FAMILY_6165, /* 6123 6161 6165 */ MV88E6XXX_FAMILY_6185, /* 6108 6121 6122 6131 6152 6155 6182 6185 */ - MV88E6XXX_FAMILY_6250, /* 6250 */ + MV88E6XXX_FAMILY_6250, /* 6220 6250 */ MV88E6XXX_FAMILY_6320, /* 6320 6321 */ MV88E6XXX_FAMILY_6341, /* 6141 6341 */ MV88E6XXX_FAMILY_6351, /* 6171 6175 6350 6351 */ @@ -105,6 +107,11 @@ struct mv88e6xxx_info { unsigned int g2_irqs; bool pvt; + /* Mark certain ports as invalid. This is required, for example, for + * the MV88E6220 (in general a MV88E6250 with 7 ports) whose ports + * 2-4 are not routed to pins. + */ + unsigned int invalid_port_mask; /* Multi-chip Addressing Mode. * Some chips respond to only 2 registers of its own SMI device address * when it is non-zero, and use indirect access to internal registers. 
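The invalid_port_mask field added above is a plain port bitmap: BIT(n) set means port n exists in the chip's register map but must never be used. As a minimal standalone sketch of the test (port_is_unrouted() is an illustrative name; the patch's own equivalent helper, mv88e6xxx_is_invalid_port(), is added to chip.h further down in this diff):

	/* Sketch only: equivalent to the mv88e6xxx_is_invalid_port() helper
	 * this patch adds to chip.h, spelled out to make the BIT() test
	 * explicit.
	 */
	static bool port_is_unrouted(const struct mv88e6xxx_info *info, int port)
	{
		return (info->invalid_port_mask & BIT(port)) != 0;
	}

mv88e6xxx_setup() (see the chip.c hunk earlier in this diff) then rejects with -EINVAL any used port for which this test is true, instead of programming registers for pins that are not routed.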
@@ -183,6 +190,33 @@ struct mv88e6xxx_port_hwtstamp { struct hwtstamp_config tstamp_config; }; +enum mv88e6xxx_policy_mapping { + MV88E6XXX_POLICY_MAPPING_DA, + MV88E6XXX_POLICY_MAPPING_SA, + MV88E6XXX_POLICY_MAPPING_VTU, + MV88E6XXX_POLICY_MAPPING_ETYPE, + MV88E6XXX_POLICY_MAPPING_PPPOE, + MV88E6XXX_POLICY_MAPPING_VBAS, + MV88E6XXX_POLICY_MAPPING_OPT82, + MV88E6XXX_POLICY_MAPPING_UDP, +}; + +enum mv88e6xxx_policy_action { + MV88E6XXX_POLICY_ACTION_NORMAL, + MV88E6XXX_POLICY_ACTION_MIRROR, + MV88E6XXX_POLICY_ACTION_TRAP, + MV88E6XXX_POLICY_ACTION_DISCARD, +}; + +struct mv88e6xxx_policy { + enum mv88e6xxx_policy_mapping mapping; + enum mv88e6xxx_policy_action action; + struct ethtool_rx_flow_spec fs; + u8 addr[ETH_ALEN]; + int port; + u16 vid; +}; + struct mv88e6xxx_port { struct mv88e6xxx_chip *chip; int port; @@ -193,7 +227,7 @@ struct mv88e6xxx_port { u64 vtu_member_violation; u64 vtu_miss_violation; u8 cmode; - int serdes_irq; + unsigned int serdes_irq; }; struct mv88e6xxx_chip { @@ -241,6 +275,9 @@ struct mv88e6xxx_chip { /* List of mdio busses */ struct list_head mdios; + /* Policy Control List IDs and rules */ + struct idr policies; + /* There can be two interrupt controllers, which are chained * off a GPIO as interrupt source */ @@ -375,6 +412,10 @@ struct mv88e6xxx_ops { int (*port_tag_remap)(struct mv88e6xxx_chip *chip, int port); + int (*port_set_policy)(struct mv88e6xxx_chip *chip, int port, + enum mv88e6xxx_policy_mapping mapping, + enum mv88e6xxx_policy_action action); + int (*port_set_frame_mode)(struct mv88e6xxx_chip *chip, int port, enum mv88e6xxx_frame_mode mode); int (*port_set_egress_floods)(struct mv88e6xxx_chip *chip, int port, @@ -389,6 +430,7 @@ struct mv88e6xxx_ops { u8 out); int (*port_disable_learn_limit)(struct mv88e6xxx_chip *chip, int port); int (*port_disable_pri_override)(struct mv88e6xxx_chip *chip, int port); + int (*port_setup_message_port)(struct mv88e6xxx_chip *chip, int port); /* CMODE control what PHY mode the MAC will use, eg. SGMII, RGMII, etc. * Some chips allow this to be configured on specific ports. 
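The policies idr introduced above keys each struct mv88e6xxx_policy by the rule location chosen by the ethtool user, which is why the get_rxnfc handler earlier in this diff can look a single rule up with a bare idr_find() and enumerate a port's rules for ETHTOOL_GRXCLSRLALL with idr_for_each_entry(). A hypothetical sketch of the insertion side (the patch's real mv88e6xxx_policy_insert() appears elsewhere in this diff and performs more validation; policy_idr_store() below is illustrative only):

	/* Hypothetical helper, not from the patch: pin 'policy' at exactly
	 * 'location' in the idr. idr_alloc() returns the allocated ID on
	 * success or a negative errno (e.g. -ENOSPC if the slot is taken).
	 */
	static int policy_idr_store(struct mv88e6xxx_chip *chip,
				    struct mv88e6xxx_policy *policy, u32 location)
	{
		int id = idr_alloc(&chip->policies, policy, location,
				   location + 1, GFP_KERNEL);

		return id < 0 ? id : 0;
	}

The delete side is symmetric: ETHTOOL_SRXCLSRLDEL simply does idr_remove(&chip->policies, fs->location), reverts the port's policy action to NORMAL, and frees the entry, as the set_rxnfc hunk above shows.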
@@ -434,11 +476,19 @@ struct mv88e6xxx_ops { int (*mgmt_rsvd2cpu)(struct mv88e6xxx_chip *chip); /* Power on/off a SERDES interface */ - int (*serdes_power)(struct mv88e6xxx_chip *chip, int port, bool on); + int (*serdes_power)(struct mv88e6xxx_chip *chip, int port, u8 lane, + bool up); + + /* SERDES lane mapping */ + u8 (*serdes_get_lane)(struct mv88e6xxx_chip *chip, int port); /* SERDES interrupt handling */ - int (*serdes_irq_setup)(struct mv88e6xxx_chip *chip, int port); - void (*serdes_irq_free)(struct mv88e6xxx_chip *chip, int port); + unsigned int (*serdes_irq_mapping)(struct mv88e6xxx_chip *chip, + int port); + int (*serdes_irq_enable)(struct mv88e6xxx_chip *chip, int port, u8 lane, + bool enable); + irqreturn_t (*serdes_irq_status)(struct mv88e6xxx_chip *chip, int port, + u8 lane); /* Statistics from the SERDES interface */ int (*serdes_get_sset_count)(struct mv88e6xxx_chip *chip, int port); @@ -532,6 +582,10 @@ struct mv88e6xxx_ptp_ops { int arr1_sts_reg; int dep_sts_reg; u32 rx_filters; + u32 cc_shift; + u32 cc_mult; + u32 cc_mult_num; + u32 cc_mult_dem; }; #define STATS_TYPE_PORT BIT(0) @@ -570,11 +624,17 @@ static inline unsigned int mv88e6xxx_num_gpio(struct mv88e6xxx_chip *chip) return chip->info->num_gpio; } +static inline bool mv88e6xxx_is_invalid_port(struct mv88e6xxx_chip *chip, int port) +{ + return (chip->info->invalid_port_mask & BIT(port)) != 0; +} + int mv88e6xxx_read(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val); int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val); -int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg, - u16 update); -int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int addr, int reg, u16 mask); +int mv88e6xxx_wait_mask(struct mv88e6xxx_chip *chip, int addr, int reg, + u16 mask, u16 val); +int mv88e6xxx_wait_bit(struct mv88e6xxx_chip *chip, int addr, int reg, + int bit, int val); int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port, int link, int speed, int duplex, int pause, phy_interface_t mode); diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c index 1323ef30a5e9..25ec4c0ac589 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.c +++ b/drivers/net/dsa/mv88e6xxx/global1.c @@ -27,100 +27,52 @@ int mv88e6xxx_g1_write(struct mv88e6xxx_chip *chip, int reg, u16 val) return mv88e6xxx_write(chip, addr, reg, val); } -int mv88e6xxx_g1_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask) +int mv88e6xxx_g1_wait_bit(struct mv88e6xxx_chip *chip, int reg, int + bit, int val) { - return mv88e6xxx_wait(chip, chip->info->global1_addr, reg, mask); + return mv88e6xxx_wait_bit(chip, chip->info->global1_addr, reg, + bit, val); +} + +int mv88e6xxx_g1_wait_mask(struct mv88e6xxx_chip *chip, int reg, + u16 mask, u16 val) +{ + return mv88e6xxx_wait_mask(chip, chip->info->global1_addr, reg, + mask, val); } /* Offset 0x00: Switch Global Status Register */ static int mv88e6185_g1_wait_ppu_disabled(struct mv88e6xxx_chip *chip) { - u16 state; - int i, err; - - for (i = 0; i < 16; i++) { - err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &state); - if (err) - return err; - - /* Check the value of the PPUState bits 15:14 */ - state &= MV88E6185_G1_STS_PPU_STATE_MASK; - if (state != MV88E6185_G1_STS_PPU_STATE_POLLING) - return 0; - - usleep_range(1000, 2000); - } - - return -ETIMEDOUT; + return mv88e6xxx_g1_wait_mask(chip, MV88E6XXX_G1_STS, + MV88E6185_G1_STS_PPU_STATE_MASK, + MV88E6185_G1_STS_PPU_STATE_DISABLED); } static int mv88e6185_g1_wait_ppu_polling(struct mv88e6xxx_chip *chip) { 
- u16 state; - int i, err; - - for (i = 0; i < 16; ++i) { - err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &state); - if (err) - return err; - - /* Check the value of the PPUState bits 15:14 */ - state &= MV88E6185_G1_STS_PPU_STATE_MASK; - if (state == MV88E6185_G1_STS_PPU_STATE_POLLING) - return 0; - - usleep_range(1000, 2000); - } - - return -ETIMEDOUT; + return mv88e6xxx_g1_wait_mask(chip, MV88E6XXX_G1_STS, + MV88E6185_G1_STS_PPU_STATE_MASK, + MV88E6185_G1_STS_PPU_STATE_POLLING); } static int mv88e6352_g1_wait_ppu_polling(struct mv88e6xxx_chip *chip) { - u16 state; - int i, err; + int bit = __bf_shf(MV88E6352_G1_STS_PPU_STATE); - for (i = 0; i < 16; ++i) { - err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &state); - if (err) - return err; - - /* Check the value of the PPUState (or InitState) bit 15 */ - if (state & MV88E6352_G1_STS_PPU_STATE) - return 0; - - usleep_range(1000, 2000); - } - - return -ETIMEDOUT; + return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1); } static int mv88e6xxx_g1_wait_init_ready(struct mv88e6xxx_chip *chip) { - const unsigned long timeout = jiffies + 1 * HZ; - u16 val; - int err; + int bit = __bf_shf(MV88E6XXX_G1_STS_INIT_READY); /* Wait up to 1 second for the switch to be ready. The InitReady bit 11 * is set to a one when all units inside the device (ATU, VTU, etc.) * have finished their initialization and are ready to accept frames. */ - while (time_before(jiffies, timeout)) { - err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &val); - if (err) - return err; - - if (val & MV88E6XXX_G1_STS_INIT_READY) - break; - - usleep_range(1000, 2000); - } - - if (time_after(jiffies, timeout)) - return -ETIMEDOUT; - - return 0; + return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1); } /* Offset 0x01: Switch MAC Address Register Bytes 0 & 1 @@ -476,8 +428,9 @@ int mv88e6xxx_g1_set_device_number(struct mv88e6xxx_chip *chip, int index) static int mv88e6xxx_g1_stats_wait(struct mv88e6xxx_chip *chip) { - return mv88e6xxx_g1_wait(chip, MV88E6XXX_G1_STATS_OP, - MV88E6XXX_G1_STATS_OP_BUSY); + int bit = __bf_shf(MV88E6XXX_G1_STATS_OP_BUSY); + + return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STATS_OP, bit, 0); } int mv88e6095_g1_stats_set_histogram(struct mv88e6xxx_chip *chip) diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h index d444266f7d78..0870fcc8bfc8 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.h +++ b/drivers/net/dsa/mv88e6xxx/global1.h @@ -128,19 +128,36 @@ #define MV88E6XXX_G1_ATU_OP_FULL_VIOLATION BIT(4) /* Offset 0x0C: ATU Data Register */ -#define MV88E6XXX_G1_ATU_DATA 0x0c -#define MV88E6XXX_G1_ATU_DATA_TRUNK 0x8000 -#define MV88E6XXX_G1_ATU_DATA_TRUNK_ID_MASK 0x00f0 -#define MV88E6XXX_G1_ATU_DATA_PORT_VECTOR_MASK 0x3ff0 -#define MV88E6XXX_G1_ATU_DATA_STATE_MASK 0x000f -#define MV88E6XXX_G1_ATU_DATA_STATE_UNUSED 0x0000 -#define MV88E6XXX_G1_ATU_DATA_STATE_UC_MGMT 0x000d -#define MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC 0x000e -#define MV88E6XXX_G1_ATU_DATA_STATE_UC_PRIO_OVER 0x000f -#define MV88E6XXX_G1_ATU_DATA_STATE_MC_NONE_RATE 0x0005 -#define MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC 0x0007 -#define MV88E6XXX_G1_ATU_DATA_STATE_MC_MGMT 0x000e -#define MV88E6XXX_G1_ATU_DATA_STATE_MC_PRIO_OVER 0x000f +#define MV88E6XXX_G1_ATU_DATA 0x0c +#define MV88E6XXX_G1_ATU_DATA_TRUNK 0x8000 +#define MV88E6XXX_G1_ATU_DATA_TRUNK_ID_MASK 0x00f0 +#define MV88E6XXX_G1_ATU_DATA_PORT_VECTOR_MASK 0x3ff0 +#define MV88E6XXX_G1_ATU_DATA_STATE_MASK 0x000f +#define MV88E6XXX_G1_ATU_DATA_STATE_UC_UNUSED 0x0000 +#define 
MV88E6XXX_G1_ATU_DATA_STATE_UC_AGE_1_OLDEST 0x0001 +#define MV88E6XXX_G1_ATU_DATA_STATE_UC_AGE_2 0x0002 +#define MV88E6XXX_G1_ATU_DATA_STATE_UC_AGE_3 0x0003 +#define MV88E6XXX_G1_ATU_DATA_STATE_UC_AGE_4 0x0004 +#define MV88E6XXX_G1_ATU_DATA_STATE_UC_AGE_5 0x0005 +#define MV88E6XXX_G1_ATU_DATA_STATE_UC_AGE_6 0x0006 +#define MV88E6XXX_G1_ATU_DATA_STATE_UC_AGE_7_NEWEST 0x0007 +#define MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC_POLICY 0x0008 +#define MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC_POLICY_PO 0x0009 +#define MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC_AVB_NRL 0x000a +#define MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC_AVB_NRL_PO 0x000b +#define MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC_DA_MGMT 0x000c +#define MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC_DA_MGMT_PO 0x000d +#define MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC 0x000e +#define MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC_PO 0x000f +#define MV88E6XXX_G1_ATU_DATA_STATE_MC_UNUSED 0x0000 +#define MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC_POLICY 0x0004 +#define MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC_AVB_NRL 0x0005 +#define MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC_DA_MGMT 0x0006 +#define MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC 0x0007 +#define MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC_POLICY_PO 0x000c +#define MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC_AVB_NRL_PO 0x000d +#define MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC_DA_MGMT_PO 0x000e +#define MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC_PO 0x000f /* Offset 0x0D: ATU MAC Address Register Bytes 0 & 1 * Offset 0x0E: ATU MAC Address Register Bytes 2 & 3 @@ -249,7 +266,10 @@ int mv88e6xxx_g1_read(struct mv88e6xxx_chip *chip, int reg, u16 *val); int mv88e6xxx_g1_write(struct mv88e6xxx_chip *chip, int reg, u16 val); -int mv88e6xxx_g1_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask); +int mv88e6xxx_g1_wait_bit(struct mv88e6xxx_chip *chip, int reg, int + bit, int val); +int mv88e6xxx_g1_wait_mask(struct mv88e6xxx_chip *chip, int reg, + u16 mask, u16 val); int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr); diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c index 1cf388e9bd94..792a96ef418f 100644 --- a/drivers/net/dsa/mv88e6xxx/global1_atu.c +++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c @@ -5,6 +5,8 @@ * Copyright (c) 2008 Marvell Semiconductor * Copyright (c) 2017 Savoir-faire Linux, Inc. 
*/ + +#include <linux/bitfield.h> #include <linux/interrupt.h> #include <linux/irqdomain.h> @@ -75,8 +77,9 @@ int mv88e6xxx_g1_atu_set_age_time(struct mv88e6xxx_chip *chip, static int mv88e6xxx_g1_atu_op_wait(struct mv88e6xxx_chip *chip) { - return mv88e6xxx_g1_wait(chip, MV88E6XXX_G1_ATU_OP, - MV88E6XXX_G1_ATU_OP_BUSY); + int bit = __bf_shf(MV88E6XXX_G1_ATU_OP_BUSY); + + return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_ATU_OP, bit, 0); } static int mv88e6xxx_g1_atu_op(struct mv88e6xxx_chip *chip, u16 fid, u16 op) @@ -132,7 +135,7 @@ static int mv88e6xxx_g1_atu_data_read(struct mv88e6xxx_chip *chip, return err; entry->state = val & 0xf; - if (entry->state != MV88E6XXX_G1_ATU_DATA_STATE_UNUSED) { + if (entry->state) { entry->trunk = !!(val & MV88E6XXX_G1_ATU_DATA_TRUNK); entry->portvec = (val >> 4) & mv88e6xxx_port_mask(chip); } @@ -145,7 +148,7 @@ static int mv88e6xxx_g1_atu_data_write(struct mv88e6xxx_chip *chip, { u16 data = entry->state & 0xf; - if (entry->state != MV88E6XXX_G1_ATU_DATA_STATE_UNUSED) { + if (entry->state) { if (entry->trunk) data |= MV88E6XXX_G1_ATU_DATA_TRUNK; @@ -206,7 +209,7 @@ int mv88e6xxx_g1_atu_getnext(struct mv88e6xxx_chip *chip, u16 fid, return err; /* Write the MAC address to iterate from only once */ - if (entry->state == MV88E6XXX_G1_ATU_DATA_STATE_UNUSED) { + if (!entry->state) { err = mv88e6xxx_g1_atu_mac_write(chip, entry); if (err) return err; diff --git a/drivers/net/dsa/mv88e6xxx/global1_vtu.c b/drivers/net/dsa/mv88e6xxx/global1_vtu.c index 6cac997360e8..33056a609e96 100644 --- a/drivers/net/dsa/mv88e6xxx/global1_vtu.c +++ b/drivers/net/dsa/mv88e6xxx/global1_vtu.c @@ -7,6 +7,7 @@ * Copyright (c) 2017 Savoir-faire Linux, Inc. */ +#include <linux/bitfield.h> #include <linux/interrupt.h> #include <linux/irqdomain.h> @@ -67,8 +68,9 @@ static int mv88e6xxx_g1_vtu_sid_write(struct mv88e6xxx_chip *chip, static int mv88e6xxx_g1_vtu_op_wait(struct mv88e6xxx_chip *chip) { - return mv88e6xxx_g1_wait(chip, MV88E6XXX_G1_VTU_OP, - MV88E6XXX_G1_VTU_OP_BUSY); + int bit = __bf_shf(MV88E6XXX_G1_VTU_OP_BUSY); + + return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_VTU_OP, bit, 0); } static int mv88e6xxx_g1_vtu_op(struct mv88e6xxx_chip *chip, u16 op) diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c index 2305b94b3051..bdbb72fc20ed 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.c +++ b/drivers/net/dsa/mv88e6xxx/global2.c @@ -26,14 +26,11 @@ int mv88e6xxx_g2_write(struct mv88e6xxx_chip *chip, int reg, u16 val) return mv88e6xxx_write(chip, chip->info->global2_addr, reg, val); } -int mv88e6xxx_g2_update(struct mv88e6xxx_chip *chip, int reg, u16 update) +int mv88e6xxx_g2_wait_bit(struct mv88e6xxx_chip *chip, int reg, int + bit, int val) { - return mv88e6xxx_update(chip, chip->info->global2_addr, reg, update); -} - -int mv88e6xxx_g2_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask) -{ - return mv88e6xxx_wait(chip, chip->info->global2_addr, reg, mask); + return mv88e6xxx_wait_bit(chip, chip->info->global2_addr, reg, + bit, val); } /* Offset 0x00: Interrupt Source Register */ @@ -123,7 +120,8 @@ int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target, * but bit 4 is reserved on older chips, so it is safe to use. 
*/ - return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_DEVICE_MAPPING, val); + return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_DEVICE_MAPPING, + MV88E6XXX_G2_DEVICE_MAPPING_UPDATE | val); } /* Offset 0x07: Trunk Mask Table register */ @@ -136,7 +134,8 @@ static int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip, int num, if (hash) val |= MV88E6XXX_G2_TRUNK_MASK_HASH; - return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_TRUNK_MASK, val); + return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_TRUNK_MASK, + MV88E6XXX_G2_TRUNK_MASK_UPDATE | val); } /* Offset 0x08: Trunk Mapping Table register */ @@ -147,7 +146,8 @@ static int mv88e6xxx_g2_trunk_mapping_write(struct mv88e6xxx_chip *chip, int id, const u16 port_mask = BIT(mv88e6xxx_num_ports(chip)) - 1; u16 val = (id << 11) | (map & port_mask); - return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_TRUNK_MAPPING, val); + return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_TRUNK_MAPPING, + MV88E6XXX_G2_TRUNK_MAPPING_UPDATE | val); } int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip) @@ -178,8 +178,9 @@ int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip) static int mv88e6xxx_g2_irl_wait(struct mv88e6xxx_chip *chip) { - return mv88e6xxx_g2_wait(chip, MV88E6XXX_G2_IRL_CMD, - MV88E6XXX_G2_IRL_CMD_BUSY); + int bit = __bf_shf(MV88E6XXX_G2_IRL_CMD_BUSY); + + return mv88e6xxx_g2_wait_bit(chip, MV88E6XXX_G2_IRL_CMD, bit, 0); } static int mv88e6xxx_g2_irl_op(struct mv88e6xxx_chip *chip, u16 op, int port, @@ -214,8 +215,9 @@ int mv88e6390_g2_irl_init_all(struct mv88e6xxx_chip *chip, int port) static int mv88e6xxx_g2_pvt_op_wait(struct mv88e6xxx_chip *chip) { - return mv88e6xxx_g2_wait(chip, MV88E6XXX_G2_PVT_ADDR, - MV88E6XXX_G2_PVT_ADDR_BUSY); + int bit = __bf_shf(MV88E6XXX_G2_PVT_ADDR_BUSY); + + return mv88e6xxx_g2_wait_bit(chip, MV88E6XXX_G2_PVT_ADDR, bit, 0); } static int mv88e6xxx_g2_pvt_op(struct mv88e6xxx_chip *chip, int src_dev, @@ -261,7 +263,8 @@ static int mv88e6xxx_g2_switch_mac_write(struct mv88e6xxx_chip *chip, { u16 val = (pointer << 8) | data; - return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_SWITCH_MAC, val); + return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SWITCH_MAC, + MV88E6XXX_G2_SWITCH_MAC_UPDATE | val); } int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr) @@ -284,7 +287,8 @@ static int mv88e6xxx_g2_pot_write(struct mv88e6xxx_chip *chip, int pointer, { u16 val = (pointer << 8) | (data & 0x7); - return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_PRIO_OVERRIDE, val); + return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_PRIO_OVERRIDE, + MV88E6XXX_G2_PRIO_OVERRIDE_UPDATE | val); } int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip) @@ -308,9 +312,16 @@ int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip) static int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip) { - return mv88e6xxx_g2_wait(chip, MV88E6XXX_G2_EEPROM_CMD, - MV88E6XXX_G2_EEPROM_CMD_BUSY | - MV88E6XXX_G2_EEPROM_CMD_RUNNING); + int bit = __bf_shf(MV88E6XXX_G2_EEPROM_CMD_BUSY); + int err; + + err = mv88e6xxx_g2_wait_bit(chip, MV88E6XXX_G2_EEPROM_CMD, bit, 0); + if (err) + return err; + + bit = __bf_shf(MV88E6XXX_G2_EEPROM_CMD_RUNNING); + + return mv88e6xxx_g2_wait_bit(chip, MV88E6XXX_G2_EEPROM_CMD, bit, 0); } static int mv88e6xxx_g2_eeprom_cmd(struct mv88e6xxx_chip *chip, u16 cmd) @@ -572,8 +583,9 @@ int mv88e6xxx_g2_set_eeprom16(struct mv88e6xxx_chip *chip, static int mv88e6xxx_g2_smi_phy_wait(struct mv88e6xxx_chip *chip) { - return mv88e6xxx_g2_wait(chip, MV88E6XXX_G2_SMI_PHY_CMD, - MV88E6XXX_G2_SMI_PHY_CMD_BUSY); + int bit = __bf_shf(MV88E6XXX_G2_SMI_PHY_CMD_BUSY); + 
+ return mv88e6xxx_g2_wait_bit(chip, MV88E6XXX_G2_SMI_PHY_CMD, bit, 0); } static int mv88e6xxx_g2_smi_phy_cmd(struct mv88e6xxx_chip *chip, u16 cmd) @@ -840,12 +852,13 @@ const struct mv88e6xxx_irq_ops mv88e6250_watchdog_ops = { static int mv88e6390_watchdog_setup(struct mv88e6xxx_chip *chip) { - return mv88e6xxx_g2_update(chip, MV88E6390_G2_WDOG_CTL, - MV88E6390_G2_WDOG_CTL_PTR_INT_ENABLE | - MV88E6390_G2_WDOG_CTL_CUT_THROUGH | - MV88E6390_G2_WDOG_CTL_QUEUE_CONTROLLER | - MV88E6390_G2_WDOG_CTL_EGRESS | - MV88E6390_G2_WDOG_CTL_FORCE_IRQ); + return mv88e6xxx_g2_write(chip, MV88E6390_G2_WDOG_CTL, + MV88E6390_G2_WDOG_CTL_UPDATE | + MV88E6390_G2_WDOG_CTL_PTR_INT_ENABLE | + MV88E6390_G2_WDOG_CTL_CUT_THROUGH | + MV88E6390_G2_WDOG_CTL_QUEUE_CONTROLLER | + MV88E6390_G2_WDOG_CTL_EGRESS | + MV88E6390_G2_WDOG_CTL_FORCE_IRQ); } static int mv88e6390_watchdog_action(struct mv88e6xxx_chip *chip, int irq) @@ -878,8 +891,9 @@ static int mv88e6390_watchdog_action(struct mv88e6xxx_chip *chip, int irq) static void mv88e6390_watchdog_free(struct mv88e6xxx_chip *chip) { - mv88e6xxx_g2_update(chip, MV88E6390_G2_WDOG_CTL, - MV88E6390_G2_WDOG_CTL_PTR_INT_ENABLE); + mv88e6xxx_g2_write(chip, MV88E6390_G2_WDOG_CTL, + MV88E6390_G2_WDOG_CTL_UPDATE | + MV88E6390_G2_WDOG_CTL_PTR_INT_ENABLE); } const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops = { diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h index a664fc25f132..42da4bca73e8 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.h +++ b/drivers/net/dsa/mv88e6xxx/global2.h @@ -295,8 +295,8 @@ static inline int mv88e6xxx_g2_require(struct mv88e6xxx_chip *chip) int mv88e6xxx_g2_read(struct mv88e6xxx_chip *chip, int reg, u16 *val); int mv88e6xxx_g2_write(struct mv88e6xxx_chip *chip, int reg, u16 val); -int mv88e6xxx_g2_update(struct mv88e6xxx_chip *chip, int reg, u16 update); -int mv88e6xxx_g2_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask); +int mv88e6xxx_g2_wait_bit(struct mv88e6xxx_chip *chip, int reg, + int bit, int val); int mv88e6352_g2_irl_init_all(struct mv88e6xxx_chip *chip, int port); int mv88e6390_g2_irl_init_all(struct mv88e6xxx_chip *chip, int port); @@ -376,12 +376,8 @@ static inline int mv88e6xxx_g2_write(struct mv88e6xxx_chip *chip, int reg, u16 v return -EOPNOTSUPP; } -static inline int mv88e6xxx_g2_update(struct mv88e6xxx_chip *chip, int reg, u16 update) -{ - return -EOPNOTSUPP; -} - -static inline int mv88e6xxx_g2_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask) +static inline int mv88e6xxx_g2_wait_bit(struct mv88e6xxx_chip *chip, + int reg, int bit, int val) { return -EOPNOTSUPP; } diff --git a/drivers/net/dsa/mv88e6xxx/global2_avb.c b/drivers/net/dsa/mv88e6xxx/global2_avb.c index 116b8cf5a6e3..657783e043ff 100644 --- a/drivers/net/dsa/mv88e6xxx/global2_avb.c +++ b/drivers/net/dsa/mv88e6xxx/global2_avb.c @@ -11,6 +11,8 @@ * Brandon Streiff <brandon.streiff@ni.com> */ +#include <linux/bitfield.h> + #include "global2.h" /* Offset 0x16: AVB Command Register @@ -27,17 +29,33 @@ /* mv88e6xxx_g2_avb_read -- Read one or multiple 16-bit words. * The hardware supports snapshotting up to four contiguous registers. */ +static int mv88e6xxx_g2_avb_wait(struct mv88e6xxx_chip *chip) +{ + int bit = __bf_shf(MV88E6352_G2_AVB_CMD_BUSY); + + return mv88e6xxx_g2_wait_bit(chip, MV88E6352_G2_AVB_CMD, bit, 0); +} + static int mv88e6xxx_g2_avb_read(struct mv88e6xxx_chip *chip, u16 readop, u16 *data, int len) { int err; int i; + err = mv88e6xxx_g2_avb_wait(chip); + if (err) + return err; + /* Hardware can only snapshot four words. 
*/ if (len > 4) return -E2BIG; - err = mv88e6xxx_g2_update(chip, MV88E6352_G2_AVB_CMD, readop); + err = mv88e6xxx_g2_write(chip, MV88E6352_G2_AVB_CMD, + MV88E6352_G2_AVB_CMD_BUSY | readop); + if (err) + return err; + + err = mv88e6xxx_g2_avb_wait(chip); if (err) return err; @@ -57,11 +75,20 @@ static int mv88e6xxx_g2_avb_write(struct mv88e6xxx_chip *chip, u16 writeop, { int err; + err = mv88e6xxx_g2_avb_wait(chip); + if (err) + return err; + err = mv88e6xxx_g2_write(chip, MV88E6352_G2_AVB_DATA, data); if (err) return err; - return mv88e6xxx_g2_update(chip, MV88E6352_G2_AVB_CMD, writeop); + err = mv88e6xxx_g2_write(chip, MV88E6352_G2_AVB_CMD, + MV88E6352_G2_AVB_CMD_BUSY | writeop); + if (err) + return err; + + return mv88e6xxx_g2_avb_wait(chip); } static int mv88e6352_g2_avb_port_ptp_read(struct mv88e6xxx_chip *chip, diff --git a/drivers/net/dsa/mv88e6xxx/global2_scratch.c b/drivers/net/dsa/mv88e6xxx/global2_scratch.c index baddecadd8be..33b7b9570d29 100644 --- a/drivers/net/dsa/mv88e6xxx/global2_scratch.c +++ b/drivers/net/dsa/mv88e6xxx/global2_scratch.c @@ -37,7 +37,8 @@ static int mv88e6xxx_g2_scratch_write(struct mv88e6xxx_chip *chip, int reg, { u16 value = (reg << 8) | data; - return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_SCRATCH_MISC_MISC, value); + return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SCRATCH_MISC_MISC, + MV88E6XXX_G2_SCRATCH_MISC_UPDATE | value); } /** diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c index 04309ef0a1cc..15ef81654b67 100644 --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c @@ -392,17 +392,14 @@ phy_interface_t mv88e6390x_port_max_speed_mode(int port) return PHY_INTERFACE_MODE_NA; } -int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, - phy_interface_t mode) +static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port, + phy_interface_t mode) { - int lane; + u8 lane; u16 cmode; u16 reg; int err; - if (port != 9 && port != 10) - return -EOPNOTSUPP; - /* Default to a slow mode, so freeing up SERDES interfaces for * other ports which might use them for SFPs. 
*/ @@ -411,7 +408,7 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, switch (mode) { case PHY_INTERFACE_MODE_1000BASEX: - cmode = MV88E6XXX_PORT_STS_CMODE_1000BASE_X; + cmode = MV88E6XXX_PORT_STS_CMODE_1000BASEX; break; case PHY_INTERFACE_MODE_SGMII: cmode = MV88E6XXX_PORT_STS_CMODE_SGMII; @@ -434,18 +431,15 @@ if (cmode == chip->ports[port].cmode) return 0; - lane = mv88e6390x_serdes_get_lane(chip, port); - if (lane < 0 && lane != -ENODEV) - return lane; - - if (lane >= 0) { + lane = mv88e6xxx_serdes_get_lane(chip, port); + if (lane) { if (chip->ports[port].serdes_irq) { - err = mv88e6390_serdes_irq_disable(chip, port, lane); + err = mv88e6xxx_serdes_irq_disable(chip, port, lane); if (err) return err; } - err = mv88e6390x_serdes_power(chip, port, false); + err = mv88e6xxx_serdes_power_down(chip, port, lane); if (err) return err; } @@ -466,16 +460,16 @@ chip->ports[port].cmode = cmode; - lane = mv88e6390x_serdes_get_lane(chip, port); - if (lane < 0) - return lane; + lane = mv88e6xxx_serdes_get_lane(chip, port); + if (!lane) + return -ENODEV; - err = mv88e6390x_serdes_power(chip, port, true); + err = mv88e6xxx_serdes_power_up(chip, port, lane); if (err) return err; if (chip->ports[port].serdes_irq) { - err = mv88e6390_serdes_irq_enable(chip, port, lane); + err = mv88e6xxx_serdes_irq_enable(chip, port, lane); if (err) return err; } @@ -484,9 +478,21 @@ return 0; } +int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, + phy_interface_t mode) +{ + if (port != 9 && port != 10) + return -EOPNOTSUPP; + + return mv88e6xxx_port_set_cmode(chip, port, mode); +} + int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port, phy_interface_t mode) { + if (port != 9 && port != 10) + return -EOPNOTSUPP; + switch (mode) { case PHY_INTERFACE_MODE_NA: return 0; @@ -498,7 +504,58 @@ break; } - return mv88e6390x_port_set_cmode(chip, port, mode); + return mv88e6xxx_port_set_cmode(chip, port, mode); +} + +static int mv88e6341_port_set_cmode_writable(struct mv88e6xxx_chip *chip, + int port) +{ + int err, addr; + u16 reg, bits; + + if (port != 5) + return -EOPNOTSUPP; + + addr = chip->info->port_base_addr + port; + + err = mv88e6xxx_port_hidden_read(chip, 0x7, addr, 0, &reg); + if (err) + return err; + + bits = MV88E6341_PORT_RESERVED_1A_FORCE_CMODE | + MV88E6341_PORT_RESERVED_1A_SGMII_AN; + + if ((reg & bits) == bits) + return 0; + + reg |= bits; + return mv88e6xxx_port_hidden_write(chip, 0x7, addr, 0, reg); +} + +int mv88e6341_port_set_cmode(struct mv88e6xxx_chip *chip, int port, + phy_interface_t mode) +{ + int err; + + if (port != 5) + return -EOPNOTSUPP; + + switch (mode) { + case PHY_INTERFACE_MODE_NA: + return 0; + case PHY_INTERFACE_MODE_XGMII: + case PHY_INTERFACE_MODE_XAUI: + case PHY_INTERFACE_MODE_RXAUI: + return -EINVAL; + default: + break; + } + + err = mv88e6341_port_set_cmode_writable(chip, port); + if (err) + return err; + + return mv88e6xxx_port_set_cmode(chip, port, mode); } int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode) @@ -590,6 +647,7 @@ int mv88e6250_port_link_state(struct mv88e6xxx_chip *chip, int port, state->link = !!(reg & MV88E6250_PORT_STS_LINK); state->an_enabled = 1; state->an_complete = state->link; + state->interface = 
PHY_INTERFACE_MODE_NA; return 0; } @@ -600,6 +658,43 @@ int mv88e6352_port_link_state(struct mv88e6xxx_chip *chip, int port, int err; u16 reg; + switch (chip->ports[port].cmode) { + case MV88E6XXX_PORT_STS_CMODE_RGMII: + err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_MAC_CTL, + ®); + if (err) + return err; + + if ((reg & MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_RXCLK) && + (reg & MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_TXCLK)) + state->interface = PHY_INTERFACE_MODE_RGMII_ID; + else if (reg & MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_RXCLK) + state->interface = PHY_INTERFACE_MODE_RGMII_RXID; + else if (reg & MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_TXCLK) + state->interface = PHY_INTERFACE_MODE_RGMII_TXID; + else + state->interface = PHY_INTERFACE_MODE_RGMII; + break; + case MV88E6XXX_PORT_STS_CMODE_1000BASEX: + state->interface = PHY_INTERFACE_MODE_1000BASEX; + break; + case MV88E6XXX_PORT_STS_CMODE_SGMII: + state->interface = PHY_INTERFACE_MODE_SGMII; + break; + case MV88E6XXX_PORT_STS_CMODE_2500BASEX: + state->interface = PHY_INTERFACE_MODE_2500BASEX; + break; + case MV88E6XXX_PORT_STS_CMODE_XAUI: + state->interface = PHY_INTERFACE_MODE_XAUI; + break; + case MV88E6XXX_PORT_STS_CMODE_RXAUI: + state->interface = PHY_INTERFACE_MODE_RXAUI; + break; + default: + /* we do not support other cmode values here */ + state->interface = PHY_INTERFACE_MODE_NA; + } + err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, ®); if (err) return err; @@ -1246,3 +1341,77 @@ int mv88e6390_port_tag_remap(struct mv88e6xxx_chip *chip, int port) return 0; } + +/* Offset 0x0E: Policy Control Register */ + +int mv88e6352_port_set_policy(struct mv88e6xxx_chip *chip, int port, + enum mv88e6xxx_policy_mapping mapping, + enum mv88e6xxx_policy_action action) +{ + u16 reg, mask, val; + int shift; + int err; + + switch (mapping) { + case MV88E6XXX_POLICY_MAPPING_DA: + shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_DA_MASK); + mask = MV88E6XXX_PORT_POLICY_CTL_DA_MASK; + break; + case MV88E6XXX_POLICY_MAPPING_SA: + shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_SA_MASK); + mask = MV88E6XXX_PORT_POLICY_CTL_SA_MASK; + break; + case MV88E6XXX_POLICY_MAPPING_VTU: + shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_VTU_MASK); + mask = MV88E6XXX_PORT_POLICY_CTL_VTU_MASK; + break; + case MV88E6XXX_POLICY_MAPPING_ETYPE: + shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_ETYPE_MASK); + mask = MV88E6XXX_PORT_POLICY_CTL_ETYPE_MASK; + break; + case MV88E6XXX_POLICY_MAPPING_PPPOE: + shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_PPPOE_MASK); + mask = MV88E6XXX_PORT_POLICY_CTL_PPPOE_MASK; + break; + case MV88E6XXX_POLICY_MAPPING_VBAS: + shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_VBAS_MASK); + mask = MV88E6XXX_PORT_POLICY_CTL_VBAS_MASK; + break; + case MV88E6XXX_POLICY_MAPPING_OPT82: + shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_OPT82_MASK); + mask = MV88E6XXX_PORT_POLICY_CTL_OPT82_MASK; + break; + case MV88E6XXX_POLICY_MAPPING_UDP: + shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_UDP_MASK); + mask = MV88E6XXX_PORT_POLICY_CTL_UDP_MASK; + break; + default: + return -EOPNOTSUPP; + } + + switch (action) { + case MV88E6XXX_POLICY_ACTION_NORMAL: + val = MV88E6XXX_PORT_POLICY_CTL_NORMAL; + break; + case MV88E6XXX_POLICY_ACTION_MIRROR: + val = MV88E6XXX_PORT_POLICY_CTL_MIRROR; + break; + case MV88E6XXX_POLICY_ACTION_TRAP: + val = MV88E6XXX_PORT_POLICY_CTL_TRAP; + break; + case MV88E6XXX_POLICY_ACTION_DISCARD: + val = MV88E6XXX_PORT_POLICY_CTL_DISCARD; + break; + default: + return -EOPNOTSUPP; + } + + err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_POLICY_CTL, ®); + if (err) + return 
err; + + reg &= ~mask; + reg |= (val << shift) & mask; + + return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_POLICY_CTL, reg); +} diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h index 8d5a6cd6fb19..03a480cd71b9 100644 --- a/drivers/net/dsa/mv88e6xxx/port.h +++ b/drivers/net/dsa/mv88e6xxx/port.h @@ -42,8 +42,9 @@ #define MV88E6XXX_PORT_STS_TX_PAUSED 0x0020 #define MV88E6XXX_PORT_STS_FLOW_CTL 0x0010 #define MV88E6XXX_PORT_STS_CMODE_MASK 0x000f -#define MV88E6XXX_PORT_STS_CMODE_100BASE_X 0x0008 -#define MV88E6XXX_PORT_STS_CMODE_1000BASE_X 0x0009 +#define MV88E6XXX_PORT_STS_CMODE_RGMII 0x0007 +#define MV88E6XXX_PORT_STS_CMODE_100BASEX 0x0008 +#define MV88E6XXX_PORT_STS_CMODE_1000BASEX 0x0009 #define MV88E6XXX_PORT_STS_CMODE_SGMII 0x000a #define MV88E6XXX_PORT_STS_CMODE_2500BASEX 0x000b #define MV88E6XXX_PORT_STS_CMODE_XAUI 0x000c @@ -117,6 +118,7 @@ #define MV88E6XXX_PORT_SWITCH_ID_PROD_6190 0x1900 #define MV88E6XXX_PORT_SWITCH_ID_PROD_6191 0x1910 #define MV88E6XXX_PORT_SWITCH_ID_PROD_6185 0x1a70 +#define MV88E6XXX_PORT_SWITCH_ID_PROD_6220 0x2200 #define MV88E6XXX_PORT_SWITCH_ID_PROD_6240 0x2400 #define MV88E6XXX_PORT_SWITCH_ID_PROD_6250 0x2500 #define MV88E6XXX_PORT_SWITCH_ID_PROD_6290 0x2900 @@ -220,7 +222,19 @@ #define MV88E6XXX_PORT_PRI_OVERRIDE 0x0d /* Offset 0x0E: Policy Control Register */ -#define MV88E6XXX_PORT_POLICY_CTL 0x0e +#define MV88E6XXX_PORT_POLICY_CTL 0x0e +#define MV88E6XXX_PORT_POLICY_CTL_DA_MASK 0xc000 +#define MV88E6XXX_PORT_POLICY_CTL_SA_MASK 0x3000 +#define MV88E6XXX_PORT_POLICY_CTL_VTU_MASK 0x0c00 +#define MV88E6XXX_PORT_POLICY_CTL_ETYPE_MASK 0x0300 +#define MV88E6XXX_PORT_POLICY_CTL_PPPOE_MASK 0x00c0 +#define MV88E6XXX_PORT_POLICY_CTL_VBAS_MASK 0x0030 +#define MV88E6XXX_PORT_POLICY_CTL_OPT82_MASK 0x000c +#define MV88E6XXX_PORT_POLICY_CTL_UDP_MASK 0x0003 +#define MV88E6XXX_PORT_POLICY_CTL_NORMAL 0x0000 +#define MV88E6XXX_PORT_POLICY_CTL_MIRROR 0x0001 +#define MV88E6XXX_PORT_POLICY_CTL_TRAP 0x0002 +#define MV88E6XXX_PORT_POLICY_CTL_DISCARD 0x0003 /* Offset 0x0F: Port Special Ether Type */ #define MV88E6XXX_PORT_ETH_TYPE 0x0f @@ -259,14 +273,16 @@ #define MV88E6095_PORT_IEEE_PRIO_REMAP_4567 0x19 /* Offset 0x1a: Magic undocumented errata register */ -#define PORT_RESERVED_1A 0x1a -#define PORT_RESERVED_1A_BUSY BIT(15) -#define PORT_RESERVED_1A_WRITE BIT(14) -#define PORT_RESERVED_1A_READ 0 -#define PORT_RESERVED_1A_PORT_SHIFT 5 -#define PORT_RESERVED_1A_BLOCK (0xf << 10) -#define PORT_RESERVED_1A_CTRL_PORT 4 -#define PORT_RESERVED_1A_DATA_PORT 5 +#define MV88E6XXX_PORT_RESERVED_1A 0x1a +#define MV88E6XXX_PORT_RESERVED_1A_BUSY 0x8000 +#define MV88E6XXX_PORT_RESERVED_1A_WRITE 0x4000 +#define MV88E6XXX_PORT_RESERVED_1A_READ 0x0000 +#define MV88E6XXX_PORT_RESERVED_1A_PORT_SHIFT 5 +#define MV88E6XXX_PORT_RESERVED_1A_BLOCK_SHIFT 10 +#define MV88E6XXX_PORT_RESERVED_1A_CTRL_PORT 0x04 +#define MV88E6XXX_PORT_RESERVED_1A_DATA_PORT 0x05 +#define MV88E6341_PORT_RESERVED_1A_FORCE_CMODE 0x8000 +#define MV88E6341_PORT_RESERVED_1A_SGMII_AN 0x2000 int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg, u16 *val); @@ -320,6 +336,9 @@ int mv88e6185_port_set_egress_floods(struct mv88e6xxx_chip *chip, int port, bool unicast, bool multicast); int mv88e6352_port_set_egress_floods(struct mv88e6xxx_chip *chip, int port, bool unicast, bool multicast); +int mv88e6352_port_set_policy(struct mv88e6xxx_chip *chip, int port, + enum mv88e6xxx_policy_mapping mapping, + enum mv88e6xxx_policy_action action); int mv88e6351_port_set_ether_type(struct 
mv88e6xxx_chip *chip, int port, u16 etype); int mv88e6xxx_port_set_message_port(struct mv88e6xxx_chip *chip, int port, @@ -332,6 +351,8 @@ int mv88e6097_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in, u8 out); int mv88e6390_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in, u8 out); +int mv88e6341_port_set_cmode(struct mv88e6xxx_chip *chip, int port, + phy_interface_t mode); int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port, phy_interface_t mode); int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, @@ -351,4 +372,10 @@ int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port, int mv88e6xxx_port_disable_learn_limit(struct mv88e6xxx_chip *chip, int port); int mv88e6xxx_port_disable_pri_override(struct mv88e6xxx_chip *chip, int port); +int mv88e6xxx_port_hidden_write(struct mv88e6xxx_chip *chip, int block, + int port, int reg, u16 val); +int mv88e6xxx_port_hidden_wait(struct mv88e6xxx_chip *chip); +int mv88e6xxx_port_hidden_read(struct mv88e6xxx_chip *chip, int block, int port, + int reg, u16 *val); + #endif /* _MV88E6XXX_PORT_H */ diff --git a/drivers/net/dsa/mv88e6xxx/port_hidden.c b/drivers/net/dsa/mv88e6xxx/port_hidden.c new file mode 100644 index 000000000000..b49d05f0e117 --- /dev/null +++ b/drivers/net/dsa/mv88e6xxx/port_hidden.c @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Marvell 88E6xxx Switch Hidden Registers support + * + * Copyright (c) 2008 Marvell Semiconductor + * + * Copyright (c) 2019 Andrew Lunn <andrew@lunn.ch> + */ + +#include <linux/bitfield.h> + +#include "chip.h" +#include "port.h" + +/* The mv88e6390 and mv88e6341 have some hidden registers used for debug and + * development. The errata also makes use of them. + */ +int mv88e6xxx_port_hidden_write(struct mv88e6xxx_chip *chip, int block, + int port, int reg, u16 val) +{ + u16 ctrl; + int err; + + err = mv88e6xxx_port_write(chip, MV88E6XXX_PORT_RESERVED_1A_DATA_PORT, + MV88E6XXX_PORT_RESERVED_1A, val); + if (err) + return err; + + ctrl = MV88E6XXX_PORT_RESERVED_1A_BUSY | + MV88E6XXX_PORT_RESERVED_1A_WRITE | + block << MV88E6XXX_PORT_RESERVED_1A_BLOCK_SHIFT | + port << MV88E6XXX_PORT_RESERVED_1A_PORT_SHIFT | + reg; + + return mv88e6xxx_port_write(chip, MV88E6XXX_PORT_RESERVED_1A_CTRL_PORT, + MV88E6XXX_PORT_RESERVED_1A, ctrl); +} + +int mv88e6xxx_port_hidden_wait(struct mv88e6xxx_chip *chip) +{ + int bit = __bf_shf(MV88E6XXX_PORT_RESERVED_1A_BUSY); + + return mv88e6xxx_wait_bit(chip, MV88E6XXX_PORT_RESERVED_1A_CTRL_PORT, + MV88E6XXX_PORT_RESERVED_1A, bit, 0); +} + +int mv88e6xxx_port_hidden_read(struct mv88e6xxx_chip *chip, int block, int port, + int reg, u16 *val) +{ + u16 ctrl; + int err; + + ctrl = MV88E6XXX_PORT_RESERVED_1A_BUSY | + MV88E6XXX_PORT_RESERVED_1A_READ | + block << MV88E6XXX_PORT_RESERVED_1A_BLOCK_SHIFT | + port << MV88E6XXX_PORT_RESERVED_1A_PORT_SHIFT | + reg; + + err = mv88e6xxx_port_write(chip, MV88E6XXX_PORT_RESERVED_1A_CTRL_PORT, + MV88E6XXX_PORT_RESERVED_1A, ctrl); + if (err) + return err; + + err = mv88e6xxx_port_hidden_wait(chip); + if (err) + return err; + + return mv88e6xxx_port_read(chip, MV88E6XXX_PORT_RESERVED_1A_DATA_PORT, + MV88E6XXX_PORT_RESERVED_1A, val); +} diff --git a/drivers/net/dsa/mv88e6xxx/ptp.c b/drivers/net/dsa/mv88e6xxx/ptp.c index 768d256f7c9f..073cbd0bb91b 100644 --- a/drivers/net/dsa/mv88e6xxx/ptp.c +++ b/drivers/net/dsa/mv88e6xxx/ptp.c @@ -15,11 +15,31 @@ #include "hwtstamp.h" #include "ptp.h" -/* Raw timestamps are in units of 8-ns clock periods. 
*/ -#define CC_SHIFT 28 -#define CC_MULT (8 << CC_SHIFT) -#define CC_MULT_NUM (1 << 9) -#define CC_MULT_DEM 15625ULL +#define MV88E6XXX_MAX_ADJ_PPB 1000000 + +/* Family MV88E6250: + * Raw timestamps are in units of 10-ns clock periods. + * + * clkadj = scaled_ppm * 10*2^28 / (10^6 * 2^16) + * simplifies to + * clkadj = scaled_ppm * 2^7 / 5^5 + */ +#define MV88E6250_CC_SHIFT 28 +#define MV88E6250_CC_MULT (10 << MV88E6250_CC_SHIFT) +#define MV88E6250_CC_MULT_NUM (1 << 7) +#define MV88E6250_CC_MULT_DEM 3125ULL + +/* Other families: + * Raw timestamps are in units of 8-ns clock periods. + * + * clkadj = scaled_ppm * 8*2^28 / (10^6 * 2^16) + * simplifies to + * clkadj = scaled_ppm * 2^9 / 5^6 + */ +#define MV88E6XXX_CC_SHIFT 28 +#define MV88E6XXX_CC_MULT (8 << MV88E6XXX_CC_SHIFT) +#define MV88E6XXX_CC_MULT_NUM (1 << 9) +#define MV88E6XXX_CC_MULT_DEM 15625ULL #define TAI_EVENT_WORK_INTERVAL msecs_to_jiffies(100) @@ -179,6 +199,7 @@ out: static int mv88e6xxx_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) { struct mv88e6xxx_chip *chip = ptp_to_chip(ptp); + const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops; int neg_adj = 0; u32 diff, mult; u64 adj; @@ -187,10 +208,11 @@ static int mv88e6xxx_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) neg_adj = 1; scaled_ppm = -scaled_ppm; } - mult = CC_MULT; - adj = CC_MULT_NUM; + + mult = ptp_ops->cc_mult; + adj = ptp_ops->cc_mult_num; adj *= scaled_ppm; - diff = div_u64(adj, CC_MULT_DEM); + diff = div_u64(adj, ptp_ops->cc_mult_dem); mv88e6xxx_reg_lock(chip); @@ -310,7 +332,27 @@ static int mv88e6352_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, return 0; } -const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = { +const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = { + .clock_read = mv88e6165_ptp_clock_read, + .global_enable = mv88e6165_global_enable, + .global_disable = mv88e6165_global_disable, + .arr0_sts_reg = MV88E6165_PORT_PTP_ARR0_STS, + .arr1_sts_reg = MV88E6165_PORT_PTP_ARR1_STS, + .dep_sts_reg = MV88E6165_PORT_PTP_DEP_STS, + .rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ), + .cc_shift = MV88E6XXX_CC_SHIFT, + .cc_mult = MV88E6XXX_CC_MULT, + .cc_mult_num = MV88E6XXX_CC_MULT_NUM, + .cc_mult_dem = MV88E6XXX_CC_MULT_DEM, +}; + +const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops = { .clock_read = mv88e6352_ptp_clock_read, .ptp_enable = mv88e6352_ptp_enable, .ptp_verify = mv88e6352_ptp_verify, @@ -331,22 +373,37 @@ const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = { (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ), + .cc_shift = MV88E6250_CC_SHIFT, + .cc_mult = MV88E6250_CC_MULT, + .cc_mult_num = MV88E6250_CC_MULT_NUM, + .cc_mult_dem = MV88E6250_CC_MULT_DEM, }; -const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = { - .clock_read = mv88e6165_ptp_clock_read, - .global_enable = mv88e6165_global_enable, - .global_disable = mv88e6165_global_disable, - .arr0_sts_reg = MV88E6165_PORT_PTP_ARR0_STS, - .arr1_sts_reg = MV88E6165_PORT_PTP_ARR1_STS, - .dep_sts_reg = MV88E6165_PORT_PTP_DEP_STS, +const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = { + .clock_read = mv88e6352_ptp_clock_read, + .ptp_enable = mv88e6352_ptp_enable, + .ptp_verify = mv88e6352_ptp_verify, + .event_work = mv88e6352_tai_event_work, + .port_enable = 
mv88e6352_hwtstamp_port_enable, + .port_disable = mv88e6352_hwtstamp_port_disable, + .n_ext_ts = 1, + .arr0_sts_reg = MV88E6XXX_PORT_PTP_ARR0_STS, + .arr1_sts_reg = MV88E6XXX_PORT_PTP_ARR1_STS, + .dep_sts_reg = MV88E6XXX_PORT_PTP_DEP_STS, .rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ), + .cc_shift = MV88E6XXX_CC_SHIFT, + .cc_mult = MV88E6XXX_CC_MULT, + .cc_mult_num = MV88E6XXX_CC_MULT_NUM, + .cc_mult_dem = MV88E6XXX_CC_MULT_DEM, }; static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc) @@ -384,8 +441,8 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip) memset(&chip->tstamp_cc, 0, sizeof(chip->tstamp_cc)); chip->tstamp_cc.read = mv88e6xxx_ptp_clock_read; chip->tstamp_cc.mask = CYCLECOUNTER_MASK(32); - chip->tstamp_cc.mult = CC_MULT; - chip->tstamp_cc.shift = CC_SHIFT; + chip->tstamp_cc.mult = ptp_ops->cc_mult; + chip->tstamp_cc.shift = ptp_ops->cc_shift; timecounter_init(&chip->tstamp_tc, &chip->tstamp_cc, ktime_to_ns(ktime_get_real())); @@ -397,7 +454,6 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip) chip->ptp_clock_info.owner = THIS_MODULE; snprintf(chip->ptp_clock_info.name, sizeof(chip->ptp_clock_info.name), "%s", dev_name(chip->dev)); - chip->ptp_clock_info.max_adj = 1000000; chip->ptp_clock_info.n_ext_ts = ptp_ops->n_ext_ts; chip->ptp_clock_info.n_per_out = 0; @@ -413,6 +469,7 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip) } chip->ptp_clock_info.pin_config = chip->pin_config; + chip->ptp_clock_info.max_adj = MV88E6XXX_MAX_ADJ_PPB; chip->ptp_clock_info.adjfine = mv88e6xxx_ptp_adjfine; chip->ptp_clock_info.adjtime = mv88e6xxx_ptp_adjtime; chip->ptp_clock_info.gettime64 = mv88e6xxx_ptp_gettime; diff --git a/drivers/net/dsa/mv88e6xxx/ptp.h b/drivers/net/dsa/mv88e6xxx/ptp.h index 0a1f8de8f062..269d5d16a466 100644 --- a/drivers/net/dsa/mv88e6xxx/ptp.h +++ b/drivers/net/dsa/mv88e6xxx/ptp.h @@ -148,8 +148,9 @@ void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip); #define ptp_to_chip(ptp) container_of(ptp, struct mv88e6xxx_chip, \ ptp_clock_info) -extern const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops; extern const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops; +extern const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops; +extern const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops; #else /* !CONFIG_NET_DSA_MV88E6XXX_PTP */ @@ -167,8 +168,9 @@ static inline void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip) { } -static const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {}; static const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = {}; +static const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops = {}; +static const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {}; #endif /* CONFIG_NET_DSA_MV88E6XXX_PTP */ diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c index 20c526c2a9ee..902feb398746 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.c +++ b/drivers/net/dsa/mv88e6xxx/serdes.c @@ -49,7 +49,8 @@ static int mv88e6390_serdes_write(struct mv88e6xxx_chip *chip, return mv88e6xxx_phy_write(chip, lane, reg_c45, val); } -static int mv88e6352_serdes_power_set(struct mv88e6xxx_chip *chip, bool on) +int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane, + bool up) { u16 
val, new_val; int err; @@ -58,7 +59,7 @@ static int mv88e6352_serdes_power_set(struct mv88e6xxx_chip *chip, bool on) if (err) return err; - if (on) + if (up) new_val = val & ~BMCR_PDOWN; else new_val = val | BMCR_PDOWN; @@ -69,29 +70,25 @@ static int mv88e6352_serdes_power_set(struct mv88e6xxx_chip *chip, bool on) return err; } -static bool mv88e6352_port_has_serdes(struct mv88e6xxx_chip *chip, int port) +u8 mv88e6352_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) { u8 cmode = chip->ports[port].cmode; + u8 lane = 0; - if ((cmode == MV88E6XXX_PORT_STS_CMODE_100BASE_X) || - (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASE_X) || + if ((cmode == MV88E6XXX_PORT_STS_CMODE_100BASEX) || + (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASEX) || (cmode == MV88E6XXX_PORT_STS_CMODE_SGMII)) - return true; + lane = 0xff; /* Unused */ - return false; + return lane; } -int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on) +static bool mv88e6352_port_has_serdes(struct mv88e6xxx_chip *chip, int port) { - int err; - - if (mv88e6352_port_has_serdes(chip, port)) { - err = mv88e6352_serdes_power_set(chip, on); - if (err < 0) - return err; - } + if (mv88e6xxx_serdes_get_lane(chip, port)) + return true; - return 0; + return false; } struct mv88e6352_serdes_hw_stat { @@ -186,214 +183,178 @@ static void mv88e6352_serdes_irq_link(struct mv88e6xxx_chip *chip, int port) struct dsa_switch *ds = chip->ds; u16 status; bool up; + int err; - mv88e6352_serdes_read(chip, MII_BMSR, &status); + err = mv88e6352_serdes_read(chip, MII_BMSR, &status); + if (err) + return; /* Status must be read twice in order to give the current link * status. Otherwise the change in link status since the last * read of the register is returned. */ - mv88e6352_serdes_read(chip, MII_BMSR, &status); + err = mv88e6352_serdes_read(chip, MII_BMSR, &status); + if (err) + return; up = status & BMSR_LSTATUS; dsa_port_phylink_mac_change(ds, port, up); } -static irqreturn_t mv88e6352_serdes_thread_fn(int irq, void *dev_id) +irqreturn_t mv88e6352_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, + u8 lane) { - struct mv88e6xxx_port *port = dev_id; - struct mv88e6xxx_chip *chip = port->chip; irqreturn_t ret = IRQ_NONE; u16 status; int err; - mv88e6xxx_reg_lock(chip); - err = mv88e6352_serdes_read(chip, MV88E6352_SERDES_INT_STATUS, &status); if (err) - goto out; + return ret; if (status & MV88E6352_SERDES_INT_LINK_CHANGE) { ret = IRQ_HANDLED; - mv88e6352_serdes_irq_link(chip, port->port); + mv88e6352_serdes_irq_link(chip, port); } -out: - mv88e6xxx_reg_unlock(chip); return ret; } -static int mv88e6352_serdes_irq_enable(struct mv88e6xxx_chip *chip) +int mv88e6352_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, u8 lane, + bool enable) { - return mv88e6352_serdes_write(chip, MV88E6352_SERDES_INT_ENABLE, - MV88E6352_SERDES_INT_LINK_CHANGE); -} + u16 val = 0; -static int mv88e6352_serdes_irq_disable(struct mv88e6xxx_chip *chip) -{ - return mv88e6352_serdes_write(chip, MV88E6352_SERDES_INT_ENABLE, 0); + if (enable) + val |= MV88E6352_SERDES_INT_LINK_CHANGE; + + return mv88e6352_serdes_write(chip, MV88E6352_SERDES_INT_ENABLE, val); } -int mv88e6352_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port) +unsigned int mv88e6352_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port) { - int err; - - if (!mv88e6352_port_has_serdes(chip, port)) - return 0; - - chip->ports[port].serdes_irq = irq_find_mapping(chip->g2_irq.domain, - MV88E6352_SERDES_IRQ); - if (chip->ports[port].serdes_irq < 0) { - dev_err(chip->dev, "Unable to map SERDES 
irq: %d\n", - chip->ports[port].serdes_irq); - return chip->ports[port].serdes_irq; - } - - /* Requesting the IRQ will trigger irq callbacks. So we cannot - * hold the reg_lock. - */ - mv88e6xxx_reg_unlock(chip); - err = request_threaded_irq(chip->ports[port].serdes_irq, NULL, - mv88e6352_serdes_thread_fn, - IRQF_ONESHOT, "mv88e6xxx-serdes", - &chip->ports[port]); - mv88e6xxx_reg_lock(chip); - - if (err) { - dev_err(chip->dev, "Unable to request SERDES interrupt: %d\n", - err); - return err; - } - - return mv88e6352_serdes_irq_enable(chip); + return irq_find_mapping(chip->g2_irq.domain, MV88E6352_SERDES_IRQ); } -void mv88e6352_serdes_irq_free(struct mv88e6xxx_chip *chip, int port) +u8 mv88e6341_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) { - if (!mv88e6352_port_has_serdes(chip, port)) - return; - - mv88e6352_serdes_irq_disable(chip); + u8 cmode = chip->ports[port].cmode; + u8 lane = 0; - /* Freeing the IRQ will trigger irq callbacks. So we cannot - * hold the reg_lock. - */ - mv88e6xxx_reg_unlock(chip); - free_irq(chip->ports[port].serdes_irq, &chip->ports[port]); - mv88e6xxx_reg_lock(chip); + switch (port) { + case 5: + if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASEX || + cmode == MV88E6XXX_PORT_STS_CMODE_SGMII || + cmode == MV88E6XXX_PORT_STS_CMODE_2500BASEX) + lane = MV88E6341_PORT5_LANE; + break; + } - chip->ports[port].serdes_irq = 0; + return lane; } -/* Return the SERDES lane address a port is using. Only Ports 9 and 10 - * have SERDES lanes. Returns -ENODEV if a port does not have a lane. - */ -static int mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) +u8 mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) { u8 cmode = chip->ports[port].cmode; + u8 lane = 0; switch (port) { case 9: - if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASE_X || + if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASEX || cmode == MV88E6XXX_PORT_STS_CMODE_SGMII || cmode == MV88E6XXX_PORT_STS_CMODE_2500BASEX) - return MV88E6390_PORT9_LANE0; - return -ENODEV; + lane = MV88E6390_PORT9_LANE0; + break; case 10: - if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASE_X || + if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASEX || cmode == MV88E6XXX_PORT_STS_CMODE_SGMII || cmode == MV88E6XXX_PORT_STS_CMODE_2500BASEX) - return MV88E6390_PORT10_LANE0; - return -ENODEV; - default: - return -ENODEV; + lane = MV88E6390_PORT10_LANE0; + break; } + + return lane; } -/* Return the SERDES lane address a port is using. Ports 9 and 10 can - * use multiple lanes. If so, return the first lane the port uses. - * Returns -ENODEV if a port does not have a lane. 
- */ -int mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) +u8 mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) { - u8 cmode_port9, cmode_port10, cmode_port; - - cmode_port9 = chip->ports[9].cmode; - cmode_port10 = chip->ports[10].cmode; - cmode_port = chip->ports[port].cmode; + u8 cmode_port = chip->ports[port].cmode; + u8 cmode_port10 = chip->ports[10].cmode; + u8 cmode_port9 = chip->ports[9].cmode; + u8 lane = 0; switch (port) { case 2: - if (cmode_port9 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X || + if (cmode_port9 == MV88E6XXX_PORT_STS_CMODE_1000BASEX || cmode_port9 == MV88E6XXX_PORT_STS_CMODE_SGMII || cmode_port9 == MV88E6XXX_PORT_STS_CMODE_2500BASEX) - if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASE_X) - return MV88E6390_PORT9_LANE1; - return -ENODEV; + if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASEX) + lane = MV88E6390_PORT9_LANE1; + break; case 3: - if (cmode_port9 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X || + if (cmode_port9 == MV88E6XXX_PORT_STS_CMODE_1000BASEX || cmode_port9 == MV88E6XXX_PORT_STS_CMODE_SGMII || cmode_port9 == MV88E6XXX_PORT_STS_CMODE_2500BASEX || cmode_port9 == MV88E6XXX_PORT_STS_CMODE_RXAUI) - if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASE_X) - return MV88E6390_PORT9_LANE2; - return -ENODEV; + if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASEX) + lane = MV88E6390_PORT9_LANE2; + break; case 4: - if (cmode_port9 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X || + if (cmode_port9 == MV88E6XXX_PORT_STS_CMODE_1000BASEX || cmode_port9 == MV88E6XXX_PORT_STS_CMODE_SGMII || cmode_port9 == MV88E6XXX_PORT_STS_CMODE_2500BASEX || cmode_port9 == MV88E6XXX_PORT_STS_CMODE_RXAUI) - if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASE_X) - return MV88E6390_PORT9_LANE3; - return -ENODEV; + if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASEX) + lane = MV88E6390_PORT9_LANE3; + break; case 5: - if (cmode_port10 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X || + if (cmode_port10 == MV88E6XXX_PORT_STS_CMODE_1000BASEX || cmode_port10 == MV88E6XXX_PORT_STS_CMODE_SGMII || cmode_port10 == MV88E6XXX_PORT_STS_CMODE_2500BASEX) - if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASE_X) - return MV88E6390_PORT10_LANE1; - return -ENODEV; + if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASEX) + lane = MV88E6390_PORT10_LANE1; + break; case 6: - if (cmode_port10 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X || + if (cmode_port10 == MV88E6XXX_PORT_STS_CMODE_1000BASEX || cmode_port10 == MV88E6XXX_PORT_STS_CMODE_SGMII || cmode_port10 == MV88E6XXX_PORT_STS_CMODE_2500BASEX || cmode_port10 == MV88E6XXX_PORT_STS_CMODE_RXAUI) - if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASE_X) - return MV88E6390_PORT10_LANE2; - return -ENODEV; + if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASEX) + lane = MV88E6390_PORT10_LANE2; + break; case 7: - if (cmode_port10 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X || + if (cmode_port10 == MV88E6XXX_PORT_STS_CMODE_1000BASEX || cmode_port10 == MV88E6XXX_PORT_STS_CMODE_SGMII || cmode_port10 == MV88E6XXX_PORT_STS_CMODE_2500BASEX || cmode_port10 == MV88E6XXX_PORT_STS_CMODE_RXAUI) - if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASE_X) - return MV88E6390_PORT10_LANE3; - return -ENODEV; + if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASEX) + lane = MV88E6390_PORT10_LANE3; + break; case 9: - if (cmode_port9 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X || + if (cmode_port9 == MV88E6XXX_PORT_STS_CMODE_1000BASEX || cmode_port9 == MV88E6XXX_PORT_STS_CMODE_SGMII || cmode_port9 == MV88E6XXX_PORT_STS_CMODE_2500BASEX || cmode_port9 == MV88E6XXX_PORT_STS_CMODE_XAUI || 
cmode_port9 == MV88E6XXX_PORT_STS_CMODE_RXAUI) - return MV88E6390_PORT9_LANE0; - return -ENODEV; + lane = MV88E6390_PORT9_LANE0; + break; case 10: - if (cmode_port10 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X || + if (cmode_port10 == MV88E6XXX_PORT_STS_CMODE_1000BASEX || cmode_port10 == MV88E6XXX_PORT_STS_CMODE_SGMII || cmode_port10 == MV88E6XXX_PORT_STS_CMODE_2500BASEX || cmode_port10 == MV88E6XXX_PORT_STS_CMODE_XAUI || cmode_port10 == MV88E6XXX_PORT_STS_CMODE_RXAUI) - return MV88E6390_PORT10_LANE0; - return -ENODEV; - default: - return -ENODEV; + lane = MV88E6390_PORT10_LANE0; + break; } + + return lane; } -/* Set the power on/off for 10GBASE-R and 10GBASE-X4/X2 */ -static int mv88e6390_serdes_power_10g(struct mv88e6xxx_chip *chip, int lane, - bool on) +/* Set power up/down for 10GBASE-R and 10GBASE-X4/X2 */ +static int mv88e6390_serdes_power_10g(struct mv88e6xxx_chip *chip, u8 lane, + bool up) { u16 val, new_val; int err; @@ -404,7 +365,7 @@ static int mv88e6390_serdes_power_10g(struct mv88e6xxx_chip *chip, int lane, if (err) return err; - if (on) + if (up) new_val = val & ~(MV88E6390_PCS_CONTROL_1_RESET | MV88E6390_PCS_CONTROL_1_LOOPBACK | MV88E6390_PCS_CONTROL_1_PDOWN); @@ -418,9 +379,9 @@ static int mv88e6390_serdes_power_10g(struct mv88e6xxx_chip *chip, int lane, return err; } -/* Set the power on/off for SGMII and 1000Base-X */ -static int mv88e6390_serdes_power_sgmii(struct mv88e6xxx_chip *chip, int lane, - bool on) +/* Set power up/down for SGMII and 1000Base-X */ +static int mv88e6390_serdes_power_sgmii(struct mv88e6xxx_chip *chip, u8 lane, + bool up) { u16 val, new_val; int err; @@ -430,7 +391,7 @@ static int mv88e6390_serdes_power_sgmii(struct mv88e6xxx_chip *chip, int lane, if (err) return err; - if (on) + if (up) new_val = val & ~(MV88E6390_SGMII_CONTROL_RESET | MV88E6390_SGMII_CONTROL_LOOPBACK | MV88E6390_SGMII_CONTROL_PDOWN); @@ -444,70 +405,32 @@ static int mv88e6390_serdes_power_sgmii(struct mv88e6xxx_chip *chip, int lane, return err; } -static int mv88e6390_serdes_power_lane(struct mv88e6xxx_chip *chip, int port, - int lane, bool on) +int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane, + bool up) { u8 cmode = chip->ports[port].cmode; switch (cmode) { case MV88E6XXX_PORT_STS_CMODE_SGMII: - case MV88E6XXX_PORT_STS_CMODE_1000BASE_X: + case MV88E6XXX_PORT_STS_CMODE_1000BASEX: case MV88E6XXX_PORT_STS_CMODE_2500BASEX: - return mv88e6390_serdes_power_sgmii(chip, lane, on); + return mv88e6390_serdes_power_sgmii(chip, lane, up); case MV88E6XXX_PORT_STS_CMODE_XAUI: case MV88E6XXX_PORT_STS_CMODE_RXAUI: - return mv88e6390_serdes_power_10g(chip, lane, on); - } - - return 0; -} - -int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on) -{ - int lane; - - lane = mv88e6390_serdes_get_lane(chip, port); - if (lane == -ENODEV) - return 0; - - if (lane < 0) - return lane; - - switch (port) { - case 9 ... 10: - return mv88e6390_serdes_power_lane(chip, port, lane, on); - } - - return 0; -} - -int mv88e6390x_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on) -{ - int lane; - - lane = mv88e6390x_serdes_get_lane(chip, port); - if (lane == -ENODEV) - return 0; - - if (lane < 0) - return lane; - - switch (port) { - case 2 ... 4: - case 5 ... 7: - case 9 ... 
10: - return mv88e6390_serdes_power_lane(chip, port, lane, on); + return mv88e6390_serdes_power_10g(chip, lane, up); } return 0; } static void mv88e6390_serdes_irq_link_sgmii(struct mv88e6xxx_chip *chip, - int port, int lane) + int port, u8 lane) { + u8 cmode = chip->ports[port].cmode; struct dsa_switch *ds = chip->ds; int duplex = DUPLEX_UNKNOWN; int speed = SPEED_UNKNOWN; + phy_interface_t mode; int link, err; u16 status; @@ -527,7 +450,10 @@ static void mv88e6390_serdes_irq_link_sgmii(struct mv88e6xxx_chip *chip, switch (status & MV88E6390_SGMII_PHY_STATUS_SPEED_MASK) { case MV88E6390_SGMII_PHY_STATUS_SPEED_1000: - speed = SPEED_1000; + if (cmode == MV88E6XXX_PORT_STS_CMODE_2500BASEX) + speed = SPEED_2500; + else + speed = SPEED_1000; break; case MV88E6390_SGMII_PHY_STATUS_SPEED_100: speed = SPEED_100; @@ -541,8 +467,22 @@ static void mv88e6390_serdes_irq_link_sgmii(struct mv88e6xxx_chip *chip, } } + switch (cmode) { + case MV88E6XXX_PORT_STS_CMODE_SGMII: + mode = PHY_INTERFACE_MODE_SGMII; + break; + case MV88E6XXX_PORT_STS_CMODE_1000BASEX: + mode = PHY_INTERFACE_MODE_1000BASEX; + break; + case MV88E6XXX_PORT_STS_CMODE_2500BASEX: + mode = PHY_INTERFACE_MODE_2500BASEX; + break; + default: + mode = PHY_INTERFACE_MODE_NA; + } + err = mv88e6xxx_port_setup_mac(chip, port, link, speed, duplex, - PAUSE_OFF, PHY_INTERFACE_MODE_NA); + PAUSE_OFF, mode); if (err) dev_err(chip->dev, "can't propagate PHY settings to MAC: %d\n", err); @@ -551,55 +491,35 @@ static void mv88e6390_serdes_irq_link_sgmii(struct mv88e6xxx_chip *chip, } static int mv88e6390_serdes_irq_enable_sgmii(struct mv88e6xxx_chip *chip, - int lane) -{ - return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS, - MV88E6390_SGMII_INT_ENABLE, - MV88E6390_SGMII_INT_LINK_DOWN | - MV88E6390_SGMII_INT_LINK_UP); -} - -static int mv88e6390_serdes_irq_disable_sgmii(struct mv88e6xxx_chip *chip, - int lane) + u8 lane, bool enable) { - return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS, - MV88E6390_SGMII_INT_ENABLE, 0); -} + u16 val = 0; -int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, - int lane) -{ - u8 cmode = chip->ports[port].cmode; - int err = 0; - - switch (cmode) { - case MV88E6XXX_PORT_STS_CMODE_SGMII: - case MV88E6XXX_PORT_STS_CMODE_1000BASE_X: - case MV88E6XXX_PORT_STS_CMODE_2500BASEX: - err = mv88e6390_serdes_irq_enable_sgmii(chip, lane); - } + if (enable) + val |= MV88E6390_SGMII_INT_LINK_DOWN | + MV88E6390_SGMII_INT_LINK_UP; - return err; + return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS, + MV88E6390_SGMII_INT_ENABLE, val); } -int mv88e6390_serdes_irq_disable(struct mv88e6xxx_chip *chip, int port, - int lane) +int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, u8 lane, + bool enable) { u8 cmode = chip->ports[port].cmode; - int err = 0; switch (cmode) { case MV88E6XXX_PORT_STS_CMODE_SGMII: - case MV88E6XXX_PORT_STS_CMODE_1000BASE_X: + case MV88E6XXX_PORT_STS_CMODE_1000BASEX: case MV88E6XXX_PORT_STS_CMODE_2500BASEX: - err = mv88e6390_serdes_irq_disable_sgmii(chip, lane); + return mv88e6390_serdes_irq_enable_sgmii(chip, lane, enable); } - return err; + return 0; } static int mv88e6390_serdes_irq_status_sgmii(struct mv88e6xxx_chip *chip, - int lane, u16 *status) + u8 lane, u16 *status) { int err; @@ -609,129 +529,32 @@ static int mv88e6390_serdes_irq_status_sgmii(struct mv88e6xxx_chip *chip, return err; } -static irqreturn_t mv88e6390_serdes_thread_fn(int irq, void *dev_id) +irqreturn_t mv88e6390_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, + u8 lane) { - struct 
mv88e6xxx_port *port = dev_id; - struct mv88e6xxx_chip *chip = port->chip; + u8 cmode = chip->ports[port].cmode; irqreturn_t ret = IRQ_NONE; - u8 cmode = port->cmode; u16 status; - int lane; int err; - lane = mv88e6390x_serdes_get_lane(chip, port->port); - - mv88e6xxx_reg_lock(chip); - switch (cmode) { case MV88E6XXX_PORT_STS_CMODE_SGMII: - case MV88E6XXX_PORT_STS_CMODE_1000BASE_X: + case MV88E6XXX_PORT_STS_CMODE_1000BASEX: case MV88E6XXX_PORT_STS_CMODE_2500BASEX: err = mv88e6390_serdes_irq_status_sgmii(chip, lane, &status); if (err) - goto out; + return ret; if (status & (MV88E6390_SGMII_INT_LINK_DOWN | MV88E6390_SGMII_INT_LINK_UP)) { ret = IRQ_HANDLED; - mv88e6390_serdes_irq_link_sgmii(chip, port->port, lane); + mv88e6390_serdes_irq_link_sgmii(chip, port, lane); } } -out: - mv88e6xxx_reg_unlock(chip); return ret; } -int mv88e6390x_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port) -{ - int lane; - int err; - - lane = mv88e6390x_serdes_get_lane(chip, port); - - if (lane == -ENODEV) - return 0; - - if (lane < 0) - return lane; - - chip->ports[port].serdes_irq = irq_find_mapping(chip->g2_irq.domain, - port); - if (chip->ports[port].serdes_irq < 0) { - dev_err(chip->dev, "Unable to map SERDES irq: %d\n", - chip->ports[port].serdes_irq); - return chip->ports[port].serdes_irq; - } - - /* Requesting the IRQ will trigger irq callbacks. So we cannot - * hold the reg_lock. - */ - mv88e6xxx_reg_unlock(chip); - err = request_threaded_irq(chip->ports[port].serdes_irq, NULL, - mv88e6390_serdes_thread_fn, - IRQF_ONESHOT, "mv88e6xxx-serdes", - &chip->ports[port]); - mv88e6xxx_reg_lock(chip); - - if (err) { - dev_err(chip->dev, "Unable to request SERDES interrupt: %d\n", - err); - return err; - } - - return mv88e6390_serdes_irq_enable(chip, port, lane); -} - -int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port) -{ - if (port < 9) - return 0; - - return mv88e6390x_serdes_irq_setup(chip, port); -} - -void mv88e6390x_serdes_irq_free(struct mv88e6xxx_chip *chip, int port) -{ - int lane = mv88e6390x_serdes_get_lane(chip, port); - - if (lane == -ENODEV) - return; - - if (lane < 0) - return; - - mv88e6390_serdes_irq_disable(chip, port, lane); - - /* Freeing the IRQ will trigger irq callbacks. So we cannot - * hold the reg_lock. 
- */ - mv88e6xxx_reg_unlock(chip); - free_irq(chip->ports[port].serdes_irq, &chip->ports[port]); - mv88e6xxx_reg_lock(chip); - - chip->ports[port].serdes_irq = 0; -} - -void mv88e6390_serdes_irq_free(struct mv88e6xxx_chip *chip, int port) -{ - if (port < 9) - return; - - mv88e6390x_serdes_irq_free(chip, port); -} - -int mv88e6341_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on) +unsigned int mv88e6390_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port) { - u8 cmode = chip->ports[port].cmode; - - if (port != 5) - return 0; - - if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASE_X || - cmode == MV88E6XXX_PORT_STS_CMODE_SGMII || - cmode == MV88E6XXX_PORT_STS_CMODE_2500BASEX) - return mv88e6390_serdes_power_sgmii(chip, MV88E6341_ADDR_SERDES, - on); - - return 0; + return irq_find_mapping(chip->g2_irq.domain, port); } diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h index ff5b94439335..bd8df36ab537 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.h +++ b/drivers/net/dsa/mv88e6xxx/serdes.h @@ -28,7 +28,7 @@ #define MV88E6352_SERDES_INT_STATUS 0x13 -#define MV88E6341_ADDR_SERDES 0x15 +#define MV88E6341_PORT5_LANE 0x15 #define MV88E6390_PORT9_LANE0 0x09 #define MV88E6390_PORT9_LANE1 0x12 @@ -74,26 +74,94 @@ #define MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID BIT(11) #define MV88E6390_SGMII_PHY_STATUS_LINK BIT(10) -int mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port); -int mv88e6341_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on); -int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on); -int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on); -int mv88e6390x_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on); -int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port); -void mv88e6390_serdes_irq_free(struct mv88e6xxx_chip *chip, int port); -int mv88e6390x_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port); -void mv88e6390x_serdes_irq_free(struct mv88e6xxx_chip *chip, int port); +u8 mv88e6341_serdes_get_lane(struct mv88e6xxx_chip *chip, int port); +u8 mv88e6352_serdes_get_lane(struct mv88e6xxx_chip *chip, int port); +u8 mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port); +u8 mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port); +unsigned int mv88e6352_serdes_irq_mapping(struct mv88e6xxx_chip *chip, + int port); +unsigned int mv88e6390_serdes_irq_mapping(struct mv88e6xxx_chip *chip, + int port); +int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane, + bool on); +int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane, + bool on); +int mv88e6352_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, u8 lane, + bool enable); +int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, u8 lane, + bool enable); +irqreturn_t mv88e6352_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, + u8 lane); +irqreturn_t mv88e6390_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, + u8 lane); int mv88e6352_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port); int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip, int port, uint8_t *data); int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port, uint64_t *data); -int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, - int lane); -int mv88e6390_serdes_irq_disable(struct mv88e6xxx_chip *chip, int port, - int lane); -int mv88e6352_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port); -void 
mv88e6352_serdes_irq_free(struct mv88e6xxx_chip *chip, int port); +/* Return the (first) SERDES lane address a port is using, 0 otherwise. */ +static inline u8 mv88e6xxx_serdes_get_lane(struct mv88e6xxx_chip *chip, + int port) +{ + if (!chip->info->ops->serdes_get_lane) + return 0; + + return chip->info->ops->serdes_get_lane(chip, port); +} + +static inline int mv88e6xxx_serdes_power_up(struct mv88e6xxx_chip *chip, + int port, u8 lane) +{ + if (!chip->info->ops->serdes_power) + return -EOPNOTSUPP; + + return chip->info->ops->serdes_power(chip, port, lane, true); +} + +static inline int mv88e6xxx_serdes_power_down(struct mv88e6xxx_chip *chip, + int port, u8 lane) +{ + if (!chip->info->ops->serdes_power) + return -EOPNOTSUPP; + + return chip->info->ops->serdes_power(chip, port, lane, false); +} + +static inline unsigned int +mv88e6xxx_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port) +{ + if (!chip->info->ops->serdes_irq_mapping) + return 0; + + return chip->info->ops->serdes_irq_mapping(chip, port); +} + +static inline int mv88e6xxx_serdes_irq_enable(struct mv88e6xxx_chip *chip, + int port, u8 lane) +{ + if (!chip->info->ops->serdes_irq_enable) + return -EOPNOTSUPP; + + return chip->info->ops->serdes_irq_enable(chip, port, lane, true); +} + +static inline int mv88e6xxx_serdes_irq_disable(struct mv88e6xxx_chip *chip, + int port, u8 lane) +{ + if (!chip->info->ops->serdes_irq_enable) + return -EOPNOTSUPP; + + return chip->info->ops->serdes_irq_enable(chip, port, lane, false); +} + +static inline irqreturn_t +mv88e6xxx_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, u8 lane) +{ + if (!chip->info->ops->serdes_irq_status) + return IRQ_NONE; + + return chip->info->ops->serdes_irq_status(chip, port, lane); +} #endif diff --git a/drivers/net/dsa/mv88e6xxx/smi.c b/drivers/net/dsa/mv88e6xxx/smi.c index 5fc78a063843..282fe08db050 100644 --- a/drivers/net/dsa/mv88e6xxx/smi.c +++ b/drivers/net/dsa/mv88e6xxx/smi.c @@ -64,8 +64,10 @@ static int mv88e6xxx_smi_direct_wait(struct mv88e6xxx_chip *chip, if (err) return err; - if (!!(data >> bit) == !!val) + if (!!(data & BIT(bit)) == !!val) return 0; + + usleep_range(1000, 2000); } return -ETIMEDOUT; } diff --git a/drivers/net/dsa/sja1105/Kconfig b/drivers/net/dsa/sja1105/Kconfig index 770134a66e48..55424f39cb0d 100644 --- a/drivers/net/dsa/sja1105/Kconfig +++ b/drivers/net/dsa/sja1105/Kconfig @@ -23,3 +23,11 @@ config NET_DSA_SJA1105_PTP help This enables support for timestamping and PTP clock manipulations in the SJA1105 DSA driver. + +config NET_DSA_SJA1105_TAS + bool "Support for the Time-Aware Scheduler on NXP SJA1105" + depends on NET_DSA_SJA1105 + help + This enables support for the TTEthernet-based egress scheduling + engine in the SJA1105 DSA driver, which is controlled using a + hardware offload of the tc-taprio qdisc.
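The TAS option above is driven by the kernel's taprio offload interface: when a taprio qdisc is installed in full-offload mode, the core hands the driver a struct tc_taprio_qopt_offload (declared in <net/pkt_sched.h>) describing the gate schedule, and the driver must validate it against hardware limits before translating it into schedule-table entries. The sketch below is illustrative only, not the driver's code: the offload struct and its fields are the real kernel API of this era, but example_tas_validate() and its EXAMPLE_* limit macros are stand-ins that mirror the SJA1105_TAS_MAX_DELTA and SJA1105_GATE_MASK definitions from sja1105_tas.c further down, and the range check is done directly in nanoseconds here for simplicity (the real driver first converts each interval to hardware clock ticks).

#include <linux/bits.h>
#include <linux/errno.h>
#include <net/pkt_sched.h>

#define EXAMPLE_TAS_MAX_DELTA	BIT(19)		/* cf. SJA1105_TAS_MAX_DELTA */
#define EXAMPLE_TAS_GATE_MASK	GENMASK_ULL(7, 0)	/* cf. SJA1105_GATE_MASK, 8 TCs */

static int example_tas_validate(const struct tc_taprio_qopt_offload *admin)
{
	size_t i;

	/* enable == 0 means an existing schedule is being deleted */
	if (!admin->enable)
		return 0;

	for (i = 0; i < admin->num_entries; i++) {
		const struct tc_taprio_sched_entry *entry = &admin->entries[i];

		/* Time-aware shapers of this kind only implement "set gates" */
		if (entry->command != TC_TAPRIO_CMD_SET_GATES)
			return -EOPNOTSUPP;

		/* Each gate interval must fit the schedule's delta field */
		if (entry->interval >= EXAMPLE_TAS_MAX_DELTA)
			return -ERANGE;

		/* Gates may only address the advertised traffic classes */
		if (entry->gate_mask & ~EXAMPLE_TAS_GATE_MASK)
			return -EINVAL;
	}

	return 0;
}

By the time such a callback runs, all netlink parsing has already been done by the taprio core; the driver only ever sees the pre-digested offload structure, which is why the validation above reduces to simple range and mask checks.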
diff --git a/drivers/net/dsa/sja1105/Makefile b/drivers/net/dsa/sja1105/Makefile index 4483113e6259..66161e874344 100644 --- a/drivers/net/dsa/sja1105/Makefile +++ b/drivers/net/dsa/sja1105/Makefile @@ -12,3 +12,7 @@ sja1105-objs := \ ifdef CONFIG_NET_DSA_SJA1105_PTP sja1105-objs += sja1105_ptp.o endif + +ifdef CONFIG_NET_DSA_SJA1105_TAS +sja1105-objs += sja1105_tas.o +endif diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h index 78094db32622..e53e494c22e0 100644 --- a/drivers/net/dsa/sja1105/sja1105.h +++ b/drivers/net/dsa/sja1105/sja1105.h @@ -20,6 +20,8 @@ */ #define SJA1105_AGEING_TIME_MS(ms) ((ms) / 10) +#include "sja1105_tas.h" + /* Keeps the different addresses between E/T and P/Q/R/S */ struct sja1105_regs { u64 device_id; @@ -104,6 +106,7 @@ struct sja1105_private { */ struct mutex mgmt_lock; struct sja1105_tagger_data tagger_data; + struct sja1105_tas_data tas_data; }; #include "sja1105_dynamic_config.h" @@ -120,6 +123,9 @@ typedef enum { SPI_WRITE = 1, } sja1105_spi_rw_mode_t; +/* From sja1105_main.c */ +int sja1105_static_config_reload(struct sja1105_private *priv); + /* From sja1105_spi.c */ int sja1105_spi_send_packed_buf(const struct sja1105_private *priv, sja1105_spi_rw_mode_t rw, u64 reg_addr, diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c index 9988c9d18567..91da430045ff 100644 --- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c +++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c @@ -488,6 +488,8 @@ sja1105et_general_params_entry_packing(void *buf, void *entry_ptr, /* SJA1105E/T: First generation */ struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = { + [BLK_IDX_SCHEDULE] = {0}, + [BLK_IDX_SCHEDULE_ENTRY_POINTS] = {0}, [BLK_IDX_L2_LOOKUP] = { .entry_packing = sja1105et_dyn_l2_lookup_entry_packing, .cmd_packing = sja1105et_l2_lookup_cmd_packing, @@ -529,6 +531,8 @@ struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = { .packed_size = SJA1105ET_SIZE_MAC_CONFIG_DYN_CMD, .addr = 0x36, }, + [BLK_IDX_SCHEDULE_PARAMS] = {0}, + [BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {0}, [BLK_IDX_L2_LOOKUP_PARAMS] = { .entry_packing = sja1105et_l2_lookup_params_entry_packing, .cmd_packing = sja1105et_l2_lookup_params_cmd_packing, @@ -552,6 +556,8 @@ struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = { /* SJA1105P/Q/R/S: Second generation */ struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = { + [BLK_IDX_SCHEDULE] = {0}, + [BLK_IDX_SCHEDULE_ENTRY_POINTS] = {0}, [BLK_IDX_L2_LOOKUP] = { .entry_packing = sja1105pqrs_dyn_l2_lookup_entry_packing, .cmd_packing = sja1105pqrs_l2_lookup_cmd_packing, @@ -593,6 +599,8 @@ struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = { .packed_size = SJA1105PQRS_SIZE_MAC_CONFIG_DYN_CMD, .addr = 0x4B, }, + [BLK_IDX_SCHEDULE_PARAMS] = {0}, + [BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {0}, [BLK_IDX_L2_LOOKUP_PARAMS] = { .entry_packing = sja1105et_l2_lookup_params_entry_packing, .cmd_packing = sja1105et_l2_lookup_params_cmd_packing, diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index df976b259e43..b9def744bcb3 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -22,6 +22,7 @@ #include <linux/if_ether.h> #include <linux/dsa/8021q.h> #include "sja1105.h" +#include "sja1105_tas.h" static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len, unsigned int startup_delay) @@ 
-384,7 +385,9 @@ static int sja1105_init_general_params(struct sja1105_private *priv) /* Disallow dynamic changing of the mirror port */ .mirr_ptacu = 0, .switchid = priv->ds->index, - /* Priority queue for link-local frames trapped to CPU */ + /* Priority queue for link-local management frames + * (both ingress to and egress from CPU - PTP, STP etc) + */ .hostprio = 7, .mac_fltres1 = SJA1105_LINKLOCAL_FILTER_A, .mac_flt1 = SJA1105_LINKLOCAL_FILTER_A_MASK, @@ -1380,7 +1383,7 @@ static void sja1105_bridge_leave(struct dsa_switch *ds, int port, * modify at runtime (currently only MAC) and restore them after uploading, * such that this operation is relatively seamless. */ -static int sja1105_static_config_reload(struct sja1105_private *priv) +int sja1105_static_config_reload(struct sja1105_private *priv) { struct sja1105_mac_config_entry *mac; int speed_mbps[SJA1105_NUM_PORTS]; @@ -1711,6 +1714,9 @@ static int sja1105_setup(struct dsa_switch *ds) */ ds->vlan_filtering_is_global = true; + /* Advertise the 8 egress queues */ + ds->num_tx_queues = SJA1105_NUM_TC; + /* The DSA/switchdev model brings up switch ports in standalone mode by * default, and that means vlan_filtering is 0 since they're not under * a bridge, so it's safe to set up switch tagging at this time. @@ -1722,12 +1728,28 @@ static void sja1105_teardown(struct dsa_switch *ds) { struct sja1105_private *priv = ds->priv; + sja1105_tas_teardown(ds); cancel_work_sync(&priv->tagger_data.rxtstamp_work); skb_queue_purge(&priv->tagger_data.skb_rxtstamp_queue); sja1105_ptp_clock_unregister(priv); sja1105_static_config_free(&priv->static_config); } +static int sja1105_port_enable(struct dsa_switch *ds, int port, + struct phy_device *phy) +{ + struct net_device *slave; + + if (!dsa_is_user_port(ds, port)) + return 0; + + slave = ds->ports[port].slave; + + slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; + + return 0; +} + static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot, struct sk_buff *skb, bool takets) { @@ -2036,6 +2058,18 @@ static bool sja1105_port_txtstamp(struct dsa_switch *ds, int port, return true; } +static int sja1105_port_setup_tc(struct dsa_switch *ds, int port, + enum tc_setup_type type, + void *type_data) +{ + switch (type) { + case TC_SETUP_QDISC_TAPRIO: + return sja1105_setup_tc_taprio(ds, port, type_data); + default: + return -EOPNOTSUPP; + } +} + static const struct dsa_switch_ops sja1105_switch_ops = { .get_tag_protocol = sja1105_get_tag_protocol, .setup = sja1105_setup, @@ -2049,6 +2083,7 @@ static const struct dsa_switch_ops sja1105_switch_ops = { .get_ethtool_stats = sja1105_get_ethtool_stats, .get_sset_count = sja1105_get_sset_count, .get_ts_info = sja1105_get_ts_info, + .port_enable = sja1105_port_enable, .port_fdb_dump = sja1105_fdb_dump, .port_fdb_add = sja1105_fdb_add, .port_fdb_del = sja1105_fdb_del, @@ -2067,6 +2102,7 @@ static const struct dsa_switch_ops sja1105_switch_ops = { .port_hwtstamp_set = sja1105_hwtstamp_set, .port_rxtstamp = sja1105_port_rxtstamp, .port_txtstamp = sja1105_port_txtstamp, + .port_setup_tc = sja1105_port_setup_tc, }; static int sja1105_check_device_id(struct sja1105_private *priv) @@ -2176,6 +2212,8 @@ static int sja1105_probe(struct spi_device *spi) } mutex_init(&priv->mgmt_lock); + sja1105_tas_setup(ds); + return dsa_register_switch(priv->ds); } diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c b/drivers/net/dsa/sja1105/sja1105_static_config.c index b31c737dc560..0d03e13e9909 100644 --- a/drivers/net/dsa/sja1105/sja1105_static_config.c +++ 
b/drivers/net/dsa/sja1105/sja1105_static_config.c @@ -371,6 +371,63 @@ size_t sja1105pqrs_mac_config_entry_packing(void *buf, void *entry_ptr, return size; } +static size_t +sja1105_schedule_entry_points_params_entry_packing(void *buf, void *entry_ptr, + enum packing_op op) +{ + struct sja1105_schedule_entry_points_params_entry *entry = entry_ptr; + const size_t size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY; + + sja1105_packing(buf, &entry->clksrc, 31, 30, size, op); + sja1105_packing(buf, &entry->actsubsch, 29, 27, size, op); + return size; +} + +static size_t +sja1105_schedule_entry_points_entry_packing(void *buf, void *entry_ptr, + enum packing_op op) +{ + struct sja1105_schedule_entry_points_entry *entry = entry_ptr; + const size_t size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY; + + sja1105_packing(buf, &entry->subschindx, 31, 29, size, op); + sja1105_packing(buf, &entry->delta, 28, 11, size, op); + sja1105_packing(buf, &entry->address, 10, 1, size, op); + return size; +} + +static size_t sja1105_schedule_params_entry_packing(void *buf, void *entry_ptr, + enum packing_op op) +{ + const size_t size = SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY; + struct sja1105_schedule_params_entry *entry = entry_ptr; + int offset, i; + + for (i = 0, offset = 16; i < 8; i++, offset += 10) + sja1105_packing(buf, &entry->subscheind[i], + offset + 9, offset + 0, size, op); + return size; +} + +static size_t sja1105_schedule_entry_packing(void *buf, void *entry_ptr, + enum packing_op op) +{ + const size_t size = SJA1105_SIZE_SCHEDULE_ENTRY; + struct sja1105_schedule_entry *entry = entry_ptr; + + sja1105_packing(buf, &entry->winstindex, 63, 54, size, op); + sja1105_packing(buf, &entry->winend, 53, 53, size, op); + sja1105_packing(buf, &entry->winst, 52, 52, size, op); + sja1105_packing(buf, &entry->destports, 51, 47, size, op); + sja1105_packing(buf, &entry->setvalid, 46, 46, size, op); + sja1105_packing(buf, &entry->txen, 45, 45, size, op); + sja1105_packing(buf, &entry->resmedia_en, 44, 44, size, op); + sja1105_packing(buf, &entry->resmedia, 43, 36, size, op); + sja1105_packing(buf, &entry->vlindex, 35, 26, size, op); + sja1105_packing(buf, &entry->delta, 25, 8, size, op); + return size; +} + size_t sja1105_vlan_lookup_entry_packing(void *buf, void *entry_ptr, enum packing_op op) { @@ -447,11 +504,15 @@ static void sja1105_table_write_crc(u8 *table_start, u8 *crc_ptr) * before blindly indexing kernel memory with the blk_idx. 
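* Since BLKID_SCHEDULE is 0x00, an enum sja1105_blk_idx value accidentally * left out of the map below would silently alias the schedule table, so * the map has to be extended in lockstep with the enum.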
*/ static u64 blk_id_map[BLK_IDX_MAX] = { + [BLK_IDX_SCHEDULE] = BLKID_SCHEDULE, + [BLK_IDX_SCHEDULE_ENTRY_POINTS] = BLKID_SCHEDULE_ENTRY_POINTS, [BLK_IDX_L2_LOOKUP] = BLKID_L2_LOOKUP, [BLK_IDX_L2_POLICING] = BLKID_L2_POLICING, [BLK_IDX_VLAN_LOOKUP] = BLKID_VLAN_LOOKUP, [BLK_IDX_L2_FORWARDING] = BLKID_L2_FORWARDING, [BLK_IDX_MAC_CONFIG] = BLKID_MAC_CONFIG, + [BLK_IDX_SCHEDULE_PARAMS] = BLKID_SCHEDULE_PARAMS, + [BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = BLKID_SCHEDULE_ENTRY_POINTS_PARAMS, [BLK_IDX_L2_LOOKUP_PARAMS] = BLKID_L2_LOOKUP_PARAMS, [BLK_IDX_L2_FORWARDING_PARAMS] = BLKID_L2_FORWARDING_PARAMS, [BLK_IDX_AVB_PARAMS] = BLKID_AVB_PARAMS, @@ -461,6 +522,13 @@ static u64 blk_id_map[BLK_IDX_MAX] = { const char *sja1105_static_config_error_msg[] = { [SJA1105_CONFIG_OK] = "", + [SJA1105_TTETHERNET_NOT_SUPPORTED] = + "schedule-table present, but TTEthernet is " + "only supported on T and Q/S", + [SJA1105_INCORRECT_TTETHERNET_CONFIGURATION] = + "schedule-table present, but one of " + "schedule-entry-points-table, schedule-parameters-table or " + "schedule-entry-points-parameters table is empty", [SJA1105_MISSING_L2_POLICING_TABLE] = "l2-policing-table needs to have at least one entry", [SJA1105_MISSING_L2_FORWARDING_TABLE] = @@ -508,6 +576,21 @@ sja1105_static_config_check_valid(const struct sja1105_static_config *config) #define IS_FULL(blk_idx) \ (tables[blk_idx].entry_count == tables[blk_idx].ops->max_entry_count) + if (tables[BLK_IDX_SCHEDULE].entry_count) { + if (config->device_id != SJA1105T_DEVICE_ID && + config->device_id != SJA1105QS_DEVICE_ID) + return SJA1105_TTETHERNET_NOT_SUPPORTED; + + if (tables[BLK_IDX_SCHEDULE_ENTRY_POINTS].entry_count == 0) + return SJA1105_INCORRECT_TTETHERNET_CONFIGURATION; + + if (!IS_FULL(BLK_IDX_SCHEDULE_PARAMS)) + return SJA1105_INCORRECT_TTETHERNET_CONFIGURATION; + + if (!IS_FULL(BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS)) + return SJA1105_INCORRECT_TTETHERNET_CONFIGURATION; + } + if (tables[BLK_IDX_L2_POLICING].entry_count == 0) return SJA1105_MISSING_L2_POLICING_TABLE; @@ -614,6 +697,8 @@ sja1105_static_config_get_length(const struct sja1105_static_config *config) /* SJA1105E: First generation, no TTEthernet */ struct sja1105_table_ops sja1105e_table_ops[BLK_IDX_MAX] = { + [BLK_IDX_SCHEDULE] = {0}, + [BLK_IDX_SCHEDULE_ENTRY_POINTS] = {0}, [BLK_IDX_L2_LOOKUP] = { .packing = sja1105et_l2_lookup_entry_packing, .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry), @@ -644,6 +729,8 @@ struct sja1105_table_ops sja1105e_table_ops[BLK_IDX_MAX] = { .packed_entry_size = SJA1105ET_SIZE_MAC_CONFIG_ENTRY, .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT, }, + [BLK_IDX_SCHEDULE_PARAMS] = {0}, + [BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {0}, [BLK_IDX_L2_LOOKUP_PARAMS] = { .packing = sja1105et_l2_lookup_params_entry_packing, .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry), @@ -678,6 +765,18 @@ struct sja1105_table_ops sja1105e_table_ops[BLK_IDX_MAX] = { /* SJA1105T: First generation, TTEthernet */ struct sja1105_table_ops sja1105t_table_ops[BLK_IDX_MAX] = { + [BLK_IDX_SCHEDULE] = { + .packing = sja1105_schedule_entry_packing, + .unpacked_entry_size = sizeof(struct sja1105_schedule_entry), + .packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY, + .max_entry_count = SJA1105_MAX_SCHEDULE_COUNT, + }, + [BLK_IDX_SCHEDULE_ENTRY_POINTS] = { + .packing = sja1105_schedule_entry_points_entry_packing, + .unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_entry), + .packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY, + .max_entry_count = 
SJA1105_MAX_SCHEDULE_ENTRY_POINTS_COUNT, + }, [BLK_IDX_L2_LOOKUP] = { .packing = sja1105et_l2_lookup_entry_packing, .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry), @@ -708,6 +807,18 @@ struct sja1105_table_ops sja1105t_table_ops[BLK_IDX_MAX] = { .packed_entry_size = SJA1105ET_SIZE_MAC_CONFIG_ENTRY, .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT, }, + [BLK_IDX_SCHEDULE_PARAMS] = { + .packing = sja1105_schedule_params_entry_packing, + .unpacked_entry_size = sizeof(struct sja1105_schedule_params_entry), + .packed_entry_size = SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY, + .max_entry_count = SJA1105_MAX_SCHEDULE_PARAMS_COUNT, + }, + [BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = { + .packing = sja1105_schedule_entry_points_params_entry_packing, + .unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_params_entry), + .packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY, + .max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT, + }, [BLK_IDX_L2_LOOKUP_PARAMS] = { .packing = sja1105et_l2_lookup_params_entry_packing, .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry), @@ -742,6 +853,8 @@ struct sja1105_table_ops sja1105t_table_ops[BLK_IDX_MAX] = { /* SJA1105P: Second generation, no TTEthernet, no SGMII */ struct sja1105_table_ops sja1105p_table_ops[BLK_IDX_MAX] = { + [BLK_IDX_SCHEDULE] = {0}, + [BLK_IDX_SCHEDULE_ENTRY_POINTS] = {0}, [BLK_IDX_L2_LOOKUP] = { .packing = sja1105pqrs_l2_lookup_entry_packing, .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry), @@ -772,6 +885,8 @@ struct sja1105_table_ops sja1105p_table_ops[BLK_IDX_MAX] = { .packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY, .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT, }, + [BLK_IDX_SCHEDULE_PARAMS] = {0}, + [BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {0}, [BLK_IDX_L2_LOOKUP_PARAMS] = { .packing = sja1105pqrs_l2_lookup_params_entry_packing, .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry), @@ -806,6 +921,18 @@ struct sja1105_table_ops sja1105p_table_ops[BLK_IDX_MAX] = { /* SJA1105Q: Second generation, TTEthernet, no SGMII */ struct sja1105_table_ops sja1105q_table_ops[BLK_IDX_MAX] = { + [BLK_IDX_SCHEDULE] = { + .packing = sja1105_schedule_entry_packing, + .unpacked_entry_size = sizeof(struct sja1105_schedule_entry), + .packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY, + .max_entry_count = SJA1105_MAX_SCHEDULE_COUNT, + }, + [BLK_IDX_SCHEDULE_ENTRY_POINTS] = { + .packing = sja1105_schedule_entry_points_entry_packing, + .unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_entry), + .packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY, + .max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_COUNT, + }, [BLK_IDX_L2_LOOKUP] = { .packing = sja1105pqrs_l2_lookup_entry_packing, .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry), @@ -836,6 +963,18 @@ struct sja1105_table_ops sja1105q_table_ops[BLK_IDX_MAX] = { .packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY, .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT, }, + [BLK_IDX_SCHEDULE_PARAMS] = { + .packing = sja1105_schedule_params_entry_packing, + .unpacked_entry_size = sizeof(struct sja1105_schedule_params_entry), + .packed_entry_size = SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY, + .max_entry_count = SJA1105_MAX_SCHEDULE_PARAMS_COUNT, + }, + [BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = { + .packing = sja1105_schedule_entry_points_params_entry_packing, + .unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_params_entry), + .packed_entry_size = 
SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY, + .max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT, + }, [BLK_IDX_L2_LOOKUP_PARAMS] = { .packing = sja1105pqrs_l2_lookup_params_entry_packing, .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry), @@ -870,6 +1009,8 @@ struct sja1105_table_ops sja1105q_table_ops[BLK_IDX_MAX] = { /* SJA1105R: Second generation, no TTEthernet, SGMII */ struct sja1105_table_ops sja1105r_table_ops[BLK_IDX_MAX] = { + [BLK_IDX_SCHEDULE] = {0}, + [BLK_IDX_SCHEDULE_ENTRY_POINTS] = {0}, [BLK_IDX_L2_LOOKUP] = { .packing = sja1105pqrs_l2_lookup_entry_packing, .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry), @@ -900,6 +1041,8 @@ struct sja1105_table_ops sja1105r_table_ops[BLK_IDX_MAX] = { .packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY, .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT, }, + [BLK_IDX_SCHEDULE_PARAMS] = {0}, + [BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {0}, [BLK_IDX_L2_LOOKUP_PARAMS] = { .packing = sja1105pqrs_l2_lookup_params_entry_packing, .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry), @@ -934,6 +1077,18 @@ struct sja1105_table_ops sja1105r_table_ops[BLK_IDX_MAX] = { /* SJA1105S: Second generation, TTEthernet, SGMII */ struct sja1105_table_ops sja1105s_table_ops[BLK_IDX_MAX] = { + [BLK_IDX_SCHEDULE] = { + .packing = sja1105_schedule_entry_packing, + .unpacked_entry_size = sizeof(struct sja1105_schedule_entry), + .packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY, + .max_entry_count = SJA1105_MAX_SCHEDULE_COUNT, + }, + [BLK_IDX_SCHEDULE_ENTRY_POINTS] = { + .packing = sja1105_schedule_entry_points_entry_packing, + .unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_entry), + .packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY, + .max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_COUNT, + }, [BLK_IDX_L2_LOOKUP] = { .packing = sja1105pqrs_l2_lookup_entry_packing, .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry), @@ -964,6 +1119,18 @@ struct sja1105_table_ops sja1105s_table_ops[BLK_IDX_MAX] = { .packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY, .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT, }, + [BLK_IDX_SCHEDULE_PARAMS] = { + .packing = sja1105_schedule_params_entry_packing, + .unpacked_entry_size = sizeof(struct sja1105_schedule_params_entry), + .packed_entry_size = SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY, + .max_entry_count = SJA1105_MAX_SCHEDULE_PARAMS_COUNT, + }, + [BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = { + .packing = sja1105_schedule_entry_points_params_entry_packing, + .unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_params_entry), + .packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY, + .max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT, + }, [BLK_IDX_L2_LOOKUP_PARAMS] = { .packing = sja1105pqrs_l2_lookup_params_entry_packing, .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry), diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.h b/drivers/net/dsa/sja1105/sja1105_static_config.h index 684465fc0882..7f87022a2d61 100644 --- a/drivers/net/dsa/sja1105/sja1105_static_config.h +++ b/drivers/net/dsa/sja1105/sja1105_static_config.h @@ -11,11 +11,15 @@ #define SJA1105_SIZE_DEVICE_ID 4 #define SJA1105_SIZE_TABLE_HEADER 12 +#define SJA1105_SIZE_SCHEDULE_ENTRY 8 +#define SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY 4 #define SJA1105_SIZE_L2_POLICING_ENTRY 8 #define SJA1105_SIZE_VLAN_LOOKUP_ENTRY 8 #define SJA1105_SIZE_L2_FORWARDING_ENTRY 8 #define 
SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY 12 #define SJA1105_SIZE_XMII_PARAMS_ENTRY 4 +#define SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY 12 +#define SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY 4 #define SJA1105ET_SIZE_L2_LOOKUP_ENTRY 12 #define SJA1105ET_SIZE_MAC_CONFIG_ENTRY 28 #define SJA1105ET_SIZE_L2_LOOKUP_PARAMS_ENTRY 4 @@ -29,11 +33,15 @@ /* UM10944.pdf Page 11, Table 2. Configuration Blocks */ enum { + BLKID_SCHEDULE = 0x00, + BLKID_SCHEDULE_ENTRY_POINTS = 0x01, BLKID_L2_LOOKUP = 0x05, BLKID_L2_POLICING = 0x06, BLKID_VLAN_LOOKUP = 0x07, BLKID_L2_FORWARDING = 0x08, BLKID_MAC_CONFIG = 0x09, + BLKID_SCHEDULE_PARAMS = 0x0A, + BLKID_SCHEDULE_ENTRY_POINTS_PARAMS = 0x0B, BLKID_L2_LOOKUP_PARAMS = 0x0D, BLKID_L2_FORWARDING_PARAMS = 0x0E, BLKID_AVB_PARAMS = 0x10, @@ -42,11 +50,15 @@ enum { }; enum sja1105_blk_idx { - BLK_IDX_L2_LOOKUP = 0, + BLK_IDX_SCHEDULE = 0, + BLK_IDX_SCHEDULE_ENTRY_POINTS, + BLK_IDX_L2_LOOKUP, BLK_IDX_L2_POLICING, BLK_IDX_VLAN_LOOKUP, BLK_IDX_L2_FORWARDING, BLK_IDX_MAC_CONFIG, + BLK_IDX_SCHEDULE_PARAMS, + BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS, BLK_IDX_L2_LOOKUP_PARAMS, BLK_IDX_L2_FORWARDING_PARAMS, BLK_IDX_AVB_PARAMS, @@ -59,11 +71,15 @@ enum sja1105_blk_idx { BLK_IDX_INVAL = -1, }; +#define SJA1105_MAX_SCHEDULE_COUNT 1024 +#define SJA1105_MAX_SCHEDULE_ENTRY_POINTS_COUNT 2048 #define SJA1105_MAX_L2_LOOKUP_COUNT 1024 #define SJA1105_MAX_L2_POLICING_COUNT 45 #define SJA1105_MAX_VLAN_LOOKUP_COUNT 4096 #define SJA1105_MAX_L2_FORWARDING_COUNT 13 #define SJA1105_MAX_MAC_CONFIG_COUNT 5 +#define SJA1105_MAX_SCHEDULE_PARAMS_COUNT 1 +#define SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT 1 #define SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT 1 #define SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT 1 #define SJA1105_MAX_GENERAL_PARAMS_COUNT 1 @@ -83,6 +99,23 @@ enum sja1105_blk_idx { #define SJA1105R_PART_NO 0x9A86 #define SJA1105S_PART_NO 0x9A87 +struct sja1105_schedule_entry { + u64 winstindex; + u64 winend; + u64 winst; + u64 destports; + u64 setvalid; + u64 txen; + u64 resmedia_en; + u64 resmedia; + u64 vlindex; + u64 delta; +}; + +struct sja1105_schedule_params_entry { + u64 subscheind[8]; +}; + struct sja1105_general_params_entry { u64 vllupformat; u64 mirr_ptacu; @@ -112,6 +145,17 @@ struct sja1105_general_params_entry { u64 replay_port; }; +struct sja1105_schedule_entry_points_entry { + u64 subschindx; + u64 delta; + u64 address; +}; + +struct sja1105_schedule_entry_points_params_entry { + u64 clksrc; + u64 actsubsch; +}; + struct sja1105_vlan_lookup_entry { u64 ving_mirr; u64 vegr_mirr; @@ -256,6 +300,8 @@ sja1105_static_config_get_length(const struct sja1105_static_config *config); typedef enum { SJA1105_CONFIG_OK = 0, + SJA1105_TTETHERNET_NOT_SUPPORTED, + SJA1105_INCORRECT_TTETHERNET_CONFIGURATION, SJA1105_MISSING_L2_POLICING_TABLE, SJA1105_MISSING_L2_FORWARDING_TABLE, SJA1105_MISSING_L2_FORWARDING_PARAMS_TABLE, diff --git a/drivers/net/dsa/sja1105/sja1105_tas.c b/drivers/net/dsa/sja1105/sja1105_tas.c new file mode 100644 index 000000000000..33eca6a82ec5 --- /dev/null +++ b/drivers/net/dsa/sja1105/sja1105_tas.c @@ -0,0 +1,423 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com> + */ +#include "sja1105.h" + +#define SJA1105_TAS_CLKSRC_DISABLED 0 +#define SJA1105_TAS_CLKSRC_STANDALONE 1 +#define SJA1105_TAS_CLKSRC_AS6802 2 +#define SJA1105_TAS_CLKSRC_PTP 3 +#define SJA1105_TAS_MAX_DELTA BIT(19) +#define SJA1105_GATE_MASK GENMASK_ULL(SJA1105_NUM_TC - 1, 0) + +/* This is not a preprocessor macro because the "ns" argument may or may not be + * s64 at 
caller side. This ensures it is properly type-cast before div_s64. + */ +static s64 ns_to_sja1105_delta(s64 ns) +{ + return div_s64(ns, 200); +} + +/* Lo and behold: the egress scheduler from hell. + * + * At the hardware level, the Time-Aware Shaper holds a global linear array of + * all schedule entries for all ports. These are the Gate Control List (GCL) + * entries, let's call them "timeslots" for short. This linear array of + * timeslots is held in BLK_IDX_SCHEDULE. + * + * Then there are a maximum of 8 "execution threads" inside the switch, which + * iterate cyclically through the "schedule". Each "cycle" has an entry point + * and an exit point, both being timeslot indices in the schedule table. The + * hardware calls each cycle a "subschedule". + * + * Subschedule (cycle) i starts when + * ptpclkval >= ptpschtm + BLK_IDX_SCHEDULE_ENTRY_POINTS[i].delta. + * + * The hardware scheduler iterates BLK_IDX_SCHEDULE with a k ranging from + * k = BLK_IDX_SCHEDULE_ENTRY_POINTS[i].address to + * k = BLK_IDX_SCHEDULE_PARAMS.subscheind[i] + * + * For each schedule entry (timeslot) k, the engine executes the gate control + * list entry for the duration of BLK_IDX_SCHEDULE[k].delta. + * + * +---------+ + * | | BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS + * +---------+ + * | + * +-----------------+ + * | .actsubsch + * BLK_IDX_SCHEDULE_ENTRY_POINTS v + * +-------+-------+ + * |cycle 0|cycle 1| + * +-------+-------+ + * | | | | + * +----------------+ | | +-------------------------------------+ + * | .subschindx | | .subschindx | + * | | +---------------+ | + * | .address | .address | | + * | | | | + * | | | | + * | BLK_IDX_SCHEDULE v v | + * | +-------+-------+-------+-------+-------+------+ | + * | |entry 0|entry 1|entry 2|entry 3|entry 4|entry5| | + * | +-------+-------+-------+-------+-------+------+ | + * | ^ ^ ^ ^ | + * | | | | | | + * | +-------------------------+ | | | | + * | | +-------------------------------+ | | | + * | | | +-------------------+ | | + * | | | | | | + * | +---------------------------------------------------------------+ | + * | |subscheind[0]<=subscheind[1]<=subscheind[2]<=...<=subscheind[7]| | + * | +---------------------------------------------------------------+ | + * | ^ ^ BLK_IDX_SCHEDULE_PARAMS | + * | | | | + * +--------+ +-------------------------------------------+ + * + * In the above picture there are two subschedules (cycles): + * + * - cycle 0: iterates the schedule table from 0 to 2 (and back) + * - cycle 1: iterates the schedule table from 3 to 5 (and back) + * + * All other possible execution threads must be marked as unused by making + * their "subschedule end index" (subscheind) equal to the last valid + * subschedule's end index (in this case 5).
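
To make the diagram concrete, here is a minimal sketch of what the sja1105_init_scheduling() routine below ends up writing for the two pictured cycles. The struct and field names are from this patch; the literal indices are simply read off the picture (and .delta = 1 mirrors the standalone-clock placeholder used by the code):

    /* Cycle 0 covers schedule entries 0..2, cycle 1 covers 3..5. */
    struct sja1105_schedule_entry_points_entry ep[2] = {
        { .subschindx = 0, .delta = 1, .address = 0 },
        { .subschindx = 1, .delta = 1, .address = 3 },
    };

    /* End indices must be monotonically increasing, so the unused
     * execution threads (2..7) repeat the last valid end index, 5.
     */
    struct sja1105_schedule_params_entry params = {
        .subscheind = { 2, 5, 5, 5, 5, 5, 5, 5 },
    };
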
+ */ +static int sja1105_init_scheduling(struct sja1105_private *priv) +{ + struct sja1105_schedule_entry_points_entry *schedule_entry_points; + struct sja1105_schedule_entry_points_params_entry + *schedule_entry_points_params; + struct sja1105_schedule_params_entry *schedule_params; + struct sja1105_tas_data *tas_data = &priv->tas_data; + struct sja1105_schedule_entry *schedule; + struct sja1105_table *table; + int schedule_start_idx; + s64 entry_point_delta; + int schedule_end_idx; + int num_entries = 0; + int num_cycles = 0; + int cycle = 0; + int i, k = 0; + int port; + + /* Discard previous Schedule Table */ + table = &priv->static_config.tables[BLK_IDX_SCHEDULE]; + if (table->entry_count) { + kfree(table->entries); + table->entry_count = 0; + } + + /* Discard previous Schedule Entry Points Parameters Table */ + table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS]; + if (table->entry_count) { + kfree(table->entries); + table->entry_count = 0; + } + + /* Discard previous Schedule Parameters Table */ + table = &priv->static_config.tables[BLK_IDX_SCHEDULE_PARAMS]; + if (table->entry_count) { + kfree(table->entries); + table->entry_count = 0; + } + + /* Discard previous Schedule Entry Points Table */ + table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS]; + if (table->entry_count) { + kfree(table->entries); + table->entry_count = 0; + } + + /* Figure out the dimensioning of the problem */ + for (port = 0; port < SJA1105_NUM_PORTS; port++) { + if (tas_data->offload[port]) { + num_entries += tas_data->offload[port]->num_entries; + num_cycles++; + } + } + + /* Nothing to do */ + if (!num_cycles) + return 0; + + /* Pre-allocate space in the static config tables */ + + /* Schedule Table */ + table = &priv->static_config.tables[BLK_IDX_SCHEDULE]; + table->entries = kcalloc(num_entries, table->ops->unpacked_entry_size, + GFP_KERNEL); + if (!table->entries) + return -ENOMEM; + table->entry_count = num_entries; + schedule = table->entries; + + /* Schedule Points Parameters Table */ + table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS]; + table->entries = kcalloc(SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT, + table->ops->unpacked_entry_size, GFP_KERNEL); + if (!table->entries) + /* Previously allocated memory will be freed automatically in + * sja1105_static_config_free. This is true for all early + * returns below. 
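
The early-return pattern above works because a table's entry_count is only set once its entries allocation has succeeded. A hedged sketch of the cleanup this relies on (the real sja1105_static_config_free() lives elsewhere in the driver; this is its assumed shape, not part of this hunk):

    void sja1105_static_config_free(struct sja1105_static_config *config)
    {
        int i;

        /* Safe on partially built configs: tables that never got
         * their entries allocated still have entry_count == 0.
         */
        for (i = 0; i < BLK_IDX_MAX; i++) {
            if (config->tables[i].entry_count) {
                kfree(config->tables[i].entries);
                config->tables[i].entry_count = 0;
            }
        }
    }
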
+ */ + return -ENOMEM; + table->entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT; + schedule_entry_points_params = table->entries; + + /* Schedule Parameters Table */ + table = &priv->static_config.tables[BLK_IDX_SCHEDULE_PARAMS]; + table->entries = kcalloc(SJA1105_MAX_SCHEDULE_PARAMS_COUNT, + table->ops->unpacked_entry_size, GFP_KERNEL); + if (!table->entries) + return -ENOMEM; + table->entry_count = SJA1105_MAX_SCHEDULE_PARAMS_COUNT; + schedule_params = table->entries; + + /* Schedule Entry Points Table */ + table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS]; + table->entries = kcalloc(num_cycles, table->ops->unpacked_entry_size, + GFP_KERNEL); + if (!table->entries) + return -ENOMEM; + table->entry_count = num_cycles; + schedule_entry_points = table->entries; + + /* Finally start populating the static config tables */ + schedule_entry_points_params->clksrc = SJA1105_TAS_CLKSRC_STANDALONE; + schedule_entry_points_params->actsubsch = num_cycles - 1; + + for (port = 0; port < SJA1105_NUM_PORTS; port++) { + const struct tc_taprio_qopt_offload *offload; + + offload = tas_data->offload[port]; + if (!offload) + continue; + + schedule_start_idx = k; + schedule_end_idx = k + offload->num_entries - 1; + /* TODO this is the base time for the port's subschedule, + * relative to PTPSCHTM. But as we're using the standalone + * clock source and not PTP clock as time reference, there's + * little point in even trying to put more logic into this, + * like preserving the phases between the subschedules of + * different ports. We'll get all of that when switching to the + * PTP clock source. + */ + entry_point_delta = 1; + + schedule_entry_points[cycle].subschindx = cycle; + schedule_entry_points[cycle].delta = entry_point_delta; + schedule_entry_points[cycle].address = schedule_start_idx; + + /* The subschedule end indices need to be + * monotonically increasing. + */ + for (i = cycle; i < 8; i++) + schedule_params->subscheind[i] = schedule_end_idx; + + for (i = 0; i < offload->num_entries; i++, k++) { + s64 delta_ns = offload->entries[i].interval; + + schedule[k].delta = ns_to_sja1105_delta(delta_ns); + schedule[k].destports = BIT(port); + schedule[k].resmedia_en = true; + schedule[k].resmedia = SJA1105_GATE_MASK & + ~offload->entries[i].gate_mask; + } + cycle++; + } + + return 0; +} + +/* Be there 2 port subschedules, each executing an arbitrary number of gate + * open/close events cyclically. + * None of those gate events must ever occur at the exact same time, otherwise + * the switch is known to act in exotically strange ways. + * However the hardware doesn't bother performing these integrity checks. + * So here we are with the task of validating whether the new @admin offload + * has any conflict with the already established TAS configuration in + * tas_data->offload. We already know the other ports are in harmony with one + * another, otherwise we wouldn't have saved them. + * Each gate event executes periodically, with a period of @cycle_time and a + * phase given by its cycle's @base_time plus its offset within the cycle + * (which in turn is given by the length of the events prior to it). + * There are two aspects to possible collisions: + * - Collisions within one cycle's (actually the longest cycle's) time frame. + * For that, we need to compare the cartesian product of each possible + * occurrence of each event within one cycle time. + * - Collisions in the future. 
Events may not collide within one cycle time, + * but if two port schedules don't have the same periodicity (aka the cycle + * times aren't multiples of one another), they surely will some time in the + * future (actually they will collide an infinite amount of times). + */ +static bool +sja1105_tas_check_conflicts(struct sja1105_private *priv, int port, + const struct tc_taprio_qopt_offload *admin) +{ + struct sja1105_tas_data *tas_data = &priv->tas_data; + const struct tc_taprio_qopt_offload *offload; + s64 max_cycle_time, min_cycle_time; + s64 delta1, delta2; + s64 rbt1, rbt2; + s64 stop_time; + s64 t1, t2; + int i, j; + s32 rem; + + offload = tas_data->offload[port]; + if (!offload) + return false; + + /* Check if the two cycle times are multiples of one another. + * If they aren't, then they will surely collide. + */ + max_cycle_time = max(offload->cycle_time, admin->cycle_time); + min_cycle_time = min(offload->cycle_time, admin->cycle_time); + div_s64_rem(max_cycle_time, min_cycle_time, &rem); + if (rem) + return true; + + /* Calculate the "reduced" base time of each of the two cycles + * (transposed back as close to 0 as possible) by dividing to + * the cycle time. + */ + div_s64_rem(offload->base_time, offload->cycle_time, &rem); + rbt1 = rem; + + div_s64_rem(admin->base_time, admin->cycle_time, &rem); + rbt2 = rem; + + stop_time = max_cycle_time + max(rbt1, rbt2); + + /* delta1 is the relative base time of each GCL entry within + * the established ports' TAS config. + */ + for (i = 0, delta1 = 0; + i < offload->num_entries; + delta1 += offload->entries[i].interval, i++) { + /* delta2 is the relative base time of each GCL entry + * within the newly added TAS config. + */ + for (j = 0, delta2 = 0; + j < admin->num_entries; + delta2 += admin->entries[j].interval, j++) { + /* t1 follows all possible occurrences of the + * established ports' GCL entry i within the + * first cycle time. + */ + for (t1 = rbt1 + delta1; + t1 <= stop_time; + t1 += offload->cycle_time) { + /* t2 follows all possible occurrences + * of the newly added GCL entry j + * within the first cycle time. + */ + for (t2 = rbt2 + delta2; + t2 <= stop_time; + t2 += admin->cycle_time) { + if (t1 == t2) { + dev_warn(priv->ds->dev, + "GCL entry %d collides with entry %d of port %d\n", + j, i, port); + return true; + } + } + } + } + } + + return false; +} + +int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port, + struct tc_taprio_qopt_offload *admin) +{ + struct sja1105_private *priv = ds->priv; + struct sja1105_tas_data *tas_data = &priv->tas_data; + int other_port, rc, i; + + /* Can't change an already configured port (must delete qdisc first). + * Can't delete the qdisc from an unconfigured port. + */ + if (!!tas_data->offload[port] == admin->enable) + return -EINVAL; + + if (!admin->enable) { + taprio_offload_free(tas_data->offload[port]); + tas_data->offload[port] = NULL; + + rc = sja1105_init_scheduling(priv); + if (rc < 0) + return rc; + + return sja1105_static_config_reload(priv); + } + + /* The cycle time extension is the amount of time the last cycle from + * the old OPER needs to be extended in order to phase-align with the + * base time of the ADMIN when that becomes the new OPER. + * But of course our switch needs to be reset to switch-over between + * the ADMIN and the OPER configs - so much for a seamless transition. + * So don't add insult over injury and just say we don't support cycle + * time extension. 
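
The validation below bounds every gate interval by what the hardware's delta field can represent in 200 ns ticks. As a quick check of those limits (my arithmetic, using the constants defined at the top of this file):

    /* delta = ns / 200 must be nonzero and fit below BIT(19):
     *   shortest interval:  1 tick          =  200 ns
     *   longest interval:   2^19 - 1 ticks ~= 104.86 ms
     */
    static bool sja1105_interval_ok(s64 interval_ns)
    {
        s64 ticks = ns_to_sja1105_delta(interval_ns);

        return ticks > 0 && ticks < SJA1105_TAS_MAX_DELTA;
    }
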
+ */ + if (admin->cycle_time_extension) + return -ENOTSUPP; + + if (!ns_to_sja1105_delta(admin->base_time)) { + dev_err(ds->dev, "A base time of zero is not hardware-allowed\n"); + return -ERANGE; + } + + for (i = 0; i < admin->num_entries; i++) { + s64 delta_ns = admin->entries[i].interval; + s64 delta_cycles = ns_to_sja1105_delta(delta_ns); + bool too_long, too_short; + + too_long = (delta_cycles >= SJA1105_TAS_MAX_DELTA); + too_short = (delta_cycles == 0); + if (too_long || too_short) { + dev_err(priv->ds->dev, + "Interval %llu too %s for GCL entry %d\n", + delta_ns, too_long ? "long" : "short", i); + return -ERANGE; + } + } + + for (other_port = 0; other_port < SJA1105_NUM_PORTS; other_port++) { + if (other_port == port) + continue; + + if (sja1105_tas_check_conflicts(priv, other_port, admin)) + return -ERANGE; + } + + tas_data->offload[port] = taprio_offload_get(admin); + + rc = sja1105_init_scheduling(priv); + if (rc < 0) + return rc; + + return sja1105_static_config_reload(priv); +} + +void sja1105_tas_setup(struct dsa_switch *ds) +{ +} + +void sja1105_tas_teardown(struct dsa_switch *ds) +{ + struct sja1105_private *priv = ds->priv; + struct tc_taprio_qopt_offload *offload; + int port; + + for (port = 0; port < SJA1105_NUM_PORTS; port++) { + offload = priv->tas_data.offload[port]; + if (!offload) + continue; + + taprio_offload_free(offload); + } +} diff --git a/drivers/net/dsa/sja1105/sja1105_tas.h b/drivers/net/dsa/sja1105/sja1105_tas.h new file mode 100644 index 000000000000..0b803c30e640 --- /dev/null +++ b/drivers/net/dsa/sja1105/sja1105_tas.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 + * Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com> + */ +#ifndef _SJA1105_TAS_H +#define _SJA1105_TAS_H + +#include <net/pkt_sched.h> + +#if IS_ENABLED(CONFIG_NET_DSA_SJA1105_TAS) + +struct sja1105_tas_data { + struct tc_taprio_qopt_offload *offload[SJA1105_NUM_PORTS]; +}; + +int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port, + struct tc_taprio_qopt_offload *admin); + +void sja1105_tas_setup(struct dsa_switch *ds); + +void sja1105_tas_teardown(struct dsa_switch *ds); + +#else + +/* C doesn't allow empty structures, bah! 
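
The stub half of the sja1105_tas.h header below exists so the rest of the driver can call the TAS hooks unconditionally, without ifdefs at the call sites. A sketch of the kind of caller this enables (the dsa_switch plumbing here is assumed for illustration, not part of this hunk):

    static int sja1105_port_setup_tc(struct dsa_switch *ds, int port,
                                     enum tc_setup_type type,
                                     void *type_data)
    {
        switch (type) {
        case TC_SETUP_QDISC_TAPRIO:
            /* Resolves to the inline -EOPNOTSUPP stub when
             * CONFIG_NET_DSA_SJA1105_TAS is disabled.
             */
            return sja1105_setup_tc_taprio(ds, port, type_data);
        default:
            return -EOPNOTSUPP;
        }
    }
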
*/ +struct sja1105_tas_data { + u8 dummy; +}; + +static inline int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port, + struct tc_taprio_qopt_offload *admin) +{ + return -EOPNOTSUPP; +} + +static inline void sja1105_tas_setup(struct dsa_switch *ds) { } + +static inline void sja1105_tas_teardown(struct dsa_switch *ds) { } + +#endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_TAS) */ + +#endif /* _SJA1105_TAS_H */ diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 147051404194..8785c2ff3825 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -847,8 +847,7 @@ static void poll_vortex(struct net_device *dev) static int vortex_suspend(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct net_device *ndev = pci_get_drvdata(pdev); + struct net_device *ndev = dev_get_drvdata(dev); if (!ndev || !netif_running(ndev)) return 0; @@ -861,8 +860,7 @@ static int vortex_suspend(struct device *dev) static int vortex_resume(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct net_device *ndev = pci_get_drvdata(pdev); + struct net_device *ndev = dev_get_drvdata(dev); int err; if (!ndev || !netif_running(ndev)) @@ -2175,7 +2173,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) dma_addr = skb_frag_dma_map(vp->gendev, frag, 0, - frag->size, + skb_frag_size(frag), DMA_TO_DEVICE); if (dma_mapping_error(vp->gendev, dma_addr)) { for(i = i-1; i >= 0; i--) diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index dc9dee55976b..1e2de9d062bf 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -167,6 +167,7 @@ config ETHOC source "drivers/net/ethernet/packetengines/Kconfig" source "drivers/net/ethernet/pasemi/Kconfig" +source "drivers/net/ethernet/pensando/Kconfig" source "drivers/net/ethernet/qlogic/Kconfig" source "drivers/net/ethernet/qualcomm/Kconfig" source "drivers/net/ethernet/rdc/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 4bc3c95562bf..77f9838a76c9 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -96,3 +96,4 @@ obj-$(CONFIG_NET_VENDOR_WIZNET) += wiznet/ obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/ obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/ obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/ +obj-$(CONFIG_NET_VENDOR_PENSANDO) += pensando/ diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index 010a2f48aea5..2a9f8643629c 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -110,7 +110,7 @@ static void greth_print_tx_packet(struct sk_buff *skb) print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1, skb_frag_address(&skb_shinfo(skb)->frags[i]), - skb_shinfo(skb)->frags[i].size, true); + skb_frag_size(&skb_shinfo(skb)->frags[i]), true); } } diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c index edbb4b3604c7..174344c450af 100644 --- a/drivers/net/ethernet/agere/et131x.c +++ b/drivers/net/ethernet/agere/et131x.c @@ -2426,7 +2426,7 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb) u32 thiscopy, remainder; struct sk_buff *skb = tcb->skb; u32 nr_frags = skb_shinfo(skb)->nr_frags + 1; - struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0]; + skb_frag_t *frags = &skb_shinfo(skb)->frags[0]; struct phy_device *phydev = adapter->netdev->phydev; dma_addr_t dma_addr; struct tx_ring *tx_ring = &adapter->tx_ring; @@ 
-2488,11 +2488,11 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb) frag++; } } else { - desc[frag].len_vlan = frags[i - 1].size; + desc[frag].len_vlan = skb_frag_size(&frags[i - 1]); dma_addr = skb_frag_dma_map(&adapter->pdev->dev, &frags[i - 1], 0, - frags[i - 1].size, + desc[frag].len_vlan, DMA_TO_DEVICE); desc[frag].addr_lo = lower_32_bits(dma_addr); desc[frag].addr_hi = upper_32_bits(dma_addr); diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h index d19f2ecf8e84..8baf847e8622 100644 --- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h +++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h @@ -808,6 +808,12 @@ struct ena_admin_host_info { u16 num_cpus; u16 reserved; + + /* 1 :0 : reserved + * 2 : interrupt_moderation + * 31:3 : reserved + */ + u32 driver_supported_features; }; struct ena_admin_rss_ind_table_entry { @@ -1110,6 +1116,8 @@ struct ena_admin_ena_mmio_req_read_less_resp { #define ENA_ADMIN_HOST_INFO_DEVICE_MASK GENMASK(7, 3) #define ENA_ADMIN_HOST_INFO_BUS_SHIFT 8 #define ENA_ADMIN_HOST_INFO_BUS_MASK GENMASK(15, 8) +#define ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_SHIFT 2 +#define ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK BIT(2) /* aenq_common_desc */ #define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0) diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index 911a2e7a375a..ea62604fdf8c 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -1278,40 +1278,33 @@ static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev) return 0; } -static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev) -{ - size_t size; - - size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS; - - ena_dev->intr_moder_tbl = - devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL); - if (!ena_dev->intr_moder_tbl) - return -ENOMEM; - - ena_com_config_default_interrupt_moderation_table(ena_dev); - - return 0; -} - static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev, u16 intr_delay_resolution) { - struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; - unsigned int i; + /* Initial value of intr_delay_resolution might be 0 */ + u16 prev_intr_delay_resolution = + ena_dev->intr_delay_resolution ? + ena_dev->intr_delay_resolution : + ENA_DEFAULT_INTR_DELAY_RESOLUTION; if (!intr_delay_resolution) { pr_err("Illegal intr_delay_resolution provided. 
Going to use default 1 usec resolution\n"); - intr_delay_resolution = 1; + intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION; } - ena_dev->intr_delay_resolution = intr_delay_resolution; /* update Rx */ - for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++) - intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution; + ena_dev->intr_moder_rx_interval = + ena_dev->intr_moder_rx_interval * + prev_intr_delay_resolution / + intr_delay_resolution; /* update Tx */ - ena_dev->intr_moder_tx_interval /= intr_delay_resolution; + ena_dev->intr_moder_tx_interval = + ena_dev->intr_moder_tx_interval * + prev_intr_delay_resolution / + intr_delay_resolution; + + ena_dev->intr_delay_resolution = intr_delay_resolution; } /*****************************************************************************/ @@ -2776,42 +2769,34 @@ bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev) ENA_ADMIN_INTERRUPT_MODERATION); } -int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev, - u32 tx_coalesce_usecs) +static int ena_com_update_nonadaptive_moderation_interval(u32 coalesce_usecs, + u32 intr_delay_resolution, + u32 *intr_moder_interval) { - if (!ena_dev->intr_delay_resolution) { + if (!intr_delay_resolution) { pr_err("Illegal interrupt delay granularity value\n"); return -EFAULT; } - ena_dev->intr_moder_tx_interval = tx_coalesce_usecs / - ena_dev->intr_delay_resolution; + *intr_moder_interval = coalesce_usecs / intr_delay_resolution; return 0; } -int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev, - u32 rx_coalesce_usecs) +int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev, + u32 tx_coalesce_usecs) { - if (!ena_dev->intr_delay_resolution) { - pr_err("Illegal interrupt delay granularity value\n"); - return -EFAULT; - } - - /* We use LOWEST entry of moderation table for storing - * nonadaptive interrupt coalescing values - */ - ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval = - rx_coalesce_usecs / ena_dev->intr_delay_resolution; - - return 0; + return ena_com_update_nonadaptive_moderation_interval(tx_coalesce_usecs, + ena_dev->intr_delay_resolution, + &ena_dev->intr_moder_tx_interval); } -void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev) +int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev, + u32 rx_coalesce_usecs) { - if (ena_dev->intr_moder_tbl) - devm_kfree(ena_dev->dmadev, ena_dev->intr_moder_tbl); - ena_dev->intr_moder_tbl = NULL; + return ena_com_update_nonadaptive_moderation_interval(rx_coalesce_usecs, + ena_dev->intr_delay_resolution, + &ena_dev->intr_moder_rx_interval); } int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev) @@ -2838,66 +2823,14 @@ int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev) return rc; } - rc = ena_com_init_interrupt_moderation_table(ena_dev); - if (rc) - goto err; - /* if moderation is supported by device we set adaptive moderation */ delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution; ena_com_update_intr_delay_resolution(ena_dev, delay_resolution); - /* Disable adaptive moderation by default - can be enabled from - * ethtool - */ + /* Disable adaptive moderation by default - can be enabled later */ ena_com_disable_adaptive_moderation(ena_dev); return 0; -err: - ena_com_destroy_interrupt_moderation(ena_dev); - return rc; -} - -void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev) -{ - struct ena_intr_moder_entry *intr_moder_tbl = 
ena_dev->intr_moder_tbl; - - if (!intr_moder_tbl) - return; - - intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval = - ENA_INTR_LOWEST_USECS; - intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval = - ENA_INTR_LOWEST_PKTS; - intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval = - ENA_INTR_LOWEST_BYTES; - - intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval = - ENA_INTR_LOW_USECS; - intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval = - ENA_INTR_LOW_PKTS; - intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval = - ENA_INTR_LOW_BYTES; - - intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval = - ENA_INTR_MID_USECS; - intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval = - ENA_INTR_MID_PKTS; - intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval = - ENA_INTR_MID_BYTES; - - intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval = - ENA_INTR_HIGH_USECS; - intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval = - ENA_INTR_HIGH_PKTS; - intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval = - ENA_INTR_HIGH_BYTES; - - intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval = - ENA_INTR_HIGHEST_USECS; - intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval = - ENA_INTR_HIGHEST_PKTS; - intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval = - ENA_INTR_HIGHEST_BYTES; } unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev) @@ -2907,49 +2840,7 @@ unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev * unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev) { - struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; - - if (intr_moder_tbl) - return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval; - - return 0; -} - -void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev, - enum ena_intr_moder_level level, - struct ena_intr_moder_entry *entry) -{ - struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; - - if (level >= ENA_INTR_MAX_NUM_OF_LEVELS) - return; - - intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval; - if (ena_dev->intr_delay_resolution) - intr_moder_tbl[level].intr_moder_interval /= - ena_dev->intr_delay_resolution; - intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval; - - /* use hardcoded value until ethtool supports bytecount parameter */ - if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED) - intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval; -} - -void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev, - enum ena_intr_moder_level level, - struct ena_intr_moder_entry *entry) -{ - struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; - - if (level >= ENA_INTR_MAX_NUM_OF_LEVELS) - return; - - entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval; - if (ena_dev->intr_delay_resolution) - entry->intr_moder_interval *= ena_dev->intr_delay_resolution; - entry->pkts_per_interval = - intr_moder_tbl[level].pkts_per_interval; - entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval; + return ena_dev->intr_moder_rx_interval; } int ena_com_config_dev_mode(struct ena_com_dev *ena_dev, diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h index 0d3664fe260d..7c941eba0bc9 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.h +++ b/drivers/net/ethernet/amazon/ena/ena_com.h @@ -72,46 +72,14 @@ 
/*****************************************************************************/ /* ENA adaptive interrupt moderation settings */ -#define ENA_INTR_LOWEST_USECS (0) -#define ENA_INTR_LOWEST_PKTS (3) -#define ENA_INTR_LOWEST_BYTES (2 * 1524) - -#define ENA_INTR_LOW_USECS (32) -#define ENA_INTR_LOW_PKTS (12) -#define ENA_INTR_LOW_BYTES (16 * 1024) - -#define ENA_INTR_MID_USECS (80) -#define ENA_INTR_MID_PKTS (48) -#define ENA_INTR_MID_BYTES (64 * 1024) - -#define ENA_INTR_HIGH_USECS (128) -#define ENA_INTR_HIGH_PKTS (96) -#define ENA_INTR_HIGH_BYTES (128 * 1024) - -#define ENA_INTR_HIGHEST_USECS (192) -#define ENA_INTR_HIGHEST_PKTS (128) -#define ENA_INTR_HIGHEST_BYTES (192 * 1024) - #define ENA_INTR_INITIAL_TX_INTERVAL_USECS 196 -#define ENA_INTR_INITIAL_RX_INTERVAL_USECS 4 -#define ENA_INTR_DELAY_OLD_VALUE_WEIGHT 6 -#define ENA_INTR_DELAY_NEW_VALUE_WEIGHT 4 -#define ENA_INTR_MODER_LEVEL_STRIDE 2 -#define ENA_INTR_BYTE_COUNT_NOT_SUPPORTED 0xFFFFFF +#define ENA_INTR_INITIAL_RX_INTERVAL_USECS 0 +#define ENA_DEFAULT_INTR_DELAY_RESOLUTION 1 #define ENA_HW_HINTS_NO_TIMEOUT 0xFFFF #define ENA_FEATURE_MAX_QUEUE_EXT_VER 1 -enum ena_intr_moder_level { - ENA_INTR_MODER_LOWEST = 0, - ENA_INTR_MODER_LOW, - ENA_INTR_MODER_MID, - ENA_INTR_MODER_HIGH, - ENA_INTR_MODER_HIGHEST, - ENA_INTR_MAX_NUM_OF_LEVELS, -}; - struct ena_llq_configurations { enum ena_admin_llq_header_location llq_header_location; enum ena_admin_llq_ring_entry_size llq_ring_entry_size; @@ -120,12 +88,6 @@ struct ena_llq_configurations { u16 llq_ring_entry_size_value; }; -struct ena_intr_moder_entry { - unsigned int intr_moder_interval; - unsigned int pkts_per_interval; - unsigned int bytes_per_interval; -}; - enum queue_direction { ENA_COM_IO_QUEUE_DIRECTION_TX, ENA_COM_IO_QUEUE_DIRECTION_RX @@ -376,7 +338,13 @@ struct ena_com_dev { struct ena_host_attribute host_attr; bool adaptive_coalescing; u16 intr_delay_resolution; + + /* interrupt moderation intervals are in usec divided by + * intr_delay_resolution, which is supplied by the device. + */ u32 intr_moder_tx_interval; + u32 intr_moder_rx_interval; + struct ena_intr_moder_entry *intr_moder_tbl; struct ena_com_llq_info llq_info; @@ -914,11 +882,6 @@ int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue, */ int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev); -/* ena_com_destroy_interrupt_moderation - Destroy interrupt moderation resources - * @ena_dev: ENA communication layer struct - */ -void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev); - /* ena_com_interrupt_moderation_supported - Return if interrupt moderation * capability is supported by the device. * @@ -926,12 +889,6 @@ void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev); */ bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev); -/* ena_com_config_default_interrupt_moderation_table - Restore the interrupt - * moderation table back to the default parameters. - * @ena_dev: ENA communication layer struct - */ -void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev); - /* ena_com_update_nonadaptive_moderation_interval_tx - Update the * non-adaptive interval in Tx direction. * @ena_dev: ENA communication layer struct @@ -968,29 +925,6 @@ unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev * */ unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev); -/* ena_com_init_intr_moderation_entry - Update a single entry in the interrupt - * moderation table. 
- * @ena_dev: ENA communication layer struct - * @level: Interrupt moderation table level - * @entry: Entry value - * - * Update a single entry in the interrupt moderation table. - */ -void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev, - enum ena_intr_moder_level level, - struct ena_intr_moder_entry *entry); - -/* ena_com_get_intr_moderation_entry - Init ena_intr_moder_entry. - * @ena_dev: ENA communication layer struct - * @level: Interrupt moderation table level - * @entry: Entry to fill. - * - * Initialize the entry according to the adaptive interrupt moderation table. - */ -void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev, - enum ena_intr_moder_level level, - struct ena_intr_moder_entry *entry); - /* ena_com_config_dev_mode - Configure the placement policy of the device. * @ena_dev: ENA communication layer struct * @llq_features: LLQ feature descriptor, retrieve via @@ -1016,75 +950,6 @@ static inline void ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_d ena_dev->adaptive_coalescing = false; } -/* ena_com_calculate_interrupt_delay - Calculate new interrupt delay - * @ena_dev: ENA communication layer struct - * @pkts: Number of packets since the last update - * @bytes: Number of bytes received since the last update. - * @smoothed_interval: Returned interval - * @moder_tbl_idx: Current table level as input update new level as return - * value. - */ -static inline void ena_com_calculate_interrupt_delay(struct ena_com_dev *ena_dev, - unsigned int pkts, - unsigned int bytes, - unsigned int *smoothed_interval, - unsigned int *moder_tbl_idx) -{ - enum ena_intr_moder_level curr_moder_idx, new_moder_idx; - struct ena_intr_moder_entry *curr_moder_entry; - struct ena_intr_moder_entry *pred_moder_entry; - struct ena_intr_moder_entry *new_moder_entry; - struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; - unsigned int interval; - - /* We apply adaptive moderation on Rx path only. - * Tx uses static interrupt moderation. 
- */ - if (!pkts || !bytes) - /* Tx interrupt, or spurious interrupt, - * in both cases we just use the same delay values - */ - return; - - curr_moder_idx = (enum ena_intr_moder_level)(*moder_tbl_idx); - if (unlikely(curr_moder_idx >= ENA_INTR_MAX_NUM_OF_LEVELS)) { - pr_err("Wrong moderation index %u\n", curr_moder_idx); - return; - } - - curr_moder_entry = &intr_moder_tbl[curr_moder_idx]; - new_moder_idx = curr_moder_idx; - - if (curr_moder_idx == ENA_INTR_MODER_LOWEST) { - if ((pkts > curr_moder_entry->pkts_per_interval) || - (bytes > curr_moder_entry->bytes_per_interval)) - new_moder_idx = - (enum ena_intr_moder_level)(curr_moder_idx + ENA_INTR_MODER_LEVEL_STRIDE); - } else { - pred_moder_entry = &intr_moder_tbl[curr_moder_idx - ENA_INTR_MODER_LEVEL_STRIDE]; - - if ((pkts <= pred_moder_entry->pkts_per_interval) || - (bytes <= pred_moder_entry->bytes_per_interval)) - new_moder_idx = - (enum ena_intr_moder_level)(curr_moder_idx - ENA_INTR_MODER_LEVEL_STRIDE); - else if ((pkts > curr_moder_entry->pkts_per_interval) || - (bytes > curr_moder_entry->bytes_per_interval)) { - if (curr_moder_idx != ENA_INTR_MODER_HIGHEST) - new_moder_idx = - (enum ena_intr_moder_level)(curr_moder_idx + ENA_INTR_MODER_LEVEL_STRIDE); - } - } - new_moder_entry = &intr_moder_tbl[new_moder_idx]; - - interval = new_moder_entry->intr_moder_interval; - *smoothed_interval = ( - (interval * ENA_INTR_DELAY_NEW_VALUE_WEIGHT + - ENA_INTR_DELAY_OLD_VALUE_WEIGHT * (*smoothed_interval)) + 5) / - 10; - - *moder_tbl_idx = new_moder_idx; -} - /* ena_com_update_intr_reg - Prepare interrupt register * @intr_reg: interrupt register to update. * @rx_delay_interval: Rx interval in usecs diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c index b997c3ce9e2b..16553d92fad2 100644 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c @@ -305,32 +305,21 @@ static int ena_get_coalesce(struct net_device *net_dev, { struct ena_adapter *adapter = netdev_priv(net_dev); struct ena_com_dev *ena_dev = adapter->ena_dev; - struct ena_intr_moder_entry intr_moder_entry; if (!ena_com_interrupt_moderation_supported(ena_dev)) { /* the device doesn't support interrupt moderation */ return -EOPNOTSUPP; } + coalesce->tx_coalesce_usecs = - ena_com_get_nonadaptive_moderation_interval_tx(ena_dev) / + ena_com_get_nonadaptive_moderation_interval_tx(ena_dev) * ena_dev->intr_delay_resolution; - if (!ena_com_get_adaptive_moderation_enabled(ena_dev)) { + + if (!ena_com_get_adaptive_moderation_enabled(ena_dev)) coalesce->rx_coalesce_usecs = ena_com_get_nonadaptive_moderation_interval_rx(ena_dev) - / ena_dev->intr_delay_resolution; - } else { - ena_com_get_intr_moderation_entry(adapter->ena_dev, ENA_INTR_MODER_LOWEST, &intr_moder_entry); - coalesce->rx_coalesce_usecs_low = intr_moder_entry.intr_moder_interval; - coalesce->rx_max_coalesced_frames_low = intr_moder_entry.pkts_per_interval; - - ena_com_get_intr_moderation_entry(adapter->ena_dev, ENA_INTR_MODER_MID, &intr_moder_entry); - coalesce->rx_coalesce_usecs = intr_moder_entry.intr_moder_interval; - coalesce->rx_max_coalesced_frames = intr_moder_entry.pkts_per_interval; - - ena_com_get_intr_moderation_entry(adapter->ena_dev, ENA_INTR_MODER_HIGHEST, &intr_moder_entry); - coalesce->rx_coalesce_usecs_high = intr_moder_entry.intr_moder_interval; - coalesce->rx_max_coalesced_frames_high = intr_moder_entry.pkts_per_interval; - } + * ena_dev->intr_delay_resolution; + coalesce->use_adaptive_rx_coalesce = 
ena_com_get_adaptive_moderation_enabled(ena_dev); @@ -348,12 +337,22 @@ static void ena_update_tx_rings_intr_moderation(struct ena_adapter *adapter) adapter->tx_ring[i].smoothed_interval = val; } +static void ena_update_rx_rings_intr_moderation(struct ena_adapter *adapter) +{ + unsigned int val; + int i; + + val = ena_com_get_nonadaptive_moderation_interval_rx(adapter->ena_dev); + + for (i = 0; i < adapter->num_queues; i++) + adapter->rx_ring[i].smoothed_interval = val; +} + static int ena_set_coalesce(struct net_device *net_dev, struct ethtool_coalesce *coalesce) { struct ena_adapter *adapter = netdev_priv(net_dev); struct ena_com_dev *ena_dev = adapter->ena_dev; - struct ena_intr_moder_entry intr_moder_entry; int rc; if (!ena_com_interrupt_moderation_supported(ena_dev)) { @@ -361,22 +360,6 @@ static int ena_set_coalesce(struct net_device *net_dev, return -EOPNOTSUPP; } - if (coalesce->rx_coalesce_usecs_irq || - coalesce->rx_max_coalesced_frames_irq || - coalesce->tx_coalesce_usecs_irq || - coalesce->tx_max_coalesced_frames || - coalesce->tx_max_coalesced_frames_irq || - coalesce->stats_block_coalesce_usecs || - coalesce->use_adaptive_tx_coalesce || - coalesce->pkt_rate_low || - coalesce->tx_coalesce_usecs_low || - coalesce->tx_max_coalesced_frames_low || - coalesce->pkt_rate_high || - coalesce->tx_coalesce_usecs_high || - coalesce->tx_max_coalesced_frames_high || - coalesce->rate_sample_interval) - return -EINVAL; - rc = ena_com_update_nonadaptive_moderation_interval_tx(ena_dev, coalesce->tx_coalesce_usecs); if (rc) @@ -384,37 +367,23 @@ static int ena_set_coalesce(struct net_device *net_dev, ena_update_tx_rings_intr_moderation(adapter); - if (ena_com_get_adaptive_moderation_enabled(ena_dev)) { - if (!coalesce->use_adaptive_rx_coalesce) { - ena_com_disable_adaptive_moderation(ena_dev); - rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev, - coalesce->rx_coalesce_usecs); - return rc; - } - } else { /* was in non-adaptive mode */ - if (coalesce->use_adaptive_rx_coalesce) { + if (coalesce->use_adaptive_rx_coalesce) { + if (!ena_com_get_adaptive_moderation_enabled(ena_dev)) ena_com_enable_adaptive_moderation(ena_dev); - } else { - rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev, - coalesce->rx_coalesce_usecs); - return rc; - } + return 0; } - intr_moder_entry.intr_moder_interval = coalesce->rx_coalesce_usecs_low; - intr_moder_entry.pkts_per_interval = coalesce->rx_max_coalesced_frames_low; - intr_moder_entry.bytes_per_interval = ENA_INTR_BYTE_COUNT_NOT_SUPPORTED; - ena_com_init_intr_moderation_entry(adapter->ena_dev, ENA_INTR_MODER_LOWEST, &intr_moder_entry); + rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev, + coalesce->rx_coalesce_usecs); + if (rc) + return rc; - intr_moder_entry.intr_moder_interval = coalesce->rx_coalesce_usecs; - intr_moder_entry.pkts_per_interval = coalesce->rx_max_coalesced_frames; - intr_moder_entry.bytes_per_interval = ENA_INTR_BYTE_COUNT_NOT_SUPPORTED; - ena_com_init_intr_moderation_entry(adapter->ena_dev, ENA_INTR_MODER_MID, &intr_moder_entry); + ena_update_rx_rings_intr_moderation(adapter); - intr_moder_entry.intr_moder_interval = coalesce->rx_coalesce_usecs_high; - intr_moder_entry.pkts_per_interval = coalesce->rx_max_coalesced_frames_high; - intr_moder_entry.bytes_per_interval = ENA_INTR_BYTE_COUNT_NOT_SUPPORTED; - ena_com_init_intr_moderation_entry(adapter->ena_dev, ENA_INTR_MODER_HIGHEST, &intr_moder_entry); + if (!coalesce->use_adaptive_rx_coalesce) { + if (ena_com_get_adaptive_moderation_enabled(ena_dev)) + 
ena_com_disable_adaptive_moderation(ena_dev); + } return 0; } diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 664e3ed97ea9..c487d2a7d6dd 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -158,7 +158,6 @@ static void ena_init_io_rings_common(struct ena_adapter *adapter, ring->adapter = adapter; ring->ena_dev = adapter->ena_dev; ring->per_napi_packets = 0; - ring->per_napi_bytes = 0; ring->cpu = 0; ring->first_interrupt = false; ring->no_interrupt_event_cnt = 0; @@ -196,6 +195,7 @@ static void ena_init_io_rings(struct ena_adapter *adapter) rxr->smoothed_interval = ena_com_get_nonadaptive_moderation_interval_rx(ena_dev); rxr->empty_rx_queue = 0; + adapter->ena_napi[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; } } @@ -712,6 +712,7 @@ static void ena_destroy_all_rx_queues(struct ena_adapter *adapter) for (i = 0; i < adapter->num_queues; i++) { ena_qid = ENA_IO_RXQ_IDX(i); + cancel_work_sync(&adapter->ena_napi[i].dim.work); ena_com_destroy_io_queue(adapter->ena_dev, ena_qid); } } @@ -823,7 +824,8 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget) above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, ENA_TX_WAKEUP_THRESH); - if (netif_tx_queue_stopped(txq) && above_thresh) { + if (netif_tx_queue_stopped(txq) && above_thresh && + test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) { netif_tx_wake_queue(txq); u64_stats_update_begin(&tx_ring->syncp); tx_ring->tx_stats.queue_wakeup++; @@ -832,9 +834,6 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget) __netif_tx_unlock(txq); } - tx_ring->per_napi_bytes += tx_bytes; - tx_ring->per_napi_packets += tx_pkts; - return tx_pkts; } @@ -1118,7 +1117,6 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, } while (likely(res_budget)); work_done = budget - res_budget; - rx_ring->per_napi_bytes += total_len; rx_ring->per_napi_packets += work_done; u64_stats_update_begin(&rx_ring->syncp); rx_ring->rx_stats.bytes += total_len; @@ -1155,35 +1153,50 @@ error: return 0; } -void ena_adjust_intr_moderation(struct ena_ring *rx_ring, - struct ena_ring *tx_ring) +static void ena_dim_work(struct work_struct *w) { - /* We apply adaptive moderation on Rx path only. - * Tx uses static interrupt moderation. 
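
What replaces the removed hand-rolled estimator is the kernel's generic dynamic interrupt moderation library from <linux/dim.h> (the patch adds the include in ena_netdev.h and sets dim.mode to DIM_CQ_PERIOD_MODE_START_FROM_EQE). Its consumer contract reduces to a feed-and-apply pair; a minimal sketch under that assumption, with the device programming left as a placeholder:

    #include <linux/dim.h>

    /* Feed a sample at napi-complete time; net_dim() decides whether
     * the moderation profile should move and, if so, schedules
     * dim->work.
     */
    static void my_rx_napi_done(struct dim *dim, u16 events,
                                u64 pkts, u64 bytes)
    {
        struct dim_sample sample;

        dim_update_sample(events, pkts, bytes, &sample);
        net_dim(dim, sample);
    }

    /* Apply the chosen profile from the worker, then re-arm. */
    static void my_dim_work(struct work_struct *w)
    {
        struct dim *dim = container_of(w, struct dim, work);
        struct dim_cq_moder m =
            net_dim_get_rx_moderation(dim->mode, dim->profile_ix);

        /* program m.usec (and m.pkts, if supported) into the NIC */
        dim->state = DIM_START_MEASURE;
    }
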
- */ - ena_com_calculate_interrupt_delay(rx_ring->ena_dev, - rx_ring->per_napi_packets, - rx_ring->per_napi_bytes, - &rx_ring->smoothed_interval, - &rx_ring->moder_tbl_idx); - - /* Reset per napi packets/bytes */ - tx_ring->per_napi_packets = 0; - tx_ring->per_napi_bytes = 0; + struct dim *dim = container_of(w, struct dim, work); + struct dim_cq_moder cur_moder = + net_dim_get_rx_moderation(dim->mode, dim->profile_ix); + struct ena_napi *ena_napi = container_of(dim, struct ena_napi, dim); + + ena_napi->rx_ring->smoothed_interval = cur_moder.usec; + dim->state = DIM_START_MEASURE; +} + +static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi) +{ + struct dim_sample dim_sample; + struct ena_ring *rx_ring = ena_napi->rx_ring; + + if (!rx_ring->per_napi_packets) + return; + + rx_ring->non_empty_napi_events++; + + dim_update_sample(rx_ring->non_empty_napi_events, + rx_ring->rx_stats.cnt, + rx_ring->rx_stats.bytes, + &dim_sample); + + net_dim(&ena_napi->dim, dim_sample); + rx_ring->per_napi_packets = 0; - rx_ring->per_napi_bytes = 0; } static void ena_unmask_interrupt(struct ena_ring *tx_ring, struct ena_ring *rx_ring) { struct ena_eth_io_intr_reg intr_reg; + u32 rx_interval = ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev) ? + rx_ring->smoothed_interval : + ena_com_get_nonadaptive_moderation_interval_rx(rx_ring->ena_dev); /* Update intr register: rx intr delay, * tx intr delay and interrupt unmask */ ena_com_update_intr_reg(&intr_reg, - rx_ring->smoothed_interval, + rx_interval, tx_ring->smoothed_interval, true); @@ -1260,9 +1273,11 @@ static int ena_io_poll(struct napi_struct *napi, int budget) * from the interrupt context (vs from sk_busy_loop) */ if (napi_complete_done(napi, rx_work_done)) { - /* Tx and Rx share the same interrupt vector */ + /* We apply adaptive moderation on Rx path only. + * Tx uses static interrupt moderation. 
+ */ if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev)) - ena_adjust_intr_moderation(rx_ring, tx_ring); + ena_adjust_adaptive_rx_intr_moderation(ena_napi); ena_unmask_interrupt(tx_ring, rx_ring); } @@ -1552,14 +1567,6 @@ static void ena_napi_enable_all(struct ena_adapter *adapter) napi_enable(&adapter->ena_napi[i].napi); } -static void ena_restore_ethtool_params(struct ena_adapter *adapter) -{ - adapter->tx_usecs = 0; - adapter->rx_usecs = 0; - adapter->tx_frames = 1; - adapter->rx_frames = 1; -} - /* Configure the Rx forwarding */ static int ena_rss_configure(struct ena_adapter *adapter) { @@ -1609,8 +1616,6 @@ static int ena_up_complete(struct ena_adapter *adapter) /* enable transmits */ netif_tx_start_all_queues(adapter->netdev); - ena_restore_ethtool_params(adapter); - ena_napi_enable_all(adapter); return 0; @@ -1740,13 +1745,16 @@ static int ena_create_all_io_rx_queues(struct ena_adapter *adapter) rc = ena_create_io_rx_queue(adapter, i); if (rc) goto create_err; + INIT_WORK(&adapter->ena_napi[i].dim.work, ena_dim_work); } return 0; create_err: - while (i--) + while (i--) { + cancel_work_sync(&adapter->ena_napi[i].dim.work); ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i)); + } return rc; } @@ -2419,6 +2427,9 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev, ("K"[0] << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT); host_info->num_cpus = num_online_cpus(); + host_info->driver_supported_features = + ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK; + rc = ena_com_set_host_attributes(ena_dev); if (rc) { if (rc == -EOPNOTSUPP) @@ -3485,10 +3496,12 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) calc_queue_ctx.get_feat_ctx = &get_feat_ctx; calc_queue_ctx.pdev = pdev; - /* initial Tx interrupt delay, Assumes 1 usec granularity. + /* Initial Tx and RX interrupt delay. Assumes 1 usec granularity. 
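
A note on units, since both directions now store intervals the same way: intr_moder_tx_interval and intr_moder_rx_interval hold device ticks, that is, usecs divided by intr_delay_resolution. With a hypothetical device-reported resolution of 4 usec:

    /* ethtool -C eth0 tx-usecs 196  ->  stored 196 / 4 == 49 ticks
     * ethtool -c eth0 (reporting)   ->  49 * 4 == 196 usec
     *
     * On a resolution change the stored value is rescaled as
     * val * prev_resolution / new_resolution, which is what
     * ena_com_update_intr_delay_resolution() does.
     */
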
* Updated during device initialization with the real granularity */ ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS; + ena_dev->intr_moder_rx_interval = ENA_INTR_INITIAL_RX_INTERVAL_USECS; + ena_dev->intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION; io_queue_num = ena_calc_io_queue_num(pdev, ena_dev, &get_feat_ctx); rc = ena_calc_queue_size(&calc_queue_ctx); if (rc || io_queue_num <= 0) { @@ -3618,7 +3631,6 @@ err_free_msix: ena_free_mgmnt_irq(adapter); ena_disable_msix(adapter); err_worker_destroy: - ena_com_destroy_interrupt_moderation(ena_dev); del_timer(&adapter->timer_service); err_netdev_destroy: free_netdev(netdev); @@ -3679,8 +3691,6 @@ static void ena_remove(struct pci_dev *pdev) pci_disable_device(pdev); - ena_com_destroy_interrupt_moderation(ena_dev); - vfree(ena_dev); } diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index efbcffd22215..72ee51a82ec7 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -34,6 +34,7 @@ #define ENA_H #include <linux/bitops.h> +#include <linux/dim.h> #include <linux/etherdevice.h> #include <linux/inetdevice.h> #include <linux/interrupt.h> @@ -153,6 +154,7 @@ struct ena_napi { struct ena_ring *tx_ring; struct ena_ring *rx_ring; u32 qid; + struct dim dim; }; struct ena_calc_queue_size_ctx { @@ -278,8 +280,7 @@ struct ena_ring { struct ena_com_rx_buf_info ena_bufs[ENA_PKT_MAX_BUFS]; u32 smoothed_interval; u32 per_napi_packets; - u32 per_napi_bytes; - enum ena_intr_moder_level moder_tbl_idx; + u16 non_empty_napi_events; struct u64_stats_sync syncp; union { struct ena_stats_tx tx_stats; @@ -329,9 +330,6 @@ struct ena_adapter { u32 missing_tx_completion_threshold; - u32 tx_usecs, rx_usecs; /* interrupt moderation */ - u32 tx_frames, rx_frames; /* interrupt moderation */ - u32 requested_tx_ring_size; u32 requested_rx_ring_size; diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index 650d1bae5f56..1793950f0582 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -1100,7 +1100,6 @@ static int au1000_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) { - dev_err(&pdev->dev, "failed to retrieve IRQ\n"); err = -ENODEV; goto out; } diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c index 87ff5d6d1b22..c6c2a54c1121 100644 --- a/drivers/net/ethernet/amd/ni65.c +++ b/drivers/net/ethernet/amd/ni65.c @@ -697,16 +697,14 @@ static void ni65_free_buffer(struct priv *p) for(i=0;i<TMDNUM;i++) { kfree(p->tmdbounce[i]); #ifdef XMT_VIA_SKB - if(p->tmd_skb[i]) - dev_kfree_skb(p->tmd_skb[i]); + dev_kfree_skb(p->tmd_skb[i]); #endif } for(i=0;i<RMDNUM;i++) { #ifdef RCV_VIA_SKB - if(p->recv_skb[i]) - dev_kfree_skb(p->recv_skb[i]); + dev_kfree_skb(p->recv_skb[i]); #else kfree(p->recvbounce[i]); #endif diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c index b91143947ed2..b0a6c96b6ef4 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c @@ -438,7 +438,6 @@ static const struct file_operations xi2c_reg_value_fops = { void xgbe_debugfs_init(struct xgbe_prv_data *pdata) { - struct dentry *pfile; char *buf; /* Set defaults */ @@ -451,88 +450,48 @@ void xgbe_debugfs_init(struct xgbe_prv_data *pdata) return; pdata->xgbe_debugfs = debugfs_create_dir(buf, NULL); - if (!pdata->xgbe_debugfs) { - 
netdev_err(pdata->netdev, "debugfs_create_dir failed\n"); - kfree(buf); - return; - } - pfile = debugfs_create_file("xgmac_register", 0600, - pdata->xgbe_debugfs, pdata, - &xgmac_reg_addr_fops); - if (!pfile) - netdev_err(pdata->netdev, "debugfs_create_file failed\n"); + debugfs_create_file("xgmac_register", 0600, pdata->xgbe_debugfs, pdata, + &xgmac_reg_addr_fops); - pfile = debugfs_create_file("xgmac_register_value", 0600, - pdata->xgbe_debugfs, pdata, - &xgmac_reg_value_fops); - if (!pfile) - netdev_err(pdata->netdev, "debugfs_create_file failed\n"); + debugfs_create_file("xgmac_register_value", 0600, pdata->xgbe_debugfs, + pdata, &xgmac_reg_value_fops); - pfile = debugfs_create_file("xpcs_mmd", 0600, - pdata->xgbe_debugfs, pdata, - &xpcs_mmd_fops); - if (!pfile) - netdev_err(pdata->netdev, "debugfs_create_file failed\n"); + debugfs_create_file("xpcs_mmd", 0600, pdata->xgbe_debugfs, pdata, + &xpcs_mmd_fops); - pfile = debugfs_create_file("xpcs_register", 0600, - pdata->xgbe_debugfs, pdata, - &xpcs_reg_addr_fops); - if (!pfile) - netdev_err(pdata->netdev, "debugfs_create_file failed\n"); + debugfs_create_file("xpcs_register", 0600, pdata->xgbe_debugfs, pdata, + &xpcs_reg_addr_fops); - pfile = debugfs_create_file("xpcs_register_value", 0600, - pdata->xgbe_debugfs, pdata, - &xpcs_reg_value_fops); - if (!pfile) - netdev_err(pdata->netdev, "debugfs_create_file failed\n"); + debugfs_create_file("xpcs_register_value", 0600, pdata->xgbe_debugfs, + pdata, &xpcs_reg_value_fops); if (pdata->xprop_regs) { - pfile = debugfs_create_file("xprop_register", 0600, - pdata->xgbe_debugfs, pdata, - &xprop_reg_addr_fops); - if (!pfile) - netdev_err(pdata->netdev, - "debugfs_create_file failed\n"); - - pfile = debugfs_create_file("xprop_register_value", 0600, - pdata->xgbe_debugfs, pdata, - &xprop_reg_value_fops); - if (!pfile) - netdev_err(pdata->netdev, - "debugfs_create_file failed\n"); + debugfs_create_file("xprop_register", 0600, pdata->xgbe_debugfs, + pdata, &xprop_reg_addr_fops); + + debugfs_create_file("xprop_register_value", 0600, + pdata->xgbe_debugfs, pdata, + &xprop_reg_value_fops); } if (pdata->xi2c_regs) { - pfile = debugfs_create_file("xi2c_register", 0600, - pdata->xgbe_debugfs, pdata, - &xi2c_reg_addr_fops); - if (!pfile) - netdev_err(pdata->netdev, - "debugfs_create_file failed\n"); - - pfile = debugfs_create_file("xi2c_register_value", 0600, - pdata->xgbe_debugfs, pdata, - &xi2c_reg_value_fops); - if (!pfile) - netdev_err(pdata->netdev, - "debugfs_create_file failed\n"); + debugfs_create_file("xi2c_register", 0600, pdata->xgbe_debugfs, + pdata, &xi2c_reg_addr_fops); + + debugfs_create_file("xi2c_register_value", 0600, + pdata->xgbe_debugfs, pdata, + &xi2c_reg_value_fops); } if (pdata->vdata->an_cdr_workaround) { - pfile = debugfs_create_bool("an_cdr_workaround", 0600, - pdata->xgbe_debugfs, - &pdata->debugfs_an_cdr_workaround); - if (!pfile) - netdev_err(pdata->netdev, - "debugfs_create_bool failed\n"); - - pfile = debugfs_create_bool("an_cdr_track_early", 0600, - pdata->xgbe_debugfs, - &pdata->debugfs_an_cdr_track_early); - if (!pfile) - netdev_err(pdata->netdev, - "debugfs_create_bool failed\n"); + debugfs_create_bool("an_cdr_workaround", 0600, + pdata->xgbe_debugfs, + &pdata->debugfs_an_cdr_workaround); + + debugfs_create_bool("an_cdr_track_early", 0600, + pdata->xgbe_debugfs, + &pdata->debugfs_an_cdr_track_early); } kfree(buf); @@ -546,7 +505,6 @@ void xgbe_debugfs_exit(struct xgbe_prv_data *pdata) void xgbe_debugfs_rename(struct xgbe_prv_data *pdata) { - struct dentry *pfile; char 
*buf; if (!pdata->xgbe_debugfs) @@ -559,11 +517,8 @@ void xgbe_debugfs_rename(struct xgbe_prv_data *pdata) if (!strcmp(pdata->xgbe_debugfs->d_name.name, buf)) goto out; - pfile = debugfs_rename(pdata->xgbe_debugfs->d_parent, - pdata->xgbe_debugfs, - pdata->xgbe_debugfs->d_parent, buf); - if (!pfile) - netdev_err(pdata->netdev, "debugfs_rename failed\n"); + debugfs_rename(pdata->xgbe_debugfs->d_parent, pdata->xgbe_debugfs, + pdata->xgbe_debugfs->d_parent, buf); out: kfree(buf); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c index 533094233659..230726d7b74f 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c @@ -526,7 +526,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb) struct xgbe_ring *ring = channel->tx_ring; struct xgbe_ring_data *rdata; struct xgbe_packet_data *packet; - struct skb_frag_struct *frag; + skb_frag_t *frag; dma_addr_t skb_dma; unsigned int start_index, cur_index; unsigned int offset, tso, vlan, datalen, len; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 3dd0cecddba8..98f8f2033154 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1833,7 +1833,7 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata, struct xgbe_ring *ring, struct sk_buff *skb, struct xgbe_packet_data *packet) { - struct skb_frag_struct *frag; + skb_frag_t *frag; unsigned int context_desc; unsigned int len; unsigned int i; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c index d0f3dfb88202..4ebd2410185a 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c @@ -301,7 +301,6 @@ static int xgbe_platform_probe(struct platform_device *pdev) struct xgbe_prv_data *pdata; struct device *dev = &pdev->dev; struct platform_device *phy_pdev; - struct resource *res; const char *phy_mode; unsigned int phy_memnum, phy_irqnum; unsigned int dma_irqnum, dma_irqend; @@ -353,8 +352,7 @@ static int xgbe_platform_probe(struct platform_device *pdev) } /* Obtain the mmio areas for the device */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - pdata->xgmac_regs = devm_ioremap_resource(dev, res); + pdata->xgmac_regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(pdata->xgmac_regs)) { dev_err(dev, "xgmac ioremap failed\n"); ret = PTR_ERR(pdata->xgmac_regs); @@ -363,8 +361,7 @@ static int xgbe_platform_probe(struct platform_device *pdev) if (netif_msg_probe(pdata)) dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs); - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - pdata->xpcs_regs = devm_ioremap_resource(dev, res); + pdata->xpcs_regs = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(pdata->xpcs_regs)) { dev_err(dev, "xpcs ioremap failed\n"); ret = PTR_ERR(pdata->xpcs_regs); @@ -373,8 +370,8 @@ static int xgbe_platform_probe(struct platform_device *pdev) if (netif_msg_probe(pdata)) dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs); - res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++); - pdata->rxtx_regs = devm_ioremap_resource(dev, res); + pdata->rxtx_regs = devm_platform_ioremap_resource(phy_pdev, + phy_memnum++); if (IS_ERR(pdata->rxtx_regs)) { dev_err(dev, "rxtx ioremap failed\n"); ret = PTR_ERR(pdata->rxtx_regs); @@ -383,8 +380,8 @@ static int xgbe_platform_probe(struct platform_device *pdev) if (netif_msg_probe(pdata)) 
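/*
 * devm_platform_ioremap_resource(pdev, index) is shorthand for the
 * platform_get_resource() + devm_ioremap_resource() pair that the
 * xgbe-platform.c hunks keep deleting.  The resulting idiom, in a
 * hypothetical probe():
 */
static int mydrv_probe(struct platform_device *pdev)	/* hypothetical */
{
	void __iomem *base = devm_platform_ioremap_resource(pdev, 0);

	if (IS_ERR(base))
		return PTR_ERR(base);	/* the helper already logged the error */
	/* ... */
	return 0;
}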
dev_dbg(dev, "rxtx_regs = %p\n", pdata->rxtx_regs); - res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++); - pdata->sir0_regs = devm_ioremap_resource(dev, res); + pdata->sir0_regs = devm_platform_ioremap_resource(phy_pdev, + phy_memnum++); if (IS_ERR(pdata->sir0_regs)) { dev_err(dev, "sir0 ioremap failed\n"); ret = PTR_ERR(pdata->sir0_regs); @@ -393,8 +390,8 @@ static int xgbe_platform_probe(struct platform_device *pdev) if (netif_msg_probe(pdata)) dev_dbg(dev, "sir0_regs = %p\n", pdata->sir0_regs); - res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++); - pdata->sir1_regs = devm_ioremap_resource(dev, res); + pdata->sir1_regs = devm_platform_ioremap_resource(phy_pdev, + phy_memnum++); if (IS_ERR(pdata->sir1_regs)) { dev_err(dev, "sir1 ioremap failed\n"); ret = PTR_ERR(pdata->sir1_regs); @@ -467,10 +464,8 @@ static int xgbe_platform_probe(struct platform_device *pdev) /* Get the device interrupt */ ret = platform_get_irq(pdev, 0); - if (ret < 0) { - dev_err(dev, "platform_get_irq 0 failed\n"); + if (ret < 0) goto err_io; - } pdata->dev_irq = ret; /* Get the per channel DMA interrupts */ @@ -479,12 +474,8 @@ static int xgbe_platform_probe(struct platform_device *pdev) for (i = 0; (i < max) && (dma_irqnum < dma_irqend); i++) { ret = platform_get_irq(pdata->platdev, dma_irqnum++); - if (ret < 0) { - netdev_err(pdata->netdev, - "platform_get_irq %u failed\n", - dma_irqnum - 1); + if (ret < 0) goto err_io; - } pdata->channel_irq[i] = ret; } @@ -496,10 +487,8 @@ static int xgbe_platform_probe(struct platform_device *pdev) /* Get the auto-negotiation interrupt */ ret = platform_get_irq(phy_pdev, phy_irqnum++); - if (ret < 0) { - dev_err(dev, "platform_get_irq phy 0 failed\n"); + if (ret < 0) goto err_io; - } pdata->an_irq = ret; /* Configure the netdev resource */ diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c index 79048cc46703..02b4f3af02b5 100644 --- a/drivers/net/ethernet/apm/xgene-v2/main.c +++ b/drivers/net/ethernet/apm/xgene-v2/main.c @@ -54,10 +54,8 @@ static int xge_get_resources(struct xge_pdata *pdata) } ret = platform_get_irq(pdev, 0); - if (ret < 0) { - dev_err(dev, "Unable to get irq\n"); + if (ret < 0) return ret; - } pdata->resources.irq = ret; return 0; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index 61a465097cb8..5f657879134e 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c @@ -712,11 +712,11 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata) udelay(5); } else { #ifdef CONFIG_ACPI - if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev), "_RST")) { - acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev), - "_RST", NULL, NULL); - } else if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev), - "_INI")) { + acpi_status status; + + status = acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev), + "_RST", NULL, NULL); + if (ACPI_FAILURE(status)) { acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev), "_INI", NULL, NULL); } diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 10b1c053e70a..d8612131c55e 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -340,7 +340,8 @@ static int xgene_enet_work_msg(struct sk_buff *skb, u64 *hopinfo) nr_frags = skb_shinfo(skb)->nr_frags; for (i = 0; i < 2 && i < nr_frags; i++) - len += 
skb_shinfo(skb)->frags[i].size; + len += skb_frag_size( + &skb_shinfo(skb)->frags[i]); /* HW requires header must reside in 3 buffer */ if (unlikely(hdr_len > len)) { @@ -1616,7 +1617,6 @@ static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata) static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata) { struct platform_device *pdev = pdata->pdev; - struct device *dev = &pdev->dev; int i, ret, max_irqs; if (phy_interface_mode_is_rgmii(pdata->phy_mode)) @@ -1636,9 +1636,7 @@ static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata) pdata->cq_cnt = max_irqs / 2; break; } - dev_err(dev, "Unable to get ENET IRQ\n"); - ret = ret ? : -ENXIO; - return ret; + return ret ? : -ENXIO; } pdata->irqs[i] = ret; } diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c index 6453fc2ebb1f..f482ced2cadd 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c @@ -460,12 +460,14 @@ static int xgene_enet_reset(struct xgene_enet_pdata *p) } } else { #ifdef CONFIG_ACPI - if (acpi_has_method(ACPI_HANDLE(&p->pdev->dev), "_RST")) - acpi_evaluate_object(ACPI_HANDLE(&p->pdev->dev), - "_RST", NULL, NULL); - else if (acpi_has_method(ACPI_HANDLE(&p->pdev->dev), "_INI")) + acpi_status status; + + status = acpi_evaluate_object(ACPI_HANDLE(&p->pdev->dev), + "_RST", NULL, NULL); + if (ACPI_FAILURE(status)) { acpi_evaluate_object(ACPI_HANDLE(&p->pdev->dev), "_INI", NULL, NULL); + } #endif } diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c index 133eb91c542e..304b5d43f236 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c @@ -393,11 +393,11 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata) udelay(5); } else { #ifdef CONFIG_ACPI - if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev), "_RST")) { - acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev), - "_RST", NULL, NULL); - } else if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev), - "_INI")) { + acpi_status status; + + status = acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev), + "_RST", NULL, NULL); + if (ACPI_FAILURE(status)) { acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev), "_INI", NULL, NULL); } diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c index c40daad515d5..a58185b1d8bf 100644 --- a/drivers/net/ethernet/apple/bmac.c +++ b/drivers/net/ethernet/apple/bmac.c @@ -815,8 +815,8 @@ static int reverse6[64] = { static unsigned int crc416(unsigned int curval, unsigned short nxtval) { - register unsigned int counter, cur = curval, next = nxtval; - register int high_crc_set, low_data_set; + unsigned int counter, cur = curval, next = nxtval; + int high_crc_set, low_data_set; /* Swap bytes */ next = ((next & 0x00FF) << 8) | (next >> 8); diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c index 6703960c7cf5..7548247455d7 100644 --- a/drivers/net/ethernet/atheros/ag71xx.c +++ b/drivers/net/ethernet/atheros/ag71xx.c @@ -1148,7 +1148,7 @@ static int ag71xx_rings_init(struct ag71xx *ag) return -ENOMEM; } - rx->buf = &tx->buf[BIT(tx->order)]; + rx->buf = &tx->buf[tx_size]; rx->descs_cpu = ((void *)tx->descs_cpu) + tx_size * AG71XX_DESC_SIZE; rx->descs_dma = tx->descs_dma + tx_size * AG71XX_DESC_SIZE; @@ -1686,7 +1686,7 @@ static int ag71xx_probe(struct platform_device *pdev) } ag->mac_base = devm_ioremap_nocache(&pdev->dev, 
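/*
 * The xgene reset hunks replace acpi_has_method() probing with a direct
 * acpi_evaluate_object() call: evaluation already fails cleanly
 * (AE_NOT_FOUND) when the method is absent, so a single call can drive the
 * fallback.  The idiom, for some acpi_handle handle:
 */
#ifdef CONFIG_ACPI
	acpi_status status;

	status = acpi_evaluate_object(handle, "_RST", NULL, NULL);
	if (ACPI_FAILURE(status))	/* no _RST, or it failed: try _INI */
		acpi_evaluate_object(handle, "_INI", NULL, NULL);
#endif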
res->start, - res->end - res->start + 1); + resource_size(res)); if (!ag->mac_base) { err = -ENOMEM; goto err_free; diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index e3538ba7d0e7..d4bbcdfd691a 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -1465,9 +1465,7 @@ static int alx_map_tx_skb(struct alx_tx_queue *txq, struct sk_buff *skb) tpd->len = cpu_to_le16(maplen); for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { - struct skb_frag_struct *frag; - - frag = &skb_shinfo(skb)->frags[f]; + skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; if (++txq->write_idx == txq->count) txq->write_idx = 0; @@ -1879,8 +1877,7 @@ static void alx_remove(struct pci_dev *pdev) #ifdef CONFIG_PM_SLEEP static int alx_suspend(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct alx_priv *alx = pci_get_drvdata(pdev); + struct alx_priv *alx = dev_get_drvdata(dev); if (!netif_running(alx->dev)) return 0; @@ -1891,8 +1888,7 @@ static int alx_suspend(struct device *dev) static int alx_resume(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct alx_priv *alx = pci_get_drvdata(pdev); + struct alx_priv *alx = dev_get_drvdata(dev); struct alx_hw *hw = &alx->hw; int err; diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index be7f9cebb675..2b239ecea05f 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -2150,9 +2150,7 @@ static int atl1c_tx_map(struct atl1c_adapter *adapter, } for (f = 0; f < nr_frags; f++) { - struct skb_frag_struct *frag; - - frag = &skb_shinfo(skb)->frags[f]; + skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; use_tpd = atl1c_get_tpd(adapter, type); memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc)); @@ -2422,8 +2420,7 @@ static int atl1c_close(struct net_device *netdev) static int atl1c_suspend(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct net_device *netdev = pci_get_drvdata(pdev); + struct net_device *netdev = dev_get_drvdata(dev); struct atl1c_adapter *adapter = netdev_priv(netdev); struct atl1c_hw *hw = &adapter->hw; u32 wufc = adapter->wol; @@ -2437,7 +2434,7 @@ static int atl1c_suspend(struct device *dev) if (wufc) if (atl1c_phy_to_ps_link(hw) != 0) - dev_dbg(&pdev->dev, "phy power saving failed"); + dev_dbg(dev, "phy power saving failed"); atl1c_power_saving(hw, wufc); @@ -2447,8 +2444,7 @@ static int atl1c_suspend(struct device *dev) #ifdef CONFIG_PM_SLEEP static int atl1c_resume(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct net_device *netdev = pci_get_drvdata(pdev); + struct net_device *netdev = dev_get_drvdata(dev); struct atl1c_adapter *adapter = netdev_priv(netdev); AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0); diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index 7f14e010bfeb..4f7b65825c15 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -1770,11 +1770,10 @@ static int atl1e_tx_map(struct atl1e_adapter *adapter, } for (f = 0; f < nr_frags; f++) { - const struct skb_frag_struct *frag; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; u16 i; u16 seg_num; - frag = &skb_shinfo(skb)->frags[f]; buf_len = skb_frag_size(frag); seg_num = (buf_len + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN; diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c 
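/*
 * The suspend/resume hunks above work because pci_set_drvdata() stores its
 * pointer in the embedded struct device, so a dev-PM callback can call
 * dev_get_drvdata() directly instead of bouncing through to_pci_dev().
 * Sketch (mydrv_priv is hypothetical):
 */
static int mydrv_suspend(struct device *dev)
{
	struct mydrv_priv *priv = dev_get_drvdata(dev);
				/* == pci_get_drvdata(to_pci_dev(dev)) */

	/* ... quiesce the hardware ... */
	return 0;
}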
b/drivers/net/ethernet/atheros/atlx/atl1.c index b5c6dc914720..b498fd6a47d0 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c @@ -2256,10 +2256,9 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, } for (f = 0; f < nr_frags; f++) { - const struct skb_frag_struct *frag; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; u16 i, nseg; - frag = &skb_shinfo(skb)->frags[f]; buf_len = skb_frag_size(frag); nseg = (buf_len + ATL1_MAX_TX_BUF_LEN - 1) / @@ -2754,8 +2753,7 @@ static int atl1_close(struct net_device *netdev) #ifdef CONFIG_PM_SLEEP static int atl1_suspend(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct net_device *netdev = pci_get_drvdata(pdev); + struct net_device *netdev = dev_get_drvdata(dev); struct atl1_adapter *adapter = netdev_priv(netdev); struct atl1_hw *hw = &adapter->hw; u32 ctrl = 0; @@ -2780,7 +2778,7 @@ static int atl1_suspend(struct device *dev) val = atl1_get_speed_and_duplex(hw, &speed, &duplex); if (val) { if (netif_msg_ifdown(adapter)) - dev_printk(KERN_DEBUG, &pdev->dev, + dev_printk(KERN_DEBUG, dev, "error getting speed/duplex\n"); goto disable_wol; } @@ -2837,8 +2835,7 @@ static int atl1_suspend(struct device *dev) static int atl1_resume(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct net_device *netdev = pci_get_drvdata(pdev); + struct net_device *netdev = dev_get_drvdata(dev); struct atl1_adapter *adapter = netdev_priv(netdev); iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL); diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c index 3b3370a94a9c..37752d9514e7 100644 --- a/drivers/net/ethernet/aurora/nb8800.c +++ b/drivers/net/ethernet/aurora/nb8800.c @@ -1351,10 +1351,8 @@ static int nb8800_probe(struct platform_device *pdev) ops = match->data; irq = platform_get_irq(pdev, 0); - if (irq <= 0) { - dev_err(&pdev->dev, "No IRQ\n"); + if (irq <= 0) return -EINVAL; - } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); base = devm_ioremap_resource(&pdev->dev, res); diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 291e4afd4a1a..620cd3fc1fbc 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -1693,7 +1693,7 @@ static int bcm_enet_probe(struct platform_device *pdev) struct bcm_enet_priv *priv; struct net_device *dev; struct bcm63xx_enet_platform_data *pd; - struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx; + struct resource *res_irq, *res_irq_rx, *res_irq_tx; struct mii_bus *bus; int i, ret; @@ -1719,8 +1719,7 @@ static int bcm_enet_probe(struct platform_device *pdev) if (ret) goto out; - res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->base = devm_ioremap_resource(&pdev->dev, res_mem); + priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) { ret = PTR_ERR(priv->base); goto out; @@ -2762,15 +2761,13 @@ struct platform_driver bcm63xx_enetsw_driver = { /* reserve & remap memory space shared between all macs */ static int bcm_enet_shared_probe(struct platform_device *pdev) { - struct resource *res; void __iomem *p[3]; unsigned int i; memset(bcm_enet_shared_base, 0, sizeof(bcm_enet_shared_base)); for (i = 0; i < 3; i++) { - res = platform_get_resource(pdev, IORESOURCE_MEM, i); - p[i] = devm_ioremap_resource(&pdev->dev, res); + p[i] = devm_platform_ioremap_resource(pdev, i); if (IS_ERR(p[i])) return PTR_ERR(p[i]); } diff --git 
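/*
 * struct skb_frag_struct is being retired in favor of the skb_frag_t alias
 * plus accessors, as in the atl1/atl1e/bgmac hunks.  Old and new forms side
 * by side:
 */
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
	unsigned int len = skb_frag_size(frag);		/* was frag->size */
	unsigned int off = skb_frag_off(frag);		/* was frag->page_offset */
	struct page *page = skb_frag_page(frag);	/* was frag->page.p */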
a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 9483553ce444..7df887e4024c 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -708,8 +708,7 @@ static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv) for (i = 0; i < priv->num_rx_bds; i++) { cb = &priv->rx_cbs[i]; skb = bcm_sysport_rx_refill(priv, cb); - if (skb) - dev_kfree_skb(skb); + dev_kfree_skb(skb); if (!cb->skb) return -ENOMEM; } @@ -2420,12 +2419,10 @@ static int bcm_sysport_probe(struct platform_device *pdev) struct device_node *dn; struct net_device *dev; const void *macaddr; - struct resource *r; u32 txq, rxq; int ret; dn = pdev->dev.of_node; - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); of_id = of_match_node(bcm_sysport_of_match, dn); if (!of_id || !of_id->data) return -EINVAL; @@ -2473,7 +2470,7 @@ static int bcm_sysport_probe(struct platform_device *pdev) goto err_free_netdev; } - priv->base = devm_ioremap_resource(&pdev->dev, r); + priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) { ret = PTR_ERR(priv->base); goto err_free_netdev; diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c index 6dc0dd91ad11..c46c1b1416f7 100644 --- a/drivers/net/ethernet/broadcom/bgmac-platform.c +++ b/drivers/net/ethernet/broadcom/bgmac-platform.c @@ -199,10 +199,8 @@ static int bgmac_probe(struct platform_device *pdev) dev_warn(&pdev->dev, "MAC address not present in device tree\n"); bgmac->irq = platform_get_irq(pdev, 0); - if (bgmac->irq < 0) { - dev_err(&pdev->dev, "Unable to obtain IRQ\n"); + if (bgmac->irq < 0) return bgmac->irq; - } regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "amac_base"); if (!regs) { diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 4632dd5dbad1..148734b166f0 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -172,7 +172,7 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac, flags = 0; for (i = 0; i < nr_frags; i++) { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; int len = skb_frag_size(frag); index = (index + 1) % BGMAC_TX_RING_SLOTS; diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index dfdd14eadd57..fbc196b480b6 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -8673,8 +8673,7 @@ bnx2_remove_one(struct pci_dev *pdev) static int bnx2_suspend(struct device *device) { - struct pci_dev *pdev = to_pci_dev(device); - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(device); struct bnx2 *bp = netdev_priv(dev); if (netif_running(dev)) { @@ -8693,8 +8692,7 @@ bnx2_suspend(struct device *device) static int bnx2_resume(struct device *device) { - struct pci_dev *pdev = to_pci_dev(device); - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(device); struct bnx2 *bp = netdev_priv(dev); if (!netif_running(dev)) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 8dce4069472b..b4a8cf620a0c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -116,6 +116,9 @@ enum board_idx { BCM57508, BCM57504, BCM57502, + BCM57508_NPAR, + BCM57504_NPAR, + BCM57502_NPAR, BCM58802, BCM58804, BCM58808, 
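/*
 * Like kfree(), dev_kfree_skb() is a no-op when handed NULL, so the
 * bcmsysport hunk above (and the earlier ni65 one) can drop the "if (skb)"
 * guard:
 */
	dev_kfree_skb(skb);	/* safe even when skb == NULL */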
@@ -161,6 +164,9 @@ static const struct { [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" }, + [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" }, + [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" }, + [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" }, [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" }, [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, @@ -209,6 +215,12 @@ static const struct pci_device_id bnxt_pci_tbl[] = { { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 }, { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 }, { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 }, + { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR }, { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 }, { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 }, #ifdef CONFIG_BNXT_SRIOV @@ -242,6 +254,8 @@ static const u16 bnxt_async_events_arr[] = { ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED, ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE, ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE, + ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY, + ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY, }; static struct workqueue_struct *bnxt_pf_wq; @@ -828,16 +842,41 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp, return 0; } -static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 cp_cons, - u32 agg_bufs) +static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp, + struct bnxt_cp_ring_info *cpr, + u16 cp_cons, u16 curr) +{ + struct rx_agg_cmp *agg; + + cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr)); + agg = (struct rx_agg_cmp *) + &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; + return agg; +} + +static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr, + u16 agg_id, u16 curr) +{ + struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id]; + + return &tpa_info->agg_arr[curr]; +} + +static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx, + u16 start, u32 agg_bufs, bool tpa) { struct bnxt_napi *bnapi = cpr->bnapi; struct bnxt *bp = bnapi->bp; struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; u16 prod = rxr->rx_agg_prod; u16 sw_prod = rxr->rx_sw_agg_prod; + bool p5_tpa = false; u32 i; + if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa) + p5_tpa = true; + for (i = 0; i < agg_bufs; i++) { u16 cons; struct rx_agg_cmp *agg; @@ -845,8 +884,10 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 cp_cons, struct rx_bd *prod_bd; struct page *page; - agg = (struct rx_agg_cmp *) - &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; + if (p5_tpa) + agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i); + else + agg = bnxt_get_agg(bp, cpr, idx, start + i); cons = agg->rx_agg_cmp_opaque; __clear_bit(cons, rxr->rx_agg_bmap); @@ -874,7 +915,6 @@ static void bnxt_reuse_rx_agg_bufs(struct 
bnxt_cp_ring_info *cpr, u16 cp_cons, prod = NEXT_RX_AGG(prod); sw_prod = NEXT_RX_AGG(sw_prod); - cp_cons = NEXT_CMP(cp_cons); } rxr->rx_agg_prod = prod; rxr->rx_sw_agg_prod = sw_prod; @@ -888,7 +928,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp, { unsigned int payload = offset_and_len >> 16; unsigned int len = offset_and_len & 0xffff; - struct skb_frag_struct *frag; + skb_frag_t *frag; struct page *page = data; u16 prod = rxr->rx_prod; struct sk_buff *skb; @@ -919,7 +959,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp, frag = &skb_shinfo(skb)->frags[0]; skb_frag_size_sub(frag, payload); - frag->page_offset += payload; + skb_frag_off_add(frag, payload); skb->data_len -= payload; skb->tail += payload; @@ -957,15 +997,19 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, - struct sk_buff *skb, u16 cp_cons, - u32 agg_bufs) + struct sk_buff *skb, u16 idx, + u32 agg_bufs, bool tpa) { struct bnxt_napi *bnapi = cpr->bnapi; struct pci_dev *pdev = bp->pdev; struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; u16 prod = rxr->rx_agg_prod; + bool p5_tpa = false; u32 i; + if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa) + p5_tpa = true; + for (i = 0; i < agg_bufs; i++) { u16 cons, frag_len; struct rx_agg_cmp *agg; @@ -973,8 +1017,10 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct page *page; dma_addr_t mapping; - agg = (struct rx_agg_cmp *) - &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; + if (p5_tpa) + agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i); + else + agg = bnxt_get_agg(bp, cpr, idx, i); cons = agg->rx_agg_cmp_opaque; frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) & RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT; @@ -1008,7 +1054,7 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, * allocated already. 
*/ rxr->rx_agg_prod = prod; - bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs - i); + bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa); return NULL; } @@ -1021,7 +1067,6 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, skb->truesize += PAGE_SIZE; prod = NEXT_RX_AGG(prod); - cp_cons = NEXT_CMP(cp_cons); } rxr->rx_agg_prod = prod; return skb; @@ -1081,9 +1126,10 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { struct rx_tpa_end_cmp *tpa_end = cmp; - agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) & - RX_TPA_END_CMP_AGG_BUFS) >> - RX_TPA_END_CMP_AGG_BUFS_SHIFT; + if (bp->flags & BNXT_FLAG_CHIP_P5) + return 0; + + agg_bufs = TPA_END_AGG_BUFS(tpa_end); } if (agg_bufs) { @@ -1094,6 +1140,14 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, return 0; } +static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay) +{ + if (BNXT_PF(bp)) + queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay); + else + schedule_delayed_work(&bp->fw_reset_task, delay); +} + static void bnxt_queue_sp_work(struct bnxt *bp) { if (BNXT_PF(bp)) @@ -1120,26 +1174,60 @@ static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) rxr->rx_next_cons = 0xffff; } +static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id) +{ + struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; + u16 idx = agg_id & MAX_TPA_P5_MASK; + + if (test_bit(idx, map->agg_idx_bmap)) + idx = find_first_zero_bit(map->agg_idx_bmap, + BNXT_AGG_IDX_BMAP_SIZE); + __set_bit(idx, map->agg_idx_bmap); + map->agg_id_tbl[agg_id] = idx; + return idx; +} + +static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx) +{ + struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; + + __clear_bit(idx, map->agg_idx_bmap); +} + +static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id) +{ + struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; + + return map->agg_id_tbl[agg_id]; +} + static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, struct rx_tpa_start_cmp *tpa_start, struct rx_tpa_start_cmp_ext *tpa_start1) { - u8 agg_id = TPA_START_AGG_ID(tpa_start); - u16 cons, prod; - struct bnxt_tpa_info *tpa_info; struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; + struct bnxt_tpa_info *tpa_info; + u16 cons, prod, agg_id; struct rx_bd *prod_bd; dma_addr_t mapping; + if (bp->flags & BNXT_FLAG_CHIP_P5) { + agg_id = TPA_START_AGG_ID_P5(tpa_start); + agg_id = bnxt_alloc_agg_idx(rxr, agg_id); + } else { + agg_id = TPA_START_AGG_ID(tpa_start); + } cons = tpa_start->rx_tpa_start_cmp_opaque; prod = rxr->rx_prod; cons_rx_buf = &rxr->rx_buf_ring[cons]; prod_rx_buf = &rxr->rx_buf_ring[prod]; tpa_info = &rxr->rx_tpa[agg_id]; - if (unlikely(cons != rxr->rx_next_cons)) { - netdev_warn(bp->dev, "TPA cons %x != expected cons %x\n", - cons, rxr->rx_next_cons); + if (unlikely(cons != rxr->rx_next_cons || + TPA_START_ERROR(tpa_start))) { + netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n", + cons, rxr->rx_next_cons, + TPA_START_ERROR_CODE(tpa_start1)); bnxt_sched_reset(bp, rxr); return; } @@ -1184,6 +1272,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2); tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata); tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info); + tpa_info->agg_count = 0; rxr->rx_prod = NEXT_RX(prod); cons = 
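/*
 * bnxt_alloc_agg_idx() above hashes the hardware agg_id into a preferred
 * slot and falls back to the first free bit on collision.  A stand-alone
 * sketch of that bitmap pattern, with a hypothetical slot count:
 */
#include <linux/bitmap.h>

#define MY_SLOTS 256					/* hypothetical */
static DECLARE_BITMAP(my_bmap, MY_SLOTS);

static u16 my_alloc_slot(u16 agg_id)
{
	u16 idx = agg_id & (MY_SLOTS - 1);		/* preferred slot */

	if (test_bit(idx, my_bmap))
		idx = find_first_zero_bit(my_bmap, MY_SLOTS);
	__set_bit(idx, my_bmap);	/* released later via __clear_bit() */
	return idx;
}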
NEXT_RX(cons); @@ -1195,12 +1284,36 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, cons_rx_buf->data = NULL; } -static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 cp_cons, - u32 agg_bufs) +static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs) { if (agg_bufs) - bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs); + bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true); +} + +#ifdef CONFIG_INET +static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto) +{ + struct udphdr *uh = NULL; + + if (ip_proto == htons(ETH_P_IP)) { + struct iphdr *iph = (struct iphdr *)skb->data; + + if (iph->protocol == IPPROTO_UDP) + uh = (struct udphdr *)(iph + 1); + } else { + struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; + + if (iph->nexthdr == IPPROTO_UDP) + uh = (struct udphdr *)(iph + 1); + } + if (uh) { + if (uh->check) + skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM; + else + skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; + } } +#endif static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info, int payload_off, int tcp_ts, @@ -1259,28 +1372,39 @@ static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info, } if (inner_mac_off) { /* tunnel */ - struct udphdr *uh = NULL; __be16 proto = *((__be16 *)(skb->data + outer_ip_off - ETH_HLEN - 2)); - if (proto == htons(ETH_P_IP)) { - struct iphdr *iph = (struct iphdr *)skb->data; + bnxt_gro_tunnel(skb, proto); + } +#endif + return skb; +} - if (iph->protocol == IPPROTO_UDP) - uh = (struct udphdr *)(iph + 1); - } else { - struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; +static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info, + int payload_off, int tcp_ts, + struct sk_buff *skb) +{ +#ifdef CONFIG_INET + u16 outer_ip_off, inner_ip_off, inner_mac_off; + u32 hdr_info = tpa_info->hdr_info; + int iphdr_len, nw_off; - if (iph->nexthdr == IPPROTO_UDP) - uh = (struct udphdr *)(iph + 1); - } - if (uh) { - if (uh->check) - skb_shinfo(skb)->gso_type |= - SKB_GSO_UDP_TUNNEL_CSUM; - else - skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; - } + inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info); + inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info); + outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info); + + nw_off = inner_ip_off - ETH_HLEN; + skb_set_network_header(skb, nw_off); + iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ? 
+ sizeof(struct ipv6hdr) : sizeof(struct iphdr); + skb_set_transport_header(skb, nw_off + iphdr_len); + + if (inner_mac_off) { /* tunnel */ + __be16 proto = *((__be16 *)(skb->data + outer_ip_off - + ETH_HLEN - 2)); + + bnxt_gro_tunnel(skb, proto); } #endif return skb; @@ -1327,28 +1451,8 @@ static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info, return NULL; } - if (nw_off) { /* tunnel */ - struct udphdr *uh = NULL; - - if (skb->protocol == htons(ETH_P_IP)) { - struct iphdr *iph = (struct iphdr *)skb->data; - - if (iph->protocol == IPPROTO_UDP) - uh = (struct udphdr *)(iph + 1); - } else { - struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; - - if (iph->nexthdr == IPPROTO_UDP) - uh = (struct udphdr *)(iph + 1); - } - if (uh) { - if (uh->check) - skb_shinfo(skb)->gso_type |= - SKB_GSO_UDP_TUNNEL_CSUM; - else - skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; - } - } + if (nw_off) /* tunnel */ + bnxt_gro_tunnel(skb, skb->protocol); #endif return skb; } @@ -1371,9 +1475,10 @@ static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp, skb_shinfo(skb)->gso_size = le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len); skb_shinfo(skb)->gso_type = tpa_info->gso_type; - payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) & - RX_TPA_END_CMP_PAYLOAD_OFFSET) >> - RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT; + if (bp->flags & BNXT_FLAG_CHIP_P5) + payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1); + else + payload_off = TPA_END_PAYLOAD_OFF(tpa_end); skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb); if (likely(skb)) tcp_gro_complete(skb); @@ -1401,14 +1506,14 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, { struct bnxt_napi *bnapi = cpr->bnapi; struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; - u8 agg_id = TPA_END_AGG_ID(tpa_end); u8 *data_ptr, agg_bufs; - u16 cp_cons = RING_CMP(*raw_cons); unsigned int len; struct bnxt_tpa_info *tpa_info; dma_addr_t mapping; struct sk_buff *skb; + u16 idx = 0, agg_id; void *data; + bool gro; if (unlikely(bnapi->in_reset)) { int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end); @@ -1418,26 +1523,43 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, return NULL; } - tpa_info = &rxr->rx_tpa[agg_id]; + if (bp->flags & BNXT_FLAG_CHIP_P5) { + agg_id = TPA_END_AGG_ID_P5(tpa_end); + agg_id = bnxt_lookup_agg_idx(rxr, agg_id); + agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1); + tpa_info = &rxr->rx_tpa[agg_id]; + if (unlikely(agg_bufs != tpa_info->agg_count)) { + netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n", + agg_bufs, tpa_info->agg_count); + agg_bufs = tpa_info->agg_count; + } + tpa_info->agg_count = 0; + *event |= BNXT_AGG_EVENT; + bnxt_free_agg_idx(rxr, agg_id); + idx = agg_id; + gro = !!(bp->flags & BNXT_FLAG_GRO); + } else { + agg_id = TPA_END_AGG_ID(tpa_end); + agg_bufs = TPA_END_AGG_BUFS(tpa_end); + tpa_info = &rxr->rx_tpa[agg_id]; + idx = RING_CMP(*raw_cons); + if (agg_bufs) { + if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons)) + return ERR_PTR(-EBUSY); + + *event |= BNXT_AGG_EVENT; + idx = NEXT_CMP(idx); + } + gro = !!TPA_END_GRO(tpa_end); + } data = tpa_info->data; data_ptr = tpa_info->data_ptr; prefetch(data_ptr); len = tpa_info->len; mapping = tpa_info->mapping; - agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) & - RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT; - - if (agg_bufs) { - if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons)) - return ERR_PTR(-EBUSY); - - *event |= BNXT_AGG_EVENT; - cp_cons = NEXT_CMP(cp_cons); - } - if (unlikely(agg_bufs > MAX_SKB_FRAGS 
|| TPA_END_ERRORS(tpa_end1))) { - bnxt_abort_tpa(cpr, cp_cons, agg_bufs); + bnxt_abort_tpa(cpr, idx, agg_bufs); if (agg_bufs > MAX_SKB_FRAGS) netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n", agg_bufs, (int)MAX_SKB_FRAGS); @@ -1447,7 +1569,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, if (len <= bp->rx_copy_thresh) { skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping); if (!skb) { - bnxt_abort_tpa(cpr, cp_cons, agg_bufs); + bnxt_abort_tpa(cpr, idx, agg_bufs); return NULL; } } else { @@ -1456,7 +1578,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC); if (!new_data) { - bnxt_abort_tpa(cpr, cp_cons, agg_bufs); + bnxt_abort_tpa(cpr, idx, agg_bufs); return NULL; } @@ -1471,7 +1593,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, if (!skb) { kfree(data); - bnxt_abort_tpa(cpr, cp_cons, agg_bufs); + bnxt_abort_tpa(cpr, idx, agg_bufs); return NULL; } skb_reserve(skb, bp->rx_offset); @@ -1479,7 +1601,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, } if (agg_bufs) { - skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs); + skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true); if (!skb) { /* Page reuse already handled by bnxt_rx_pages(). */ return NULL; @@ -1508,12 +1630,24 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3; } - if (TPA_END_GRO(tpa_end)) + if (gro) skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb); return skb; } +static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, + struct rx_agg_cmp *rx_agg) +{ + u16 agg_id = TPA_AGG_AGG_ID(rx_agg); + struct bnxt_tpa_info *tpa_info; + + agg_id = bnxt_lookup_agg_idx(rxr, agg_id); + tpa_info = &rxr->rx_tpa[agg_id]; + BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS); + tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg; +} + static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi, struct sk_buff *skb) { @@ -1555,6 +1689,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, rxcmp = (struct rx_cmp *) &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; + cmp_type = RX_CMP_TYPE(rxcmp); + + if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) { + bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp); + goto next_rx_no_prod_no_len; + } + tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); cp_cons = RING_CMP(tmp_raw_cons); rxcmp1 = (struct rx_cmp_ext *) @@ -1563,8 +1704,6 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) return -EBUSY; - cmp_type = RX_CMP_TYPE(rxcmp); - prod = rxr->rx_prod; if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) { @@ -1623,7 +1762,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, bnxt_reuse_rx_data(rxr, cons, data); if (agg_bufs) - bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs); + bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs, + false); rc = -EIO; if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) { @@ -1646,7 +1786,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, bnxt_reuse_rx_data(rxr, cons, data); if (!skb) { if (agg_bufs) - bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs); + bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, + agg_bufs, false); rc = -ENOMEM; goto next_rx; } @@ -1666,7 +1807,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, } if (agg_bufs) { - skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs); + skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, 
agg_bufs, false); if (!skb) { rc = -ENOMEM; goto next_rx; @@ -1765,6 +1906,33 @@ static int bnxt_force_rx_discard(struct bnxt *bp, return bnxt_rx_pkt(bp, cpr, raw_cons, event); } +u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx) +{ + struct bnxt_fw_health *fw_health = bp->fw_health; + u32 reg = fw_health->regs[reg_idx]; + u32 reg_type, reg_off, val = 0; + + reg_type = BNXT_FW_HEALTH_REG_TYPE(reg); + reg_off = BNXT_FW_HEALTH_REG_OFF(reg); + switch (reg_type) { + case BNXT_FW_HEALTH_REG_TYPE_CFG: + pci_read_config_dword(bp->pdev, reg_off, &val); + break; + case BNXT_FW_HEALTH_REG_TYPE_GRC: + reg_off = fw_health->mapped_regs[reg_idx]; + /* fall through */ + case BNXT_FW_HEALTH_REG_TYPE_BAR0: + val = readl(bp->bar0 + reg_off); + break; + case BNXT_FW_HEALTH_REG_TYPE_BAR1: + val = readl(bp->bar1 + reg_off); + break; + } + if (reg_idx == BNXT_FW_RESET_INPROG_REG) + val &= fw_health->fw_reset_inprog_reg_mask; + return val; +} + #define BNXT_GET_EVENT_PORT(data) \ ((data) & \ ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK) @@ -1820,6 +1988,55 @@ static int bnxt_async_event_process(struct bnxt *bp, goto async_event_process_exit; set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event); break; + case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: { + u32 data1 = le32_to_cpu(cmpl->event_data1); + + bp->fw_reset_timestamp = jiffies; + bp->fw_reset_min_dsecs = cmpl->timestamp_lo; + if (!bp->fw_reset_min_dsecs) + bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS; + bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi); + if (!bp->fw_reset_max_dsecs) + bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS; + if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) { + netdev_warn(bp->dev, "Firmware fatal reset event received\n"); + set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); + } else { + netdev_warn(bp->dev, "Firmware non-fatal reset event received, max wait time %d msec\n", + bp->fw_reset_max_dsecs * 100); + } + set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event); + break; + } + case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: { + struct bnxt_fw_health *fw_health = bp->fw_health; + u32 data1 = le32_to_cpu(cmpl->event_data1); + + if (!fw_health) + goto async_event_process_exit; + + fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1); + fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1); + if (!fw_health->enabled) + break; + + if (netif_msg_drv(bp)) + netdev_info(bp->dev, "Error recovery info: error recovery[%d], master[%d], reset count[0x%x], health status: 0x%x\n", + fw_health->enabled, fw_health->master, + bnxt_fw_health_readl(bp, + BNXT_FW_RESET_CNT_REG), + bnxt_fw_health_readl(bp, + BNXT_FW_HEALTH_REG)); + fw_health->tmr_multiplier = + DIV_ROUND_UP(fw_health->polling_dsecs * HZ, + bp->current_interval * 10); + fw_health->tmr_counter = fw_health->tmr_multiplier; + fw_health->last_fw_heartbeat = + bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); + fw_health->last_fw_reset_cnt = + bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); + goto async_event_process_exit; + } default: goto async_event_process_exit; } @@ -2325,10 +2542,11 @@ static void bnxt_free_rx_skbs(struct bnxt *bp) max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT; for (i = 0; i < bp->rx_nr_rings; i++) { struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; + struct bnxt_tpa_idx_map *map; int j; if (rxr->rx_tpa) { - for (j = 0; j < MAX_TPA; j++) { + for (j = 0; j < bp->max_tpa; j++) { struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[j]; u8 *data = tpa_info->data; @@ -2395,6 +2613,9 @@ static void bnxt_free_rx_skbs(struct bnxt *bp) 
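/*
 * bnxt_fw_health_readl() above dispatches on the register type encoded in
 * the value; GRC registers are read through a BAR0 window that
 * bnxt_map_fw_health_regs() programmed beforehand.  The general windowing
 * idea, with entirely hypothetical register names and a 4K-aligned base:
 */
	writel(reg & ~0xfffU, bar0 + MY_WINDOW_SELECT);	/* aim the window */
	val = readl(bar0 + MY_WINDOW_APERTURE + (reg & 0xfff));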
__free_page(rxr->rx_page); rxr->rx_page = NULL; } + map = rxr->rx_tpa_idx_map; + if (map) + memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap)); } } @@ -2483,6 +2704,61 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) return 0; } +static void bnxt_free_tpa_info(struct bnxt *bp) +{ + int i; + + for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; + + kfree(rxr->rx_tpa_idx_map); + rxr->rx_tpa_idx_map = NULL; + if (rxr->rx_tpa) { + kfree(rxr->rx_tpa[0].agg_arr); + rxr->rx_tpa[0].agg_arr = NULL; + } + kfree(rxr->rx_tpa); + rxr->rx_tpa = NULL; + } +} + +static int bnxt_alloc_tpa_info(struct bnxt *bp) +{ + int i, j, total_aggs = 0; + + bp->max_tpa = MAX_TPA; + if (bp->flags & BNXT_FLAG_CHIP_P5) { + if (!bp->max_tpa_v2) + return 0; + bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5); + total_aggs = bp->max_tpa * MAX_SKB_FRAGS; + } + + for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; + struct rx_agg_cmp *agg; + + rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info), + GFP_KERNEL); + if (!rxr->rx_tpa) + return -ENOMEM; + + if (!(bp->flags & BNXT_FLAG_CHIP_P5)) + continue; + agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL); + rxr->rx_tpa[0].agg_arr = agg; + if (!agg) + return -ENOMEM; + for (j = 1; j < bp->max_tpa; j++) + rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS; + rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map), + GFP_KERNEL); + if (!rxr->rx_tpa_idx_map) + return -ENOMEM; + } + return 0; +} + static void bnxt_free_rx_rings(struct bnxt *bp) { int i; @@ -2490,6 +2766,7 @@ static void bnxt_free_rx_rings(struct bnxt *bp) if (!bp->rx_ring) return; + bnxt_free_tpa_info(bp); for (i = 0; i < bp->rx_nr_rings; i++) { struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; struct bnxt_ring_struct *ring; @@ -2503,9 +2780,6 @@ static void bnxt_free_rx_rings(struct bnxt *bp) page_pool_destroy(rxr->page_pool); rxr->page_pool = NULL; - kfree(rxr->rx_tpa); - rxr->rx_tpa = NULL; - kfree(rxr->rx_agg_bmap); rxr->rx_agg_bmap = NULL; @@ -2539,7 +2813,7 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp, static int bnxt_alloc_rx_rings(struct bnxt *bp) { - int i, rc, agg_rings = 0, tpa_rings = 0; + int i, rc = 0, agg_rings = 0; if (!bp->rx_ring) return -ENOMEM; @@ -2547,9 +2821,6 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp) if (bp->flags & BNXT_FLAG_AGG_RINGS) agg_rings = 1; - if (bp->flags & BNXT_FLAG_TPA) - tpa_rings = 1; - for (i = 0; i < bp->rx_nr_rings; i++) { struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; struct bnxt_ring_struct *ring; @@ -2591,17 +2862,11 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp) rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL); if (!rxr->rx_agg_bmap) return -ENOMEM; - - if (tpa_rings) { - rxr->rx_tpa = kcalloc(MAX_TPA, - sizeof(struct bnxt_tpa_info), - GFP_KERNEL); - if (!rxr->rx_tpa) - return -ENOMEM; - } } } - return 0; + if (bp->flags & BNXT_FLAG_TPA) + rc = bnxt_alloc_tpa_info(bp); + return rc; } static void bnxt_free_tx_rings(struct bnxt *bp) @@ -2953,7 +3218,7 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) u8 *data; dma_addr_t mapping; - for (i = 0; i < MAX_TPA; i++) { + for (i = 0; i < bp->max_tpa; i++) { data = __bnxt_alloc_rx_data(bp, &mapping, GFP_KERNEL); if (!data) @@ -3376,6 +3641,9 @@ static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp) { struct pci_dev *pdev = bp->pdev; + if (bp->hwrm_cmd_kong_resp_addr) + return 0; + bp->hwrm_cmd_kong_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, 
&bp->hwrm_cmd_kong_resp_dma_addr, @@ -3415,6 +3683,9 @@ static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp) { struct pci_dev *pdev = bp->pdev; + if (bp->hwrm_short_cmd_req_addr) + return 0; + bp->hwrm_short_cmd_req_addr = dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len, &bp->hwrm_short_cmd_req_dma_addr, @@ -3468,7 +3739,7 @@ static void bnxt_free_ring_stats(struct bnxt *bp) if (!bp->bnapi) return; - size = sizeof(struct ctx_hw_stats); + size = bp->hw_ring_stats_size; for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; @@ -3487,7 +3758,7 @@ static int bnxt_alloc_stats(struct bnxt *bp) u32 size, i; struct pci_dev *pdev = bp->pdev; - size = sizeof(struct ctx_hw_stats); + size = bp->hw_ring_stats_size; for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; @@ -3869,6 +4140,32 @@ void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type, req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr); } +static int bnxt_hwrm_to_stderr(u32 hwrm_err) +{ + switch (hwrm_err) { + case HWRM_ERR_CODE_SUCCESS: + return 0; + case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED: + return -EACCES; + case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR: + return -ENOSPC; + case HWRM_ERR_CODE_INVALID_PARAMS: + case HWRM_ERR_CODE_INVALID_FLAGS: + case HWRM_ERR_CODE_INVALID_ENABLES: + case HWRM_ERR_CODE_UNSUPPORTED_TLV: + case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR: + return -EINVAL; + case HWRM_ERR_CODE_NO_BUFFER: + return -ENOMEM; + case HWRM_ERR_CODE_HOT_RESET_PROGRESS: + return -EAGAIN; + case HWRM_ERR_CODE_CMD_NOT_SUPPORTED: + return -EOPNOTSUPP; + default: + return -EIO; + } +} + static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, int timeout, bool silent) { @@ -3886,6 +4183,9 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM; u16 dst = BNXT_HWRM_CHNL_CHIMP; + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) + return -EBUSY; + if (msg_len > BNXT_HWRM_MAX_REQ_LEN) { if (msg_len > bp->hwrm_max_ext_req_len || !bp->hwrm_short_cmd_req_addr) @@ -3950,6 +4250,9 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, /* Ring channel doorbell */ writel(1, bp->bar0 + doorbell_offset); + if (!pci_is_enabled(bp->pdev)) + return 0; + if (!timeout) timeout = DFLT_HWRM_CMD_TIMEOUT; /* convert timeout to usec */ @@ -3981,9 +4284,10 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, } if (bp->hwrm_intr_seq_id != (u16)~seq_id) { - netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n", - le16_to_cpu(req->req_type)); - return -1; + if (!silent) + netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n", + le16_to_cpu(req->req_type)); + return -EBUSY; } len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> HWRM_RESP_LEN_SFT; @@ -4007,11 +4311,12 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, } if (i >= tmo_count) { - netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n", - HWRM_TOTAL_TIMEOUT(i), - le16_to_cpu(req->req_type), - le16_to_cpu(req->seq_id), len); - return -1; + if (!silent) + netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n", + HWRM_TOTAL_TIMEOUT(i), + le16_to_cpu(req->req_type), + le16_to_cpu(req->seq_id), len); + return -EBUSY; } /* Last byte of resp contains valid bit */ @@ -4025,11 +4330,13 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, } if (j >= HWRM_VALID_BIT_DELAY_USEC) { - netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} 
len:%d v:%d\n", - HWRM_TOTAL_TIMEOUT(i), - le16_to_cpu(req->req_type), - le16_to_cpu(req->seq_id), len, *valid); - return -1; + if (!silent) + netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n", + HWRM_TOTAL_TIMEOUT(i), + le16_to_cpu(req->req_type), + le16_to_cpu(req->seq_id), len, + *valid); + return -EBUSY; } } @@ -4043,7 +4350,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n", le16_to_cpu(resp->req_type), le16_to_cpu(resp->seq_id), rc); - return rc; + return bnxt_hwrm_to_stderr(rc); } int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) @@ -4092,9 +4399,14 @@ int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap, cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); memset(async_events_bmap, 0, sizeof(async_events_bmap)); - for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) - __set_bit(bnxt_async_events_arr[i], async_events_bmap); + for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) { + u16 event_id = bnxt_async_events_arr[i]; + if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY && + !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) + continue; + __set_bit(bnxt_async_events_arr[i], async_events_bmap); + } if (bmap && bmap_size) { for (i = 0; i < bmap_size; i++) { if (test_bit(i, bmap)) @@ -4112,6 +4424,7 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp) { struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr; struct hwrm_func_drv_rgtr_input req = {0}; + u32 flags; int rc; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1); @@ -4121,7 +4434,11 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp) FUNC_DRV_RGTR_REQ_ENABLES_VER); req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX); - req.flags = cpu_to_le32(FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE); + flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE | + FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT; + if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) + flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT; + req.flags = cpu_to_le32(flags); req.ver_maj_8b = DRV_VER_MAJ; req.ver_min_8b = DRV_VER_MIN; req.ver_upd_8b = DRV_VER_UPD; @@ -4156,10 +4473,8 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp) mutex_lock(&bp->hwrm_cmd_lock); rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (rc) - rc = -EIO; - else if (resp->flags & - cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED)) + if (!rc && (resp->flags & + cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))) bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE; mutex_unlock(&bp->hwrm_cmd_lock); return rc; @@ -4414,6 +4729,7 @@ static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp) static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) { struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; + u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX; struct hwrm_vnic_tpa_cfg_input req = {0}; if (vnic->fw_vnic_id == INVALID_HW_RING_ID) @@ -4453,9 +4769,14 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) nsegs = (MAX_SKB_FRAGS - n) / n; } - segs = ilog2(nsegs); + if (bp->flags & BNXT_FLAG_CHIP_P5) { + segs = MAX_TPA_SEGS_P5; + max_aggs = bp->max_tpa; + } else { + segs = ilog2(nsegs); + } req.max_agg_segs = cpu_to_le16(segs); - req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX); + req.max_aggs = cpu_to_le16(max_aggs); req.min_agg_len = cpu_to_le32(512); } @@ -4576,7 +4897,7 @@ static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 
vnic_id, bool set_rss) } rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) - return -EIO; + return rc; } return 0; } @@ -4739,8 +5060,6 @@ static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id) cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id); rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (rc) - return rc; bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID; } return rc; @@ -4800,6 +5119,8 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) struct hwrm_vnic_qcaps_input req = {0}; int rc; + bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats); + bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP); if (bp->hwrm_spec_code < 0x10600) return 0; @@ -4815,6 +5136,10 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) if (flags & VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP) bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP; + bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported); + if (bp->max_tpa_v2) + bp->hw_ring_stats_size = + sizeof(struct ctx_hw_stats_ext); } mutex_unlock(&bp->hwrm_cmd_lock); return rc; @@ -4874,8 +5199,6 @@ static int bnxt_hwrm_ring_grp_free(struct bnxt *bp) rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (rc) - break; bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; } mutex_unlock(&bp->hwrm_cmd_lock); @@ -5194,6 +5517,9 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp, struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr; u16 error_code; + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) + return 0; + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1); req.ring_type = ring_type; req.ring_id = cpu_to_le16(ring->fw_ring_id); @@ -5331,7 +5657,7 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp) rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) { mutex_unlock(&bp->hwrm_cmd_lock); - return -EIO; + return rc; } hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings); @@ -5495,7 +5821,7 @@ bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) - return -ENOMEM; + return rc; if (bp->hwrm_spec_code < 0x10601) bp->hw_resc.resv_tx_rings = tx_rings; @@ -5520,7 +5846,7 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, cp_rings, stats, vnics); rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) - return -ENOMEM; + return rc; rc = bnxt_hwrm_get_rings(bp); return rc; @@ -5701,9 +6027,7 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, req.flags = cpu_to_le32(flags); rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (rc) - return -ENOMEM; - return 0; + return rc; } static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, @@ -5731,9 +6055,7 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, req.flags = cpu_to_le32(flags); rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (rc) - return -ENOMEM; - return 0; + return rc; } static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings, @@ -5995,8 +6317,6 @@ static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp) rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (rc) - break; cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; } @@ -6016,6 +6336,7 @@ static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1); + req.stats_dma_length = 
cpu_to_le16(bp->hw_ring_stats_size); req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000); mutex_lock(&bp->hwrm_cmd_lock); @@ -6057,6 +6378,8 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp) struct bnxt_vf_info *vf = &bp->vf; vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK; + } else { + bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs); } #endif flags = le16_to_cpu(resp->flags); @@ -6292,8 +6615,6 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables) } req.flags = cpu_to_le32(flags); rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (rc) - rc = -EIO; return rc; } @@ -6555,10 +6876,8 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all) mutex_lock(&bp->hwrm_cmd_lock); rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (rc) { - rc = -EIO; + if (rc) goto hwrm_func_resc_qcaps_exit; - } hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs); if (!all) @@ -6626,6 +6945,10 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED; if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED) bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED; + if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE) + bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY; + if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD) + bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD; bp->tx_push_thresh = 0; if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) @@ -6657,6 +6980,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows); pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows); pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows); + bp->flags &= ~BNXT_FLAG_WOL_CAP; if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED) bp->flags |= BNXT_FLAG_WOL_CAP; } else { @@ -6726,6 +7050,103 @@ hwrm_cfa_adv_qcaps_exit: return rc; } +static int bnxt_map_fw_health_regs(struct bnxt *bp) +{ + struct bnxt_fw_health *fw_health = bp->fw_health; + u32 reg_base = 0xffffffff; + int i; + + /* Only pre-map the monitoring GRC registers using window 3 */ + for (i = 0; i < 4; i++) { + u32 reg = fw_health->regs[i]; + + if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC) + continue; + if (reg_base == 0xffffffff) + reg_base = reg & BNXT_GRC_BASE_MASK; + if ((reg & BNXT_GRC_BASE_MASK) != reg_base) + return -ERANGE; + fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_BASE + + (reg & BNXT_GRC_OFFSET_MASK); + } + if (reg_base == 0xffffffff) + return 0; + + writel(reg_base, bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + + BNXT_FW_HEALTH_WIN_MAP_OFF); + return 0; +} + +static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp) +{ + struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + struct bnxt_fw_health *fw_health = bp->fw_health; + struct hwrm_error_recovery_qcfg_input req = {0}; + int rc, i; + + if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG, -1, -1); + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + goto err_recovery_out; + if (!fw_health) { + fw_health = kzalloc(sizeof(*fw_health), GFP_KERNEL); + bp->fw_health = fw_health; + if (!fw_health) { + rc = -ENOMEM; + goto err_recovery_out; + } + } + fw_health->flags = le32_to_cpu(resp->flags); + if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) && + !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) { + rc = -EINVAL; + goto 
err_recovery_out; + } + fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq); + fw_health->master_func_wait_dsecs = + le32_to_cpu(resp->master_func_wait_period); + fw_health->normal_func_wait_dsecs = + le32_to_cpu(resp->normal_func_wait_period); + fw_health->post_reset_wait_dsecs = + le32_to_cpu(resp->master_func_wait_period_after_reset); + fw_health->post_reset_max_wait_dsecs = + le32_to_cpu(resp->max_bailout_time_after_reset); + fw_health->regs[BNXT_FW_HEALTH_REG] = + le32_to_cpu(resp->fw_health_status_reg); + fw_health->regs[BNXT_FW_HEARTBEAT_REG] = + le32_to_cpu(resp->fw_heartbeat_reg); + fw_health->regs[BNXT_FW_RESET_CNT_REG] = + le32_to_cpu(resp->fw_reset_cnt_reg); + fw_health->regs[BNXT_FW_RESET_INPROG_REG] = + le32_to_cpu(resp->reset_inprogress_reg); + fw_health->fw_reset_inprog_reg_mask = + le32_to_cpu(resp->reset_inprogress_reg_mask); + fw_health->fw_reset_seq_cnt = resp->reg_array_cnt; + if (fw_health->fw_reset_seq_cnt >= 16) { + rc = -EINVAL; + goto err_recovery_out; + } + for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) { + fw_health->fw_reset_seq_regs[i] = + le32_to_cpu(resp->reset_reg[i]); + fw_health->fw_reset_seq_vals[i] = + le32_to_cpu(resp->reset_reg_val[i]); + fw_health->fw_reset_seq_delay_msec[i] = + resp->delay_after_reset[i]; + } +err_recovery_out: + mutex_unlock(&bp->hwrm_cmd_lock); + if (!rc) + rc = bnxt_map_fw_health_regs(bp); + if (rc) + bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; + return rc; +} + static int bnxt_hwrm_func_reset(struct bnxt *bp) { struct hwrm_func_reset_input req = {0}; @@ -6785,20 +7206,30 @@ qportcfg_exit: return rc; } -static int bnxt_hwrm_ver_get(struct bnxt *bp) +static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent) { - int rc; struct hwrm_ver_get_input req = {0}; - struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr; - u32 dev_caps_cfg; + int rc; - bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1); req.hwrm_intf_maj = HWRM_VERSION_MAJOR; req.hwrm_intf_min = HWRM_VERSION_MINOR; req.hwrm_intf_upd = HWRM_VERSION_UPDATE; + + rc = bnxt_hwrm_do_send_msg(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT, + silent); + return rc; +} + +static int bnxt_hwrm_ver_get(struct bnxt *bp) +{ + struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr; + u32 dev_caps_cfg; + int rc; + + bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = __bnxt_hwrm_ver_get(bp, false); if (rc) goto hwrm_ver_get_exit; @@ -7001,6 +7432,8 @@ static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa) if (set_tpa) tpa_flags = bp->flags & BNXT_FLAG_TPA; + else if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) + return 0; for (i = 0; i < bp->nr_vnics; i++) { rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags); if (rc) { @@ -7066,8 +7499,6 @@ static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode) else return -EINVAL; rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (rc) - rc = -EIO; return rc; } @@ -7087,8 +7518,6 @@ static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size) req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128; rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (rc) - rc = -EIO; return rc; } @@ -7971,6 +8400,9 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr; struct bnxt_link_info *link_info = &bp->link_info; + bp->flags &= ~BNXT_FLAG_EEE_CAP; + if (bp->test_info) + bp->test_info->flags &= 
~BNXT_TEST_FL_EXT_LPBK; if (bp->hwrm_spec_code < 0x10201) return 0; @@ -8292,11 +8724,14 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp) return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); } +static int bnxt_fw_init_one(struct bnxt *bp); + static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) { struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr; struct hwrm_func_drv_if_change_input req = {0}; - bool resc_reinit = false; + bool resc_reinit = false, fw_reset = false; + u32 flags = 0; int rc; if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE)) @@ -8307,26 +8742,57 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP); mutex_lock(&bp->hwrm_cmd_lock); rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (!rc && (resp->flags & - cpu_to_le32(FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE))) - resc_reinit = true; + if (!rc) + flags = le32_to_cpu(resp->flags); mutex_unlock(&bp->hwrm_cmd_lock); + if (rc) + return rc; - if (up && resc_reinit && BNXT_NEW_RM(bp)) { - struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + if (!up) + return 0; - rc = bnxt_hwrm_func_resc_qcaps(bp, true); - hw_resc->resv_cp_rings = 0; - hw_resc->resv_stat_ctxs = 0; - hw_resc->resv_irqs = 0; - hw_resc->resv_tx_rings = 0; - hw_resc->resv_rx_rings = 0; - hw_resc->resv_hw_ring_grps = 0; - hw_resc->resv_vnics = 0; - bp->tx_nr_rings = 0; - bp->rx_nr_rings = 0; + if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE) + resc_reinit = true; + if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE) + fw_reset = true; + + if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) { + netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n"); + return -ENODEV; } - return rc; + if (resc_reinit || fw_reset) { + if (fw_reset) { + rc = bnxt_fw_init_one(bp); + if (rc) { + set_bit(BNXT_STATE_ABORT_ERR, &bp->state); + return rc; + } + bnxt_clear_int_mode(bp); + rc = bnxt_init_int_mode(bp); + if (rc) { + netdev_err(bp->dev, "init int mode failed\n"); + return rc; + } + set_bit(BNXT_STATE_FW_RESET_DET, &bp->state); + } + if (BNXT_NEW_RM(bp)) { + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + + rc = bnxt_hwrm_func_resc_qcaps(bp, true); + hw_resc->resv_cp_rings = 0; + hw_resc->resv_stat_ctxs = 0; + hw_resc->resv_irqs = 0; + hw_resc->resv_tx_rings = 0; + hw_resc->resv_rx_rings = 0; + hw_resc->resv_hw_ring_grps = 0; + hw_resc->resv_vnics = 0; + if (!fw_reset) { + bp->tx_nr_rings = 0; + bp->rx_nr_rings = 0; + } + } + } + return 0; } static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) @@ -8336,6 +8802,7 @@ static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) struct bnxt_pf_info *pf = &bp->pf; int rc; + bp->num_leds = 0; if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601) return 0; @@ -8430,6 +8897,7 @@ static void bnxt_get_wol_settings(struct bnxt *bp) { u16 handle = 0; + bp->wol = 0; if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP)) return; @@ -8476,6 +8944,9 @@ static void bnxt_hwmon_open(struct bnxt *bp) { struct pci_dev *pdev = bp->pdev; + if (bp->hwmon_dev) + return; + bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, DRV_MODULE_NAME, bp, bnxt_groups); @@ -8741,12 +9212,28 @@ static int bnxt_open(struct net_device *dev) struct bnxt *bp = netdev_priv(dev); int rc; - bnxt_hwrm_if_change(bp, true); - rc = __bnxt_open_nic(bp, true, true); + if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { + netdev_err(bp->dev, "A previous firmware reset did not complete, aborting\n"); + return -ENODEV; + } + + rc = bnxt_hwrm_if_change(bp, 
true); if (rc) + return rc; + rc = __bnxt_open_nic(bp, true, true); + if (rc) { bnxt_hwrm_if_change(bp, false); + } else { + if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state) && + BNXT_PF(bp)) { + struct bnxt_pf_info *pf = &bp->pf; + int n = pf->active_vfs; - bnxt_hwmon_open(bp); + if (n) + bnxt_cfg_hw_sriov(bp, &n, true); + } + bnxt_hwmon_open(bp); + } return rc; } @@ -8783,6 +9270,10 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bnxt_debug_dev_exit(bp); bnxt_disable_napi(bp); del_timer_sync(&bp->timer); + if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && + pci_is_enabled(bp->pdev)) + pci_disable_device(bp->pdev); + bnxt_free_skbs(bp); /* Save ring stats before shutdown */ @@ -8799,6 +9290,18 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) { int rc = 0; + if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { + /* If we get here, it means firmware reset is in progress + * while we are trying to close. We can safely proceed with + * the close because we are holding rtnl_lock(). Some firmware + * messages may fail as we proceed to close. We set the + * ABORT_ERR flag here so that the FW reset thread will later + * abort when it gets the rtnl_lock() and sees the flag. + */ + netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n"); + set_bit(BNXT_STATE_ABORT_ERR, &bp->state); + } + #ifdef CONFIG_BNXT_SRIOV if (bp->sriov_cfg) { rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait, @@ -9056,14 +9559,16 @@ static bool bnxt_uc_list_updated(struct bnxt *bp) static void bnxt_set_rx_mode(struct net_device *dev) { struct bnxt *bp = netdev_priv(dev); - struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; - u32 mask = vnic->rx_mask; + struct bnxt_vnic_info *vnic; bool mc_update = false; bool uc_update; + u32 mask; - if (!netif_running(dev)) + if (!test_bit(BNXT_STATE_OPEN, &bp->state)) return; + vnic = &bp->vnic_info[0]; + mask = vnic->rx_mask; mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS | CFA_L2_SET_RX_MASK_REQ_MASK_MCAST | CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST | @@ -9306,7 +9811,8 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features) if (changes & BNXT_FLAG_TPA) { update_tpa = true; if ((bp->flags & BNXT_FLAG_TPA) == 0 || - (flags & BNXT_FLAG_TPA) == 0) + (flags & BNXT_FLAG_TPA) == 0 || + (bp->flags & BNXT_FLAG_CHIP_P5)) re_init = true; } @@ -9316,9 +9822,8 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features) if (flags != bp->flags) { u32 old_flags = bp->flags; - bp->flags = flags; - if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { + bp->flags = flags; if (update_tpa) bnxt_set_ring_params(bp); return rc; @@ -9326,12 +9831,14 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features) if (re_init) { bnxt_close_nic(bp, false, false); + bp->flags = flags; if (update_tpa) bnxt_set_ring_params(bp); return bnxt_open_nic(bp, false, false); } if (update_tpa) { + bp->flags = flags; rc = bnxt_set_tpa(bp, (flags & BNXT_FLAG_TPA) ? 
true : false); @@ -9438,6 +9945,38 @@ static void bnxt_tx_timeout(struct net_device *dev) bnxt_queue_sp_work(bp); } +static void bnxt_fw_health_check(struct bnxt *bp) +{ + struct bnxt_fw_health *fw_health = bp->fw_health; + u32 val; + + if (!fw_health || !fw_health->enabled || + test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) + return; + + if (fw_health->tmr_counter) { + fw_health->tmr_counter--; + return; + } + + val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); + if (val == fw_health->last_fw_heartbeat) + goto fw_reset; + + fw_health->last_fw_heartbeat = val; + + val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); + if (val != fw_health->last_fw_reset_cnt) + goto fw_reset; + + fw_health->tmr_counter = fw_health->tmr_multiplier; + return; + +fw_reset: + set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event); + bnxt_queue_sp_work(bp); +} + static void bnxt_timer(struct timer_list *t) { struct bnxt *bp = from_timer(bp, t, timer); @@ -9449,6 +9988,9 @@ static void bnxt_timer(struct timer_list *t) if (atomic_read(&bp->intr_sem) != 0) goto bnxt_restart_timer; + if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) + bnxt_fw_health_check(bp); + if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) && bp->stats_coal_ticks) { set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event); @@ -9504,6 +10046,138 @@ static void bnxt_reset(struct bnxt *bp, bool silent) bnxt_rtnl_unlock_sp(bp); } +static void bnxt_fw_reset_close(struct bnxt *bp) +{ + __bnxt_close_nic(bp, true, false); + bnxt_ulp_irq_stop(bp); + bnxt_clear_int_mode(bp); + bnxt_hwrm_func_drv_unrgtr(bp); + bnxt_free_ctx_mem(bp); + kfree(bp->ctx); + bp->ctx = NULL; +} + +static bool is_bnxt_fw_ok(struct bnxt *bp) +{ + struct bnxt_fw_health *fw_health = bp->fw_health; + bool no_heartbeat = false, has_reset = false; + u32 val; + + val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); + if (val == fw_health->last_fw_heartbeat) + no_heartbeat = true; + + val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); + if (val != fw_health->last_fw_reset_cnt) + has_reset = true; + + if (!no_heartbeat && has_reset) + return true; + + return false; +} + +/* rtnl_lock is acquired before calling this function */ +static void bnxt_force_fw_reset(struct bnxt *bp) +{ + struct bnxt_fw_health *fw_health = bp->fw_health; + u32 wait_dsecs; + + if (!test_bit(BNXT_STATE_OPEN, &bp->state) || + test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) + return; + + set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + bnxt_fw_reset_close(bp); + wait_dsecs = fw_health->master_func_wait_dsecs; + if (fw_health->master) { + if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) + wait_dsecs = 0; + bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; + } else { + bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10; + wait_dsecs = fw_health->normal_func_wait_dsecs; + bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; + } + + bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs; + bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs; + bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10); +} + +void bnxt_fw_exception(struct bnxt *bp) +{ + set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); + bnxt_rtnl_lock_sp(bp); + bnxt_force_fw_reset(bp); + bnxt_rtnl_unlock_sp(bp); +} + +/* Returns the number of registered VFs, or 1 if VF configuration is pending, or + * < 0 on error. 
+ */ +static int bnxt_get_registered_vfs(struct bnxt *bp) +{ +#ifdef CONFIG_BNXT_SRIOV + int rc; + + if (!BNXT_PF(bp)) + return 0; + + rc = bnxt_hwrm_func_qcfg(bp); + if (rc) { + netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc); + return rc; + } + if (bp->pf.registered_vfs) + return bp->pf.registered_vfs; + if (bp->sriov_cfg) + return 1; +#endif + return 0; +} + +void bnxt_fw_reset(struct bnxt *bp) +{ + bnxt_rtnl_lock_sp(bp); + if (test_bit(BNXT_STATE_OPEN, &bp->state) && + !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { + int n = 0, tmo; + + set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + if (bp->pf.active_vfs && + !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) + n = bnxt_get_registered_vfs(bp); + if (n < 0) { + netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n", + n); + clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + dev_close(bp->dev); + goto fw_reset_exit; + } else if (n > 0) { + u16 vf_tmo_dsecs = n * 10; + + if (bp->fw_reset_max_dsecs < vf_tmo_dsecs) + bp->fw_reset_max_dsecs = vf_tmo_dsecs; + bp->fw_reset_state = + BNXT_FW_RESET_STATE_POLL_VF; + bnxt_queue_fw_reset_work(bp, HZ / 10); + goto fw_reset_exit; + } + bnxt_fw_reset_close(bp); + if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { + bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; + tmo = HZ / 10; + } else { + bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; + tmo = bp->fw_reset_min_dsecs * HZ / 10; + } + bnxt_queue_fw_reset_work(bp, tmo); + } +fw_reset_exit: + bnxt_rtnl_unlock_sp(bp); +} + static void bnxt_chk_missed_irq(struct bnxt *bp) { int i; @@ -9634,6 +10308,15 @@ static void bnxt_sp_task(struct work_struct *work) if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event)) bnxt_reset(bp, true); + if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) + bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT); + + if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) { + if (!is_bnxt_fw_ok(bp)) + bnxt_devlink_health_report(bp, + BNXT_FW_EXCEPTION_SP_EVENT); + } + smp_mb__before_atomic(); clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); } @@ -9728,6 +10411,336 @@ static void bnxt_init_dflt_coal(struct bnxt *bp) bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; } +static int bnxt_fw_init_one_p1(struct bnxt *bp) +{ + int rc; + + bp->fw_cap = 0; + rc = bnxt_hwrm_ver_get(bp); + if (rc) + return rc; + + if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) { + rc = bnxt_alloc_kong_hwrm_resources(bp); + if (rc) + bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL; + } + + if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || + bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) { + rc = bnxt_alloc_hwrm_short_cmd_req(bp); + if (rc) + return rc; + } + rc = bnxt_hwrm_func_reset(bp); + if (rc) + return -ENODEV; + + bnxt_hwrm_fw_set_time(bp); + return 0; +} + +static int bnxt_fw_init_one_p2(struct bnxt *bp) +{ + int rc; + + /* Get the MAX capabilities for this function */ + rc = bnxt_hwrm_func_qcaps(bp); + if (rc) { + netdev_err(bp->dev, "hwrm query capability failure rc: %x\n", + rc); + return -ENODEV; + } + + rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp); + if (rc) + netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n", + rc); + + rc = bnxt_hwrm_error_recovery_qcfg(bp); + if (rc) + netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n", + rc); + + rc = bnxt_hwrm_func_drv_rgtr(bp); + if (rc) + return -ENODEV; + + rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0); + if (rc) + return -ENODEV; + + bnxt_hwrm_func_qcfg(bp); + bnxt_hwrm_vnic_qcaps(bp); + 
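The bnxt_fw_reset() path above makes two scheduling decisions: it stretches the overall reset deadline when VFs are still registered (ten deciseconds, i.e. one second, per VF), and it picks the state machine entry point from the ERR_RECOVER_RELOAD capability. A minimal standalone sketch of those two decisions; the 10-dsec-per-VF rule and the state names come from the hunk, everything else is scaffolding:

#include <stdio.h>

enum next_state { POLL_VF, POLL_FW_DOWN, ENABLE_DEV };

static unsigned int scale_reset_deadline(unsigned int max_dsecs, int nr_vfs)
{
	unsigned int vf_tmo_dsecs = (unsigned int)nr_vfs * 10; /* 1 s per VF */

	return vf_tmo_dsecs > max_dsecs ? vf_tmo_dsecs : max_dsecs;
}

static enum next_state pick_entry_state(int nr_vfs, int can_poll_fw_down)
{
	if (nr_vfs > 0)
		return POLL_VF;	/* wait for VFs to unregister first */
	/* ERR_RECOVER_RELOAD firmware exposes a shutdown status we can poll */
	return can_poll_fw_down ? POLL_FW_DOWN : ENABLE_DEV;
}

int main(void)
{
	/* 8 VFs push the 60-dsec default deadline out to 80 dsecs */
	printf("deadline with 8 VFs: %u dsecs\n", scale_reset_deadline(60, 8));
	printf("entry state, no VFs, reload cap: %d\n", pick_entry_state(0, 1));
	return 0;
}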
bnxt_hwrm_port_led_qcaps(bp); + bnxt_ethtool_init(bp); + bnxt_dcb_init(bp); + return 0; +} + +static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp) +{ + bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP; + bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 | + VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 | + VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 | + VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; + if (BNXT_CHIP_P4(bp) && bp->hwrm_spec_code >= 0x10501) { + bp->flags |= BNXT_FLAG_UDP_RSS_CAP; + bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 | + VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; + } +} + +static void bnxt_set_dflt_rfs(struct bnxt *bp) +{ + struct net_device *dev = bp->dev; + + dev->hw_features &= ~NETIF_F_NTUPLE; + dev->features &= ~NETIF_F_NTUPLE; + bp->flags &= ~BNXT_FLAG_RFS; + if (bnxt_rfs_supported(bp)) { + dev->hw_features |= NETIF_F_NTUPLE; + if (bnxt_rfs_capable(bp)) { + bp->flags |= BNXT_FLAG_RFS; + dev->features |= NETIF_F_NTUPLE; + } + } +} + +static void bnxt_fw_init_one_p3(struct bnxt *bp) +{ + struct pci_dev *pdev = bp->pdev; + + bnxt_set_dflt_rss_hash_type(bp); + bnxt_set_dflt_rfs(bp); + + bnxt_get_wol_settings(bp); + if (bp->flags & BNXT_FLAG_WOL_CAP) + device_set_wakeup_enable(&pdev->dev, bp->wol); + else + device_set_wakeup_capable(&pdev->dev, false); + + bnxt_hwrm_set_cache_line_size(bp, cache_line_size()); + bnxt_hwrm_coal_params_qcaps(bp); +} + +static int bnxt_fw_init_one(struct bnxt *bp) +{ + int rc; + + rc = bnxt_fw_init_one_p1(bp); + if (rc) { + netdev_err(bp->dev, "Firmware init phase 1 failed\n"); + return rc; + } + rc = bnxt_fw_init_one_p2(bp); + if (rc) { + netdev_err(bp->dev, "Firmware init phase 2 failed\n"); + return rc; + } + rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false); + if (rc) + return rc; + bnxt_fw_init_one_p3(bp); + return 0; +} + +static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx) +{ + struct bnxt_fw_health *fw_health = bp->fw_health; + u32 reg = fw_health->fw_reset_seq_regs[reg_idx]; + u32 val = fw_health->fw_reset_seq_vals[reg_idx]; + u32 reg_type, reg_off, delay_msecs; + + delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx]; + reg_type = BNXT_FW_HEALTH_REG_TYPE(reg); + reg_off = BNXT_FW_HEALTH_REG_OFF(reg); + switch (reg_type) { + case BNXT_FW_HEALTH_REG_TYPE_CFG: + pci_write_config_dword(bp->pdev, reg_off, val); + break; + case BNXT_FW_HEALTH_REG_TYPE_GRC: + writel(reg_off & BNXT_GRC_BASE_MASK, + bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); + reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000; + /* fall through */ + case BNXT_FW_HEALTH_REG_TYPE_BAR0: + writel(val, bp->bar0 + reg_off); + break; + case BNXT_FW_HEALTH_REG_TYPE_BAR1: + writel(val, bp->bar1 + reg_off); + break; + } + if (delay_msecs) { + pci_read_config_dword(bp->pdev, 0, &val); + msleep(delay_msecs); + } +} + +static void bnxt_reset_all(struct bnxt *bp) +{ + struct bnxt_fw_health *fw_health = bp->fw_health; + int i; + + if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) { + for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) + bnxt_fw_reset_writel(bp, i); + } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) { + struct hwrm_fw_reset_input req = {0}; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1); + req.resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr); + req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP; + req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP; + req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL; + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + netdev_warn(bp->dev, 
"Unable to reset FW rc=%d\n", rc); + } + bp->fw_reset_timestamp = jiffies; +} + +static void bnxt_fw_reset_task(struct work_struct *work) +{ + struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work); + int rc; + + if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { + netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n"); + return; + } + + switch (bp->fw_reset_state) { + case BNXT_FW_RESET_STATE_POLL_VF: { + int n = bnxt_get_registered_vfs(bp); + int tmo; + + if (n < 0) { + netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n", + n, jiffies_to_msecs(jiffies - + bp->fw_reset_timestamp)); + goto fw_reset_abort; + } else if (n > 0) { + if (time_after(jiffies, bp->fw_reset_timestamp + + (bp->fw_reset_max_dsecs * HZ / 10))) { + clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + bp->fw_reset_state = 0; + netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n", + n); + return; + } + bnxt_queue_fw_reset_work(bp, HZ / 10); + return; + } + bp->fw_reset_timestamp = jiffies; + rtnl_lock(); + bnxt_fw_reset_close(bp); + if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { + bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; + tmo = HZ / 10; + } else { + bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; + tmo = bp->fw_reset_min_dsecs * HZ / 10; + } + rtnl_unlock(); + bnxt_queue_fw_reset_work(bp, tmo); + return; + } + case BNXT_FW_RESET_STATE_POLL_FW_DOWN: { + u32 val; + + val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); + if (!(val & BNXT_FW_STATUS_SHUTDOWN) && + !time_after(jiffies, bp->fw_reset_timestamp + + (bp->fw_reset_max_dsecs * HZ / 10))) { + bnxt_queue_fw_reset_work(bp, HZ / 5); + return; + } + + if (!bp->fw_health->master) { + u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs; + + bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; + bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10); + return; + } + bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; + } + /* fall through */ + case BNXT_FW_RESET_STATE_RESET_FW: { + u32 wait_dsecs = bp->fw_health->post_reset_wait_dsecs; + + bnxt_reset_all(bp); + bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; + bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10); + return; + } + case BNXT_FW_RESET_STATE_ENABLE_DEV: + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) && + bp->fw_health) { + u32 val; + + val = bnxt_fw_health_readl(bp, + BNXT_FW_RESET_INPROG_REG); + if (val) + netdev_warn(bp->dev, "FW reset inprog %x after min wait time.\n", + val); + } + clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); + if (pci_enable_device(bp->pdev)) { + netdev_err(bp->dev, "Cannot re-enable PCI device\n"); + goto fw_reset_abort; + } + pci_set_master(bp->pdev); + bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW; + /* fall through */ + case BNXT_FW_RESET_STATE_POLL_FW: + bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT; + rc = __bnxt_hwrm_ver_get(bp, true); + if (rc) { + if (time_after(jiffies, bp->fw_reset_timestamp + + (bp->fw_reset_max_dsecs * HZ / 10))) { + netdev_err(bp->dev, "Firmware reset aborted\n"); + goto fw_reset_abort; + } + bnxt_queue_fw_reset_work(bp, HZ / 5); + return; + } + bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; + bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING; + /* fall through */ + case BNXT_FW_RESET_STATE_OPENING: + while (!rtnl_trylock()) { + bnxt_queue_fw_reset_work(bp, HZ / 10); + return; + } + rc = bnxt_open(bp->dev); + if (rc) { + netdev_err(bp->dev, "bnxt_open_nic() failed\n"); + 
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + dev_close(bp->dev); + } + bnxt_ulp_irq_restart(bp, rc); + rtnl_unlock(); + + bp->fw_reset_state = 0; + /* Make sure fw_reset_state is 0 before clearing the flag */ + smp_mb__before_atomic(); + clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + break; + } + return; + +fw_reset_abort: + clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + bp->fw_reset_state = 0; + rtnl_lock(); + dev_close(bp->dev); + rtnl_unlock(); +} + static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) { int rc; @@ -9790,6 +10803,7 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) pci_enable_pcie_error_reporting(pdev); INIT_WORK(&bp->sp_task, bnxt_sp_task); + INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task); spin_lock_init(&bp->ntp_fltr_lock); #if BITS_PER_LONG == 32 @@ -10333,7 +11347,7 @@ static void bnxt_remove_one(struct pci_dev *pdev) free_netdev(dev); } -static int bnxt_probe_phy(struct bnxt *bp) +static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt) { int rc = 0; struct bnxt_link_info *link_info = &bp->link_info; @@ -10344,8 +11358,6 @@ static int bnxt_probe_phy(struct bnxt *bp) rc); return rc; } - mutex_init(&bp->link_lock); - rc = bnxt_update_link(bp, false); if (rc) { netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n", @@ -10359,6 +11371,9 @@ static int bnxt_probe_phy(struct bnxt *bp) if (link_info->auto_link_speeds && !link_info->support_auto_speeds) link_info->support_auto_speeds = link_info->support_speeds; + if (!fw_dflt) + return 0; + /*initialize the ethool setting copy with NVM settings */ if (BNXT_AUTO_MODE(link_info->auto_mode)) { link_info->autoneg = BNXT_AUTONEG_SPEED; @@ -10379,7 +11394,7 @@ static int bnxt_probe_phy(struct bnxt *bp) link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH; else link_info->req_flow_ctrl = link_info->force_pause_setting; - return rc; + return 0; } static int bnxt_get_max_irq(struct pci_dev *pdev) @@ -10683,32 +11698,19 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto init_err_pci_clean; mutex_init(&bp->hwrm_cmd_lock); - rc = bnxt_hwrm_ver_get(bp); + mutex_init(&bp->link_lock); + + rc = bnxt_fw_init_one_p1(bp); if (rc) goto init_err_pci_clean; - if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) { - rc = bnxt_alloc_kong_hwrm_resources(bp); - if (rc) - bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL; - } - - if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || - bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) { - rc = bnxt_alloc_hwrm_short_cmd_req(bp); - if (rc) - goto init_err_pci_clean; - } - if (BNXT_CHIP_P5(bp)) bp->flags |= BNXT_FLAG_CHIP_P5; - rc = bnxt_hwrm_func_reset(bp); + rc = bnxt_fw_init_one_p2(bp); if (rc) goto init_err_pci_clean; - bnxt_hwrm_fw_set_time(bp); - dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | @@ -10746,41 +11748,14 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) bp->gro_func = bnxt_gro_func_5730x; if (BNXT_CHIP_P4(bp)) bp->gro_func = bnxt_gro_func_5731x; + else if (BNXT_CHIP_P5(bp)) + bp->gro_func = bnxt_gro_func_5750x; } if (!BNXT_CHIP_P4_PLUS(bp)) bp->flags |= BNXT_FLAG_DOUBLE_DB; - rc = bnxt_hwrm_func_drv_rgtr(bp); - if (rc) - goto init_err_pci_clean; - - rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0); - if (rc) - goto init_err_pci_clean; - bp->ulp_probe = bnxt_ulp_probe; - rc = bnxt_hwrm_queue_qportcfg(bp); - if (rc) { - netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n", - rc); 
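The bnxt_init_one() restructuring that begins above folds the formerly inline probe steps into the bnxt_fw_init_one_p1/p2/p3 helpers introduced earlier, so the same sequence can be replayed after a firmware reset. A sketch of the resulting ordering; the phase contents are summarized from the hunks and the stubs are placeholders:

static int fw_init_p1(void) { return 0; }	/* ver_get, func_reset, fw_set_time */
static int fw_init_p2(void) { return 0; }	/* qcaps, recovery qcfg, drv_rgtr   */
static void fw_init_p3(void) { }		/* RSS/RFS defaults, WoL, coalescing */

static int fw_init_one(void)
{
	int rc;

	rc = fw_init_p1();	/* can we talk to firmware at all?        */
	if (rc)
		return rc;
	rc = fw_init_p2();	/* discover capabilities, register driver */
	if (rc)
		return rc;
	fw_init_p3();		/* defaults that depend on capabilities   */
	return 0;
}

int main(void) { return fw_init_one(); }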
- rc = -1; - goto init_err_pci_clean; - } - /* Get the MAX capabilities for this function */ - rc = bnxt_hwrm_func_qcaps(bp); - if (rc) { - netdev_err(bp->dev, "hwrm query capability failure rc: %x\n", - rc); - rc = -1; - goto init_err_pci_clean; - } - - rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp); - if (rc) - netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n", - rc); - rc = bnxt_init_mac_addr(bp); if (rc) { dev_err(&pdev->dev, "Unable to initialize mac address.\n"); @@ -10794,17 +11769,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) goto init_err_pci_clean; } - bnxt_hwrm_func_qcfg(bp); - bnxt_hwrm_vnic_qcaps(bp); - bnxt_hwrm_port_led_qcaps(bp); - bnxt_ethtool_init(bp); - bnxt_dcb_init(bp); /* MTU range: 60 - FW defined max */ dev->min_mtu = ETH_ZLEN; dev->max_mtu = bp->max_mtu; - rc = bnxt_probe_phy(bp); + rc = bnxt_probe_phy(bp, true); if (rc) goto init_err_pci_clean; @@ -10818,24 +11788,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto init_err_pci_clean; } - /* Default RSS hash cfg. */ - bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 | - VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 | - VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 | - VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; - if (BNXT_CHIP_P4(bp) && bp->hwrm_spec_code >= 0x10501) { - bp->flags |= BNXT_FLAG_UDP_RSS_CAP; - bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 | - VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; - } - - if (bnxt_rfs_supported(bp)) { - dev->hw_features |= NETIF_F_NTUPLE; - if (bnxt_rfs_capable(bp)) { - bp->flags |= BNXT_FLAG_RFS; - dev->features |= NETIF_F_NTUPLE; - } - } + bnxt_fw_init_one_p3(bp); if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX) bp->flags |= BNXT_FLAG_STRIP_VLAN; @@ -10849,16 +11802,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) */ bp->tx_nr_rings_per_tc = bp->tx_nr_rings; - bnxt_get_wol_settings(bp); - if (bp->flags & BNXT_FLAG_WOL_CAP) - device_set_wakeup_enable(&pdev->dev, bp->wol); - else - device_set_wakeup_capable(&pdev->dev, false); - - bnxt_hwrm_set_cache_line_size(bp, cache_line_size()); - - bnxt_hwrm_coal_params_qcaps(bp); - if (BNXT_PF(bp)) { if (!bnxt_pf_wq) { bnxt_pf_wq = @@ -10895,6 +11838,8 @@ init_err_pci_clean: bnxt_free_ctx_mem(bp); kfree(bp->ctx); bp->ctx = NULL; + kfree(bp->fw_health); + bp->fw_health = NULL; bnxt_cleanup_pci(bp); init_err_free: @@ -10934,8 +11879,7 @@ shutdown_exit: #ifdef CONFIG_PM_SLEEP static int bnxt_suspend(struct device *device) { - struct pci_dev *pdev = to_pci_dev(device); - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(device); struct bnxt *bp = netdev_priv(dev); int rc = 0; @@ -10951,8 +11895,7 @@ static int bnxt_suspend(struct device *device) static int bnxt_resume(struct device *device) { - struct pci_dev *pdev = to_pci_dev(device); - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(device); struct bnxt *bp = netdev_priv(dev); int rc = 0; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 16694b704d15..d333589811a5 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -113,6 +113,7 @@ struct tx_cmp { #define CMP_TYPE_RX_AGG_CMP 18 #define CMP_TYPE_RX_L2_TPA_START_CMP 19 #define CMP_TYPE_RX_L2_TPA_END_CMP 21 + #define CMP_TYPE_RX_TPA_AGG_CMP 22 #define CMP_TYPE_STATUS_CMP 32 #define CMP_TYPE_REMOTE_DRIVER_REQ 34 #define CMP_TYPE_REMOTE_DRIVER_RESP 
36 @@ -263,14 +264,21 @@ struct rx_agg_cmp { u32 rx_agg_cmp_opaque; __le32 rx_agg_cmp_v; #define RX_AGG_CMP_V (1 << 0) + #define RX_AGG_CMP_AGG_ID (0xffff << 16) + #define RX_AGG_CMP_AGG_ID_SHIFT 16 __le32 rx_agg_cmp_unused; }; +#define TPA_AGG_AGG_ID(rx_agg) \ + ((le32_to_cpu((rx_agg)->rx_agg_cmp_v) & \ + RX_AGG_CMP_AGG_ID) >> RX_AGG_CMP_AGG_ID_SHIFT) + struct rx_tpa_start_cmp { __le32 rx_tpa_start_cmp_len_flags_type; #define RX_TPA_START_CMP_TYPE (0x3f << 0) #define RX_TPA_START_CMP_FLAGS (0x3ff << 6) #define RX_TPA_START_CMP_FLAGS_SHIFT 6 + #define RX_TPA_START_CMP_FLAGS_ERROR (0x1 << 6) #define RX_TPA_START_CMP_FLAGS_PLACEMENT (0x7 << 7) #define RX_TPA_START_CMP_FLAGS_PLACEMENT_SHIFT 7 #define RX_TPA_START_CMP_FLAGS_PLACEMENT_JUMBO (0x1 << 7) @@ -278,6 +286,7 @@ struct rx_tpa_start_cmp { #define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_JUMBO (0x5 << 7) #define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_HDS (0x6 << 7) #define RX_TPA_START_CMP_FLAGS_RSS_VALID (0x1 << 10) + #define RX_TPA_START_CMP_FLAGS_TIMESTAMP (0x1 << 11) #define RX_TPA_START_CMP_FLAGS_ITYPES (0xf << 12) #define RX_TPA_START_CMP_FLAGS_ITYPES_SHIFT 12 #define RX_TPA_START_CMP_FLAGS_ITYPE_TCP (0x2 << 12) @@ -291,6 +300,8 @@ struct rx_tpa_start_cmp { #define RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT 9 #define RX_TPA_START_CMP_AGG_ID (0x7f << 25) #define RX_TPA_START_CMP_AGG_ID_SHIFT 25 + #define RX_TPA_START_CMP_AGG_ID_P5 (0xffff << 16) + #define RX_TPA_START_CMP_AGG_ID_SHIFT_P5 16 __le32 rx_tpa_start_cmp_rss_hash; }; @@ -308,6 +319,14 @@ struct rx_tpa_start_cmp { ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \ RX_TPA_START_CMP_AGG_ID) >> RX_TPA_START_CMP_AGG_ID_SHIFT) +#define TPA_START_AGG_ID_P5(rx_tpa_start) \ + ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \ + RX_TPA_START_CMP_AGG_ID_P5) >> RX_TPA_START_CMP_AGG_ID_SHIFT_P5) + +#define TPA_START_ERROR(rx_tpa_start) \ + ((rx_tpa_start)->rx_tpa_start_cmp_len_flags_type & \ + cpu_to_le32(RX_TPA_START_CMP_FLAGS_ERROR)) + struct rx_tpa_start_cmp_ext { __le32 rx_tpa_start_cmp_flags2; #define RX_TPA_START_CMP_FLAGS2_IP_CS_CALC (0x1 << 0) @@ -315,10 +334,20 @@ struct rx_tpa_start_cmp_ext { #define RX_TPA_START_CMP_FLAGS2_T_IP_CS_CALC (0x1 << 2) #define RX_TPA_START_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3) #define RX_TPA_START_CMP_FLAGS2_IP_TYPE (0x1 << 8) + #define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL_VALID (0x1 << 9) + #define RX_TPA_START_CMP_FLAGS2_EXT_META_FORMAT (0x3 << 10) + #define RX_TPA_START_CMP_FLAGS2_EXT_META_FORMAT_SHIFT 10 + #define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL (0xffff << 16) + #define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL_SHIFT 16 __le32 rx_tpa_start_cmp_metadata; __le32 rx_tpa_start_cmp_cfa_code_v2; #define RX_TPA_START_CMP_V2 (0x1 << 0) + #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_MASK (0x7 << 1) + #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_SHIFT 1 + #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0 << 1) + #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3 << 1) + #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_FLUSH (0x5 << 1) #define RX_TPA_START_CMP_CFA_CODE (0xffff << 16) #define RX_TPA_START_CMPL_CFA_CODE_SHIFT 16 __le32 rx_tpa_start_cmp_hdr_info; @@ -332,6 +361,11 @@ struct rx_tpa_start_cmp_ext { (!!((rx_tpa_start)->rx_tpa_start_cmp_flags2 & \ cpu_to_le32(RX_TPA_START_CMP_FLAGS2_IP_TYPE))) +#define TPA_START_ERROR_CODE(rx_tpa_start) \ + ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) & \ + RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_MASK) >> \ + RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_SHIFT) + struct rx_tpa_end_cmp { __le32 
rx_tpa_end_cmp_len_flags_type; #define RX_TPA_END_CMP_TYPE (0x3f << 0) @@ -361,6 +395,8 @@ struct rx_tpa_end_cmp { #define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT 16 #define RX_TPA_END_CMP_AGG_ID (0x7f << 25) #define RX_TPA_END_CMP_AGG_ID_SHIFT 25 + #define RX_TPA_END_CMP_AGG_ID_P5 (0xffff << 16) + #define RX_TPA_END_CMP_AGG_ID_SHIFT_P5 16 __le32 rx_tpa_end_cmp_tsdelta; #define RX_TPA_END_GRO_TS (0x1 << 31) @@ -370,6 +406,18 @@ struct rx_tpa_end_cmp { ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \ RX_TPA_END_CMP_AGG_ID) >> RX_TPA_END_CMP_AGG_ID_SHIFT) +#define TPA_END_AGG_ID_P5(rx_tpa_end) \ + ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \ + RX_TPA_END_CMP_AGG_ID_P5) >> RX_TPA_END_CMP_AGG_ID_SHIFT_P5) + +#define TPA_END_PAYLOAD_OFF(rx_tpa_end) \ + ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \ + RX_TPA_END_CMP_PAYLOAD_OFFSET) >> RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT) + +#define TPA_END_AGG_BUFS(rx_tpa_end) \ + ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \ + RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT) + #define TPA_END_TPA_SEGS(rx_tpa_end) \ ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \ RX_TPA_END_CMP_TPA_SEGS) >> RX_TPA_END_CMP_TPA_SEGS_SHIFT) @@ -389,6 +437,10 @@ struct rx_tpa_end_cmp { struct rx_tpa_end_cmp_ext { __le32 rx_tpa_end_cmp_dup_acks; #define RX_TPA_END_CMP_TPA_DUP_ACKS (0xf << 0) + #define RX_TPA_END_CMP_PAYLOAD_OFFSET_P5 (0xff << 16) + #define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT_P5 16 + #define RX_TPA_END_CMP_AGG_BUFS_P5 (0xff << 24) + #define RX_TPA_END_CMP_AGG_BUFS_SHIFT_P5 24 __le32 rx_tpa_end_cmp_seg_len; #define RX_TPA_END_CMP_TPA_SEG_LEN (0xffff << 0) @@ -396,7 +448,13 @@ struct rx_tpa_end_cmp_ext { __le32 rx_tpa_end_cmp_errors_v2; #define RX_TPA_END_CMP_V2 (0x1 << 0) #define RX_TPA_END_CMP_ERRORS (0x3 << 1) + #define RX_TPA_END_CMP_ERRORS_P5 (0x7 << 1) #define RX_TPA_END_CMPL_ERRORS_SHIFT 1 + #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0 << 1) + #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_NOT_ON_CHIP (0x2 << 1) + #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3 << 1) + #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_RSV_ERROR (0x4 << 1) + #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_FLUSH (0x5 << 1) u32 rx_tpa_end_cmp_start_opaque; }; @@ -405,6 +463,28 @@ struct rx_tpa_end_cmp_ext { ((rx_tpa_end_ext)->rx_tpa_end_cmp_errors_v2 & \ cpu_to_le32(RX_TPA_END_CMP_ERRORS)) +#define TPA_END_PAYLOAD_OFF_P5(rx_tpa_end_ext) \ + ((le32_to_cpu((rx_tpa_end_ext)->rx_tpa_end_cmp_dup_acks) & \ + RX_TPA_END_CMP_PAYLOAD_OFFSET_P5) >> \ + RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT_P5) + +#define TPA_END_AGG_BUFS_P5(rx_tpa_end_ext) \ + ((le32_to_cpu((rx_tpa_end_ext)->rx_tpa_end_cmp_dup_acks) & \ + RX_TPA_END_CMP_AGG_BUFS_P5) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT_P5) + +#define EVENT_DATA1_RESET_NOTIFY_FATAL(data1) \ + (((data1) & \ + ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK) ==\ + ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL) + +#define EVENT_DATA1_RECOVERY_MASTER_FUNC(data1) \ + !!((data1) & \ + ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASTER_FUNC) + +#define EVENT_DATA1_RECOVERY_ENABLED(data1) \ + !!((data1) & \ + ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED) + struct nqe_cn { __le16 type; #define NQ_CN_TYPE_MASK 0x3fUL @@ -487,6 +567,9 @@ struct nqe_cn { #define BNXT_DEFAULT_TX_RING_SIZE 511 #define MAX_TPA 64 +#define MAX_TPA_P5 256 +#define MAX_TPA_P5_MASK (MAX_TPA_P5 - 1) +#define MAX_TPA_SEGS_P5 0x3f #if (BNXT_PAGE_SHIFT == 16) #define MAX_RX_PAGES 1 
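The new *_P5 accessors above widen the TPA aggregation ID from the legacy 7-bit field to 16 bits, reusing the header's mask-then-shift idiom on a little-endian word, while MAX_TPA_P5_MASK allows power-of-two masking into the 256-entry software table (the actual hardware-ID-to-slot mapping is handled by the bnxt_tpa_idx_map added in this change). A standalone check of the arithmetic, with plain masking standing in for le32_to_cpu() on a little-endian host:

#include <assert.h>
#include <stdint.h>

#define AGG_ID_P5_MASK	(0xffffu << 16)
#define AGG_ID_P5_SHIFT	16
#define MAX_TPA_P5	256
#define MAX_TPA_P5_MASK	(MAX_TPA_P5 - 1)	/* power of two, so & works */

static uint16_t tpa_end_agg_id_p5(uint32_t misc_v1)
{
	return (misc_v1 & AGG_ID_P5_MASK) >> AGG_ID_P5_SHIFT;
}

int main(void)
{
	uint32_t misc_v1 = (0xbeefu << 16) | 0x3f;	/* agg ID in bits 16..31 */

	assert(tpa_end_agg_id_p5(misc_v1) == 0xbeef);
	/* illustrative only: reducing a 16-bit ID to a 256-entry slot index */
	assert((tpa_end_agg_id_p5(misc_v1) & MAX_TPA_P5_MASK) == 0xef);
	return 0;
}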
@@ -562,8 +645,10 @@ struct nqe_cn { #define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len) #define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input) #define DFLT_HWRM_CMD_TIMEOUT 500 +#define SHORT_HWRM_CMD_TIMEOUT 20 #define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout) #define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4) +#define HWRM_COREDUMP_TIMEOUT ((HWRM_CMD_TIMEOUT) * 12) #define HWRM_RESP_ERR_CODE_MASK 0xffff #define HWRM_RESP_LEN_OFFSET 4 #define HWRM_RESP_LEN_MASK 0xffff0000 @@ -768,6 +853,15 @@ struct bnxt_tpa_info { ((hdr_info) & 0x1ff) u16 cfa_code; /* cfa_code in TPA start compl */ + u8 agg_count; + struct rx_agg_cmp *agg_arr; +}; + +#define BNXT_AGG_IDX_BMAP_SIZE (MAX_TPA_P5 / BITS_PER_LONG) + +struct bnxt_tpa_idx_map { + u16 agg_id_tbl[1024]; + unsigned long agg_idx_bmap[BNXT_AGG_IDX_BMAP_SIZE]; }; struct bnxt_rx_ring_info { @@ -797,6 +891,7 @@ struct bnxt_rx_ring_info { dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES]; struct bnxt_tpa_info *rx_tpa; + struct bnxt_tpa_idx_map *rx_tpa_idx_map; struct bnxt_ring_struct rx_ring_struct; struct bnxt_ring_struct rx_agg_ring_struct; @@ -978,6 +1073,7 @@ struct bnxt_pf_info { u8 mac_addr[ETH_ALEN]; u32 first_vf_id; u16 active_vfs; + u16 registered_vfs; u16 max_vfs; u32 max_encap_records; u32 max_decap_records; @@ -1137,6 +1233,9 @@ struct bnxt_test_info { #define BNXT_GRCPF_REG_KONG_COMM 0xA00 #define BNXT_GRCPF_REG_KONG_COMM_TRIGGER 0xB00 +#define BNXT_GRC_BASE_MASK 0xfffff000 +#define BNXT_GRC_OFFSET_MASK 0x00000ffc + struct bnxt_tc_flow_stats { u64 packets; u64 bytes; @@ -1253,6 +1352,54 @@ struct bnxt_ctx_mem_info { struct bnxt_ctx_pg_info *tqm_mem[9]; }; +struct bnxt_fw_health { + u32 flags; + u32 polling_dsecs; + u32 master_func_wait_dsecs; + u32 normal_func_wait_dsecs; + u32 post_reset_wait_dsecs; + u32 post_reset_max_wait_dsecs; + u32 regs[4]; + u32 mapped_regs[4]; +#define BNXT_FW_HEALTH_REG 0 +#define BNXT_FW_HEARTBEAT_REG 1 +#define BNXT_FW_RESET_CNT_REG 2 +#define BNXT_FW_RESET_INPROG_REG 3 + u32 fw_reset_inprog_reg_mask; + u32 last_fw_heartbeat; + u32 last_fw_reset_cnt; + u8 enabled:1; + u8 master:1; + u8 tmr_multiplier; + u8 tmr_counter; + u8 fw_reset_seq_cnt; + u32 fw_reset_seq_regs[16]; + u32 fw_reset_seq_vals[16]; + u32 fw_reset_seq_delay_msec[16]; + struct devlink_health_reporter *fw_reporter; + struct devlink_health_reporter *fw_reset_reporter; + struct devlink_health_reporter *fw_fatal_reporter; +}; + +struct bnxt_fw_reporter_ctx { + unsigned long sp_event; +}; + +#define BNXT_FW_HEALTH_REG_TYPE_MASK 3 +#define BNXT_FW_HEALTH_REG_TYPE_CFG 0 +#define BNXT_FW_HEALTH_REG_TYPE_GRC 1 +#define BNXT_FW_HEALTH_REG_TYPE_BAR0 2 +#define BNXT_FW_HEALTH_REG_TYPE_BAR1 3 + +#define BNXT_FW_HEALTH_REG_TYPE(reg) ((reg) & BNXT_FW_HEALTH_REG_TYPE_MASK) +#define BNXT_FW_HEALTH_REG_OFF(reg) ((reg) & ~BNXT_FW_HEALTH_REG_TYPE_MASK) + +#define BNXT_FW_HEALTH_WIN_BASE 0x3000 +#define BNXT_FW_HEALTH_WIN_MAP_OFF 8 + +#define BNXT_FW_STATUS_HEALTHY 0x8000 +#define BNXT_FW_STATUS_SHUTDOWN 0x100000 + struct bnxt { void __iomem *bar0; void __iomem *bar1; @@ -1282,7 +1429,9 @@ struct bnxt { #define CHIP_NUM_5745X 0xd730 -#define CHIP_NUM_57500 0x1750 +#define CHIP_NUM_57508 0x1750 +#define CHIP_NUM_57504 0x1751 +#define CHIP_NUM_57502 0x1752 #define CHIP_NUM_58802 0xd802 #define CHIP_NUM_58804 0xd804 @@ -1379,12 +1528,14 @@ struct bnxt { #define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0) #define BNXT_RX_PAGE_MODE(bp) ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE) #define BNXT_SUPPORTS_TPA(bp) (!BNXT_CHIP_TYPE_NITRO_A0(bp) && \ - 
!(bp->flags & BNXT_FLAG_CHIP_P5) && \ - !is_kdump_kernel()) + (!((bp)->flags & BNXT_FLAG_CHIP_P5) || \ + (bp)->max_tpa_v2) && !is_kdump_kernel()) /* Chip class phase 5 */ #define BNXT_CHIP_P5(bp) \ - ((bp)->chip_num == CHIP_NUM_57500) + ((bp)->chip_num == CHIP_NUM_57508 || \ + (bp)->chip_num == CHIP_NUM_57504 || \ + (bp)->chip_num == CHIP_NUM_57502) /* Chip class phase 4.x */ #define BNXT_CHIP_P4(bp) \ @@ -1414,6 +1565,8 @@ struct bnxt { u16, void *, u8 *, dma_addr_t, unsigned int); + u16 max_tpa_v2; + u16 max_tpa; u32 rx_buf_size; u32 rx_buf_use_size; /* useable size */ u16 rx_offset; @@ -1469,6 +1622,10 @@ struct bnxt { #define BNXT_STATE_OPEN 0 #define BNXT_STATE_IN_SP_TASK 1 #define BNXT_STATE_READ_STATS 2 +#define BNXT_STATE_FW_RESET_DET 3 +#define BNXT_STATE_IN_FW_RESET 4 +#define BNXT_STATE_ABORT_ERR 5 +#define BNXT_STATE_FW_FATAL_COND 6 struct bnxt_irq *irq_tbl; int total_irqs; @@ -1493,11 +1650,13 @@ struct bnxt { #define BNXT_FW_CAP_KONG_MB_CHNL 0x00000080 #define BNXT_FW_CAP_OVS_64BIT_HANDLE 0x00000400 #define BNXT_FW_CAP_TRUSTED_VF 0x00000800 + #define BNXT_FW_CAP_ERROR_RECOVERY 0x00002000 #define BNXT_FW_CAP_PKG_VER 0x00004000 #define BNXT_FW_CAP_CFA_ADV_FLOW 0x00008000 #define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX 0x00010000 #define BNXT_FW_CAP_PCIE_STATS_SUPPORTED 0x00020000 #define BNXT_FW_CAP_EXT_STATS_SUPPORTED 0x00040000 + #define BNXT_FW_CAP_ERR_RECOVER_RELOAD 0x00100000 #define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM) u32 hwrm_spec_code; @@ -1525,6 +1684,7 @@ struct bnxt { int hw_port_stats_size; u16 fw_rx_stats_ext_size; u16 fw_tx_stats_ext_size; + u16 hw_ring_stats_size; u8 pri2cos[8]; u8 pri2cos_valid; @@ -1576,6 +1736,25 @@ struct bnxt { #define BNXT_FLOW_STATS_SP_EVENT 15 #define BNXT_UPDATE_PHY_SP_EVENT 16 #define BNXT_RING_COAL_NOW_SP_EVENT 17 +#define BNXT_FW_RESET_NOTIFY_SP_EVENT 18 +#define BNXT_FW_EXCEPTION_SP_EVENT 19 + + struct delayed_work fw_reset_task; + int fw_reset_state; +#define BNXT_FW_RESET_STATE_POLL_VF 1 +#define BNXT_FW_RESET_STATE_RESET_FW 2 +#define BNXT_FW_RESET_STATE_ENABLE_DEV 3 +#define BNXT_FW_RESET_STATE_POLL_FW 4 +#define BNXT_FW_RESET_STATE_OPENING 5 +#define BNXT_FW_RESET_STATE_POLL_FW_DOWN 6 + + u16 fw_reset_min_dsecs; +#define BNXT_DFLT_FW_RST_MIN_DSECS 20 + u16 fw_reset_max_dsecs; +#define BNXT_DFLT_FW_RST_MAX_DSECS 60 + unsigned long fw_reset_timestamp; + + struct bnxt_fw_health *fw_health; struct bnxt_hw_resc hw_resc; struct bnxt_pf_info pf; @@ -1637,7 +1816,6 @@ struct bnxt { u8 switch_id[8]; struct bnxt_tc_info *tc_info; struct dentry *debugfs_pdev; - struct dentry *debugfs_dim; struct device *hwmon_dev; }; @@ -1782,6 +1960,7 @@ extern const u16 bnxt_lhint_arr[]; int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 prod, gfp_t gfp); void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data); +u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx); void bnxt_set_tpa_flags(struct bnxt *bp); void bnxt_set_ring_params(struct bnxt *); int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode); @@ -1814,6 +1993,8 @@ int bnxt_open_nic(struct bnxt *, bool, bool); int bnxt_half_open_nic(struct bnxt *bp); void bnxt_half_close_nic(struct bnxt *bp); int bnxt_close_nic(struct bnxt *, bool, bool); +void bnxt_fw_exception(struct bnxt *bp); +void bnxt_fw_reset(struct bnxt *bp); int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, int tx_xdp); int bnxt_setup_mq_tc(struct net_device *dev, u8 tc); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c 
b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c index 07301cb87c03..fb6f30d0d1d0 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c @@ -377,8 +377,6 @@ static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app, set.data_len = cpu_to_le16(sizeof(*data) + sizeof(*fw_app) * n); set.hdr_cnt = 1; rc = hwrm_send_message(bp, &set, sizeof(set), HWRM_CMD_TIMEOUT); - if (rc) - rc = -EIO; set_app_exit: dma_free_coherent(&bp->pdev->dev, data_len, data, mapping); @@ -391,6 +389,7 @@ static int bnxt_hwrm_queue_dscp_qcaps(struct bnxt *bp) struct hwrm_queue_dscp_qcaps_input req = {0}; int rc; + bp->max_dscp_value = 0; if (bp->hwrm_spec_code < 0x10800 || BNXT_VF(bp)) return 0; @@ -433,8 +432,6 @@ static int bnxt_hwrm_queue_dscp2pri_cfg(struct bnxt *bp, struct dcb_app *app, dscp2pri->pri = app->priority; req.entry_cnt = cpu_to_le16(1); rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (rc) - rc = -EIO; dma_free_coherent(&bp->pdev->dev, sizeof(*dscp2pri), dscp2pri, mapping); return rc; @@ -722,6 +719,7 @@ static const struct dcbnl_rtnl_ops dcbnl_ops = { void bnxt_dcb_init(struct bnxt *bp) { + bp->dcbx_cap = 0; if (bp->hwrm_spec_code < 0x10501) return; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c index 61393f351a77..156c2404854f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c @@ -61,45 +61,30 @@ static const struct file_operations debugfs_dim_fops = { .read = debugfs_dim_read, }; -static struct dentry *debugfs_dim_ring_init(struct dim *dim, int ring_idx, - struct dentry *dd) +static void debugfs_dim_ring_init(struct dim *dim, int ring_idx, + struct dentry *dd) { static char qname[16]; snprintf(qname, 10, "%d", ring_idx); - return debugfs_create_file(qname, 0600, dd, - dim, &debugfs_dim_fops); + debugfs_create_file(qname, 0600, dd, dim, &debugfs_dim_fops); } void bnxt_debug_dev_init(struct bnxt *bp) { const char *pname = pci_name(bp->pdev); - struct dentry *pdevf; + struct dentry *dir; int i; bp->debugfs_pdev = debugfs_create_dir(pname, bnxt_debug_mnt); - if (bp->debugfs_pdev) { - pdevf = debugfs_create_dir("dim", bp->debugfs_pdev); - if (!pdevf) { - pr_err("failed to create debugfs entry %s/dim\n", - pname); - return; - } - bp->debugfs_dim = pdevf; - /* create files for each rx ring */ - for (i = 0; i < bp->cp_nr_rings; i++) { - struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; + dir = debugfs_create_dir("dim", bp->debugfs_pdev); - if (cpr && bp->bnapi[i]->rx_ring) { - pdevf = debugfs_dim_ring_init(&cpr->dim, i, - bp->debugfs_dim); - if (!pdevf) - pr_err("failed to create debugfs entry %s/dim/%d\n", - pname, i); - } - } - } else { - pr_err("failed to create debugfs entry %s\n", pname); + /* create files for each rx ring */ + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; + + if (cpr && bp->bnapi[i]->rx_ring) + debugfs_dim_ring_init(&cpr->dim, i, dir); } } @@ -114,8 +99,6 @@ void bnxt_debug_dev_exit(struct bnxt *bp) void bnxt_debug_init(void) { bnxt_debug_mnt = debugfs_create_dir("bnxt_en", NULL); - if (!bnxt_debug_mnt) - pr_err("failed to init bnxt_en debugfs\n"); } void bnxt_debug_exit(void) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index c05d663212b2..e664392dccc0 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ 
b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -15,6 +15,192 @@ #include "bnxt_vfr.h" #include "bnxt_devlink.h" +static int bnxt_fw_reporter_diagnose(struct devlink_health_reporter *reporter, + struct devlink_fmsg *fmsg) +{ + struct bnxt *bp = devlink_health_reporter_priv(reporter); + struct bnxt_fw_health *health = bp->fw_health; + u32 val, health_status; + int rc; + + if (!health || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) + return 0; + + val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); + health_status = val & 0xffff; + + if (health_status == BNXT_FW_STATUS_HEALTHY) { + rc = devlink_fmsg_string_pair_put(fmsg, "FW status", + "Healthy;"); + if (rc) + return rc; + } else if (health_status < BNXT_FW_STATUS_HEALTHY) { + rc = devlink_fmsg_string_pair_put(fmsg, "FW status", + "Not yet completed initialization;"); + if (rc) + return rc; + } else if (health_status > BNXT_FW_STATUS_HEALTHY) { + rc = devlink_fmsg_string_pair_put(fmsg, "FW status", + "Encountered fatal error and cannot recover;"); + if (rc) + return rc; + } + + if (val >> 16) { + rc = devlink_fmsg_u32_pair_put(fmsg, "Error", val >> 16); + if (rc) + return rc; + } + + val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); + rc = devlink_fmsg_u32_pair_put(fmsg, "Reset count", val); + if (rc) + return rc; + + return 0; +} + +static const struct devlink_health_reporter_ops bnxt_dl_fw_reporter_ops = { + .name = "fw", + .diagnose = bnxt_fw_reporter_diagnose, +}; + +static int bnxt_fw_reset_recover(struct devlink_health_reporter *reporter, + void *priv_ctx) +{ + struct bnxt *bp = devlink_health_reporter_priv(reporter); + + if (!priv_ctx) + return -EOPNOTSUPP; + + bnxt_fw_reset(bp); + return 0; +} + +static const +struct devlink_health_reporter_ops bnxt_dl_fw_reset_reporter_ops = { + .name = "fw_reset", + .recover = bnxt_fw_reset_recover, +}; + +static int bnxt_fw_fatal_recover(struct devlink_health_reporter *reporter, + void *priv_ctx) +{ + struct bnxt *bp = devlink_health_reporter_priv(reporter); + struct bnxt_fw_reporter_ctx *fw_reporter_ctx = priv_ctx; + unsigned long event; + + if (!priv_ctx) + return -EOPNOTSUPP; + + event = fw_reporter_ctx->sp_event; + if (event == BNXT_FW_RESET_NOTIFY_SP_EVENT) + bnxt_fw_reset(bp); + else if (event == BNXT_FW_EXCEPTION_SP_EVENT) + bnxt_fw_exception(bp); + + return 0; +} + +static const +struct devlink_health_reporter_ops bnxt_dl_fw_fatal_reporter_ops = { + .name = "fw_fatal", + .recover = bnxt_fw_fatal_recover, +}; + +static void bnxt_dl_fw_reporters_create(struct bnxt *bp) +{ + struct bnxt_fw_health *health = bp->fw_health; + + if (!health) + return; + + health->fw_reporter = + devlink_health_reporter_create(bp->dl, &bnxt_dl_fw_reporter_ops, + 0, false, bp); + if (IS_ERR(health->fw_reporter)) { + netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n", + PTR_ERR(health->fw_reporter)); + health->fw_reporter = NULL; + } + + health->fw_reset_reporter = + devlink_health_reporter_create(bp->dl, + &bnxt_dl_fw_reset_reporter_ops, + 0, true, bp); + if (IS_ERR(health->fw_reset_reporter)) { + netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n", + PTR_ERR(health->fw_reset_reporter)); + health->fw_reset_reporter = NULL; + } + + health->fw_fatal_reporter = + devlink_health_reporter_create(bp->dl, + &bnxt_dl_fw_fatal_reporter_ops, + 0, true, bp); + if (IS_ERR(health->fw_fatal_reporter)) { + netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n", + PTR_ERR(health->fw_fatal_reporter)); + health->fw_fatal_reporter = NULL; + } +} + +static 
void bnxt_dl_fw_reporters_destroy(struct bnxt *bp) +{ + struct bnxt_fw_health *health = bp->fw_health; + + if (!health) + return; + + if (health->fw_reporter) + devlink_health_reporter_destroy(health->fw_reporter); + + if (health->fw_reset_reporter) + devlink_health_reporter_destroy(health->fw_reset_reporter); + + if (health->fw_fatal_reporter) + devlink_health_reporter_destroy(health->fw_fatal_reporter); +} + +void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event) +{ + struct bnxt_fw_health *fw_health = bp->fw_health; + struct bnxt_fw_reporter_ctx fw_reporter_ctx; + + if (!fw_health) + return; + + fw_reporter_ctx.sp_event = event; + switch (event) { + case BNXT_FW_RESET_NOTIFY_SP_EVENT: + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) { + if (!fw_health->fw_fatal_reporter) + return; + + devlink_health_report(fw_health->fw_fatal_reporter, + "FW fatal async event received", + &fw_reporter_ctx); + return; + } + if (!fw_health->fw_reset_reporter) + return; + + devlink_health_report(fw_health->fw_reset_reporter, + "FW non-fatal reset event received", + &fw_reporter_ctx); + return; + + case BNXT_FW_EXCEPTION_SP_EVENT: + if (!fw_health->fw_fatal_reporter) + return; + + devlink_health_report(fw_health->fw_fatal_reporter, + "FW fatal error reported", + &fw_reporter_ctx); + return; + } +} + static const struct devlink_ops bnxt_dl_ops = { #ifdef CONFIG_BNXT_SRIOV .eswitch_mode_set = bnxt_dl_eswitch_mode_set, @@ -109,13 +295,9 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, memcpy(buf, data_addr, bytesize); dma_free_coherent(&bp->pdev->dev, bytesize, data_addr, data_dma_addr); - if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) { + if (rc == -EACCES) netdev_err(bp->dev, "PF does not have admin privileges to modify NVM config\n"); - return -EACCES; - } else if (rc) { - return -EIO; - } - return 0; + return rc; } static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id, @@ -251,6 +433,8 @@ int bnxt_dl_register(struct bnxt *bp) devlink_params_publish(dl); + bnxt_dl_fw_reporters_create(bp); + return 0; err_dl_port_unreg: @@ -273,6 +457,7 @@ void bnxt_dl_unregister(struct bnxt *bp) if (!dl) return; + bnxt_dl_fw_reporters_destroy(bp); devlink_port_params_unregister(&bp->dl_port, bnxt_dl_port_params, ARRAY_SIZE(bnxt_dl_port_params)); devlink_port_unregister(&bp->dl_port); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h index 5b6b2c7d97cf..b97e0baeb42d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h @@ -55,6 +55,7 @@ struct bnxt_dl_nvm_param { u16 num_bits; }; +void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event); int bnxt_dl_register(struct bnxt *bp); void bnxt_dl_unregister(struct bnxt *bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 8445a0cce849..51c140476717 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -137,7 +137,44 @@ reset_coalesce: return rc; } -#define BNXT_NUM_STATS 22 +static const char * const bnxt_ring_stats_str[] = { + "rx_ucast_packets", + "rx_mcast_packets", + "rx_bcast_packets", + "rx_discards", + "rx_drops", + "rx_ucast_bytes", + "rx_mcast_bytes", + "rx_bcast_bytes", + "tx_ucast_packets", + "tx_mcast_packets", + "tx_bcast_packets", + "tx_discards", + "tx_drops", + "tx_ucast_bytes", + "tx_mcast_bytes", + "tx_bcast_bytes", +}; + +static const 
char * const bnxt_ring_tpa_stats_str[] = { + "tpa_packets", + "tpa_bytes", + "tpa_events", + "tpa_aborts", +}; + +static const char * const bnxt_ring_tpa2_stats_str[] = { + "rx_tpa_eligible_pkt", + "rx_tpa_eligible_bytes", + "rx_tpa_pkt", + "rx_tpa_bytes", + "rx_tpa_errors", +}; + +static const char * const bnxt_ring_sw_stats_str[] = { + "rx_l4_csum_errors", + "missed_irqs", +}; #define BNXT_RX_STATS_ENTRY(counter) \ { BNXT_RX_STATS_OFFSET(counter), __stringify(counter) } @@ -207,6 +244,20 @@ reset_coalesce: BNXT_TX_STATS_EXT_COS_ENTRY(6), \ BNXT_TX_STATS_EXT_COS_ENTRY(7) \ +#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(n) \ + BNXT_RX_STATS_EXT_ENTRY(rx_discard_bytes_cos##n), \ + BNXT_RX_STATS_EXT_ENTRY(rx_discard_packets_cos##n) + +#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES \ + BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(0), \ + BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(1), \ + BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(2), \ + BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(3), \ + BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(4), \ + BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(5), \ + BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(6), \ + BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(7) + #define BNXT_RX_STATS_PRI_ENTRY(counter, n) \ { BNXT_RX_STATS_EXT_OFFSET(counter##_cos0), \ __stringify(counter##_pri##n) } @@ -352,6 +403,7 @@ static const struct { BNXT_RX_STATS_EXT_ENTRY(rx_buffer_passed_threshold), BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err), BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits), + BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES, }; static const struct { @@ -417,9 +469,29 @@ static const struct { ARRAY_SIZE(bnxt_tx_pkts_pri_arr)) #define BNXT_NUM_PCIE_STATS ARRAY_SIZE(bnxt_pcie_stats_arr) +static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp) +{ + if (BNXT_SUPPORTS_TPA(bp)) { + if (bp->max_tpa_v2) + return ARRAY_SIZE(bnxt_ring_tpa2_stats_str); + return ARRAY_SIZE(bnxt_ring_tpa_stats_str); + } + return 0; +} + +static int bnxt_get_num_ring_stats(struct bnxt *bp) +{ + int num_stats; + + num_stats = ARRAY_SIZE(bnxt_ring_stats_str) + + ARRAY_SIZE(bnxt_ring_sw_stats_str) + + bnxt_get_num_tpa_ring_stats(bp); + return num_stats * bp->cp_nr_rings; +} + static int bnxt_get_num_stats(struct bnxt *bp) { - int num_stats = BNXT_NUM_STATS * bp->cp_nr_rings; + int num_stats = bnxt_get_num_ring_stats(bp); num_stats += BNXT_NUM_SW_FUNC_STATS; @@ -460,10 +532,11 @@ static void bnxt_get_ethtool_stats(struct net_device *dev, { u32 i, j = 0; struct bnxt *bp = netdev_priv(dev); - u32 stat_fields = sizeof(struct ctx_hw_stats) / 8; + u32 stat_fields = ARRAY_SIZE(bnxt_ring_stats_str) + + bnxt_get_num_tpa_ring_stats(bp); if (!bp->bnapi) { - j += BNXT_NUM_STATS * bp->cp_nr_rings + BNXT_NUM_SW_FUNC_STATS; + j += bnxt_get_num_ring_stats(bp) + BNXT_NUM_SW_FUNC_STATS; goto skip_ring_stats; } @@ -551,56 +624,39 @@ skip_ring_stats: static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf) { struct bnxt *bp = netdev_priv(dev); - u32 i; + static const char * const *str; + u32 i, j, num_str; switch (stringset) { - /* The number of strings must match BNXT_NUM_STATS defined above. 
*/ case ETH_SS_STATS: for (i = 0; i < bp->cp_nr_rings; i++) { - sprintf(buf, "[%d]: rx_ucast_packets", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: rx_mcast_packets", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: rx_bcast_packets", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: rx_discards", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: rx_drops", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: rx_ucast_bytes", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: rx_mcast_bytes", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: rx_bcast_bytes", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: tx_ucast_packets", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: tx_mcast_packets", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: tx_bcast_packets", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: tx_discards", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: tx_drops", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: tx_ucast_bytes", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: tx_mcast_bytes", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: tx_bcast_bytes", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: tpa_packets", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: tpa_bytes", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: tpa_events", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: tpa_aborts", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: rx_l4_csum_errors", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: missed_irqs", i); - buf += ETH_GSTRING_LEN; + num_str = ARRAY_SIZE(bnxt_ring_stats_str); + for (j = 0; j < num_str; j++) { + sprintf(buf, "[%d]: %s", i, + bnxt_ring_stats_str[j]); + buf += ETH_GSTRING_LEN; + } + if (!BNXT_SUPPORTS_TPA(bp)) + goto skip_tpa_stats; + + if (bp->max_tpa_v2) { + num_str = ARRAY_SIZE(bnxt_ring_tpa2_stats_str); + str = bnxt_ring_tpa2_stats_str; + } else { + num_str = ARRAY_SIZE(bnxt_ring_tpa_stats_str); + str = bnxt_ring_tpa_stats_str; + } + for (j = 0; j < num_str; j++) { + sprintf(buf, "[%d]: %s", i, str[j]); + buf += ETH_GSTRING_LEN; + } +skip_tpa_stats: + num_str = ARRAY_SIZE(bnxt_ring_sw_stats_str); + for (j = 0; j < num_str; j++) { + sprintf(buf, "[%d]: %s", i, + bnxt_ring_sw_stats_str[j]); + buf += ETH_GSTRING_LEN; + } } for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) { strcpy(buf, bnxt_sw_func_stats[i].string); @@ -1643,6 +1699,11 @@ static u32 bnxt_get_link(struct net_device *dev) return bp->link_info.link_up; } +static void bnxt_print_admin_err(struct bnxt *bp) +{ + netdev_info(bp->dev, "PF does not have admin privileges to flash or reset the device\n"); +} + static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal, u16 ext, u16 *index, u32 *item_length, u32 *data_length); @@ -1682,13 +1743,8 @@ static int bnxt_flash_nvram(struct net_device *dev, rc = hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT); dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle); - if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) { - netdev_info(dev, - "PF does not have admin privileges to flash the device\n"); - rc = -EACCES; - } else if (rc) { - rc = -EIO; - } + if (rc == -EACCES) + bnxt_print_admin_err(bp); return rc; } @@ -1738,13 +1794,8 @@ static int bnxt_firmware_reset(struct net_device *dev, } rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) { - netdev_info(dev, - "PF does not have admin privileges to reset the device\n"); - rc = -EACCES; - } else if (rc) { - rc = -EIO; - } + if (rc == 
-EACCES) + bnxt_print_admin_err(bp); return rc; } @@ -2039,13 +2090,8 @@ static int bnxt_flash_package_from_file(struct net_device *dev, flash_pkg_exit: mutex_unlock(&bp->hwrm_cmd_lock); err_exit: - if (hwrm_err == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) { - netdev_info(dev, - "PF does not have admin privileges to flash the device\n"); - rc = -EACCES; - } else if (hwrm_err) { - rc = -EOPNOTSUPP; - } + if (hwrm_err == -EACCES) + bnxt_print_admin_err(bp); return rc; } @@ -2584,8 +2630,6 @@ static int bnxt_set_phys_id(struct net_device *dev, led_cfg->led_group_id = bp->leds[i].led_group_id; } rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (rc) - rc = -EIO; return rc; } @@ -3068,7 +3112,7 @@ static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id, req.component_id = cpu_to_le16(component_id); req.segment_id = cpu_to_le16(segment_id); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + return hwrm_send_message(bp, &req, sizeof(req), HWRM_COREDUMP_TIMEOUT); } static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id, @@ -3306,6 +3350,7 @@ void bnxt_ethtool_init(struct bnxt *bp) if (!(bp->fw_cap & BNXT_FW_CAP_PKG_VER)) bnxt_get_pkgver(dev); + bp->num_tests = 0; if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp)) return; @@ -3315,7 +3360,9 @@ void bnxt_ethtool_init(struct bnxt *bp) if (rc) goto ethtool_init_exit; - test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL); + test_info = bp->test_info; + if (!test_info) + test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL); if (!test_info) goto ethtool_init_exit; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index 12bbb2a207d0..03b197eb793b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -1,7 +1,8 @@ /* Broadcom NetXtreme-C/E network driver. * * Copyright (c) 2014-2016 Broadcom Corporation - * Copyright (c) 2016-2019 Broadcom Limited + * Copyright (c) 2014-2018 Broadcom Limited + * Copyright (c) 2018-2019 Broadcom Inc. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -39,15 +40,16 @@ struct hwrm_resp_hdr { #define TLV_TYPE_ROCE_SP_COMMAND 0x3UL #define TLV_TYPE_QUERY_ROCE_CC_GEN1 0x4UL #define TLV_TYPE_MODIFY_ROCE_CC_GEN1 0x5UL -#define TLV_TYPE_ENGINE_CKV_DEVICE_SERIAL_NUMBER 0x8001UL -#define TLV_TYPE_ENGINE_CKV_NONCE 0x8002UL +#define TLV_TYPE_ENGINE_CKV_ALIAS_ECC_PUBLIC_KEY 0x8001UL #define TLV_TYPE_ENGINE_CKV_IV 0x8003UL #define TLV_TYPE_ENGINE_CKV_AUTH_TAG 0x8004UL #define TLV_TYPE_ENGINE_CKV_CIPHERTEXT 0x8005UL -#define TLV_TYPE_ENGINE_CKV_ALGORITHMS 0x8006UL -#define TLV_TYPE_ENGINE_CKV_ECC_PUBLIC_KEY 0x8007UL +#define TLV_TYPE_ENGINE_CKV_HOST_ALGORITHMS 0x8006UL +#define TLV_TYPE_ENGINE_CKV_HOST_ECC_PUBLIC_KEY 0x8007UL #define TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE 0x8008UL -#define TLV_TYPE_LAST TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE +#define TLV_TYPE_ENGINE_CKV_FW_ECC_PUBLIC_KEY 0x8009UL +#define TLV_TYPE_ENGINE_CKV_FW_ALGORITHMS 0x800aUL +#define TLV_TYPE_LAST TLV_TYPE_ENGINE_CKV_FW_ALGORITHMS /* tlv (size:64b/8B) */ @@ -200,10 +202,16 @@ struct cmd_nums { #define HWRM_PORT_QSTATS_EXT 0xb4UL #define HWRM_PORT_PHY_MDIO_WRITE 0xb5UL #define HWRM_PORT_PHY_MDIO_READ 0xb6UL + #define HWRM_PORT_PHY_MDIO_BUS_ACQUIRE 0xb7UL + #define HWRM_PORT_PHY_MDIO_BUS_RELEASE 0xb8UL #define HWRM_FW_RESET 0xc0UL #define HWRM_FW_QSTATUS 0xc1UL #define HWRM_FW_HEALTH_CHECK 0xc2UL #define HWRM_FW_SYNC 0xc3UL + #define HWRM_FW_STATE_BUFFER_QCAPS 0xc4UL + #define HWRM_FW_STATE_QUIESCE 0xc5UL + #define HWRM_FW_STATE_BACKUP 0xc6UL + #define HWRM_FW_STATE_RESTORE 0xc7UL #define HWRM_FW_SET_TIME 0xc8UL #define HWRM_FW_GET_TIME 0xc9UL #define HWRM_FW_SET_STRUCTURED_DATA 0xcaUL @@ -215,7 +223,10 @@ struct cmd_nums { #define HWRM_FWD_ASYNC_EVENT_CMPL 0xd3UL #define HWRM_OEM_CMD 0xd4UL #define HWRM_PORT_PRBS_TEST 0xd5UL + #define HWRM_PORT_SFP_SIDEBAND_CFG 0xd6UL + #define HWRM_PORT_SFP_SIDEBAND_QCFG 0xd7UL #define HWRM_TEMP_MONITOR_QUERY 0xe0UL + #define HWRM_REG_POWER_QUERY 0xe1UL #define HWRM_WOL_FILTER_ALLOC 0xf0UL #define HWRM_WOL_FILTER_FREE 0xf1UL #define HWRM_WOL_FILTER_QCFG 0xf2UL @@ -267,7 +278,6 @@ struct cmd_nums { #define HWRM_CFA_EEM_OP 0x123UL #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS 0x124UL #define HWRM_CFA_TFLIB 0x125UL - #define HWRM_ENGINE_CKV_HELLO 0x12dUL #define HWRM_ENGINE_CKV_STATUS 0x12eUL #define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL #define HWRM_ENGINE_CKV_CKEK_DELETE 0x130UL @@ -313,6 +323,7 @@ struct cmd_nums { #define HWRM_FUNC_BACKING_STORE_QCFG 0x194UL #define HWRM_FUNC_VF_BW_CFG 0x195UL #define HWRM_FUNC_VF_BW_QCFG 0x196UL + #define HWRM_FUNC_HOST_PF_IDS_QUERY 0x197UL #define HWRM_SELFTEST_QLIST 0x200UL #define HWRM_SELFTEST_EXEC 0x201UL #define HWRM_SELFTEST_IRQ 0x202UL @@ -410,8 +421,8 @@ struct hwrm_err_output { #define HWRM_VERSION_MAJOR 1 #define HWRM_VERSION_MINOR 10 #define HWRM_VERSION_UPDATE 0 -#define HWRM_VERSION_RSVD 69 -#define HWRM_VERSION_STR "1.10.0.69" +#define HWRM_VERSION_RSVD 100 +#define HWRM_VERSION_STR "1.10.0.100" /* hwrm_ver_get_input (size:192b/24B) */ struct hwrm_ver_get_input { @@ -624,6 +635,8 @@ struct hwrm_async_event_cmpl { #define ASYNC_EVENT_CMPL_EVENT_ID_TCP_FLAG_ACTION_CHANGE 0x3aUL #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_FLOW_ACTIVE 0x3bUL #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CFG_CHANGE 0x3cUL + #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_DEFAULT_VNIC_CHANGE 0x3dUL + #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_LINK_STATUS_CHANGE 0x3eUL #define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 
0xfeUL #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR @@ -802,6 +815,37 @@ struct hwrm_async_event_cmpl_vf_cfg_change { #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TRUSTED_VF_CFG_CHANGE 0x10UL }; +/* hwrm_async_event_cmpl_default_vnic_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_default_vnic_change { + __le16 type; + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_HWRM_ASYNC_EVENT + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_UNUSED1_MASK 0xffc0UL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_UNUSED1_SFT 6 + __le16 event_id; + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_ALLOC_FREE_NOTIFICATION 0x35UL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_ALLOC_FREE_NOTIFICATION + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_MASK 0x3UL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_SFT 0 + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_ALLOC 0x1UL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_FREE 0x2UL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_FREE + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_MASK 0x3fcUL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_SFT 2 + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_MASK 0x3fffc00UL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_SFT 10 +}; + /* hwrm_async_event_cmpl_hw_flow_aged (size:128b/16B) */ struct hwrm_async_event_cmpl_hw_flow_aged { __le16 type; @@ -1044,31 +1088,33 @@ struct hwrm_func_qcaps_output { __le16 fid; __le16 port_id; __le32 flags; - #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL - #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL - #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL - #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL - #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL - #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL - #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL - #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED 0x80UL - #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL - #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL - #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL - #define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED 0x800UL - #define FUNC_QCAPS_RESP_FLAGS_GENEVE_TUN_FLAGS_SUPPORTED 0x1000UL - #define FUNC_QCAPS_RESP_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED 0x2000UL - #define FUNC_QCAPS_RESP_FLAGS_GRE_TUN_FLAGS_SUPPORTED 0x4000UL - #define FUNC_QCAPS_RESP_FLAGS_MPLS_TUN_FLAGS_SUPPORTED 0x8000UL - #define FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED 0x10000UL - #define FUNC_QCAPS_RESP_FLAGS_ADOPTED_PF_SUPPORTED 0x20000UL - #define 
FUNC_QCAPS_RESP_FLAGS_ADMIN_PF_SUPPORTED 0x40000UL - #define FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED 0x80000UL - #define FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE 0x100000UL - #define FUNC_QCAPS_RESP_FLAGS_DYNAMIC_TX_RING_ALLOC 0x200000UL - #define FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE 0x400000UL - #define FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE 0x800000UL - #define FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED 0x1000000UL + #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL + #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL + #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL + #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL + #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL + #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL + #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL + #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED 0x80UL + #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL + #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL + #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL + #define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED 0x800UL + #define FUNC_QCAPS_RESP_FLAGS_GENEVE_TUN_FLAGS_SUPPORTED 0x1000UL + #define FUNC_QCAPS_RESP_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED 0x2000UL + #define FUNC_QCAPS_RESP_FLAGS_GRE_TUN_FLAGS_SUPPORTED 0x4000UL + #define FUNC_QCAPS_RESP_FLAGS_MPLS_TUN_FLAGS_SUPPORTED 0x8000UL + #define FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED 0x10000UL + #define FUNC_QCAPS_RESP_FLAGS_ADOPTED_PF_SUPPORTED 0x20000UL + #define FUNC_QCAPS_RESP_FLAGS_ADMIN_PF_SUPPORTED 0x40000UL + #define FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED 0x80000UL + #define FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE 0x100000UL + #define FUNC_QCAPS_RESP_FLAGS_DYNAMIC_TX_RING_ALLOC 0x200000UL + #define FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE 0x400000UL + #define FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE 0x800000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED 0x1000000UL + #define FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD 0x2000000UL + #define FUNC_QCAPS_RESP_FLAGS_NOTIFY_VF_DEF_VNIC_CHNG_SUPPORTED 0x4000000UL u8 mac_address[6]; __le16 max_rsscos_ctx; __le16 max_cmpl_rings; @@ -1122,6 +1168,7 @@ struct hwrm_func_qcfg_output { #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL #define FUNC_QCFG_RESP_FLAGS_TRUSTED_VF 0x40UL #define FUNC_QCFG_RESP_FLAGS_SECURE_MODE_ENABLED 0x80UL + #define FUNC_QCFG_RESP_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x100UL u8 mac_address[6]; __le16 pci_id; __le16 alloc_rsscos_ctx; @@ -1204,7 +1251,8 @@ struct hwrm_func_qcfg_output { __le16 alloc_stat_ctx; __le16 alloc_msix; __le16 registered_vfs; - u8 unused_1[3]; + __le16 l2_doorbell_bar_size_kb; + u8 unused_1; u8 always_1; __le32 reset_addr_poll; u8 unused_2[3]; @@ -1241,6 +1289,7 @@ struct hwrm_func_cfg_input { #define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL #define FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST 0x800000UL #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE 0x1000000UL + #define FUNC_CFG_REQ_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x2000000UL __le32 enables; #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL @@ -1358,7 +1407,11 @@ struct hwrm_func_qstats_input { __le16 target_id; __le64 resp_addr; __le16 fid; - u8 unused_0[6]; + u8 flags; + #define FUNC_QSTATS_REQ_FLAGS_UNUSED 0x0UL + #define FUNC_QSTATS_REQ_FLAGS_ROCE_ONLY 0x1UL + #define FUNC_QSTATS_REQ_FLAGS_LAST FUNC_QSTATS_REQ_FLAGS_ROCE_ONLY + u8 unused_0[5]; }; /* hwrm_func_qstats_output (size:1408b/176B) */ @@ -2916,7 +2969,7 @@ struct tx_port_stats_ext { __le64 
pfc_pri7_tx_transitions; }; -/* rx_port_stats_ext (size:2624b/328B) */ +/* rx_port_stats_ext (size:3648b/456B) */ struct rx_port_stats_ext { __le64 link_down_events; __le64 continuous_pause_events; @@ -2959,6 +3012,22 @@ struct rx_port_stats_ext { __le64 rx_buffer_passed_threshold; __le64 rx_pcs_symbol_err; __le64 rx_corrected_bits; + __le64 rx_discard_bytes_cos0; + __le64 rx_discard_bytes_cos1; + __le64 rx_discard_bytes_cos2; + __le64 rx_discard_bytes_cos3; + __le64 rx_discard_bytes_cos4; + __le64 rx_discard_bytes_cos5; + __le64 rx_discard_bytes_cos6; + __le64 rx_discard_bytes_cos7; + __le64 rx_discard_packets_cos0; + __le64 rx_discard_packets_cos1; + __le64 rx_discard_packets_cos2; + __le64 rx_discard_packets_cos3; + __le64 rx_discard_packets_cos4; + __le64 rx_discard_packets_cos5; + __le64 rx_discard_packets_cos6; + __le64 rx_discard_packets_cos7; }; /* hwrm_port_qstats_ext_input (size:320b/40B) */ @@ -4693,7 +4762,7 @@ struct hwrm_vnic_free_output { u8 valid; }; -/* hwrm_vnic_cfg_input (size:320b/40B) */ +/* hwrm_vnic_cfg_input (size:384b/48B) */ struct hwrm_vnic_cfg_input { __le16 req_type; __le16 cmpl_ring; @@ -4716,6 +4785,7 @@ struct hwrm_vnic_cfg_input { #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL #define VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID 0x20UL #define VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID 0x40UL + #define VNIC_CFG_REQ_ENABLES_QUEUE_ID 0x80UL __le16 vnic_id; __le16 dflt_ring_grp; __le16 rss_rule; @@ -4724,6 +4794,8 @@ struct hwrm_vnic_cfg_input { __le16 mru; __le16 default_rx_ring_id; __le16 default_cmpl_ring_id; + __le16 queue_id; + u8 unused0[6]; }; /* hwrm_vnic_cfg_output (size:128b/16B) */ @@ -4764,6 +4836,7 @@ struct hwrm_vnic_qcaps_output { #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL + #define VNIC_QCAPS_RESP_FLAGS_COS_ASSIGNMENT_CAP 0x100UL __le16 max_aggs_supported; u8 unused_1[5]; u8 valid; @@ -6115,6 +6188,21 @@ struct hwrm_cfa_flow_alloc_output { u8 valid; }; +/* hwrm_cfa_flow_alloc_cmd_err (size:64b/8B) */ +struct hwrm_cfa_flow_alloc_cmd_err { + u8 code; + #define CFA_FLOW_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL + #define CFA_FLOW_ALLOC_CMD_ERR_CODE_L2_CONTEXT_TCAM 0x1UL + #define CFA_FLOW_ALLOC_CMD_ERR_CODE_ACTION_RECORD 0x2UL + #define CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_COUNTER 0x3UL + #define CFA_FLOW_ALLOC_CMD_ERR_CODE_WILD_CARD_TCAM 0x4UL + #define CFA_FLOW_ALLOC_CMD_ERR_CODE_HASH_COLLISION 0x5UL + #define CFA_FLOW_ALLOC_CMD_ERR_CODE_KEY_EXISTS 0x6UL + #define CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_CTXT_DB 0x7UL + #define CFA_FLOW_ALLOC_CMD_ERR_CODE_LAST CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_CTXT_DB + u8 unused_0[7]; +}; + /* hwrm_cfa_flow_free_input (size:256b/32B) */ struct hwrm_cfa_flow_free_input { __le16 req_type; @@ -6305,7 +6393,7 @@ struct hwrm_cfa_eem_qcaps_input { __le32 unused_0; }; -/* hwrm_cfa_eem_qcaps_output (size:256b/32B) */ +/* hwrm_cfa_eem_qcaps_output (size:320b/40B) */ struct hwrm_cfa_eem_qcaps_output { __le16 error_code; __le16 req_type; @@ -6322,15 +6410,17 @@ struct hwrm_cfa_eem_qcaps_output { #define CFA_EEM_QCAPS_RESP_SUPPORTED_KEY1_TABLE 0x2UL #define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_RECORD_TABLE 0x4UL #define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_FLOW_COUNTERS_TABLE 0x8UL + #define CFA_EEM_QCAPS_RESP_SUPPORTED_FID_TABLE 0x10UL __le32 max_entries_supported; __le16 key_entry_size; __le16 record_entry_size; __le16 efc_entry_size; - u8 unused_1; + __le16 fid_entry_size; + u8 unused_1[7]; u8 valid; }; -/* 
hwrm_cfa_eem_cfg_input (size:320b/40B) */ +/* hwrm_cfa_eem_cfg_input (size:384b/48B) */ struct hwrm_cfa_eem_cfg_input { __le16 req_type; __le16 cmpl_ring; @@ -6350,6 +6440,9 @@ struct hwrm_cfa_eem_cfg_input { __le16 key1_ctx_id; __le16 record_ctx_id; __le16 efc_ctx_id; + __le16 fid_ctx_id; + __le16 unused_2; + __le32 unused_3; }; /* hwrm_cfa_eem_cfg_output (size:128b/16B) */ @@ -6375,7 +6468,7 @@ struct hwrm_cfa_eem_qcfg_input { __le32 unused_0; }; -/* hwrm_cfa_eem_qcfg_output (size:192b/24B) */ +/* hwrm_cfa_eem_qcfg_output (size:256b/32B) */ struct hwrm_cfa_eem_qcfg_output { __le16 error_code; __le16 req_type; @@ -6386,7 +6479,12 @@ struct hwrm_cfa_eem_qcfg_output { #define CFA_EEM_QCFG_RESP_FLAGS_PATH_RX 0x2UL #define CFA_EEM_QCFG_RESP_FLAGS_PREFERRED_OFFLOAD 0x4UL __le32 num_entries; - u8 unused_0[7]; + __le16 key0_ctx_id; + __le16 key1_ctx_id; + __le16 record_ctx_id; + __le16 efc_ctx_id; + __le16 fid_ctx_id; + u8 unused_2[5]; u8 valid; }; @@ -6567,6 +6665,31 @@ struct ctx_hw_stats { __le64 tpa_aborts; }; +/* ctx_hw_stats_ext (size:1344b/168B) */ +struct ctx_hw_stats_ext { + __le64 rx_ucast_pkts; + __le64 rx_mcast_pkts; + __le64 rx_bcast_pkts; + __le64 rx_discard_pkts; + __le64 rx_drop_pkts; + __le64 rx_ucast_bytes; + __le64 rx_mcast_bytes; + __le64 rx_bcast_bytes; + __le64 tx_ucast_pkts; + __le64 tx_mcast_pkts; + __le64 tx_bcast_pkts; + __le64 tx_discard_pkts; + __le64 tx_drop_pkts; + __le64 tx_ucast_bytes; + __le64 tx_mcast_bytes; + __le64 tx_bcast_bytes; + __le64 rx_tpa_eligible_pkt; + __le64 rx_tpa_eligible_bytes; + __le64 rx_tpa_pkt; + __le64 rx_tpa_bytes; + __le64 rx_tpa_errors; +}; + /* hwrm_stat_ctx_alloc_input (size:256b/32B) */ struct hwrm_stat_ctx_alloc_input { __le16 req_type; @@ -6578,7 +6701,8 @@ struct hwrm_stat_ctx_alloc_input { __le32 update_period_ms; u8 stat_ctx_flags; #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL - u8 unused_0[3]; + u8 unused_0; + __le16 stats_dma_length; }; /* hwrm_stat_ctx_alloc_output (size:128b/16B) */ @@ -6722,15 +6846,16 @@ struct hwrm_fw_reset_input { __le16 target_id; __le64 resp_addr; u8 embedded_proc_type; - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST_RESOURCE_REINIT 0x7UL - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_LAST FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST_RESOURCE_REINIT + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST_RESOURCE_REINIT 0x7UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_IMPACTLESS_ACTIVATION 0x8UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_LAST FW_RESET_REQ_EMBEDDED_PROC_TYPE_IMPACTLESS_ACTIVATION u8 selfrst_status; #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL @@ -7053,7 +7178,14 @@ struct hwrm_temp_monitor_query_output { __le16 seq_id; __le16 resp_len; u8 temp; - u8 unused_0[6]; + u8 phy_temp; + u8 om_temp; + u8 flags; + #define 
TEMP_MONITOR_QUERY_RESP_FLAGS_TEMP_NOT_AVAILABLE 0x1UL + #define TEMP_MONITOR_QUERY_RESP_FLAGS_PHY_TEMP_NOT_AVAILABLE 0x2UL + #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_NOT_PRESENT 0x4UL + #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_TEMP_NOT_AVAILABLE 0x8UL + u8 unused_0[3]; u8 valid; }; @@ -7204,7 +7336,9 @@ struct coredump_segment_record { u8 version_hi; u8 version_low; u8 seg_flags; - u8 unused_0[7]; + u8 compress_flags; + #define SFLAG_COMPRESSED_ZLIB 0x1UL + u8 unused_0[6]; }; /* hwrm_dbg_coredump_list_input (size:256b/32B) */ @@ -7729,6 +7863,9 @@ struct hwrm_nvm_set_variable_input { #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_AES256 (0x2UL << 1) #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH (0x3UL << 1) #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH + #define NVM_SET_VARIABLE_REQ_FLAGS_FLAGS_UNUSED_0_MASK 0x70UL + #define NVM_SET_VARIABLE_REQ_FLAGS_FLAGS_UNUSED_0_SFT 4 + #define NVM_SET_VARIABLE_REQ_FLAGS_FACTORY_DEFAULT 0x80UL u8 unused_0; }; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 2b90a2bb1a1d..f6f3454d6059 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -25,7 +25,6 @@ static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp, struct bnxt_vf_info *vf, u16 event_id) { - struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr; struct hwrm_fwd_async_event_cmpl_input req = {0}; struct hwrm_async_event_cmpl *async_cmpl; int rc = 0; @@ -40,23 +39,10 @@ static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp, async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT); async_cmpl->event_id = cpu_to_le16(event_id); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - - if (rc) { + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. 
rc:%d\n", rc); - goto fwd_async_event_cmpl_exit; - } - - if (resp->error_code) { - netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n", - resp->error_code); - rc = -1; - } - -fwd_async_event_cmpl_exit: - mutex_unlock(&bp->hwrm_cmd_lock); return rc; } @@ -133,7 +119,7 @@ static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf) rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) { mutex_unlock(&bp->hwrm_cmd_lock); - return -EIO; + return rc; } vf->func_qcfg_flags = le16_to_cpu(resp->flags); mutex_unlock(&bp->hwrm_cmd_lock); @@ -164,9 +150,7 @@ static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf) else req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE); rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (rc) - return -EIO; - return 0; + return rc; } int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted) @@ -486,10 +470,43 @@ static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp) return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); } +/* Caller holds bp->hwrm_cmd_lock mutex lock */ +static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id) +{ + struct hwrm_func_cfg_input req = {0}; + struct bnxt_vf_info *vf; + + vf = &bp->pf.vf[vf_id]; + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); + req.fid = cpu_to_le16(vf->fw_fid); + req.flags = cpu_to_le32(vf->func_flags); + + if (is_valid_ether_addr(vf->mac_addr)) { + req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR); + memcpy(req.dflt_mac_addr, vf->mac_addr, ETH_ALEN); + } + if (vf->vlan) { + req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN); + req.dflt_vlan = cpu_to_le16(vf->vlan); + } + if (vf->max_tx_rate) { + req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW); + req.max_bw = cpu_to_le32(vf->max_tx_rate); +#ifdef HAVE_IFLA_TX_RATE + req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW); + req.min_bw = cpu_to_le32(vf->min_tx_rate); +#endif + } + if (vf->flags & BNXT_VF_TRUST) + req.flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE); + + _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +} + /* Only called by PF to reserve resources for VFs, returns actual number of * VFs configured, or < 0 on error. 
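 * A descriptive note on the new reset path (summarizing the added code
 * below): when called with reset set, the PF's saved per-VF settings
 * (MAC address, default VLAN, bandwidth limits, trusted flag) are
 * replayed via __bnxt_set_vf_params() before each VF's resources are
 * reconfigured.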
*/ -static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs) +static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset) { struct hwrm_func_vf_resource_cfg_input req = {0}; struct bnxt_hw_resc *hw_resc = &bp->hw_resc; @@ -561,13 +578,14 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs) mutex_lock(&bp->hwrm_cmd_lock); for (i = 0; i < num_vfs; i++) { + if (reset) + __bnxt_set_vf_params(bp, i); + req.vf_id = cpu_to_le16(pf->first_vf_id + i); rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (rc) { - rc = -ENOMEM; + if (rc) break; - } pf->active_vfs = i + 1; pf->vf[i].fw_fid = pf->first_vf_id + i; } @@ -664,8 +682,6 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) total_vf_tx_rings += vf_tx_rsvd; } mutex_unlock(&bp->hwrm_cmd_lock); - if (rc) - rc = -ENOMEM; if (pf->active_vfs) { hw_resc->max_tx_rings -= total_vf_tx_rings; hw_resc->max_rx_rings -= vf_rx_rings * num_vfs; @@ -679,14 +695,40 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) return rc; } -static int bnxt_func_cfg(struct bnxt *bp, int num_vfs) +static int bnxt_func_cfg(struct bnxt *bp, int num_vfs, bool reset) { if (BNXT_NEW_RM(bp)) - return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs); + return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs, reset); else return bnxt_hwrm_func_cfg(bp, num_vfs); } +int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset) +{ + int rc; + + /* Register buffers for VFs */ + rc = bnxt_hwrm_func_buf_rgtr(bp); + if (rc) + return rc; + + /* Reserve resources for VFs */ + rc = bnxt_func_cfg(bp, *num_vfs, reset); + if (rc != *num_vfs) { + if (rc <= 0) { + netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n"); + *num_vfs = 0; + return rc; + } + netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n", + rc); + *num_vfs = rc; + } + + bnxt_ulp_sriov_cfg(bp, *num_vfs); + return 0; +} + static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) { int rc = 0, vfs_supported; @@ -752,25 +794,10 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) if (rc) goto err_out1; - /* Reserve resources for VFs */ - rc = bnxt_func_cfg(bp, *num_vfs); - if (rc != *num_vfs) { - if (rc <= 0) { - netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n"); - *num_vfs = 0; - goto err_out2; - } - netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n", rc); - *num_vfs = rc; - } - - /* Register buffers for VFs */ - rc = bnxt_hwrm_func_buf_rgtr(bp); + rc = bnxt_cfg_hw_sriov(bp, num_vfs, false); if (rc) goto err_out2; - bnxt_ulp_sriov_cfg(bp, *num_vfs); - rc = pci_enable_sriov(bp->pdev, *num_vfs); if (rc) goto err_out2; @@ -837,6 +864,11 @@ int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs) rtnl_unlock(); return 0; } + if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { + netdev_warn(dev, "Reject SRIOV config request when FW reset is in progress\n"); + rtnl_unlock(); + return 0; + } bp->sriov_cfg = true; rtnl_unlock(); @@ -870,7 +902,6 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, { int rc = 0; struct hwrm_fwd_resp_input req = {0}; - struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr; if (BNXT_FWD_RESP_SIZE_ERR(msg_size)) return -EINVAL; @@ -885,22 +916,9 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, req.encap_resp_cmpl_ring = encap_resp_cpr; memcpy(req.encap_resp, encap_resp, msg_size); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - - if (rc) { + rc = 
hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc); - goto fwd_resp_exit; - } - - if (resp->error_code) { - netdev_err(bp->dev, "hwrm_fwd_resp error %d\n", - resp->error_code); - rc = -1; - } - -fwd_resp_exit: - mutex_unlock(&bp->hwrm_cmd_lock); return rc; } @@ -909,7 +927,6 @@ static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf, { int rc = 0; struct hwrm_reject_fwd_resp_input req = {0}; - struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr; if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size)) return -EINVAL; @@ -920,22 +937,9 @@ static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf, req.encap_resp_target_id = cpu_to_le16(vf->fw_fid); memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - - if (rc) { + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc); - goto fwd_err_resp_exit; - } - - if (resp->error_code) { - netdev_err(bp->dev, "hwrm_fwd_err_resp error %d\n", - resp->error_code); - rc = -1; - } - -fwd_err_resp_exit: - mutex_unlock(&bp->hwrm_cmd_lock); return rc; } @@ -944,7 +948,6 @@ static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, { int rc = 0; struct hwrm_exec_fwd_resp_input req = {0}; - struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr; if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size)) return -EINVAL; @@ -955,22 +958,9 @@ static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, req.encap_resp_target_id = cpu_to_le16(vf->fw_fid); memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - - if (rc) { + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) netdev_err(bp->dev, "hwrm_exec_fw_resp failed. 
rc:%d\n", rc); - goto exec_fwd_resp_exit; - } - - if (resp->error_code) { - netdev_err(bp->dev, "hwrm_exec_fw_resp error %d\n", - resp->error_code); - rc = -1; - } - -exec_fwd_resp_exit: - mutex_unlock(&bp->hwrm_cmd_lock); return rc; } @@ -1190,6 +1180,13 @@ mac_done: } #else +int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset) +{ + if (*num_vfs) + return -EOPNOTSUPP; + return 0; +} + void bnxt_sriov_disable(struct bnxt *bp) { } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h index 2eed9eda1195..629641bf6fc5 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h @@ -36,6 +36,7 @@ int bnxt_set_vf_link_state(struct net_device *, int, int); int bnxt_set_vf_spoofchk(struct net_device *, int, bool); int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trust); int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs); +int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset); void bnxt_sriov_disable(struct bnxt *); void bnxt_hwrm_exec_fwd_req(struct bnxt *); void bnxt_update_vf_mac(struct bnxt *); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index dd621f6bd127..c8062d020d1e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -319,8 +319,6 @@ static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, if (rc) netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); - if (rc) - rc = -EIO; return rc; } @@ -515,11 +513,6 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, } } mutex_unlock(&bp->hwrm_cmd_lock); - - if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) - rc = -ENOSPC; - else if (rc) - rc = -EIO; return rc; } @@ -591,8 +584,6 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp, } mutex_unlock(&bp->hwrm_cmd_lock); - if (rc) - rc = -EIO; return rc; } @@ -609,8 +600,6 @@ static int hwrm_cfa_decap_filter_free(struct bnxt *bp, if (rc) netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); - if (rc) - rc = -EIO; return rc; } @@ -660,8 +649,6 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp, } mutex_unlock(&bp->hwrm_cmd_lock); - if (rc) - rc = -EIO; return rc; } @@ -678,8 +665,6 @@ static int hwrm_cfa_encap_record_free(struct bnxt *bp, if (rc) netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); - if (rc) - rc = -EIO; return rc; } @@ -1457,8 +1442,6 @@ bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows, } mutex_unlock(&bp->hwrm_cmd_lock); - if (rc) - rc = -EIO; return rc; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index fc77caf0a076..b2c160947fc8 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@ -226,6 +226,9 @@ static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id, struct input *req; int rc; + if (ulp_id != BNXT_ROCE_ULP && bp->fw_reset_state) + return -EBUSY; + mutex_lock(&bp->hwrm_cmd_lock); req = fw_msg->msg; req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr); diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 57dc3cbff36e..155599dcee76 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -4096,12 +4096,16 @@ static int cnic_cm_alloc_mem(struct cnic_dev *dev) { struct cnic_local *cp = dev->cnic_priv; u32 port_id; + int i; cp->csk_tbl = 
kvcalloc(MAX_CM_SK_TBL_SZ, sizeof(struct cnic_sock), GFP_KERNEL); if (!cp->csk_tbl) return -ENOMEM; + for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) + atomic_set(&cp->csk_tbl[i].ref_count, 0); + port_id = prandom_u32(); port_id %= CNIC_LOCAL_PORT_RANGE; if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE, @@ -5480,6 +5484,7 @@ static struct cnic_dev *cnic_alloc_dev(struct net_device *dev, cdev->unregister_device = cnic_unregister_device; cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv; cdev->get_fc_npiv_tbl = cnic_get_fc_npiv_tbl; + atomic_set(&cdev->ref_count, 0); cp = cdev->cnic_priv; cp->dev = cdev; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index b22196880d6d..12cb77ef1081 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -2516,19 +2516,14 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) { struct netdev_queue *txq; - struct sk_buff *skb; - struct enet_cb *cb; int i; bcmgenet_fini_rx_napi(priv); bcmgenet_fini_tx_napi(priv); - for (i = 0; i < priv->num_tx_bds; i++) { - cb = priv->tx_cbs + i; - skb = bcmgenet_free_tx_cb(&priv->pdev->dev, cb); - if (skb) - dev_kfree_skb(skb); - } + for (i = 0; i < priv->num_tx_bds; i++) + dev_kfree_skb(bcmgenet_free_tx_cb(&priv->pdev->dev, + priv->tx_cbs + i)); for (i = 0; i < priv->hw_params->tx_queues; i++) { txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue); @@ -3438,7 +3433,6 @@ static int bcmgenet_probe(struct platform_device *pdev) struct bcmgenet_priv *priv; struct net_device *dev; const void *macaddr; - struct resource *r; unsigned int i; int err = -EIO; const char *phy_mode_str; @@ -3478,8 +3472,7 @@ static int bcmgenet_probe(struct platform_device *pdev) macaddr = pd->mac_address; } - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->base = devm_ioremap_resource(&pdev->dev, r); + priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) { err = PTR_ERR(priv->base); goto err; diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 4c404d2213f9..77f3511b97de 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -18041,8 +18041,7 @@ static void tg3_remove_one(struct pci_dev *pdev) #ifdef CONFIG_PM_SLEEP static int tg3_suspend(struct device *device) { - struct pci_dev *pdev = to_pci_dev(device); - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(device); struct tg3 *tp = netdev_priv(dev); int err = 0; @@ -18098,8 +18097,7 @@ unlock: static int tg3_resume(struct device *device) { - struct pci_dev *pdev = to_pci_dev(device); - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(device); struct tg3 *tp = netdev_priv(dev); int err = 0; diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index 7767ae6fa1fd..e338272931d1 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -3032,7 +3032,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) head_unmap->nvecs++; for (i = 0, vect_id = 0; i < vectors - 1; i++) { - const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; u32 size = skb_frag_size(frag); if (unlikely(size == 0)) { diff --git a/drivers/net/ethernet/calxeda/xgmac.c 
b/drivers/net/ethernet/calxeda/xgmac.c index 99f49d059414..f96a42af1014 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -1104,7 +1104,7 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev) for (i = 0; i < nfrags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - len = frag->size; + len = skb_frag_size(frag); paddr = skb_frag_dma_map(priv->device, frag, 0, len, DMA_TO_DEVICE); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index eab805579f96..7f3b2e3b0868 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -1492,11 +1492,11 @@ static void free_netsgbuf(void *buf) i = 1; while (frags--) { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; + skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; pci_unmap_page((lio->oct_dev)->pci_dev, g->sg[(i >> 2)].ptr[(i & 3)], - frag->size, DMA_TO_DEVICE); + skb_frag_size(frag), DMA_TO_DEVICE); i++; } @@ -1535,11 +1535,11 @@ static void free_netsgbuf_with_resp(void *buf) i = 1; while (frags--) { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; + skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; pci_unmap_page((lio->oct_dev)->pci_dev, g->sg[(i >> 2)].ptr[(i & 3)], - frag->size, DMA_TO_DEVICE); + skb_frag_size(frag), DMA_TO_DEVICE); i++; } @@ -2424,7 +2424,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) } else { int i, frags; - struct skb_frag_struct *frag; + skb_frag_t *frag; struct octnic_gather *g; spin_lock(&lio->glist_lock[q_idx]); @@ -2462,11 +2462,9 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) frag = &skb_shinfo(skb)->frags[i - 1]; g->sg[(i >> 2)].ptr[(i & 3)] = - dma_map_page(&oct->pci_dev->dev, - frag->page.p, - frag->page_offset, - frag->size, - DMA_TO_DEVICE); + skb_frag_dma_map(&oct->pci_dev->dev, + frag, 0, skb_frag_size(frag), + DMA_TO_DEVICE); if (dma_mapping_error(&oct->pci_dev->dev, g->sg[i >> 2].ptr[i & 3])) { @@ -2478,7 +2476,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) frag = &skb_shinfo(skb)->frags[j - 1]; dma_unmap_page(&oct->pci_dev->dev, g->sg[j >> 2].ptr[j & 3], - frag->size, + skb_frag_size(frag), DMA_TO_DEVICE); } dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n", @@ -2486,7 +2484,8 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) return NETDEV_TX_BUSY; } - add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3)); + add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag), + (i & 3)); i++; } diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index db0b90555acb..370d76822ee0 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -837,11 +837,11 @@ static void free_netsgbuf(void *buf) i = 1; while (frags--) { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; + skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; pci_unmap_page((lio->oct_dev)->pci_dev, g->sg[(i >> 2)].ptr[(i & 3)], - frag->size, DMA_TO_DEVICE); + skb_frag_size(frag), DMA_TO_DEVICE); i++; } @@ -881,11 +881,11 @@ static void free_netsgbuf_with_resp(void *buf) i = 1; while (frags--) { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; + skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; pci_unmap_page((lio->oct_dev)->pci_dev, g->sg[(i >> 
2)].ptr[(i & 3)], - frag->size, DMA_TO_DEVICE); + skb_frag_size(frag), DMA_TO_DEVICE); i++; } @@ -1497,7 +1497,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) ndata.reqtype = REQTYPE_NORESP_NET; } else { - struct skb_frag_struct *frag; + skb_frag_t *frag; struct octnic_gather *g; int i, frags; @@ -1535,11 +1535,9 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) frag = &skb_shinfo(skb)->frags[i - 1]; g->sg[(i >> 2)].ptr[(i & 3)] = - dma_map_page(&oct->pci_dev->dev, - frag->page.p, - frag->page_offset, - frag->size, - DMA_TO_DEVICE); + skb_frag_dma_map(&oct->pci_dev->dev, + frag, 0, skb_frag_size(frag), + DMA_TO_DEVICE); if (dma_mapping_error(&oct->pci_dev->dev, g->sg[i >> 2].ptr[i & 3])) { dma_unmap_single(&oct->pci_dev->dev, @@ -1550,7 +1548,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) frag = &skb_shinfo(skb)->frags[j - 1]; dma_unmap_page(&oct->pci_dev->dev, g->sg[j >> 2].ptr[j & 3], - frag->size, + skb_frag_size(frag), DMA_TO_DEVICE); } dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n", @@ -1558,7 +1556,8 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) return NETDEV_TX_BUSY; } - add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3)); + add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag), + (i & 3)); i++; } diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c index 021d99cd1665..614d07be7181 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c @@ -260,9 +260,7 @@ static int octeon_mbox_process_cmd(struct octeon_mbox *mbox, dev_info(&oct->pci_dev->dev, "got a request for FLR from VF that owns DPI ring %u\n", mbox->q_no); - pcie_capability_set_word( - oct->sriov_info.dpiring_to_vfpcidev_lut[mbox->q_no], - PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR); + pcie_flr(oct->sriov_info.dpiring_to_vfpcidev_lut[mbox->q_no]); break; case OCTEON_PF_CHANGED_VF_MACADDR: diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index 192bc92da881..4ab57d33a87e 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -1588,15 +1588,13 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq, goto doorbell; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - const struct skb_frag_struct *frag; - - frag = &skb_shinfo(skb)->frags[i]; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; qentry = nicvf_get_nxt_sqentry(sq, qentry); size = skb_frag_size(frag); dma_addr = dma_map_page_attrs(&nic->pdev->dev, skb_frag_page(frag), - frag->page_offset, size, + skb_frag_off(frag), size, DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); if (dma_mapping_error(&nic->pdev->dev, dma_addr)) { diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index 89db739b7819..6dabbf1502c7 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c @@ -2132,7 +2132,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs, struct port_info *pi = netdev_priv(qs->netdev); struct sk_buff *skb = NULL; struct cpl_rx_pkt *cpl; - struct skb_frag_struct *rx_frag; + skb_frag_t *rx_frag; int nr_frags; int offset = 0; @@ -2182,7 +2182,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs, rx_frag += nr_frags; 
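	/* Fill in the next fragment slot through the skb_frag accessors:
	 * the page, offset and length are set with __skb_frag_set_page(),
	 * skb_frag_off_set() and skb_frag_size_set() instead of writing
	 * the skb_frag_t fields directly.
	 */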
__skb_frag_set_page(rx_frag, sd->pg_chunk.page); - rx_frag->page_offset = sd->pg_chunk.offset + offset; + skb_frag_off_set(rx_frag, sd->pg_chunk.offset + offset); skb_frag_size_set(rx_frag, len); skb->len += len; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index d692251ee252..ae6a47dd7dc9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -3531,7 +3531,6 @@ int t4_setup_debugfs(struct adapter *adap) { int i; u32 size = 0; - struct dentry *de; static struct t4_debugfs_entry t4_debugfs_files[] = { { "cim_la", &cim_la_fops, 0400, 0 }, @@ -3642,8 +3641,8 @@ int t4_setup_debugfs(struct adapter *adap) } } - de = debugfs_create_file_size("flash", 0400, adap->debugfs_root, adap, - &flash_debugfs_fops, adap->params.sf_size); + debugfs_create_file_size("flash", 0400, adap->debugfs_root, adap, + &flash_debugfs_fops, adap->params.sf_size); debugfs_create_bool("use_backdoor", 0600, adap->debugfs_root, &adap->use_bd); debugfs_create_bool("trace_rss", 0600, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 4311ad9c84b2..71854a19cebe 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -6269,10 +6269,7 @@ static int __init cxgb4_init_module(void) { int ret; - /* Debugfs support is optional, just warn if this fails */ cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); - if (!cxgb4_debugfs_root) - pr_warn("could not create debugfs entry, continuing\n"); ret = pci_register_driver(&cxgb4_driver); if (ret < 0) diff --git a/drivers/net/ethernet/chelsio/cxgb4/smt.c b/drivers/net/ethernet/chelsio/cxgb4/smt.c index eaf1fb74689c..01c65d13fc0e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/smt.c +++ b/drivers/net/ethernet/chelsio/cxgb4/smt.c @@ -57,7 +57,7 @@ struct smt_data *t4_init_smt(void) s->smtab[i].state = SMT_STATE_UNUSED; memset(&s->smtab[i].src_mac, 0, ETH_ALEN); spin_lock_init(&s->smtab[i].lock); - atomic_set(&s->smtab[i].refcnt, 0); + s->smtab[i].refcnt = 0; } return s; } @@ -68,7 +68,7 @@ static struct smt_entry *find_or_alloc_smte(struct smt_data *s, u8 *smac) struct smt_entry *e, *end; for (e = &s->smtab[0], end = &s->smtab[s->smt_size]; e != end; ++e) { - if (atomic_read(&e->refcnt) == 0) { + if (e->refcnt == 0) { if (!first_free) first_free = e; } else { @@ -97,11 +97,9 @@ found_reuse: static void t4_smte_free(struct smt_entry *e) { - spin_lock_bh(&e->lock); - if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */ + if (e->refcnt == 0) { /* hasn't been recycled */ e->state = SMT_STATE_UNUSED; } - spin_unlock_bh(&e->lock); } /** @@ -111,8 +109,10 @@ static void t4_smte_free(struct smt_entry *e) */ void cxgb4_smt_release(struct smt_entry *e) { - if (atomic_dec_and_test(&e->refcnt)) + spin_lock_bh(&e->lock); + if ((--e->refcnt) == 0) t4_smte_free(e); + spin_unlock_bh(&e->lock); } EXPORT_SYMBOL(cxgb4_smt_release); @@ -215,14 +215,14 @@ static struct smt_entry *t4_smt_alloc_switching(struct adapter *adap, u16 pfvf, e = find_or_alloc_smte(s, smac); if (e) { spin_lock(&e->lock); - if (!atomic_read(&e->refcnt)) { - atomic_set(&e->refcnt, 1); + if (!e->refcnt) { + e->refcnt = 1; e->state = SMT_STATE_SWITCHING; e->pfvf = pfvf; memcpy(e->src_mac, smac, ETH_ALEN); write_smt_entry(adap, e); } else { - atomic_inc(&e->refcnt); + ++e->refcnt; } spin_unlock(&e->lock); } diff --git a/drivers/net/ethernet/chelsio/cxgb4/smt.h 
b/drivers/net/ethernet/chelsio/cxgb4/smt.h index d6c2cc271398..1268d6e93a47 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/smt.h +++ b/drivers/net/ethernet/chelsio/cxgb4/smt.h @@ -59,7 +59,7 @@ struct smt_entry { u16 idx; u16 pfvf; u8 src_mac[ETH_ALEN]; - atomic_t refcnt; + int refcnt; spinlock_t lock; /* protect smt entry add,removal */ }; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index f7fc553356f2..f2a7824da42b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -329,7 +329,7 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, for (i = 0; ; i += ms) { /* If we've waited too long, return a busy indication. This * really ought to be based on our initial position in the - * mailbox access list but this is a start. We very rearely + * mailbox access list but this is a start. We very rarely * contend on access to the mailbox ... */ pcie_fw = t4_read_reg(adap, PCIE_FW_A); @@ -606,7 +606,7 @@ void t4_memory_rw_residual(struct adapter *adap, u32 off, u32 addr, u8 *buf, * * Reads/writes an [almost] arbitrary memory region in the firmware: the * firmware memory address and host buffer must be aligned on 32-bit - * boudaries; the length may be arbitrary. The memory is transferred as + * boundaries; the length may be arbitrary. The memory is transferred as * a raw byte sequence from/to the firmware's memory. If this memory * contains data structures which contain multi-byte integers, it's the * caller's responsibility to perform appropriate byte order conversions. @@ -3774,7 +3774,7 @@ int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver) * A negative error number will be returned if an error occurs. If * version number support is available and there's no need to upgrade * the firmware, 0 will be returned. If firmware is successfully - * transferred to the adapter, 1 will be retured. + * transferred to the adapter, 1 will be returned. * * NOTE: some adapters only have local RAM to store the PHY firmware. As * a result, a RESET of the adapter would cause that RAM to lose its @@ -3808,7 +3808,7 @@ int t4_load_phy_fw(struct adapter *adap, } /* Ask the firmware where it wants us to copy the PHY firmware image. - * The size of the file requires a special version of the READ coommand + * The size of the file requires a special version of the READ command * which will pass the file size via the values field in PARAMS_CMD and * retrieve the return value from firmware and place it in the same * buffer values @@ -4082,7 +4082,7 @@ static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause) fw_pause |= FW_PORT_CAP32_FORCE_PAUSE; /* Translate orthogonal Pause controls into IEEE 802.3 Pause, - * Asymetrical Pause for use in reporting to upper layer OS code, etc. + * Asymmetrical Pause for use in reporting to upper layer OS code, etc. * Note that these bits are ignored in L1 Configure commands. */ if (cc_pause & PAUSE_RX) { @@ -4151,7 +4151,7 @@ fw_port_cap32_t t4_link_acaps(struct adapter *adapter, unsigned int port, /* Convert Common Code Forward Error Control settings into the * Firmware's API. If the current Requested FEC has "Automatic" * (IEEE 802.3) specified, then we use whatever the Firmware - * sent us as part of it's IEEE 802.3-based interpratation of + * sent us as part of its IEEE 802.3-based interpretation of * the Transceiver Module EPROM FEC parameters. Otherwise we * use whatever is in the current Requested FEC settings. 
*/ @@ -4248,7 +4248,7 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox, /* Unfortunately, even if the Requested Port Capabilities "fit" within * the Physical Port Capabilities, some combinations of features may - * still not be leagal. For example, 40Gb/s and Reed-Solomon Forward + * still not be legal. For example, 40Gb/s and Reed-Solomon Forward * Error Correction. So if the Firmware rejects the L1 Configure * request, flag that here. */ @@ -6797,7 +6797,7 @@ int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type) } /** - * t4_read_sge_dbqtimers - reag SGE Doorbell Queue Timer values + * t4_read_sge_dbqtimers - read SGE Doorbell Queue Timer values * @adap - the adapter * @ndbqtimers: size of the provided SGE Doorbell Queue Timer table * @dbqtimers: SGE Doorbell Queue Timer table @@ -6925,8 +6925,8 @@ retry: waiting -= 50; /* - * If neither Error nor Initialialized are indicated - * by the firmware keep waiting till we exaust our + * If neither Error nor Initialized are indicated + * by the firmware keep waiting till we exhaust our * timeout ... and then retry if we haven't exhausted * our retries ... */ @@ -7238,7 +7238,7 @@ int t4_fl_pkt_align(struct adapter *adap) * separately. The actual Ingress Packet Data alignment boundary * within Packed Buffer Mode is the maximum of these two * specifications. (Note that it makes no real practical sense to - * have the Pading Boudary be larger than the Packing Boundary but you + * have the Padding Boundary be larger than the Packing Boundary but you * could set the chip up that way and, in fact, legacy T4 code would * end doing this because it would initialize the Padding Boundary and * leave the Packing Boundary initialized to 0 (16 bytes).) @@ -8973,10 +8973,10 @@ static int t4_get_flash_params(struct adapter *adap) goto found; } - /* Decode Flash part size. The code below looks repetative with + /* Decode Flash part size. The code below looks repetitive with * common encodings, but that's not guaranteed in the JEDEC - * specification for the Read JADEC ID command. The only thing that - * we're guaranteed by the JADEC specification is where the + * specification for the Read JEDEC ID command. The only thing that + * we're guaranteed by the JEDEC specification is where the * Manufacturer ID is in the returned result. After that each * Manufacturer ~could~ encode things completely differently. * Note, all Flash parts must have 64KB sectors. @@ -9317,7 +9317,7 @@ int t4_init_devlog_params(struct adapter *adap) struct fw_devlog_cmd devlog_cmd; int ret; - /* If we're dealing with newer firmware, the Device Log Paramerters + /* If we're dealing with newer firmware, the Device Log Parameters * are stored in a designated register which allows us to access the * Device Log even if we can't talk to the firmware. */ diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 6d4cf3d0b2f0..f6fc0875d5b0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -2478,11 +2478,10 @@ static int setup_debugfs(struct adapter *adapter) * Debugfs support is best effort. 
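The debugfs hunks in cxgb4vf above (and the matching cxgb4 ones earlier) adopt the tree-wide convention that debugfs_create_dir() and debugfs_create_file() report and absorb their own failures, so callers no longer check return values or print warnings; a failed directory simply turns the child creations into no-ops. A minimal sketch of the pattern, with hypothetical foo_* names:

#include <linux/debugfs.h>

static struct dentry *foo_dbg_root;	/* hypothetical module-wide root */

static void foo_debugfs_init(void *priv, const struct file_operations *fops)
{
	/* No error check: on failure the calls below become no-ops */
	foo_dbg_root = debugfs_create_dir("foo", NULL);
	debugfs_create_file("stats", 0400, foo_dbg_root, priv, fops);
}

static void foo_debugfs_exit(void)
{
	debugfs_remove_recursive(foo_dbg_root);	/* drops the whole subtree */
}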
*/ for (i = 0; i < ARRAY_SIZE(debugfs_files); i++) - (void)debugfs_create_file(debugfs_files[i].name, - debugfs_files[i].mode, - adapter->debugfs_root, - (void *)adapter, - debugfs_files[i].fops); + debugfs_create_file(debugfs_files[i].name, + debugfs_files[i].mode, + adapter->debugfs_root, (void *)adapter, + debugfs_files[i].fops); return 0; } @@ -3257,11 +3256,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, adapter->debugfs_root = debugfs_create_dir(pci_name(pdev), cxgb4vf_debugfs_root); - if (IS_ERR_OR_NULL(adapter->debugfs_root)) - dev_warn(&pdev->dev, "could not create debugfs" - " directory"); - else - setup_debugfs(adapter); + setup_debugfs(adapter); } /* @@ -3486,13 +3481,11 @@ static int __init cxgb4vf_module_init(void) return -EINVAL; } - /* Debugfs support is optional, just warn if this fails */ + /* Debugfs support is optional, debugfs will warn if this fails */ cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); - if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) - pr_warn("could not create debugfs entry, continuing\n"); ret = pci_register_driver(&cxgb4vf_driver); - if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) + if (ret < 0) debugfs_remove(cxgb4vf_debugfs_root); return ret; } diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c index b3e7fafee3df..c9aebcde403a 100644 --- a/drivers/net/ethernet/cirrus/cs89x0.c +++ b/drivers/net/ethernet/cirrus/cs89x0.c @@ -1844,16 +1844,12 @@ cleanup_module(void) static int __init cs89x0_platform_probe(struct platform_device *pdev) { struct net_device *dev = alloc_etherdev(sizeof(struct net_local)); - struct net_local *lp; - struct resource *mem_res; void __iomem *virt_addr; int err; if (!dev) return -ENOMEM; - lp = netdev_priv(dev); - dev->irq = platform_get_irq(pdev, 0); if (dev->irq <= 0) { dev_warn(&dev->dev, "interrupt resource missing\n"); @@ -1861,8 +1857,7 @@ static int __init cs89x0_platform_probe(struct platform_device *pdev) goto free; } - mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - virt_addr = devm_ioremap_resource(&pdev->dev, mem_res); + virt_addr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(virt_addr)) { err = PTR_ERR(virt_addr); goto free; diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 9003eb6716cd..e736ce2c58ca 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -1182,9 +1182,8 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb, buflen = skb_headlen(skb); } else { skb_frag = skb_si->frags + frag; - buffer = page_address(skb_frag_page(skb_frag)) + - skb_frag->page_offset; - buflen = skb_frag->size; + buffer = skb_frag_address(skb_frag); + buflen = skb_frag_size(skb_frag); } if (frag == last_frag) { @@ -2423,10 +2422,8 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) /* Interrupt */ irq = platform_get_irq(pdev, 0); - if (irq <= 0) { - dev_err(dev, "no IRQ\n"); + if (irq <= 0) return irq ? 
irq : -ENODEV; - } port->irq = irq; /* Clock the port */ diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 386bdc1378d1..cce90b5925d9 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -1500,8 +1500,6 @@ dm9000_probe(struct platform_device *pdev) ndev->irq = platform_get_irq(pdev, 0); if (ndev->irq < 0) { - dev_err(db->dev, "interrupt resource unavailable: %d\n", - ndev->irq); ret = ndev->irq; goto out; } diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index f287b5da5546..cf3e6f2892ff 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -192,7 +192,6 @@ struct be_eq_obj { } ____cacheline_aligned_in_smp; struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */ - bool enable; u32 min_eqd; /* in usecs */ u32 max_eqd; /* in usecs */ u32 prev_eqd; /* in usecs */ @@ -589,6 +588,7 @@ struct be_adapter { struct be_drv_stats drv_stats; struct be_aic_obj aic_obj[MAX_EVT_QS]; + bool aic_enabled; u8 vlan_prio_bmap; /* Available Priority BitMap */ u16 recommended_prio_bits;/* Recommended Priority bits in vlan tag */ struct be_dma_mem rx_filter; /* Cmd DMA mem for rx-filter */ diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 323976c811e9..701c12c9e033 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -2756,7 +2756,7 @@ static int be_flash_BEx(struct be_adapter *adapter, bool crc_match; const u8 *p; - struct flash_comp gen3_flash_types[] = { + static const struct flash_comp gen3_flash_types[] = { { BE3_ISCSI_PRIMARY_IMAGE_START, OPTYPE_ISCSI_ACTIVE, BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_ISCSI}, { BE3_REDBOOT_START, OPTYPE_REDBOOT, @@ -2779,7 +2779,7 @@ static int be_flash_BEx(struct be_adapter *adapter, BE3_PHY_FW_COMP_MAX_SIZE, IMAGE_FIRMWARE_PHY} }; - struct flash_comp gen2_flash_types[] = { + static const struct flash_comp gen2_flash_types[] = { { BE2_ISCSI_PRIMARY_IMAGE_START, OPTYPE_ISCSI_ACTIVE, BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_ISCSI}, { BE2_REDBOOT_START, OPTYPE_REDBOOT, diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index 492f8769ac12..5bb5abf99588 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -329,8 +329,8 @@ static int be_get_coalesce(struct net_device *netdev, et->tx_coalesce_usecs_high = aic->max_eqd; et->tx_coalesce_usecs_low = aic->min_eqd; - et->use_adaptive_rx_coalesce = aic->enable; - et->use_adaptive_tx_coalesce = aic->enable; + et->use_adaptive_rx_coalesce = adapter->aic_enabled; + et->use_adaptive_tx_coalesce = adapter->aic_enabled; return 0; } @@ -346,8 +346,9 @@ static int be_set_coalesce(struct net_device *netdev, struct be_eq_obj *eqo; int i; + adapter->aic_enabled = et->use_adaptive_rx_coalesce; + for_all_evt_queues(adapter, eqo, i) { - aic->enable = et->use_adaptive_rx_coalesce; aic->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD); aic->min_eqd = min(et->rx_coalesce_usecs_low, aic->max_eqd); aic->et_eqd = min(et->rx_coalesce_usecs, aic->max_eqd); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 4d8e40ac66d2..39eb7d525043 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1014,7 +1014,7 @@ static u32 
be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo, } for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; len = skb_frag_size(frag); busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE); @@ -2147,7 +2147,7 @@ static int be_get_new_eqd(struct be_eq_obj *eqo) int i; aic = &adapter->aic_obj[eqo->idx]; - if (!aic->enable) { + if (!adapter->aic_enabled) { if (aic->jiffies) aic->jiffies = 0; eqd = aic->et_eqd; @@ -2204,7 +2204,7 @@ static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo) int eqd; u32 mult_enc; - if (!aic->enable) + if (!adapter->aic_enabled) return 0; if (jiffies_to_msecs(now - aic->jiffies) < 1) @@ -2346,8 +2346,8 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb, memcpy(skb->data, start, hdr_len); skb_shinfo(skb)->nr_frags = 1; skb_frag_set_page(skb, 0, page_info->page); - skb_shinfo(skb)->frags[0].page_offset = - page_info->page_offset + hdr_len; + skb_frag_off_set(&skb_shinfo(skb)->frags[0], + page_info->page_offset + hdr_len); skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len); skb->data_len = curr_frag_len - hdr_len; @@ -2372,8 +2372,8 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb, /* Fresh page */ j++; skb_frag_set_page(skb, j, page_info->page); - skb_shinfo(skb)->frags[j].page_offset = - page_info->page_offset; + skb_frag_off_set(&skb_shinfo(skb)->frags[j], + page_info->page_offset); skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0); skb_shinfo(skb)->nr_frags++; } else { @@ -2454,8 +2454,8 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo, /* First frag or Fresh page */ j++; skb_frag_set_page(skb, j, page_info->page); - skb_shinfo(skb)->frags[j].page_offset = - page_info->page_offset; + skb_frag_off_set(&skb_shinfo(skb)->frags[j], + page_info->page_offset); skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0); } else { put_page(page_info->page); @@ -2959,6 +2959,8 @@ static int be_evt_queues_create(struct be_adapter *adapter) max(adapter->cfg_num_rx_irqs, adapter->cfg_num_tx_irqs)); + adapter->aic_enabled = true; + for_all_evt_queues(adapter, eqo, i) { int numa_node = dev_to_node(&adapter->pdev->dev); @@ -2966,7 +2968,6 @@ static int be_evt_queues_create(struct be_adapter *adapter) eqo->adapter = adapter; eqo->idx = i; aic->max_eqd = BE_MAX_EQD; - aic->enable = true; eq = &eqo->q; rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN, diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c index 027225e1ade2..815fb62c4b02 100644 --- a/drivers/net/ethernet/ezchip/nps_enet.c +++ b/drivers/net/ethernet/ezchip/nps_enet.c @@ -576,7 +576,6 @@ static s32 nps_enet_probe(struct platform_device *pdev) struct nps_enet_priv *priv; s32 err = 0; const char *mac_addr; - struct resource *res_regs; if (!dev->of_node) return -ENODEV; @@ -595,8 +594,7 @@ static s32 nps_enet_probe(struct platform_device *pdev) /* FIXME :: no multicast support yet */ ndev->flags &= ~IFF_MULTICAST; - res_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->regs_base = devm_ioremap_resource(dev, res_regs); + priv->regs_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->regs_base)) { err = PTR_ERR(priv->regs_base); goto out_netdev; diff --git a/drivers/net/ethernet/faraday/Kconfig b/drivers/net/ethernet/faraday/Kconfig index a9b105803fb7..73e4f2648e49 100644 --- a/drivers/net/ethernet/faraday/Kconfig +++ 
b/drivers/net/ethernet/faraday/Kconfig @@ -32,6 +32,7 @@ config FTGMAC100 depends on ARM || NDS32 || COMPILE_TEST depends on !64BIT || BROKEN select PHYLIB + select MDIO_ASPEED if MACH_ASPEED_G6 ---help--- This driver supports the FTGMAC100 Gigabit Ethernet controller from Faraday. It is used on Faraday A369, Andes AG102 and some diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 030fed65393e..9b7af94a40bb 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -17,6 +17,7 @@ #include <linux/module.h> #include <linux/netdevice.h> #include <linux/of.h> +#include <linux/of_mdio.h> #include <linux/phy.h> #include <linux/platform_device.h> #include <linux/property.h> @@ -774,7 +775,7 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb, for (i = 0; i < nfrags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - len = frag->size; + len = skb_frag_size(frag); /* Map it */ map = skb_frag_dma_map(priv->dev, frag, 0, len, @@ -1619,8 +1620,13 @@ static int ftgmac100_setup_mdio(struct net_device *netdev) if (!priv->mii_bus) return -EIO; - if (priv->is_aspeed) { - /* This driver supports the old MDIO interface */ + if (of_device_is_compatible(np, "aspeed,ast2400-mac") || + of_device_is_compatible(np, "aspeed,ast2500-mac")) { + /* The AST2600 has a separate MDIO controller */ + + /* For the AST2400 and AST2500 this driver only supports the + * old MDIO interface + */ reg = ioread32(priv->base + FTGMAC100_OFFSET_REVR); reg &= ~FTGMAC100_REVR_NEW_MDIO_INTERFACE; iowrite32(reg, priv->base + FTGMAC100_OFFSET_REVR); @@ -1797,7 +1803,8 @@ static int ftgmac100_probe(struct platform_device *pdev) np = pdev->dev.of_node; if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac") || - of_device_is_compatible(np, "aspeed,ast2500-mac"))) { + of_device_is_compatible(np, "aspeed,ast2500-mac") || + of_device_is_compatible(np, "aspeed,ast2600-mac"))) { priv->rxdes0_edorr_mask = BIT(30); priv->txdes0_edotr_mask = BIT(30); priv->is_aspeed = true; @@ -1817,7 +1824,29 @@ static int ftgmac100_probe(struct platform_device *pdev) priv->ndev = ncsi_register_dev(netdev, ftgmac100_ncsi_handler); if (!priv->ndev) goto err_ncsi_dev; - } else { + } else if (np && of_get_property(np, "phy-handle", NULL)) { + struct phy_device *phy; + + phy = of_phy_get_and_connect(priv->netdev, np, + &ftgmac100_adjust_link); + if (!phy) { + dev_err(&pdev->dev, "Failed to connect to phy\n"); + goto err_setup_mdio; + } + + /* Indicate that we support PAUSE frames (see comment in + * Documentation/networking/phy.txt) + */ + phy_support_asym_pause(phy); + + /* Display what we found */ + phy_attached_info(phy); + } else if (np && !of_get_child_by_name(np, "mdio")) { + /* Support legacy ASPEED devicetree descriptions that describe a + * MAC with an embedded MDIO controller but have no "mdio" + * child node. Automatically scan the MDIO bus for available + * PHYs.
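The new phy-handle branch in ftgmac100_probe() above leans on of_phy_get_and_connect(), which resolves the "phy-handle" property, connects the PHY to the netdev with the given link-change callback, and returns NULL on failure. A condensed sketch of that flow, factored into a hypothetical helper around the calls the hunk actually makes:

#include <linux/of_mdio.h>
#include <linux/phy.h>

/* Hypothetical helper; ftgmac100_adjust_link() is the driver's existing
 * link-change callback.
 */
static int ftgmac100_connect_dt_phy(struct net_device *netdev,
				    struct device_node *np)
{
	struct phy_device *phy;

	phy = of_phy_get_and_connect(netdev, np, &ftgmac100_adjust_link);
	if (!phy)
		return -ENODEV;		/* nothing usable behind phy-handle */

	phy_support_asym_pause(phy);	/* advertise symmetric + asym pause */
	phy_attached_info(phy);		/* log the PHY that was found */

	return 0;
}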
+ */ priv->use_ncsi = false; err = ftgmac100_setup_mdio(netdev); if (err) diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index f38c3fa7d705..b4b82b9c5cd6 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -485,7 +485,7 @@ static struct dpaa_bp *dpaa_bpid2pool(int bpid) static bool dpaa_bpid2pool_use(int bpid) { if (dpaa_bpid2pool(bpid)) { - atomic_inc(&dpaa_bp_array[bpid]->refs); + refcount_inc(&dpaa_bp_array[bpid]->refs); return true; } @@ -496,7 +496,7 @@ static bool dpaa_bpid2pool_use(int bpid) static void dpaa_bpid2pool_map(int bpid, struct dpaa_bp *dpaa_bp) { dpaa_bp_array[bpid] = dpaa_bp; - atomic_set(&dpaa_bp->refs, 1); + refcount_set(&dpaa_bp->refs, 1); } static int dpaa_bp_alloc_pool(struct dpaa_bp *dpaa_bp) @@ -584,7 +584,7 @@ static void dpaa_bp_free(struct dpaa_bp *dpaa_bp) if (!bp) return; - if (!atomic_dec_and_test(&bp->refs)) + if (!refcount_dec_and_test(&bp->refs)) return; if (bp->free_buf_cb) @@ -1958,7 +1958,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv, /* populate the rest of SGT entries */ for (i = 0; i < nr_frags; i++) { frag = &skb_shinfo(skb)->frags[i]; - frag_len = frag->size; + frag_len = skb_frag_size(frag); WARN_ON(!skb_frag_page(frag)); addr = skb_frag_dma_map(dev, frag, 0, frag_len, dma_dir); diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h index af320f83c742..f7e59e8db075 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h @@ -32,6 +32,7 @@ #define __DPAA_H #include <linux/netdevice.h> +#include <linux/refcount.h> #include <soc/fsl/qman.h> #include <soc/fsl/bman.h> @@ -99,7 +100,7 @@ struct dpaa_bp { int (*seed_cb)(struct dpaa_bp *); /* bpool can be emptied before freeing by this cb */ void (*free_buf_cb)(const struct dpaa_bp *, struct bm_buffer *); - atomic_t refs; + refcount_t refs; }; struct dpaa_rx_errors { diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c index a027f4a9d0cc..a9afe46b837f 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c @@ -164,70 +164,30 @@ static const struct file_operations dpaa2_dbg_ch_ops = { void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) { - if (!dpaa2_dbg_root) - return; + struct dentry *dir; /* Create a directory for the interface */ - priv->dbg.dir = debugfs_create_dir(priv->net_dev->name, - dpaa2_dbg_root); - if (!priv->dbg.dir) { - netdev_err(priv->net_dev, "debugfs_create_dir() failed\n"); - return; - } + dir = debugfs_create_dir(priv->net_dev->name, dpaa2_dbg_root); + priv->dbg.dir = dir; /* per-cpu stats file */ - priv->dbg.cpu_stats = debugfs_create_file("cpu_stats", 0444, - priv->dbg.dir, priv, - &dpaa2_dbg_cpu_ops); - if (!priv->dbg.cpu_stats) { - netdev_err(priv->net_dev, "debugfs_create_file() failed\n"); - goto err_cpu_stats; - } + debugfs_create_file("cpu_stats", 0444, dir, priv, &dpaa2_dbg_cpu_ops); /* per-fq stats file */ - priv->dbg.fq_stats = debugfs_create_file("fq_stats", 0444, - priv->dbg.dir, priv, - &dpaa2_dbg_fq_ops); - if (!priv->dbg.fq_stats) { - netdev_err(priv->net_dev, "debugfs_create_file() failed\n"); - goto err_fq_stats; - } + debugfs_create_file("fq_stats", 0444, dir, priv, &dpaa2_dbg_fq_ops); /* per-fq stats file */ - priv->dbg.ch_stats = debugfs_create_file("ch_stats", 0444, - 
priv->dbg.dir, priv, - &dpaa2_dbg_ch_ops); - if (!priv->dbg.fq_stats) { - netdev_err(priv->net_dev, "debugfs_create_file() failed\n"); - goto err_ch_stats; - } - - return; - -err_ch_stats: - debugfs_remove(priv->dbg.fq_stats); -err_fq_stats: - debugfs_remove(priv->dbg.cpu_stats); -err_cpu_stats: - debugfs_remove(priv->dbg.dir); + debugfs_create_file("ch_stats", 0444, dir, priv, &dpaa2_dbg_ch_ops); } void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) { - debugfs_remove(priv->dbg.fq_stats); - debugfs_remove(priv->dbg.ch_stats); - debugfs_remove(priv->dbg.cpu_stats); - debugfs_remove(priv->dbg.dir); + debugfs_remove_recursive(priv->dbg.dir); } void dpaa2_eth_dbg_init(void) { dpaa2_dbg_root = debugfs_create_dir(DPAA2_ETH_DBG_ROOT, NULL); - if (!dpaa2_dbg_root) { - pr_err("DPAA2-ETH: debugfs create failed\n"); - return; - } - pr_debug("DPAA2-ETH: debugfs created\n"); } diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h index 4f63de997a26..15598b28f03b 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h @@ -11,9 +11,6 @@ struct dpaa2_eth_priv; struct dpaa2_debugfs { struct dentry *dir; - struct dentry *fq_stats; - struct dentry *ch_stats; - struct dentry *cpu_stats; }; #ifdef CONFIG_DEBUG_FS diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 0acb11557ed1..162d7d8fb295 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -1208,9 +1208,37 @@ static void disable_ch_napi(struct dpaa2_eth_priv *priv) } } +static void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv, bool enable) +{ + struct dpni_taildrop td = {0}; + int i, err; + + if (priv->rx_td_enabled == enable) + return; + + td.enable = enable; + td.threshold = DPAA2_ETH_TAILDROP_THRESH; + + for (i = 0; i < priv->num_fqs; i++) { + if (priv->fq[i].type != DPAA2_RX_FQ) + continue; + err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, + DPNI_CP_QUEUE, DPNI_QUEUE_RX, 0, + priv->fq[i].flowid, &td); + if (err) { + netdev_err(priv->net_dev, + "dpni_set_taildrop() failed\n"); + break; + } + } + + priv->rx_td_enabled = enable; +} + static int link_state_update(struct dpaa2_eth_priv *priv) { struct dpni_link_state state = {0}; + bool tx_pause; int err; err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); @@ -1220,11 +1248,18 @@ static int link_state_update(struct dpaa2_eth_priv *priv) return err; } + /* If Tx pause frame settings have changed, we need to update + * Rx FQ taildrop configuration as well. We configure taildrop + * only when pause frame generation is disabled. + */ + tx_pause = !!(state.options & DPNI_LINK_OPT_PAUSE) ^ + !!(state.options & DPNI_LINK_OPT_ASYM_PAUSE); + dpaa2_eth_set_rx_taildrop(priv, !tx_pause); + /* Check link state; speed / duplex changes are not treated yet */ if (priv->link_state.up == state.up) - return 0; + goto out; - priv->link_state = state; if (state.up) { netif_carrier_on(priv->net_dev); netif_tx_start_all_queues(priv->net_dev); @@ -1236,6 +1271,9 @@ static int link_state_update(struct dpaa2_eth_priv *priv) netdev_info(priv->net_dev, "Link Event: state %s\n", state.up ?
"up" : "down"); +out: + priv->link_state = state; + return 0; } @@ -1310,7 +1348,7 @@ static u32 ingress_fq_count(struct dpaa2_eth_priv *priv) return total; } -static void wait_for_fq_empty(struct dpaa2_eth_priv *priv) +static void wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv) { int retries = 10; u32 pending; @@ -1322,6 +1360,31 @@ static void wait_for_fq_empty(struct dpaa2_eth_priv *priv) } while (pending && --retries); } +#define DPNI_TX_PENDING_VER_MAJOR 7 +#define DPNI_TX_PENDING_VER_MINOR 13 +static void wait_for_egress_fq_empty(struct dpaa2_eth_priv *priv) +{ + union dpni_statistics stats; + int retries = 10; + int err; + + if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_TX_PENDING_VER_MAJOR, + DPNI_TX_PENDING_VER_MINOR) < 0) + goto out; + + do { + err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token, 6, + &stats); + if (err) + goto out; + if (stats.page_6.tx_pending_frames == 0) + return; + } while (--retries); + +out: + msleep(500); +} + static int dpaa2_eth_stop(struct net_device *net_dev) { struct dpaa2_eth_priv *priv = netdev_priv(net_dev); @@ -1341,7 +1404,7 @@ static int dpaa2_eth_stop(struct net_device *net_dev) * on WRIOP. After it finishes, wait until all remaining frames on Rx * and Tx conf queues are consumed on NAPI poll. */ - msleep(500); + wait_for_egress_fq_empty(priv); do { dpni_disable(priv->mc_io, 0, priv->mc_token); @@ -1357,7 +1420,7 @@ static int dpaa2_eth_stop(struct net_device *net_dev) */ } - wait_for_fq_empty(priv); + wait_for_ingress_fq_empty(priv); disable_ch_napi(priv); /* Empty the buffer pool */ @@ -2443,6 +2506,33 @@ static void set_enqueue_mode(struct dpaa2_eth_priv *priv) priv->enqueue = dpaa2_eth_enqueue_fq; } +static int set_pause(struct dpaa2_eth_priv *priv) +{ + struct device *dev = priv->net_dev->dev.parent; + struct dpni_link_cfg link_cfg = {0}; + int err; + + /* Get the default link options so we don't override other flags */ + err = dpni_get_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg); + if (err) { + dev_err(dev, "dpni_get_link_cfg() failed\n"); + return err; + } + + /* By default, enable both Rx and Tx pause frames */ + link_cfg.options |= DPNI_LINK_OPT_PAUSE; + link_cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE; + err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg); + if (err) { + dev_err(dev, "dpni_set_link_cfg() failed\n"); + return err; + } + + priv->link_state.options = link_cfg.options; + + return 0; +} + /* Configure the DPNI object this interface is associated with */ static int setup_dpni(struct fsl_mc_device *ls_dev) { @@ -2498,6 +2588,13 @@ static int setup_dpni(struct fsl_mc_device *ls_dev) set_enqueue_mode(priv); + /* Enable pause frame support */ + if (dpaa2_eth_has_pause_support(priv)) { + err = set_pause(priv); + if (err) + goto close; + } + priv->cls_rules = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) * dpaa2_eth_fs_count(priv), GFP_KERNEL); if (!priv->cls_rules) @@ -2529,7 +2626,6 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv, struct device *dev = priv->net_dev->dev.parent; struct dpni_queue queue; struct dpni_queue_id qid; - struct dpni_taildrop td; int err; err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, @@ -2554,15 +2650,6 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv, return err; } - td.enable = 1; - td.threshold = DPAA2_ETH_TAILDROP_THRESH; - err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, DPNI_CP_QUEUE, - DPNI_QUEUE_RX, 0, fq->flowid, &td); - if (err) { - dev_err(dev, "dpni_set_threshold() failed\n"); - return err; - } - /* xdp_rxq setup */ err = 
xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev, fq->flowid); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h index 9af18c24221f..8a0e65b3267f 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h @@ -392,6 +392,7 @@ struct dpaa2_eth_priv { struct dpaa2_eth_drv_stats __percpu *percpu_extras; u16 mc_token; + u8 rx_td_enabled; struct dpni_link_state link_state; bool do_link_poll; @@ -476,6 +477,12 @@ enum dpaa2_eth_rx_dist { #define DPAA2_ETH_DIST_L4DST BIT(8) #define DPAA2_ETH_DIST_ALL (~0ULL) +#define DPNI_PAUSE_VER_MAJOR 7 +#define DPNI_PAUSE_VER_MINOR 13 +#define dpaa2_eth_has_pause_support(priv) \ + (dpaa2_eth_cmp_dpni_ver((priv), DPNI_PAUSE_VER_MAJOR, \ + DPNI_PAUSE_VER_MINOR) >= 0) + static inline unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv, struct sk_buff *skb) diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c index 7b182f4b263c..0aa1c34019bb 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c @@ -28,6 +28,11 @@ static char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = { "[hw] rx nobuffer discards", "[hw] tx discarded frames", "[hw] tx confirmed frames", + "[hw] tx dequeued bytes", + "[hw] tx dequeued frames", + "[hw] tx rejected bytes", + "[hw] tx rejected frames", + "[hw] tx pending frames", }; #define DPAA2_ETH_NUM_STATS ARRAY_SIZE(dpaa2_ethtool_stats) @@ -78,71 +83,67 @@ static int dpaa2_eth_get_link_ksettings(struct net_device *net_dev, struct ethtool_link_ksettings *link_settings) { - struct dpni_link_state state = {0}; - int err = 0; struct dpaa2_eth_priv *priv = netdev_priv(net_dev); - err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); - if (err) { - netdev_err(net_dev, "ERROR %d getting link state\n", err); - goto out; - } - - /* At the moment, we have no way of interrogating the DPMAC - * from the DPNI side - and for that matter there may exist - * no DPMAC at all. So for now we just don't report anything - * beyond the DPNI attributes. - */ - if (state.options & DPNI_LINK_OPT_AUTONEG) - link_settings->base.autoneg = AUTONEG_ENABLE; - if (!(state.options & DPNI_LINK_OPT_HALF_DUPLEX)) + link_settings->base.autoneg = AUTONEG_DISABLE; + if (!(priv->link_state.options & DPNI_LINK_OPT_HALF_DUPLEX)) link_settings->base.duplex = DUPLEX_FULL; - link_settings->base.speed = state.rate; + link_settings->base.speed = priv->link_state.rate; -out: - return err; + return 0; } -#define DPNI_DYNAMIC_LINK_SET_VER_MAJOR 7 -#define DPNI_DYNAMIC_LINK_SET_VER_MINOR 1 -static int -dpaa2_eth_set_link_ksettings(struct net_device *net_dev, - const struct ethtool_link_ksettings *link_settings) +static void dpaa2_eth_get_pauseparam(struct net_device *net_dev, + struct ethtool_pauseparam *pause) { - struct dpni_link_cfg cfg = {0}; struct dpaa2_eth_priv *priv = netdev_priv(net_dev); - int err = 0; + u64 link_options = priv->link_state.options; - /* If using an older MC version, the DPNI must be down - * in order to be able to change link settings. Taking steps to let - * the user know that. 
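In the DPNI link options used throughout the dpaa2 hunks above, Tx pause has no dedicated bit: DPNI_LINK_OPT_PAUSE enables Rx pause, and DPNI_LINK_OPT_ASYM_PAUSE flips the Tx direction relative to it, which is why link_state_update() and the ethtool pauseparam handlers below both decode Tx pause as an XOR of the two flags. A small sketch of the decode, with an illustrative helper name:

/* Illustrative decode of DPNI link options into Rx/Tx pause flags */
static void dpni_options_to_pause(u64 options, bool *rx_pause, bool *tx_pause)
{
	*rx_pause = !!(options & DPNI_LINK_OPT_PAUSE);
	/* ASYM_PAUSE inverts the Tx direction relative to Rx */
	*tx_pause = *rx_pause ^ !!(options & DPNI_LINK_OPT_ASYM_PAUSE);
}

Rx taildrop is then enabled exactly when Tx pause is off, since pause frames and taildrop are alternative responses to congestion and the driver never runs both at once.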
- */ - if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_DYNAMIC_LINK_SET_VER_MAJOR, - DPNI_DYNAMIC_LINK_SET_VER_MINOR) < 0) { - if (netif_running(net_dev)) { - netdev_info(net_dev, "Interface must be brought down first.\n"); - return -EACCES; - } + pause->rx_pause = !!(link_options & DPNI_LINK_OPT_PAUSE); + pause->tx_pause = pause->rx_pause ^ + !!(link_options & DPNI_LINK_OPT_ASYM_PAUSE); + pause->autoneg = AUTONEG_DISABLE; +} + +static int dpaa2_eth_set_pauseparam(struct net_device *net_dev, + struct ethtool_pauseparam *pause) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + struct dpni_link_cfg cfg = {0}; + int err; + + if (!dpaa2_eth_has_pause_support(priv)) { + netdev_info(net_dev, "No pause frame support for DPNI version < %d.%d\n", + DPNI_PAUSE_VER_MAJOR, DPNI_PAUSE_VER_MINOR); + return -EOPNOTSUPP; } - cfg.rate = link_settings->base.speed; - if (link_settings->base.autoneg == AUTONEG_ENABLE) - cfg.options |= DPNI_LINK_OPT_AUTONEG; + if (pause->autoneg) + return -EOPNOTSUPP; + + cfg.rate = priv->link_state.rate; + cfg.options = priv->link_state.options; + if (pause->rx_pause) + cfg.options |= DPNI_LINK_OPT_PAUSE; else - cfg.options &= ~DPNI_LINK_OPT_AUTONEG; - if (link_settings->base.duplex == DUPLEX_HALF) - cfg.options |= DPNI_LINK_OPT_HALF_DUPLEX; + cfg.options &= ~DPNI_LINK_OPT_PAUSE; + if (!!pause->rx_pause ^ !!pause->tx_pause) + cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE; else - cfg.options &= ~DPNI_LINK_OPT_HALF_DUPLEX; + cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE; + + if (cfg.options == priv->link_state.options) + return 0; err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg); - if (err) - /* ethtool will be loud enough if we return an error; no point - * in putting our own error message on the console by default - */ - netdev_dbg(net_dev, "ERROR %d setting link cfg\n", err); + if (err) { + netdev_err(net_dev, "dpni_set_link_cfg failed\n"); + return err; + } - return err; + priv->link_state.options = cfg.options; + + return 0; } static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset, @@ -192,27 +193,33 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev, struct dpaa2_eth_priv *priv = netdev_priv(net_dev); struct dpaa2_eth_drv_stats *extras; struct dpaa2_eth_ch_stats *ch_stats; + int dpni_stats_page_size[DPNI_STATISTICS_CNT] = { + sizeof(dpni_stats.page_0), + sizeof(dpni_stats.page_1), + sizeof(dpni_stats.page_2), + sizeof(dpni_stats.page_3), + sizeof(dpni_stats.page_4), + sizeof(dpni_stats.page_5), + sizeof(dpni_stats.page_6), + }; memset(data, 0, sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS)); /* Print standard counters, from DPNI statistics */ - for (j = 0; j <= 2; j++) { + for (j = 0; j <= 6; j++) { + /* We're not interested in pages 4 & 5 for now */ + if (j == 4 || j == 5) + continue; err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token, j, &dpni_stats); - if (err != 0) + if (err == -EINVAL) + /* Older firmware versions don't support all pages */ + memset(&dpni_stats, 0, sizeof(dpni_stats)); + else if (err) netdev_warn(net_dev, "dpni_get_stats(%d) failed\n", j); - switch (j) { - case 0: - num_cnt = sizeof(dpni_stats.page_0) / sizeof(u64); - break; - case 1: - num_cnt = sizeof(dpni_stats.page_1) / sizeof(u64); - break; - case 2: - num_cnt = sizeof(dpni_stats.page_2) / sizeof(u64); - break; - } + + num_cnt = dpni_stats_page_size[j] / sizeof(u64); for (k = 0; k < num_cnt; k++) *(data + i++) = dpni_stats.raw.counter[k]; } @@ -721,7 +728,8 @@ const struct ethtool_ops dpaa2_ethtool_ops = { .get_drvinfo =
dpaa2_eth_get_drvinfo, .get_link = ethtool_op_get_link, .get_link_ksettings = dpaa2_eth_get_link_ksettings, - .set_link_ksettings = dpaa2_eth_set_link_ksettings, + .get_pauseparam = dpaa2_eth_get_pauseparam, + .set_pauseparam = dpaa2_eth_set_pauseparam, .get_sset_count = dpaa2_eth_get_sset_count, .get_ethtool_stats = dpaa2_eth_get_ethtool_stats, .get_strings = dpaa2_eth_get_strings, diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h index 7b44d7d9b19a..d9b6918807af 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h @@ -84,6 +84,7 @@ #define DPNI_CMDID_SET_RX_FS_DIST DPNI_CMD(0x273) #define DPNI_CMDID_SET_RX_HASH_DIST DPNI_CMD(0x274) +#define DPNI_CMDID_GET_LINK_CFG DPNI_CMD(0x278) /* Macros for accessing command fields smaller than 1byte */ #define DPNI_MASK(field) \ @@ -284,7 +285,7 @@ struct dpni_rsp_get_statistics { __le64 counter[DPNI_STATISTICS_CNT]; }; -struct dpni_cmd_set_link_cfg { +struct dpni_cmd_link_cfg { /* cmd word 0 */ __le64 pad0; /* cmd word 1 */ diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.c b/drivers/net/ethernet/freescale/dpaa2/dpni.c index 220dfc806a24..dd54e6953aeb 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpni.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpni.c @@ -838,13 +838,13 @@ int dpni_set_link_cfg(struct fsl_mc_io *mc_io, const struct dpni_link_cfg *cfg) { struct fsl_mc_command cmd = { 0 }; - struct dpni_cmd_set_link_cfg *cmd_params; + struct dpni_cmd_link_cfg *cmd_params; /* prepare command */ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG, cmd_flags, token); - cmd_params = (struct dpni_cmd_set_link_cfg *)cmd.params; + cmd_params = (struct dpni_cmd_link_cfg *)cmd.params; cmd_params->rate = cpu_to_le32(cfg->rate); cmd_params->options = cpu_to_le64(cfg->options); @@ -853,6 +853,42 @@ int dpni_set_link_cfg(struct fsl_mc_io *mc_io, } /** + * dpni_get_link_cfg() - return the link configuration + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cfg: Link configuration from dpni object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_link_cfg(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + struct dpni_link_cfg *cfg) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_link_cfg *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_CFG, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_cmd_link_cfg *)cmd.params; + cfg->rate = le32_to_cpu(rsp_params->rate); + cfg->options = le64_to_cpu(rsp_params->options); + + return err; +} + +/** * dpni_get_link_state() - Return the link state (either up or down) * @mc_io: Pointer to MC portal's I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' @@ -1434,7 +1470,7 @@ int dpni_get_queue(struct fsl_mc_io *mc_io, * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @token: Token of DPNI object * @page: Selects the statistics page to retrieve, see - * DPNI_GET_STATISTICS output. Pages are numbered 0 to 2. + * DPNI_GET_STATISTICS output. Pages are numbered 0 to 6. * @stat: Structure containing the statistics * * Return: '0' on Success; Error code otherwise. 
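The dpni_get_link_cfg() command implemented above exists so callers can read-modify-write the link option bits without clobbering flags they do not own, which is exactly how set_pause() in dpaa2-eth.c uses it during setup_dpni(). An illustrative round trip under that assumption (the function name is hypothetical):

/* Hypothetical: enable symmetric pause without touching other options */
static int dpaa2_example_enable_pause(struct dpaa2_eth_priv *priv)
{
	struct dpni_link_cfg link_cfg = {0};
	int err;

	err = dpni_get_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
	if (err)
		return err;

	link_cfg.options |= DPNI_LINK_OPT_PAUSE;	/* Rx pause on */
	link_cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;	/* Tx follows Rx */

	return dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
}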
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h index a521242e2353..fd583911b6c0 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpni.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h @@ -416,6 +416,26 @@ int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io, * lack of buffers * @page_2.egress_discarded_frames: Egress discarded frame count * @page_2.egress_confirmed_frames: Egress confirmed frame count + * @page_3: Page_3 statistics structure + * @page_3.egress_dequeue_bytes: Cumulative count of the number of bytes + * dequeued from egress FQs + * @page_3.egress_dequeue_frames: Cumulative count of the number of frames + * dequeued from egress FQs + * @page_3.egress_reject_bytes: Cumulative count of the number of bytes in + * egress frames whose enqueue was rejected + * @page_3.egress_reject_frames: Cumulative count of the number of egress + * frames whose enqueue was rejected + * @page_4: Page_4 statistics structure: congestion points + * @page_4.cgr_reject_frames: number of rejected frames due to congestion point + * @page_4.cgr_reject_bytes: number of rejected bytes due to congestion point + * @page_5: Page_5 statistics structure: policer + * @page_5.policer_cnt_red: Number of red colored frames + * @page_5.policer_cnt_yellow: number of yellow colored frames + * @page_5.policer_cnt_green: number of green colored frames + * @page_5.policer_cnt_re_red: number of recolored red frames + * @page_5.policer_cnt_re_yellow: number of recolored yellow frames + * @page_6: Page_6 statistics structure + * @page_6.tx_pending_frames: total number of frames pending in egress FQs * @raw: raw statistics structure, used to index counters */ union dpni_statistics { @@ -443,6 +463,26 @@ union dpni_statistics { u64 egress_confirmed_frames; } page_2; struct { + u64 egress_dequeue_bytes; + u64 egress_dequeue_frames; + u64 egress_reject_bytes; + u64 egress_reject_frames; + } page_3; + struct { + u64 cgr_reject_frames; + u64 cgr_reject_bytes; + } page_4; + struct { + u64 policer_cnt_red; + u64 policer_cnt_yellow; + u64 policer_cnt_green; + u64 policer_cnt_re_red; + u64 policer_cnt_re_yellow; + } page_5; + struct { + u64 tx_pending_frames; + } page_6; + struct { u64 counter[DPNI_STATISTICS_CNT]; } raw; }; @@ -485,6 +525,11 @@ int dpni_set_link_cfg(struct fsl_mc_io *mc_io, u16 token, const struct dpni_link_cfg *cfg); +int dpni_get_link_cfg(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + struct dpni_link_cfg *cfg); + /** * struct dpni_link_state - Structure representing DPNI link state * @rate: Rate diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig index 04a59db03f2b..c219587bd334 100644 --- a/drivers/net/ethernet/freescale/enetc/Kconfig +++ b/drivers/net/ethernet/freescale/enetc/Kconfig @@ -20,6 +20,15 @@ config FSL_ENETC_VF If compiled as module (M), the module name is fsl-enetc-vf. +config FSL_ENETC_MDIO + tristate "ENETC MDIO driver" + depends on PCI && (ARCH_LAYERSCAPE || COMPILE_TEST) + help + This driver supports NXP ENETC Central MDIO controller as a PCIe + physical function (PF) device. + + If compiled as module (M), the module name is fsl-enetc-mdio.
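The page_6 counter documented above is what wait_for_egress_fq_empty() polls during dpaa2_eth_stop(): once tx_pending_frames reads zero, every egress frame has been dequeued and the old unconditional 500 ms sleep can be skipped. A reduced sketch of that poll loop (helper name illustrative; the firmware must be recent enough to expose page 6):

/* Illustrative: poll statistics page 6 until the egress FQs drain */
static bool dpaa2_egress_drained(struct dpaa2_eth_priv *priv)
{
	union dpni_statistics stats;
	int retries = 10;

	do {
		if (dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
					6, &stats))
			return false;	/* unknown state; caller falls back */
		if (stats.page_6.tx_pending_frames == 0)
			return true;
	} while (--retries);

	return false;
}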
+ config FSL_ENETC_PTP_CLOCK tristate "ENETC PTP clock driver" depends on PTP_1588_CLOCK_QORIQ && (FSL_ENETC || FSL_ENETC_VF) diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile index 7139e414dccf..d200c27c3bf6 100644 --- a/drivers/net/ethernet/freescale/enetc/Makefile +++ b/drivers/net/ethernet/freescale/enetc/Makefile @@ -1,19 +1,16 @@ # SPDX-License-Identifier: GPL-2.0 + +common-objs := enetc.o enetc_cbdr.o enetc_ethtool.o + obj-$(CONFIG_FSL_ENETC) += fsl-enetc.o -fsl-enetc-$(CONFIG_FSL_ENETC) += enetc.o enetc_cbdr.o enetc_ethtool.o \ - enetc_mdio.o +fsl-enetc-y := enetc_pf.o enetc_mdio.o $(common-objs) fsl-enetc-$(CONFIG_PCI_IOV) += enetc_msg.o -fsl-enetc-objs := enetc_pf.o $(fsl-enetc-y) obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o +fsl-enetc-vf-y := enetc_vf.o $(common-objs) -ifeq ($(CONFIG_FSL_ENETC)$(CONFIG_FSL_ENETC_VF), yy) -fsl-enetc-vf-objs := enetc_vf.o -else -fsl-enetc-vf-$(CONFIG_FSL_ENETC_VF) += enetc.o enetc_cbdr.o \ - enetc_ethtool.o -fsl-enetc-vf-objs := enetc_vf.o $(fsl-enetc-vf-y) -endif +obj-$(CONFIG_FSL_ENETC_MDIO) += fsl-enetc-mdio.o +fsl-enetc-mdio-y := enetc_pci_mdio.o enetc_mdio.o obj-$(CONFIG_FSL_ENETC_PTP_CLOCK) += fsl-enetc-ptp.o -fsl-enetc-ptp-$(CONFIG_FSL_ENETC_PTP_CLOCK) += enetc_ptp.o +fsl-enetc-ptp-y := enetc_ptp.o diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 223709443ea4..b6ff89307409 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -110,7 +110,7 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb, int active_offloads) { struct enetc_tx_swbd *tx_swbd; - struct skb_frag_struct *frag; + skb_frag_t *frag; int len = skb_headlen(skb); union enetc_tx_bd temp_bd; union enetc_tx_bd *txbd; diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c index 77b9cd10ba2b..149883c8f0b8 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c @@ -6,18 +6,20 @@ #include <linux/iopoll.h> #include <linux/of.h> -#include "enetc_pf.h" +#include "enetc_mdio.h" -struct enetc_mdio_regs { - u32 mdio_cfg; /* MDIO configuration and status */ - u32 mdio_ctl; /* MDIO control */ - u32 mdio_data; /* MDIO data */ - u32 mdio_addr; /* MDIO address */ -}; +#define ENETC_MDIO_REG_OFFSET 0x1c00 +#define ENETC_MDIO_CFG 0x0 /* MDIO configuration and status */ +#define ENETC_MDIO_CTL 0x4 /* MDIO control */ +#define ENETC_MDIO_DATA 0x8 /* MDIO data */ +#define ENETC_MDIO_ADDR 0xc /* MDIO address */ -#define bus_to_enetc_regs(bus) (struct enetc_mdio_regs __iomem *)((bus)->priv) +#define enetc_mdio_rd(hw, off) \ + enetc_port_rd(hw, ENETC_##off + ENETC_MDIO_REG_OFFSET) +#define enetc_mdio_wr(hw, off, val) \ + enetc_port_wr(hw, ENETC_##off + ENETC_MDIO_REG_OFFSET, val) +#define enetc_mdio_rd_reg(off) enetc_mdio_rd(hw, off) -#define ENETC_MDIO_REG_OFFSET 0x1c00 #define ENETC_MDC_DIV 258 #define MDIO_CFG_CLKDIV(x) ((((x) >> 1) & 0xff) << 8) @@ -33,18 +35,18 @@ struct enetc_mdio_regs { #define MDIO_DATA(x) ((x) & 0xffff) #define TIMEOUT 1000 -static int enetc_mdio_wait_complete(struct enetc_mdio_regs __iomem *regs) +static int enetc_mdio_wait_complete(struct enetc_hw *hw) { u32 val; - return readx_poll_timeout(enetc_rd_reg, ®s->mdio_cfg, val, + return readx_poll_timeout(enetc_mdio_rd_reg, MDIO_CFG, val, !(val & MDIO_CFG_BSY), 10, 10 * TIMEOUT); } -static int 
enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, - u16 value) +int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value) { - struct enetc_mdio_regs __iomem *regs = bus_to_enetc_regs(bus); + struct enetc_mdio_priv *mdio_priv = bus->priv; + struct enetc_hw *hw = mdio_priv->hw; u32 mdio_ctl, mdio_cfg; u16 dev_addr; int ret; @@ -59,38 +61,39 @@ static int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, mdio_cfg &= ~MDIO_CFG_ENC45; } - enetc_wr_reg(®s->mdio_cfg, mdio_cfg); + enetc_mdio_wr(hw, MDIO_CFG, mdio_cfg); - ret = enetc_mdio_wait_complete(regs); + ret = enetc_mdio_wait_complete(hw); if (ret) return ret; /* set port and dev addr */ mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr); - enetc_wr_reg(®s->mdio_ctl, mdio_ctl); + enetc_mdio_wr(hw, MDIO_CTL, mdio_ctl); /* set the register address */ if (regnum & MII_ADDR_C45) { - enetc_wr_reg(®s->mdio_addr, regnum & 0xffff); + enetc_mdio_wr(hw, MDIO_ADDR, regnum & 0xffff); - ret = enetc_mdio_wait_complete(regs); + ret = enetc_mdio_wait_complete(hw); if (ret) return ret; } /* write the value */ - enetc_wr_reg(®s->mdio_data, MDIO_DATA(value)); + enetc_mdio_wr(hw, MDIO_DATA, MDIO_DATA(value)); - ret = enetc_mdio_wait_complete(regs); + ret = enetc_mdio_wait_complete(hw); if (ret) return ret; return 0; } -static int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum) +int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum) { - struct enetc_mdio_regs __iomem *regs = bus_to_enetc_regs(bus); + struct enetc_mdio_priv *mdio_priv = bus->priv; + struct enetc_hw *hw = mdio_priv->hw; u32 mdio_ctl, mdio_cfg; u16 dev_addr, value; int ret; @@ -104,41 +107,41 @@ static int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum) mdio_cfg &= ~MDIO_CFG_ENC45; } - enetc_wr_reg(®s->mdio_cfg, mdio_cfg); + enetc_mdio_wr(hw, MDIO_CFG, mdio_cfg); - ret = enetc_mdio_wait_complete(regs); + ret = enetc_mdio_wait_complete(hw); if (ret) return ret; /* set port and device addr */ mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr); - enetc_wr_reg(®s->mdio_ctl, mdio_ctl); + enetc_mdio_wr(hw, MDIO_CTL, mdio_ctl); /* set the register address */ if (regnum & MII_ADDR_C45) { - enetc_wr_reg(®s->mdio_addr, regnum & 0xffff); + enetc_mdio_wr(hw, MDIO_ADDR, regnum & 0xffff); - ret = enetc_mdio_wait_complete(regs); + ret = enetc_mdio_wait_complete(hw); if (ret) return ret; } /* initiate the read */ - enetc_wr_reg(®s->mdio_ctl, mdio_ctl | MDIO_CTL_READ); + enetc_mdio_wr(hw, MDIO_CTL, mdio_ctl | MDIO_CTL_READ); - ret = enetc_mdio_wait_complete(regs); + ret = enetc_mdio_wait_complete(hw); if (ret) return ret; /* return all Fs if nothing was there */ - if (enetc_rd_reg(®s->mdio_cfg) & MDIO_CFG_RD_ER) { + if (enetc_mdio_rd(hw, MDIO_CFG) & MDIO_CFG_RD_ER) { dev_dbg(&bus->dev, "Error while reading PHY%d reg at %d.%hhu\n", phy_id, dev_addr, regnum); return 0xffff; } - value = enetc_rd_reg(®s->mdio_data) & 0xffff; + value = enetc_mdio_rd(hw, MDIO_DATA) & 0xffff; return value; } @@ -146,12 +149,12 @@ static int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum) int enetc_mdio_probe(struct enetc_pf *pf) { struct device *dev = &pf->si->pdev->dev; - struct enetc_mdio_regs __iomem *regs; + struct enetc_mdio_priv *mdio_priv; struct device_node *np; struct mii_bus *bus; - int ret; + int err; - bus = mdiobus_alloc_size(sizeof(regs)); + bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv)); if (!bus) return -ENOMEM; @@ -159,41 +162,31 @@ int enetc_mdio_probe(struct enetc_pf *pf) bus->read = 
enetc_mdio_read; bus->write = enetc_mdio_write; bus->parent = dev; + mdio_priv = bus->priv; + mdio_priv->hw = &pf->si->hw; snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev)); - /* store the enetc mdio base address for this bus */ - regs = pf->si->hw.port + ENETC_MDIO_REG_OFFSET; - bus->priv = regs; - np = of_get_child_by_name(dev->of_node, "mdio"); if (!np) { dev_err(dev, "MDIO node missing\n"); - ret = -EINVAL; - goto err_registration; + return -EINVAL; } - ret = of_mdiobus_register(bus, np); - if (ret) { + err = of_mdiobus_register(bus, np); + if (err) { of_node_put(np); dev_err(dev, "cannot register MDIO bus\n"); - goto err_registration; + return err; } of_node_put(np); pf->mdio = bus; return 0; - -err_registration: - mdiobus_free(bus); - - return ret; } void enetc_mdio_remove(struct enetc_pf *pf) { - if (pf->mdio) { + if (pf->mdio) mdiobus_unregister(pf->mdio); - mdiobus_free(pf->mdio); - } } diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.h b/drivers/net/ethernet/freescale/enetc/enetc_mdio.h new file mode 100644 index 000000000000..60c9a3889824 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_mdio.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* Copyright 2019 NXP */ + +#include <linux/phy.h> +#include "enetc_pf.h" + +struct enetc_mdio_priv { + struct enetc_hw *hw; +}; + +int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value); +int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c new file mode 100644 index 000000000000..fbd41ce01f06 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2019 NXP */ +#include <linux/of_mdio.h> +#include "enetc_mdio.h" + +#define ENETC_MDIO_DEV_ID 0xee01 +#define ENETC_MDIO_DEV_NAME "FSL PCIe IE Central MDIO" +#define ENETC_MDIO_BUS_NAME ENETC_MDIO_DEV_NAME " Bus" +#define ENETC_MDIO_DRV_NAME ENETC_MDIO_DEV_NAME " driver" + +static int enetc_pci_mdio_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct enetc_mdio_priv *mdio_priv; + struct device *dev = &pdev->dev; + struct enetc_hw *hw; + struct mii_bus *bus; + int err; + + hw = devm_kzalloc(dev, sizeof(*hw), GFP_KERNEL); + if (!hw) + return -ENOMEM; + + bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv)); + if (!bus) + return -ENOMEM; + + bus->name = ENETC_MDIO_BUS_NAME; + bus->read = enetc_mdio_read; + bus->write = enetc_mdio_write; + bus->parent = dev; + mdio_priv = bus->priv; + mdio_priv->hw = hw; + snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev)); + + pcie_flr(pdev); + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(dev, "device enable failed\n"); + return err; + } + + err = pci_request_region(pdev, 0, KBUILD_MODNAME); + if (err) { + dev_err(dev, "pci_request_region failed\n"); + goto err_pci_mem_reg; + } + + hw->port = pci_iomap(pdev, 0, 0); + if (!hw->port) { + err = -ENXIO; + dev_err(dev, "iomap failed\n"); + goto err_ioremap; + } + + err = of_mdiobus_register(bus, dev->of_node); + if (err) + goto err_mdiobus_reg; + + pci_set_drvdata(pdev, bus); + + return 0; + +err_mdiobus_reg: + iounmap(mdio_priv->hw->port); +err_ioremap: + pci_release_mem_regions(pdev); +err_pci_mem_reg: + pci_disable_device(pdev); + + return err; +} + +static void enetc_pci_mdio_remove(struct pci_dev *pdev) +{ + struct mii_bus *bus = pci_get_drvdata(pdev); + 
struct enetc_mdio_priv *mdio_priv; + + mdiobus_unregister(bus); + mdio_priv = bus->priv; + iounmap(mdio_priv->hw->port); + pci_release_mem_regions(pdev); + pci_disable_device(pdev); +} + +static const struct pci_device_id enetc_pci_mdio_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_MDIO_DEV_ID) }, + { 0, } /* End of table. */ +}; +MODULE_DEVICE_TABLE(pci, enetc_pci_mdio_id_table); + +static struct pci_driver enetc_pci_mdio_driver = { + .name = KBUILD_MODNAME, + .id_table = enetc_pci_mdio_id_table, + .probe = enetc_pci_mdio_probe, + .remove = enetc_pci_mdio_remove, +}; +module_pci_driver(enetc_pci_mdio_driver); + +MODULE_DESCRIPTION(ENETC_MDIO_DRV_NAME); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index 258b3cb38a6f..7d6513ff8507 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -750,6 +750,7 @@ static int enetc_of_get_phy(struct enetc_ndev_priv *priv) { struct enetc_pf *pf = enetc_si_priv(priv->si); struct device_node *np = priv->dev->of_node; + struct device_node *mdio_np; int err; if (!np) { @@ -773,7 +774,9 @@ static int enetc_of_get_phy(struct enetc_ndev_priv *priv) priv->phy_node = of_node_get(np); } - if (!of_phy_is_fixed_link(np)) { + mdio_np = of_get_child_by_name(np, "mdio"); + if (mdio_np) { + of_node_put(mdio_np); err = enetc_mdio_probe(pf); if (err) { of_node_put(priv->phy_node); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index e5610a4da539..d4d4c72adf49 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -208,8 +208,11 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); /* FEC MII MMFR bits definition */ #define FEC_MMFR_ST (1 << 30) +#define FEC_MMFR_ST_C45 (0) #define FEC_MMFR_OP_READ (2 << 28) +#define FEC_MMFR_OP_READ_C45 (3 << 28) #define FEC_MMFR_OP_WRITE (1 << 28) +#define FEC_MMFR_OP_ADDR_WRITE (0) #define FEC_MMFR_PA(v) ((v & 0x1f) << 23) #define FEC_MMFR_RA(v) ((v & 0x1f) << 18) #define FEC_MMFR_TA (2 << 16) @@ -365,7 +368,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, status = fec16_to_cpu(bdp->cbd_sc); status &= ~BD_ENET_TX_STATS; status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); - frag_len = skb_shinfo(skb)->frags[frag].size; + frag_len = skb_frag_size(&skb_shinfo(skb)->frags[frag]); /* Handle the last BD specially */ if (frag == nr_frags - 1) { @@ -387,7 +390,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, ebdp->cbd_esc = cpu_to_fec32(estatus); } - bufaddr = page_address(this_frag->page.p) + this_frag->page_offset; + bufaddr = skb_frag_address(this_frag); index = fec_enet_get_bd_index(bdp, &txq->bd); if (((unsigned long) bufaddr) & fep->tx_align || @@ -1767,7 +1770,8 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) struct fec_enet_private *fep = bus->priv; struct device *dev = &fep->pdev->dev; unsigned long time_left; - int ret = 0; + int ret = 0, frame_start, frame_addr, frame_op; + bool is_c45 = !!(regnum & MII_ADDR_C45); ret = pm_runtime_get_sync(dev); if (ret < 0) @@ -1775,9 +1779,37 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) reinit_completion(&fep->mdio_done); + if (is_c45) { + frame_start = FEC_MMFR_ST_C45; + + /* write address */ + frame_addr = (regnum >> 16); + writel(frame_start | FEC_MMFR_OP_ADDR_WRITE | + FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) | + 
FEC_MMFR_TA | (regnum & 0xFFFF), + fep->hwp + FEC_MII_DATA); + + /* wait for end of transfer */ + time_left = wait_for_completion_timeout(&fep->mdio_done, + usecs_to_jiffies(FEC_MII_TIMEOUT)); + if (time_left == 0) { + netdev_err(fep->netdev, "MDIO address write timeout\n"); + ret = -ETIMEDOUT; + goto out; + } + + frame_op = FEC_MMFR_OP_READ_C45; + + } else { + /* C22 read */ + frame_op = FEC_MMFR_OP_READ; + frame_start = FEC_MMFR_ST; + frame_addr = regnum; + } + /* start a read op */ - writel(FEC_MMFR_ST | FEC_MMFR_OP_READ | - FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) | + writel(frame_start | frame_op | + FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) | FEC_MMFR_TA, fep->hwp + FEC_MII_DATA); /* wait for end of transfer */ @@ -1804,7 +1836,8 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, struct fec_enet_private *fep = bus->priv; struct device *dev = &fep->pdev->dev; unsigned long time_left; - int ret; + int ret, frame_start, frame_addr; + bool is_c45 = !!(regnum & MII_ADDR_C45); ret = pm_runtime_get_sync(dev); if (ret < 0) @@ -1814,9 +1847,33 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, reinit_completion(&fep->mdio_done); + if (is_c45) { + frame_start = FEC_MMFR_ST_C45; + + /* write address */ + frame_addr = (regnum >> 16); + writel(frame_start | FEC_MMFR_OP_ADDR_WRITE | + FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) | + FEC_MMFR_TA | (regnum & 0xFFFF), + fep->hwp + FEC_MII_DATA); + + /* wait for end of transfer */ + time_left = wait_for_completion_timeout(&fep->mdio_done, + usecs_to_jiffies(FEC_MII_TIMEOUT)); + if (time_left == 0) { + netdev_err(fep->netdev, "MDIO address write timeout\n"); + ret = -ETIMEDOUT; + goto out; + } + } else { + /* C22 write */ + frame_start = FEC_MMFR_ST; + frame_addr = regnum; + } + /* start a write op */ - writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE | - FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) | + writel(frame_start | FEC_MMFR_OP_WRITE | + FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) | FEC_MMFR_TA | FEC_MMFR_DATA(value), fep->hwp + FEC_MII_DATA); @@ -1828,6 +1885,7 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, ret = -ETIMEDOUT; } +out: pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); @@ -3338,7 +3396,6 @@ fec_probe(struct platform_device *pdev) struct fec_platform_data *pdata; struct net_device *ndev; int i, irq, ret = 0; - struct resource *r; const struct of_device_id *of_id; static int dev_id; struct device_node *np = pdev->dev.of_node, *phy_node; @@ -3378,8 +3435,7 @@ fec_probe(struct platform_device *pdev) /* Select default pin state */ pinctrl_pm_select_default_state(&pdev->dev); - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - fep->hwp = devm_ioremap_resource(&pdev->dev, r); + fep->hwp = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(fep->hwp)) { ret = PTR_ERR(fep->hwp); goto failed_ioremap; diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index 5fad73b2e123..3981c06f082f 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -501,7 +501,7 @@ fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) nr_frags = skb_shinfo(skb)->nr_frags; frag = skb_shinfo(skb)->frags; for (i = 0; i < nr_frags; i++, frag++) { - if (!IS_ALIGNED(frag->page_offset, 4)) { + if (!IS_ALIGNED(skb_frag_off(frag), 4)) { is_aligned = 0; break; } diff --git a/drivers/net/ethernet/freescale/gianfar.c 
b/drivers/net/ethernet/freescale/gianfar.c index 7ea19e173339..24bf7f68375f 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -105,43 +105,6 @@ const char gfar_driver_version[] = "2.0"; -static int gfar_enet_open(struct net_device *dev); -static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); -static void gfar_reset_task(struct work_struct *work); -static void gfar_timeout(struct net_device *dev); -static int gfar_close(struct net_device *dev); -static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue, - int alloc_cnt); -static int gfar_set_mac_address(struct net_device *dev); -static int gfar_change_mtu(struct net_device *dev, int new_mtu); -static irqreturn_t gfar_error(int irq, void *dev_id); -static irqreturn_t gfar_transmit(int irq, void *dev_id); -static irqreturn_t gfar_interrupt(int irq, void *dev_id); -static void adjust_link(struct net_device *dev); -static noinline void gfar_update_link_state(struct gfar_private *priv); -static int init_phy(struct net_device *dev); -static int gfar_probe(struct platform_device *ofdev); -static int gfar_remove(struct platform_device *ofdev); -static void free_skb_resources(struct gfar_private *priv); -static void gfar_set_multi(struct net_device *dev); -static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr); -static void gfar_configure_serdes(struct net_device *dev); -static int gfar_poll_rx(struct napi_struct *napi, int budget); -static int gfar_poll_tx(struct napi_struct *napi, int budget); -static int gfar_poll_rx_sq(struct napi_struct *napi, int budget); -static int gfar_poll_tx_sq(struct napi_struct *napi, int budget); -#ifdef CONFIG_NET_POLL_CONTROLLER -static void gfar_netpoll(struct net_device *dev); -#endif -int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit); -static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue); -static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb); -static void gfar_halt_nodisable(struct gfar_private *priv); -static void gfar_clear_exact_match(struct net_device *dev); -static void gfar_set_mac_for_addr(struct net_device *dev, int num, - const u8 *addr); -static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); - MODULE_AUTHOR("Freescale Semiconductor, Inc"); MODULE_DESCRIPTION("Gianfar Ethernet Driver"); MODULE_LICENSE("GPL"); @@ -162,138 +125,6 @@ static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, bdp->lstatus = cpu_to_be32(lstatus); } -static void gfar_init_bds(struct net_device *ndev) -{ - struct gfar_private *priv = netdev_priv(ndev); - struct gfar __iomem *regs = priv->gfargrp[0].regs; - struct gfar_priv_tx_q *tx_queue = NULL; - struct gfar_priv_rx_q *rx_queue = NULL; - struct txbd8 *txbdp; - u32 __iomem *rfbptr; - int i, j; - - for (i = 0; i < priv->num_tx_queues; i++) { - tx_queue = priv->tx_queue[i]; - /* Initialize some variables in our dev structure */ - tx_queue->num_txbdfree = tx_queue->tx_ring_size; - tx_queue->dirty_tx = tx_queue->tx_bd_base; - tx_queue->cur_tx = tx_queue->tx_bd_base; - tx_queue->skb_curtx = 0; - tx_queue->skb_dirtytx = 0; - - /* Initialize Transmit Descriptor Ring */ - txbdp = tx_queue->tx_bd_base; - for (j = 0; j < tx_queue->tx_ring_size; j++) { - txbdp->lstatus = 0; - txbdp->bufPtr = 0; - txbdp++; - } - - /* Set the last descriptor in the ring to indicate wrap */ - txbdp--; - txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) | - TXBD_WRAP); - } - - rfbptr = &regs->rfbptr0; - for (i
= 0; i < priv->num_rx_queues; i++) { - rx_queue = priv->rx_queue[i]; - - rx_queue->next_to_clean = 0; - rx_queue->next_to_use = 0; - rx_queue->next_to_alloc = 0; - - /* make sure next_to_clean != next_to_use after this - * by leaving at least 1 unused descriptor - */ - gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue)); - - rx_queue->rfbptr = rfbptr; - rfbptr += 2; - } -} - -static int gfar_alloc_skb_resources(struct net_device *ndev) -{ - void *vaddr; - dma_addr_t addr; - int i, j; - struct gfar_private *priv = netdev_priv(ndev); - struct device *dev = priv->dev; - struct gfar_priv_tx_q *tx_queue = NULL; - struct gfar_priv_rx_q *rx_queue = NULL; - - priv->total_tx_ring_size = 0; - for (i = 0; i < priv->num_tx_queues; i++) - priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size; - - priv->total_rx_ring_size = 0; - for (i = 0; i < priv->num_rx_queues; i++) - priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size; - - /* Allocate memory for the buffer descriptors */ - vaddr = dma_alloc_coherent(dev, - (priv->total_tx_ring_size * - sizeof(struct txbd8)) + - (priv->total_rx_ring_size * - sizeof(struct rxbd8)), - &addr, GFP_KERNEL); - if (!vaddr) - return -ENOMEM; - - for (i = 0; i < priv->num_tx_queues; i++) { - tx_queue = priv->tx_queue[i]; - tx_queue->tx_bd_base = vaddr; - tx_queue->tx_bd_dma_base = addr; - tx_queue->dev = ndev; - /* enet DMA only understands physical addresses */ - addr += sizeof(struct txbd8) * tx_queue->tx_ring_size; - vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size; - } - - /* Start the rx descriptor ring where the tx ring leaves off */ - for (i = 0; i < priv->num_rx_queues; i++) { - rx_queue = priv->rx_queue[i]; - rx_queue->rx_bd_base = vaddr; - rx_queue->rx_bd_dma_base = addr; - rx_queue->ndev = ndev; - rx_queue->dev = dev; - addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; - vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; - } - - /* Setup the skbuff rings */ - for (i = 0; i < priv->num_tx_queues; i++) { - tx_queue = priv->tx_queue[i]; - tx_queue->tx_skbuff = - kmalloc_array(tx_queue->tx_ring_size, - sizeof(*tx_queue->tx_skbuff), - GFP_KERNEL); - if (!tx_queue->tx_skbuff) - goto cleanup; - - for (j = 0; j < tx_queue->tx_ring_size; j++) - tx_queue->tx_skbuff[j] = NULL; - } - - for (i = 0; i < priv->num_rx_queues; i++) { - rx_queue = priv->rx_queue[i]; - rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size, - sizeof(*rx_queue->rx_buff), - GFP_KERNEL); - if (!rx_queue->rx_buff) - goto cleanup; - } - - gfar_init_bds(ndev); - - return 0; - -cleanup: - free_skb_resources(priv); - return -ENOMEM; -} - static void gfar_init_tx_rx_base(struct gfar_private *priv) { struct gfar __iomem *regs = priv->gfargrp[0].regs; @@ -444,7 +275,7 @@ static void gfar_configure_coalescing(struct gfar_private *priv, } } -void gfar_configure_coalescing_all(struct gfar_private *priv) +static void gfar_configure_coalescing_all(struct gfar_private *priv) { gfar_configure_coalescing(priv, 0xFF, 0xFF); } @@ -477,6 +308,62 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev) return &dev->stats; } +/* Set the appropriate hash bit for the given addr */ +/* The algorithm works like so: + * 1) Take the Destination Address (ie the multicast address), and + * do a CRC on it (little endian), and reverse the bits of the + * result. + * 2) Use the 8 most significant bits as a hash into a 256-entry + * table. The table is controlled through 8 32-bit registers: + * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is + * gaddr7. 
This means that the 3 most significant bits in the + * hash index which gaddr register to use, and the 5 other bits + * indicate which bit (assuming an IBM numbering scheme, which + * for PowerPC (tm) is usually the case) in the register holds + * the entry. + */ +static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr) +{ + u32 tempval; + struct gfar_private *priv = netdev_priv(dev); + u32 result = ether_crc(ETH_ALEN, addr); + int width = priv->hash_width; + u8 whichbit = (result >> (32 - width)) & 0x1f; + u8 whichreg = result >> (32 - width + 5); + u32 value = (1 << (31-whichbit)); + + tempval = gfar_read(priv->hash_regs[whichreg]); + tempval |= value; + gfar_write(priv->hash_regs[whichreg], tempval); +} + +/* There are multiple MAC Address register pairs on some controllers + * This function sets the numth pair to a given address + */ +static void gfar_set_mac_for_addr(struct net_device *dev, int num, + const u8 *addr) +{ + struct gfar_private *priv = netdev_priv(dev); + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 tempval; + u32 __iomem *macptr = ®s->macstnaddr1; + + macptr += num*2; + + /* For a station address of 0x12345678ABCD in transmission + * order (BE), MACnADDR1 is set to 0xCDAB7856 and + * MACnADDR2 is set to 0x34120000. + */ + tempval = (addr[5] << 24) | (addr[4] << 16) | + (addr[3] << 8) | addr[2]; + + gfar_write(macptr, tempval); + + tempval = (addr[1] << 24) | (addr[0] << 16); + + gfar_write(macptr+1, tempval); +} + static int gfar_set_mac_addr(struct net_device *dev, void *p) { eth_mac_addr(dev, p); @@ -486,24 +373,6 @@ static int gfar_set_mac_addr(struct net_device *dev, void *p) return 0; } -static const struct net_device_ops gfar_netdev_ops = { - .ndo_open = gfar_enet_open, - .ndo_start_xmit = gfar_start_xmit, - .ndo_stop = gfar_close, - .ndo_change_mtu = gfar_change_mtu, - .ndo_set_features = gfar_set_features, - .ndo_set_rx_mode = gfar_set_multi, - .ndo_tx_timeout = gfar_timeout, - .ndo_do_ioctl = gfar_ioctl, - .ndo_get_stats = gfar_get_stats, - .ndo_change_carrier = fixed_phy_change_carrier, - .ndo_set_mac_address = gfar_set_mac_addr, - .ndo_validate_addr = eth_validate_addr, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = gfar_netpoll, -#endif -}; - static void gfar_ints_disable(struct gfar_private *priv) { int i; @@ -723,10 +592,53 @@ static int gfar_of_group_count(struct device_node *np) return num; } +/* Reads the controller's registers to determine what interface + * connects it to the PHY. + */ +static phy_interface_t gfar_get_interface(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 ecntrl; + + ecntrl = gfar_read(®s->ecntrl); + + if (ecntrl & ECNTRL_SGMII_MODE) + return PHY_INTERFACE_MODE_SGMII; + + if (ecntrl & ECNTRL_TBI_MODE) { + if (ecntrl & ECNTRL_REDUCED_MODE) + return PHY_INTERFACE_MODE_RTBI; + else + return PHY_INTERFACE_MODE_TBI; + } + + if (ecntrl & ECNTRL_REDUCED_MODE) { + if (ecntrl & ECNTRL_REDUCED_MII_MODE) { + return PHY_INTERFACE_MODE_RMII; + } + else { + phy_interface_t interface = priv->interface; + + /* This isn't autodetected right now, so it must + * be set by the device tree or platform code. 
+ */ + if (interface == PHY_INTERFACE_MODE_RGMII_ID) + return PHY_INTERFACE_MODE_RGMII_ID; + + return PHY_INTERFACE_MODE_RGMII; + } + } + + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) + return PHY_INTERFACE_MODE_GMII; + + return PHY_INTERFACE_MODE_MII; +} + static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) { const char *model; - const char *ctype; const void *mac_addr; int err = 0, i; struct net_device *dev = NULL; @@ -889,13 +801,15 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) FSL_GIANFAR_DEV_HAS_TIMER | FSL_GIANFAR_DEV_HAS_RX_FILER; - err = of_property_read_string(np, "phy-connection-type", &ctype); - - /* We only care about rgmii-id. The rest are autodetected */ - if (err == 0 && !strcmp(ctype, "rgmii-id")) - priv->interface = PHY_INTERFACE_MODE_RGMII_ID; + /* Use PHY connection type from the DT node if one is specified there. + * rgmii-id really needs to be specified. Other types can be + * detected by hardware + */ + err = of_get_phy_mode(np); + if (err >= 0) + priv->interface = err; else - priv->interface = PHY_INTERFACE_MODE_MII; + priv->interface = gfar_get_interface(dev); if (of_find_property(np, "fsl,magic-packet", NULL)) priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET; @@ -931,85 +845,6 @@ tx_alloc_failed: return err; } -static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) -{ - struct hwtstamp_config config; - struct gfar_private *priv = netdev_priv(netdev); - - if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; - - /* reserved for future extensions */ - if (config.flags) - return -EINVAL; - - switch (config.tx_type) { - case HWTSTAMP_TX_OFF: - priv->hwts_tx_en = 0; - break; - case HWTSTAMP_TX_ON: - if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) - return -ERANGE; - priv->hwts_tx_en = 1; - break; - default: - return -ERANGE; - } - - switch (config.rx_filter) { - case HWTSTAMP_FILTER_NONE: - if (priv->hwts_rx_en) { - priv->hwts_rx_en = 0; - reset_gfar(netdev); - } - break; - default: - if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) - return -ERANGE; - if (!priv->hwts_rx_en) { - priv->hwts_rx_en = 1; - reset_gfar(netdev); - } - config.rx_filter = HWTSTAMP_FILTER_ALL; - break; - } - - return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? - -EFAULT : 0; -} - -static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr) -{ - struct hwtstamp_config config; - struct gfar_private *priv = netdev_priv(netdev); - - config.flags = 0; - config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; - config.rx_filter = (priv->hwts_rx_en ? - HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE); - - return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
- -EFAULT : 0; -} - -static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) -{ - struct phy_device *phydev = dev->phydev; - - if (!netif_running(dev)) - return -EINVAL; - - if (cmd == SIOCSHWTSTAMP) - return gfar_hwtstamp_set(dev, rq); - if (cmd == SIOCGHWTSTAMP) - return gfar_hwtstamp_get(dev, rq); - - if (!phydev) - return -ENODEV; - - return phy_mii_ioctl(phydev, rq, cmd); -} - static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar, u32 class) { @@ -1133,135 +968,6 @@ static void gfar_detect_errata(struct gfar_private *priv) priv->errata); } -void gfar_mac_reset(struct gfar_private *priv) -{ - struct gfar __iomem *regs = priv->gfargrp[0].regs; - u32 tempval; - - /* Reset MAC layer */ - gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET); - - /* We need to delay at least 3 TX clocks */ - udelay(3); - - /* the soft reset bit is not self-resetting, so we need to - * clear it before resuming normal operation - */ - gfar_write(&regs->maccfg1, 0); - - udelay(3); - - gfar_rx_offload_en(priv); - - /* Initialize the max receive frame/buffer lengths */ - gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE); - gfar_write(&regs->mrblr, GFAR_RXB_SIZE); - - /* Initialize the Minimum Frame Length Register */ - gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS); - - /* Initialize MACCFG2. */ - tempval = MACCFG2_INIT_SETTINGS; - - /* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1 - * are marked as truncated. Avoid this by MACCFG2[Huge Frame]=1, - * and by checking RxBD[LG] and discarding larger than MAXFRM. - */ - if (gfar_has_errata(priv, GFAR_ERRATA_74)) - tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK; - - gfar_write(&regs->maccfg2, tempval); - - /* Clear mac addr hash registers */ - gfar_write(&regs->igaddr0, 0); - gfar_write(&regs->igaddr1, 0); - gfar_write(&regs->igaddr2, 0); - gfar_write(&regs->igaddr3, 0); - gfar_write(&regs->igaddr4, 0); - gfar_write(&regs->igaddr5, 0); - gfar_write(&regs->igaddr6, 0); - gfar_write(&regs->igaddr7, 0); - - gfar_write(&regs->gaddr0, 0); - gfar_write(&regs->gaddr1, 0); - gfar_write(&regs->gaddr2, 0); - gfar_write(&regs->gaddr3, 0); - gfar_write(&regs->gaddr4, 0); - gfar_write(&regs->gaddr5, 0); - gfar_write(&regs->gaddr6, 0); - gfar_write(&regs->gaddr7, 0); - - if (priv->extended_hash) - gfar_clear_exact_match(priv->ndev); - - gfar_mac_rx_config(priv); - - gfar_mac_tx_config(priv); - - gfar_set_mac_address(priv->ndev); - - gfar_set_multi(priv->ndev); - - /* clear ievent and imask before configuring coalescing */ - gfar_ints_disable(priv); - - /* Configure the coalescing support */ - gfar_configure_coalescing_all(priv); -} - -static void gfar_hw_init(struct gfar_private *priv) -{ - struct gfar __iomem *regs = priv->gfargrp[0].regs; - u32 attrs; - - /* Stop the DMA engine now, in case it was running before - * (The firmware could have used it, and left it running).
- */ - gfar_halt(priv); - - gfar_mac_reset(priv); - - /* Zero out the rmon mib registers if it has them */ - if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { - memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib)); - - /* Mask off the CAM interrupts */ - gfar_write(®s->rmon.cam1, 0xffffffff); - gfar_write(®s->rmon.cam2, 0xffffffff); - } - - /* Initialize ECNTRL */ - gfar_write(®s->ecntrl, ECNTRL_INIT_SETTINGS); - - /* Set the extraction length and index */ - attrs = ATTRELI_EL(priv->rx_stash_size) | - ATTRELI_EI(priv->rx_stash_index); - - gfar_write(®s->attreli, attrs); - - /* Start with defaults, and add stashing - * depending on driver parameters - */ - attrs = ATTR_INIT_SETTINGS; - - if (priv->bd_stash_en) - attrs |= ATTR_BDSTASH; - - if (priv->rx_stash_size != 0) - attrs |= ATTR_BUFSTASH; - - gfar_write(®s->attr, attrs); - - /* FIFO configs */ - gfar_write(®s->fifo_tx_thr, DEFAULT_FIFO_TX_THR); - gfar_write(®s->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE); - gfar_write(®s->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF); - - /* Program the interrupt steering regs, only for MG devices */ - if (priv->num_grps > 1) - gfar_write_isrg(priv); -} - static void gfar_init_addr_hash_table(struct gfar_private *priv) { struct gfar __iomem *regs = priv->gfargrp[0].regs; @@ -1302,578 +1008,6 @@ static void gfar_init_addr_hash_table(struct gfar_private *priv) } } -/* Set up the ethernet device structure, private data, - * and anything else we need before we start - */ -static int gfar_probe(struct platform_device *ofdev) -{ - struct device_node *np = ofdev->dev.of_node; - struct net_device *dev = NULL; - struct gfar_private *priv = NULL; - int err = 0, i; - - err = gfar_of_init(ofdev, &dev); - - if (err) - return err; - - priv = netdev_priv(dev); - priv->ndev = dev; - priv->ofdev = ofdev; - priv->dev = &ofdev->dev; - SET_NETDEV_DEV(dev, &ofdev->dev); - - INIT_WORK(&priv->reset_task, gfar_reset_task); - - platform_set_drvdata(ofdev, priv); - - gfar_detect_errata(priv); - - /* Set the dev->base_addr to the gfar reg region */ - dev->base_addr = (unsigned long) priv->gfargrp[0].regs; - - /* Fill in the dev structure */ - dev->watchdog_timeo = TX_TIMEOUT; - /* MTU range: 50 - 9586 */ - dev->mtu = 1500; - dev->min_mtu = 50; - dev->max_mtu = GFAR_JUMBO_FRAME_SIZE - ETH_HLEN; - dev->netdev_ops = &gfar_netdev_ops; - dev->ethtool_ops = &gfar_ethtool_ops; - - /* Register for napi ...We are registering NAPI for each grp */ - for (i = 0; i < priv->num_grps; i++) { - if (priv->poll_mode == GFAR_SQ_POLLING) { - netif_napi_add(dev, &priv->gfargrp[i].napi_rx, - gfar_poll_rx_sq, GFAR_DEV_WEIGHT); - netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx, - gfar_poll_tx_sq, 2); - } else { - netif_napi_add(dev, &priv->gfargrp[i].napi_rx, - gfar_poll_rx, GFAR_DEV_WEIGHT); - netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx, - gfar_poll_tx, 2); - } - } - - if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { - dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | - NETIF_F_RXCSUM; - dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | - NETIF_F_RXCSUM | NETIF_F_HIGHDMA; - } - - if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { - dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX; - dev->features |= NETIF_F_HW_VLAN_CTAG_RX; - } - - dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; - - gfar_init_addr_hash_table(priv); - - /* Insert receive time stamps into padding alignment bytes, and - * plus 2 bytes padding to ensure the cpu alignment. 
- */ - if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) - priv->padding = 8 + DEFAULT_PADDING; - - if (dev->features & NETIF_F_IP_CSUM || - priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) - dev->needed_headroom = GMAC_FCB_LEN; - - /* Initializing some of the rx/tx queue level parameters */ - for (i = 0; i < priv->num_tx_queues; i++) { - priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE; - priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE; - priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE; - priv->tx_queue[i]->txic = DEFAULT_TXIC; - } - - for (i = 0; i < priv->num_rx_queues; i++) { - priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE; - priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE; - priv->rx_queue[i]->rxic = DEFAULT_RXIC; - } - - /* Always enable rx filer if available */ - priv->rx_filer_enable = - (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0; - /* Enable most messages by default */ - priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; - /* use pritority h/w tx queue scheduling for single queue devices */ - if (priv->num_tx_queues == 1) - priv->prio_sched_en = 1; - - set_bit(GFAR_DOWN, &priv->state); - - gfar_hw_init(priv); - - /* Carrier starts down, phylib will bring it up */ - netif_carrier_off(dev); - - err = register_netdev(dev); - - if (err) { - pr_err("%s: Cannot register net device, aborting\n", dev->name); - goto register_fail; - } - - if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) - priv->wol_supported |= GFAR_WOL_MAGIC; - - if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) && - priv->rx_filer_enable) - priv->wol_supported |= GFAR_WOL_FILER_UCAST; - - device_set_wakeup_capable(&ofdev->dev, priv->wol_supported); - - /* fill out IRQ number and name fields */ - for (i = 0; i < priv->num_grps; i++) { - struct gfar_priv_grp *grp = &priv->gfargrp[i]; - if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { - sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s", - dev->name, "_g", '0' + i, "_tx"); - sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s", - dev->name, "_g", '0' + i, "_rx"); - sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s", - dev->name, "_g", '0' + i, "_er"); - } else - strcpy(gfar_irq(grp, TX)->name, dev->name); - } - - /* Initialize the filer table */ - gfar_init_filer_table(priv); - - /* Print out the device info */ - netdev_info(dev, "mac: %pM\n", dev->dev_addr); - - /* Even more device info helps when determining which kernel - * provided which set of benchmarks. 
- */ - netdev_info(dev, "Running with NAPI enabled\n"); - for (i = 0; i < priv->num_rx_queues; i++) - netdev_info(dev, "RX BD ring size for Q[%d]: %d\n", - i, priv->rx_queue[i]->rx_ring_size); - for (i = 0; i < priv->num_tx_queues; i++) - netdev_info(dev, "TX BD ring size for Q[%d]: %d\n", - i, priv->tx_queue[i]->tx_ring_size); - - return 0; - -register_fail: - if (of_phy_is_fixed_link(np)) - of_phy_deregister_fixed_link(np); - unmap_group_regs(priv); - gfar_free_rx_queues(priv); - gfar_free_tx_queues(priv); - of_node_put(priv->phy_node); - of_node_put(priv->tbi_node); - free_gfar_dev(priv); - return err; -} - -static int gfar_remove(struct platform_device *ofdev) -{ - struct gfar_private *priv = platform_get_drvdata(ofdev); - struct device_node *np = ofdev->dev.of_node; - - of_node_put(priv->phy_node); - of_node_put(priv->tbi_node); - - unregister_netdev(priv->ndev); - - if (of_phy_is_fixed_link(np)) - of_phy_deregister_fixed_link(np); - - unmap_group_regs(priv); - gfar_free_rx_queues(priv); - gfar_free_tx_queues(priv); - free_gfar_dev(priv); - - return 0; -} - -#ifdef CONFIG_PM - -static void __gfar_filer_disable(struct gfar_private *priv) -{ - struct gfar __iomem *regs = priv->gfargrp[0].regs; - u32 temp; - - temp = gfar_read(®s->rctrl); - temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT); - gfar_write(®s->rctrl, temp); -} - -static void __gfar_filer_enable(struct gfar_private *priv) -{ - struct gfar __iomem *regs = priv->gfargrp[0].regs; - u32 temp; - - temp = gfar_read(®s->rctrl); - temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT; - gfar_write(®s->rctrl, temp); -} - -/* Filer rules implementing wol capabilities */ -static void gfar_filer_config_wol(struct gfar_private *priv) -{ - unsigned int i; - u32 rqfcr; - - __gfar_filer_disable(priv); - - /* clear the filer table, reject any packet by default */ - rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH; - for (i = 0; i <= MAX_FILER_IDX; i++) - gfar_write_filer(priv, i, rqfcr, 0); - - i = 0; - if (priv->wol_opts & GFAR_WOL_FILER_UCAST) { - /* unicast packet, accept it */ - struct net_device *ndev = priv->ndev; - /* get the default rx queue index */ - u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex; - u32 dest_mac_addr = (ndev->dev_addr[0] << 16) | - (ndev->dev_addr[1] << 8) | - ndev->dev_addr[2]; - - rqfcr = (qindex << 10) | RQFCR_AND | - RQFCR_CMP_EXACT | RQFCR_PID_DAH; - - gfar_write_filer(priv, i++, rqfcr, dest_mac_addr); - - dest_mac_addr = (ndev->dev_addr[3] << 16) | - (ndev->dev_addr[4] << 8) | - ndev->dev_addr[5]; - rqfcr = (qindex << 10) | RQFCR_GPI | - RQFCR_CMP_EXACT | RQFCR_PID_DAL; - gfar_write_filer(priv, i++, rqfcr, dest_mac_addr); - } - - __gfar_filer_enable(priv); -} - -static void gfar_filer_restore_table(struct gfar_private *priv) -{ - u32 rqfcr, rqfpr; - unsigned int i; - - __gfar_filer_disable(priv); - - for (i = 0; i <= MAX_FILER_IDX; i++) { - rqfcr = priv->ftp_rqfcr[i]; - rqfpr = priv->ftp_rqfpr[i]; - gfar_write_filer(priv, i, rqfcr, rqfpr); - } - - __gfar_filer_enable(priv); -} - -/* gfar_start() for Rx only and with the FGPI filer interrupt enabled */ -static void gfar_start_wol_filer(struct gfar_private *priv) -{ - struct gfar __iomem *regs = priv->gfargrp[0].regs; - u32 tempval; - int i = 0; - - /* Enable Rx hw queues */ - gfar_write(®s->rqueue, priv->rqueue); - - /* Initialize DMACTRL to have WWR and WOP */ - tempval = gfar_read(®s->dmactrl); - tempval |= DMACTRL_INIT_SETTINGS; - gfar_write(®s->dmactrl, tempval); - - /* Make sure we aren't stopped */ - tempval = gfar_read(®s->dmactrl); - tempval &= ~DMACTRL_GRS; - 
gfar_write(®s->dmactrl, tempval); - - for (i = 0; i < priv->num_grps; i++) { - regs = priv->gfargrp[i].regs; - /* Clear RHLT, so that the DMA starts polling now */ - gfar_write(®s->rstat, priv->gfargrp[i].rstat); - /* enable the Filer General Purpose Interrupt */ - gfar_write(®s->imask, IMASK_FGPI); - } - - /* Enable Rx DMA */ - tempval = gfar_read(®s->maccfg1); - tempval |= MACCFG1_RX_EN; - gfar_write(®s->maccfg1, tempval); -} - -static int gfar_suspend(struct device *dev) -{ - struct gfar_private *priv = dev_get_drvdata(dev); - struct net_device *ndev = priv->ndev; - struct gfar __iomem *regs = priv->gfargrp[0].regs; - u32 tempval; - u16 wol = priv->wol_opts; - - if (!netif_running(ndev)) - return 0; - - disable_napi(priv); - netif_tx_lock(ndev); - netif_device_detach(ndev); - netif_tx_unlock(ndev); - - gfar_halt(priv); - - if (wol & GFAR_WOL_MAGIC) { - /* Enable interrupt on Magic Packet */ - gfar_write(®s->imask, IMASK_MAG); - - /* Enable Magic Packet mode */ - tempval = gfar_read(®s->maccfg2); - tempval |= MACCFG2_MPEN; - gfar_write(®s->maccfg2, tempval); - - /* re-enable the Rx block */ - tempval = gfar_read(®s->maccfg1); - tempval |= MACCFG1_RX_EN; - gfar_write(®s->maccfg1, tempval); - - } else if (wol & GFAR_WOL_FILER_UCAST) { - gfar_filer_config_wol(priv); - gfar_start_wol_filer(priv); - - } else { - phy_stop(ndev->phydev); - } - - return 0; -} - -static int gfar_resume(struct device *dev) -{ - struct gfar_private *priv = dev_get_drvdata(dev); - struct net_device *ndev = priv->ndev; - struct gfar __iomem *regs = priv->gfargrp[0].regs; - u32 tempval; - u16 wol = priv->wol_opts; - - if (!netif_running(ndev)) - return 0; - - if (wol & GFAR_WOL_MAGIC) { - /* Disable Magic Packet mode */ - tempval = gfar_read(®s->maccfg2); - tempval &= ~MACCFG2_MPEN; - gfar_write(®s->maccfg2, tempval); - - } else if (wol & GFAR_WOL_FILER_UCAST) { - /* need to stop rx only, tx is already down */ - gfar_halt(priv); - gfar_filer_restore_table(priv); - - } else { - phy_start(ndev->phydev); - } - - gfar_start(priv); - - netif_device_attach(ndev); - enable_napi(priv); - - return 0; -} - -static int gfar_restore(struct device *dev) -{ - struct gfar_private *priv = dev_get_drvdata(dev); - struct net_device *ndev = priv->ndev; - - if (!netif_running(ndev)) { - netif_device_attach(ndev); - - return 0; - } - - gfar_init_bds(ndev); - - gfar_mac_reset(priv); - - gfar_init_tx_rx_base(priv); - - gfar_start(priv); - - priv->oldlink = 0; - priv->oldspeed = 0; - priv->oldduplex = -1; - - if (ndev->phydev) - phy_start(ndev->phydev); - - netif_device_attach(ndev); - enable_napi(priv); - - return 0; -} - -static const struct dev_pm_ops gfar_pm_ops = { - .suspend = gfar_suspend, - .resume = gfar_resume, - .freeze = gfar_suspend, - .thaw = gfar_resume, - .restore = gfar_restore, -}; - -#define GFAR_PM_OPS (&gfar_pm_ops) - -#else - -#define GFAR_PM_OPS NULL - -#endif - -/* Reads the controller's registers to determine what interface - * connects it to the PHY. 
- */ -static phy_interface_t gfar_get_interface(struct net_device *dev) -{ - struct gfar_private *priv = netdev_priv(dev); - struct gfar __iomem *regs = priv->gfargrp[0].regs; - u32 ecntrl; - - ecntrl = gfar_read(®s->ecntrl); - - if (ecntrl & ECNTRL_SGMII_MODE) - return PHY_INTERFACE_MODE_SGMII; - - if (ecntrl & ECNTRL_TBI_MODE) { - if (ecntrl & ECNTRL_REDUCED_MODE) - return PHY_INTERFACE_MODE_RTBI; - else - return PHY_INTERFACE_MODE_TBI; - } - - if (ecntrl & ECNTRL_REDUCED_MODE) { - if (ecntrl & ECNTRL_REDUCED_MII_MODE) { - return PHY_INTERFACE_MODE_RMII; - } - else { - phy_interface_t interface = priv->interface; - - /* This isn't autodetected right now, so it must - * be set by the device tree or platform code. - */ - if (interface == PHY_INTERFACE_MODE_RGMII_ID) - return PHY_INTERFACE_MODE_RGMII_ID; - - return PHY_INTERFACE_MODE_RGMII; - } - } - - if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) - return PHY_INTERFACE_MODE_GMII; - - return PHY_INTERFACE_MODE_MII; -} - - -/* Initializes driver's PHY state, and attaches to the PHY. - * Returns 0 on success. - */ -static int init_phy(struct net_device *dev) -{ - __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; - struct gfar_private *priv = netdev_priv(dev); - phy_interface_t interface; - struct phy_device *phydev; - struct ethtool_eee edata; - - linkmode_set_bit_array(phy_10_100_features_array, - ARRAY_SIZE(phy_10_100_features_array), - mask); - linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask); - linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask); - if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask); - - priv->oldlink = 0; - priv->oldspeed = 0; - priv->oldduplex = -1; - - interface = gfar_get_interface(dev); - - phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0, - interface); - if (!phydev) { - dev_err(&dev->dev, "could not attach to PHY\n"); - return -ENODEV; - } - - if (interface == PHY_INTERFACE_MODE_SGMII) - gfar_configure_serdes(dev); - - /* Remove any features not supported by the controller */ - linkmode_and(phydev->supported, phydev->supported, mask); - linkmode_copy(phydev->advertising, phydev->supported); - - /* Add support for flow control */ - phy_support_asym_pause(phydev); - - /* disable EEE autoneg, EEE not supported by eTSEC */ - memset(&edata, 0, sizeof(struct ethtool_eee)); - phy_ethtool_set_eee(phydev, &edata); - - return 0; -} - -/* Initialize TBI PHY interface for communicating with the - * SERDES lynx PHY on the chip. We communicate with this PHY - * through the MDIO bus on each controller, treating it as a - * "normal" PHY at the address found in the TBIPA register. We assume - * that the TBIPA register is valid. Either the MDIO bus code will set - * it to a value that doesn't conflict with other PHYs on the bus, or the - * value doesn't matter, as there are no other PHYs on the bus. - */ -static void gfar_configure_serdes(struct net_device *dev) -{ - struct gfar_private *priv = netdev_priv(dev); - struct phy_device *tbiphy; - - if (!priv->tbi_node) { - dev_warn(&dev->dev, "error: SGMII mode requires that the " - "device tree specify a tbi-handle\n"); - return; - } - - tbiphy = of_phy_find_device(priv->tbi_node); - if (!tbiphy) { - dev_err(&dev->dev, "error: Could not get TBI device\n"); - return; - } - - /* If the link is already up, we must already be ok, and don't need to - * configure and reset the TBI<->SerDes link. Maybe U-Boot configured - * everything for us? 
Resetting it takes the link down and requires - * several seconds for it to come back. - */ - if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) { - put_device(&tbiphy->mdio.dev); - return; - } - - /* Single clk mode, mii mode off(for serdes communication) */ - phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT); - - phy_write(tbiphy, MII_ADVERTISE, - ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE | - ADVERTISE_1000XPSE_ASYM); - - phy_write(tbiphy, MII_BMCR, - BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX | - BMCR_SPEED1000); - - put_device(&tbiphy->mdio.dev); -} - static int __gfar_is_rx_idle(struct gfar_private *priv) { u32 res; @@ -1930,7 +1064,7 @@ retry: } /* Halt the receive and transmit queues */ -void gfar_halt(struct gfar_private *priv) +static void gfar_halt(struct gfar_private *priv) { struct gfar __iomem *regs = priv->gfargrp[0].regs; u32 tempval; @@ -1949,26 +1083,6 @@ void gfar_halt(struct gfar_private *priv) gfar_write(®s->maccfg1, tempval); } -void stop_gfar(struct net_device *dev) -{ - struct gfar_private *priv = netdev_priv(dev); - - netif_tx_stop_all_queues(dev); - - smp_mb__before_atomic(); - set_bit(GFAR_DOWN, &priv->state); - smp_mb__after_atomic(); - - disable_napi(priv); - - /* disable ints and gracefully shut down Rx/Tx DMA */ - gfar_halt(priv); - - phy_stop(dev->phydev); - - free_skb_resources(priv); -} - static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue) { struct txbd8 *txbdp; @@ -2005,8 +1119,7 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue) struct rxbd8 *rxbdp = rx_queue->rx_bd_base; - if (rx_queue->skb) - dev_kfree_skb(rx_queue->skb); + dev_kfree_skb(rx_queue->skb); for (i = 0; i < rx_queue->rx_ring_size; i++) { struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i]; @@ -2062,7 +1175,27 @@ static void free_skb_resources(struct gfar_private *priv) priv->tx_queue[0]->tx_bd_dma_base); } -void gfar_start(struct gfar_private *priv) +void stop_gfar(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + + netif_tx_stop_all_queues(dev); + + smp_mb__before_atomic(); + set_bit(GFAR_DOWN, &priv->state); + smp_mb__after_atomic(); + + disable_napi(priv); + + /* disable ints and gracefully shut down Rx/Tx DMA */ + gfar_halt(priv); + + phy_stop(dev->phydev); + + free_skb_resources(priv); +} + +static void gfar_start(struct gfar_private *priv) { struct gfar __iomem *regs = priv->gfargrp[0].regs; u32 tempval; @@ -2099,103 +1232,207 @@ void gfar_start(struct gfar_private *priv) netif_trans_update(priv->ndev); /* prevent tx timeout */ } -static void free_grp_irqs(struct gfar_priv_grp *grp) +static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb) { - free_irq(gfar_irq(grp, TX)->irq, grp); - free_irq(gfar_irq(grp, RX)->irq, grp); - free_irq(gfar_irq(grp, ER)->irq, grp); + struct page *page; + dma_addr_t addr; + + page = dev_alloc_page(); + if (unlikely(!page)) + return false; + + addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(rxq->dev, addr))) { + __free_page(page); + + return false; + } + + rxb->dma = addr; + rxb->page = page; + rxb->page_offset = 0; + + return true; } -static int register_grp_irqs(struct gfar_priv_grp *grp) +static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue) { - struct gfar_private *priv = grp->priv; - struct net_device *dev = priv->ndev; - int err; + struct gfar_private *priv = netdev_priv(rx_queue->ndev); + struct gfar_extra_stats *estats = &priv->extra_stats; - /* If the device has multiple interrupts, register for - * them. 
Otherwise, only register for the one - */ - if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { - /* Install our interrupt handlers for Error, - * Transmit, and Receive - */ - err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0, - gfar_irq(grp, ER)->name, grp); - if (err < 0) { - netif_err(priv, intr, dev, "Can't get IRQ %d\n", - gfar_irq(grp, ER)->irq); + netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n"); + atomic64_inc(&estats->rx_alloc_err); +} - goto err_irq_fail; - } - enable_irq_wake(gfar_irq(grp, ER)->irq); +static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue, + int alloc_cnt) +{ + struct rxbd8 *bdp; + struct gfar_rx_buff *rxb; + int i; - err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0, - gfar_irq(grp, TX)->name, grp); - if (err < 0) { - netif_err(priv, intr, dev, "Can't get IRQ %d\n", - gfar_irq(grp, TX)->irq); - goto tx_irq_fail; + i = rx_queue->next_to_use; + bdp = &rx_queue->rx_bd_base[i]; + rxb = &rx_queue->rx_buff[i]; + + while (alloc_cnt--) { + /* try reuse page */ + if (unlikely(!rxb->page)) { + if (unlikely(!gfar_new_page(rx_queue, rxb))) { + gfar_rx_alloc_err(rx_queue); + break; + } } - err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0, - gfar_irq(grp, RX)->name, grp); - if (err < 0) { - netif_err(priv, intr, dev, "Can't get IRQ %d\n", - gfar_irq(grp, RX)->irq); - goto rx_irq_fail; + + /* Setup the new RxBD */ + gfar_init_rxbdp(rx_queue, bdp, + rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT); + + /* Update to the next pointer */ + bdp++; + rxb++; + + if (unlikely(++i == rx_queue->rx_ring_size)) { + i = 0; + bdp = rx_queue->rx_bd_base; + rxb = rx_queue->rx_buff; } - enable_irq_wake(gfar_irq(grp, RX)->irq); + } - } else { - err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0, - gfar_irq(grp, TX)->name, grp); - if (err < 0) { - netif_err(priv, intr, dev, "Can't get IRQ %d\n", - gfar_irq(grp, TX)->irq); - goto err_irq_fail; + rx_queue->next_to_use = i; + rx_queue->next_to_alloc = i; +} + +static void gfar_init_bds(struct net_device *ndev) +{ + struct gfar_private *priv = netdev_priv(ndev); + struct gfar __iomem *regs = priv->gfargrp[0].regs; + struct gfar_priv_tx_q *tx_queue = NULL; + struct gfar_priv_rx_q *rx_queue = NULL; + struct txbd8 *txbdp; + u32 __iomem *rfbptr; + int i, j; + + for (i = 0; i < priv->num_tx_queues; i++) { + tx_queue = priv->tx_queue[i]; + /* Initialize some variables in our dev structure */ + tx_queue->num_txbdfree = tx_queue->tx_ring_size; + tx_queue->dirty_tx = tx_queue->tx_bd_base; + tx_queue->cur_tx = tx_queue->tx_bd_base; + tx_queue->skb_curtx = 0; + tx_queue->skb_dirtytx = 0; + + /* Initialize Transmit Descriptor Ring */ + txbdp = tx_queue->tx_bd_base; + for (j = 0; j < tx_queue->tx_ring_size; j++) { + txbdp->lstatus = 0; + txbdp->bufPtr = 0; + txbdp++; } - enable_irq_wake(gfar_irq(grp, TX)->irq); + + /* Set the last descriptor in the ring to indicate wrap */ + txbdp--; + txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) | + TXBD_WRAP); } - return 0; + rfbptr = ®s->rfbptr0; + for (i = 0; i < priv->num_rx_queues; i++) { + rx_queue = priv->rx_queue[i]; -rx_irq_fail: - free_irq(gfar_irq(grp, TX)->irq, grp); -tx_irq_fail: - free_irq(gfar_irq(grp, ER)->irq, grp); -err_irq_fail: - return err; + rx_queue->next_to_clean = 0; + rx_queue->next_to_use = 0; + rx_queue->next_to_alloc = 0; + + /* make sure next_to_clean != next_to_use after this + * by leaving at least 1 unused descriptor + */ + gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue)); + rx_queue->rfbptr = rfbptr; + rfbptr += 2; + } } 
-static void gfar_free_irq(struct gfar_private *priv) +static int gfar_alloc_skb_resources(struct net_device *ndev) { - int i; + void *vaddr; + dma_addr_t addr; + int i, j; + struct gfar_private *priv = netdev_priv(ndev); + struct device *dev = priv->dev; + struct gfar_priv_tx_q *tx_queue = NULL; + struct gfar_priv_rx_q *rx_queue = NULL; - /* Free the IRQs */ - if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { - for (i = 0; i < priv->num_grps; i++) - free_grp_irqs(&priv->gfargrp[i]); - } else { - for (i = 0; i < priv->num_grps; i++) - free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq, - &priv->gfargrp[i]); + priv->total_tx_ring_size = 0; + for (i = 0; i < priv->num_tx_queues; i++) + priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size; + + priv->total_rx_ring_size = 0; + for (i = 0; i < priv->num_rx_queues; i++) + priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size; + + /* Allocate memory for the buffer descriptors */ + vaddr = dma_alloc_coherent(dev, + (priv->total_tx_ring_size * + sizeof(struct txbd8)) + + (priv->total_rx_ring_size * + sizeof(struct rxbd8)), + &addr, GFP_KERNEL); + if (!vaddr) + return -ENOMEM; + + for (i = 0; i < priv->num_tx_queues; i++) { + tx_queue = priv->tx_queue[i]; + tx_queue->tx_bd_base = vaddr; + tx_queue->tx_bd_dma_base = addr; + tx_queue->dev = ndev; + /* enet DMA only understands physical addresses */ + addr += sizeof(struct txbd8) * tx_queue->tx_ring_size; + vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size; } -} -static int gfar_request_irq(struct gfar_private *priv) -{ - int err, i, j; + /* Start the rx descriptor ring where the tx ring leaves off */ + for (i = 0; i < priv->num_rx_queues; i++) { + rx_queue = priv->rx_queue[i]; + rx_queue->rx_bd_base = vaddr; + rx_queue->rx_bd_dma_base = addr; + rx_queue->ndev = ndev; + rx_queue->dev = dev; + addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; + vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; + } - for (i = 0; i < priv->num_grps; i++) { - err = register_grp_irqs(&priv->gfargrp[i]); - if (err) { - for (j = 0; j < i; j++) - free_grp_irqs(&priv->gfargrp[j]); - return err; - } + /* Setup the skbuff rings */ + for (i = 0; i < priv->num_tx_queues; i++) { + tx_queue = priv->tx_queue[i]; + tx_queue->tx_skbuff = + kmalloc_array(tx_queue->tx_ring_size, + sizeof(*tx_queue->tx_skbuff), + GFP_KERNEL); + if (!tx_queue->tx_skbuff) + goto cleanup; + + for (j = 0; j < tx_queue->tx_ring_size; j++) + tx_queue->tx_skbuff[j] = NULL; } + for (i = 0; i < priv->num_rx_queues; i++) { + rx_queue = priv->rx_queue[i]; + rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size, + sizeof(*rx_queue->rx_buff), + GFP_KERNEL); + if (!rx_queue->rx_buff) + goto cleanup; + } + + gfar_init_bds(ndev); + return 0; + +cleanup: + free_skb_resources(priv); + return -ENOMEM; } /* Bring the controller up and running */ @@ -2233,27 +1470,245 @@ int startup_gfar(struct net_device *ndev) return 0; } -/* Called when something needs to use the ethernet device - * Returns 0 for success. 
+static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv) +{ + struct net_device *ndev = priv->ndev; + struct phy_device *phydev = ndev->phydev; + u32 val = 0; + + if (!phydev->duplex) + return val; + + if (!priv->pause_aneg_en) { + if (priv->tx_pause_en) + val |= MACCFG1_TX_FLOW; + if (priv->rx_pause_en) + val |= MACCFG1_RX_FLOW; + } else { + u16 lcl_adv, rmt_adv; + u8 flowctrl; + /* get link partner capabilities */ + rmt_adv = 0; + if (phydev->pause) + rmt_adv = LPA_PAUSE_CAP; + if (phydev->asym_pause) + rmt_adv |= LPA_PAUSE_ASYM; + + lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising); + flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); + if (flowctrl & FLOW_CTRL_TX) + val |= MACCFG1_TX_FLOW; + if (flowctrl & FLOW_CTRL_RX) + val |= MACCFG1_RX_FLOW; + } + + return val; +} + +static noinline void gfar_update_link_state(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + struct net_device *ndev = priv->ndev; + struct phy_device *phydev = ndev->phydev; + struct gfar_priv_rx_q *rx_queue = NULL; + int i; + + if (unlikely(test_bit(GFAR_RESETTING, &priv->state))) + return; + + if (phydev->link) { + u32 tempval1 = gfar_read(®s->maccfg1); + u32 tempval = gfar_read(®s->maccfg2); + u32 ecntrl = gfar_read(®s->ecntrl); + u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW); + + if (phydev->duplex != priv->oldduplex) { + if (!(phydev->duplex)) + tempval &= ~(MACCFG2_FULL_DUPLEX); + else + tempval |= MACCFG2_FULL_DUPLEX; + + priv->oldduplex = phydev->duplex; + } + + if (phydev->speed != priv->oldspeed) { + switch (phydev->speed) { + case 1000: + tempval = + ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII); + + ecntrl &= ~(ECNTRL_R100); + break; + case 100: + case 10: + tempval = + ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII); + + /* Reduced mode distinguishes + * between 10 and 100 + */ + if (phydev->speed == SPEED_100) + ecntrl |= ECNTRL_R100; + else + ecntrl &= ~(ECNTRL_R100); + break; + default: + netif_warn(priv, link, priv->ndev, + "Ack! Speed (%d) is not 10/100/1000!\n", + phydev->speed); + break; + } + + priv->oldspeed = phydev->speed; + } + + tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); + tempval1 |= gfar_get_flowctrl_cfg(priv); + + /* Turn last free buffer recording on */ + if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) { + for (i = 0; i < priv->num_rx_queues; i++) { + u32 bdp_dma; + + rx_queue = priv->rx_queue[i]; + bdp_dma = gfar_rxbd_dma_lastfree(rx_queue); + gfar_write(rx_queue->rfbptr, bdp_dma); + } + + priv->tx_actual_en = 1; + } + + if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval)) + priv->tx_actual_en = 0; + + gfar_write(®s->maccfg1, tempval1); + gfar_write(®s->maccfg2, tempval); + gfar_write(®s->ecntrl, ecntrl); + + if (!priv->oldlink) + priv->oldlink = 1; + + } else if (priv->oldlink) { + priv->oldlink = 0; + priv->oldspeed = 0; + priv->oldduplex = -1; + } + + if (netif_msg_link(priv)) + phy_print_status(phydev); +} + +/* Called every time the controller might need to be made + * aware of new link state. The PHY code conveys this + * information through variables in the phydev structure, and this + * function converts those variables into the appropriate + * register values, and can bring down the device if needed. 
*/ -static int gfar_enet_open(struct net_device *dev) +static void adjust_link(struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); - int err; + struct phy_device *phydev = dev->phydev; - err = init_phy(dev); - if (err) - return err; + if (unlikely(phydev->link != priv->oldlink || + (phydev->link && (phydev->duplex != priv->oldduplex || + phydev->speed != priv->oldspeed)))) + gfar_update_link_state(priv); +} - err = gfar_request_irq(priv); - if (err) - return err; +/* Initialize TBI PHY interface for communicating with the + * SERDES lynx PHY on the chip. We communicate with this PHY + * through the MDIO bus on each controller, treating it as a + * "normal" PHY at the address found in the TBIPA register. We assume + * that the TBIPA register is valid. Either the MDIO bus code will set + * it to a value that doesn't conflict with other PHYs on the bus, or the + * value doesn't matter, as there are no other PHYs on the bus. + */ +static void gfar_configure_serdes(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + struct phy_device *tbiphy; - err = startup_gfar(dev); - if (err) - return err; + if (!priv->tbi_node) { + dev_warn(&dev->dev, "error: SGMII mode requires that the " + "device tree specify a tbi-handle\n"); + return; + } - return err; + tbiphy = of_phy_find_device(priv->tbi_node); + if (!tbiphy) { + dev_err(&dev->dev, "error: Could not get TBI device\n"); + return; + } + + /* If the link is already up, we must already be ok, and don't need to + * configure and reset the TBI<->SerDes link. Maybe U-Boot configured + * everything for us? Resetting it takes the link down and requires + * several seconds for it to come back. + */ + if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) { + put_device(&tbiphy->mdio.dev); + return; + } + + /* Single clk mode, mii mode off(for serdes communication) */ + phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT); + + phy_write(tbiphy, MII_ADVERTISE, + ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE | + ADVERTISE_1000XPSE_ASYM); + + phy_write(tbiphy, MII_BMCR, + BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX | + BMCR_SPEED1000); + + put_device(&tbiphy->mdio.dev); +} + +/* Initializes driver's PHY state, and attaches to the PHY. + * Returns 0 on success. 
+ */ +static int init_phy(struct net_device *dev) +{ + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + struct gfar_private *priv = netdev_priv(dev); + phy_interface_t interface = priv->interface; + struct phy_device *phydev; + struct ethtool_eee edata; + + linkmode_set_bit_array(phy_10_100_features_array, + ARRAY_SIZE(phy_10_100_features_array), + mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask); + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask); + + priv->oldlink = 0; + priv->oldspeed = 0; + priv->oldduplex = -1; + + phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0, + interface); + if (!phydev) { + dev_err(&dev->dev, "could not attach to PHY\n"); + return -ENODEV; + } + + if (interface == PHY_INTERFACE_MODE_SGMII) + gfar_configure_serdes(dev); + + /* Remove any features not supported by the controller */ + linkmode_and(phydev->supported, phydev->supported, mask); + linkmode_copy(phydev->advertising, phydev->supported); + + /* Add support for flow control */ + phy_support_asym_pause(phydev); + + /* disable EEE autoneg, EEE not supported by eTSEC */ + memset(&edata, 0, sizeof(struct ethtool_eee)); + phy_ethtool_set_eee(phydev, &edata); + + return 0; } static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb) @@ -2584,22 +2039,6 @@ dma_map_err: return NETDEV_TX_OK; } -/* Stops the kernel queue, and halts the controller */ -static int gfar_close(struct net_device *dev) -{ - struct gfar_private *priv = netdev_priv(dev); - - cancel_work_sync(&priv->reset_task); - stop_gfar(dev); - - /* Disconnect from the PHY */ - phy_disconnect(dev->phydev); - - gfar_free_irq(priv); - - return 0; -} - /* Changes the mac address if the controller is not running. */ static int gfar_set_mac_address(struct net_device *dev) { @@ -2661,6 +2100,85 @@ static void gfar_timeout(struct net_device *dev) schedule_work(&priv->reset_task); } +static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) +{ + struct hwtstamp_config config; + struct gfar_private *priv = netdev_priv(netdev); + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + /* reserved for future extensions */ + if (config.flags) + return -EINVAL; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + priv->hwts_tx_en = 0; + break; + case HWTSTAMP_TX_ON: + if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) + return -ERANGE; + priv->hwts_tx_en = 1; + break; + default: + return -ERANGE; + } + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + if (priv->hwts_rx_en) { + priv->hwts_rx_en = 0; + reset_gfar(netdev); + } + break; + default: + if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) + return -ERANGE; + if (!priv->hwts_rx_en) { + priv->hwts_rx_en = 1; + reset_gfar(netdev); + } + config.rx_filter = HWTSTAMP_FILTER_ALL; + break; + } + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr) +{ + struct hwtstamp_config config; + struct gfar_private *priv = netdev_priv(netdev); + + config.flags = 0; + config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; + config.rx_filter = (priv->hwts_rx_en ? + HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE); + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
+ -EFAULT : 0; +} + +static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct phy_device *phydev = dev->phydev; + + if (!netif_running(dev)) + return -EINVAL; + + if (cmd == SIOCSHWTSTAMP) + return gfar_hwtstamp_set(dev, rq); + if (cmd == SIOCGHWTSTAMP) + return gfar_hwtstamp_get(dev, rq); + + if (!phydev) + return -ENODEV; + + return phy_mii_ioctl(phydev, rq, cmd); +} + /* Interrupt Handler for Transmit complete */ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) { @@ -2768,77 +2286,6 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) netdev_tx_completed_queue(txq, howmany, bytes_sent); } -static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb) -{ - struct page *page; - dma_addr_t addr; - - page = dev_alloc_page(); - if (unlikely(!page)) - return false; - - addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(rxq->dev, addr))) { - __free_page(page); - - return false; - } - - rxb->dma = addr; - rxb->page = page; - rxb->page_offset = 0; - - return true; -} - -static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue) -{ - struct gfar_private *priv = netdev_priv(rx_queue->ndev); - struct gfar_extra_stats *estats = &priv->extra_stats; - - netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n"); - atomic64_inc(&estats->rx_alloc_err); -} - -static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue, - int alloc_cnt) -{ - struct rxbd8 *bdp; - struct gfar_rx_buff *rxb; - int i; - - i = rx_queue->next_to_use; - bdp = &rx_queue->rx_bd_base[i]; - rxb = &rx_queue->rx_buff[i]; - - while (alloc_cnt--) { - /* try reuse page */ - if (unlikely(!rxb->page)) { - if (unlikely(!gfar_new_page(rx_queue, rxb))) { - gfar_rx_alloc_err(rx_queue); - break; - } - } - - /* Setup the new RxBD */ - gfar_init_rxbdp(rx_queue, bdp, - rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT); - - /* Update to the next pointer */ - bdp++; - rxb++; - - if (unlikely(++i == rx_queue->rx_ring_size)) { - i = 0; - bdp = rx_queue->rx_bd_base; - rxb = rx_queue->rx_buff; - } - } - - rx_queue->next_to_use = i; - rx_queue->next_to_alloc = i; -} - static void count_errors(u32 lstatus, struct net_device *ndev) { struct gfar_private *priv = netdev_priv(ndev); @@ -2876,7 +2323,7 @@ static void count_errors(u32 lstatus, struct net_device *ndev) } } -irqreturn_t gfar_receive(int irq, void *grp_id) +static irqreturn_t gfar_receive(int irq, void *grp_id) { struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id; unsigned long flags; @@ -3078,7 +2525,8 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb) * until the budget/quota has been reached. Returns the number * of frames handled */ -int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) +static int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, + int rx_work_limit) { struct net_device *ndev = rx_queue->ndev; struct gfar_private *priv = netdev_priv(ndev); @@ -3328,6 +2776,98 @@ static int gfar_poll_tx(struct napi_struct *napi, int budget) return 0; } +/* GFAR error interrupt handler */ +static irqreturn_t gfar_error(int irq, void *grp_id) +{ + struct gfar_priv_grp *gfargrp = grp_id; + struct gfar __iomem *regs = gfargrp->regs; + struct gfar_private *priv= gfargrp->priv; + struct net_device *dev = priv->ndev; + + /* Save ievent for future reference */ + u32 events = gfar_read(®s->ievent); + + /* Clear IEVENT */ + gfar_write(®s->ievent, events & IEVENT_ERR_MASK); + + /* Magic Packet is not an error. 
*/ + if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && + (events & IEVENT_MAG)) + events &= ~IEVENT_MAG; + + /* Hmm... */ + if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv)) + netdev_dbg(dev, + "error interrupt (ievent=0x%08x imask=0x%08x)\n", + events, gfar_read(®s->imask)); + + /* Update the error counters */ + if (events & IEVENT_TXE) { + dev->stats.tx_errors++; + + if (events & IEVENT_LC) + dev->stats.tx_window_errors++; + if (events & IEVENT_CRL) + dev->stats.tx_aborted_errors++; + if (events & IEVENT_XFUN) { + netif_dbg(priv, tx_err, dev, + "TX FIFO underrun, packet dropped\n"); + dev->stats.tx_dropped++; + atomic64_inc(&priv->extra_stats.tx_underrun); + + schedule_work(&priv->reset_task); + } + netif_dbg(priv, tx_err, dev, "Transmit Error\n"); + } + if (events & IEVENT_BSY) { + dev->stats.rx_over_errors++; + atomic64_inc(&priv->extra_stats.rx_bsy); + + netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n", + gfar_read(®s->rstat)); + } + if (events & IEVENT_BABR) { + dev->stats.rx_errors++; + atomic64_inc(&priv->extra_stats.rx_babr); + + netif_dbg(priv, rx_err, dev, "babbling RX error\n"); + } + if (events & IEVENT_EBERR) { + atomic64_inc(&priv->extra_stats.eberr); + netif_dbg(priv, rx_err, dev, "bus error\n"); + } + if (events & IEVENT_RXC) + netif_dbg(priv, rx_status, dev, "control frame\n"); + + if (events & IEVENT_BABT) { + atomic64_inc(&priv->extra_stats.tx_babt); + netif_dbg(priv, tx_err, dev, "babbling TX error\n"); + } + return IRQ_HANDLED; +} + +/* The interrupt handler for devices with one interrupt */ +static irqreturn_t gfar_interrupt(int irq, void *grp_id) +{ + struct gfar_priv_grp *gfargrp = grp_id; + + /* Save ievent for future reference */ + u32 events = gfar_read(&gfargrp->regs->ievent); + + /* Check for reception */ + if (events & IEVENT_RX_MASK) + gfar_receive(irq, grp_id); + + /* Check for transmit completion */ + if (events & IEVENT_TX_MASK) + gfar_transmit(irq, grp_id); + + /* Check for errors */ + if (events & IEVENT_ERR_MASK) + gfar_error(irq, grp_id); + + return IRQ_HANDLED; +} #ifdef CONFIG_NET_POLL_CONTROLLER /* Polling 'interrupt' - used by things like netconsole to send skbs @@ -3364,44 +2904,154 @@ static void gfar_netpoll(struct net_device *dev) } #endif -/* The interrupt handler for devices with one interrupt */ -static irqreturn_t gfar_interrupt(int irq, void *grp_id) +static void free_grp_irqs(struct gfar_priv_grp *grp) { - struct gfar_priv_grp *gfargrp = grp_id; + free_irq(gfar_irq(grp, TX)->irq, grp); + free_irq(gfar_irq(grp, RX)->irq, grp); + free_irq(gfar_irq(grp, ER)->irq, grp); +} - /* Save ievent for future reference */ - u32 events = gfar_read(&gfargrp->regs->ievent); +static int register_grp_irqs(struct gfar_priv_grp *grp) +{ + struct gfar_private *priv = grp->priv; + struct net_device *dev = priv->ndev; + int err; - /* Check for reception */ - if (events & IEVENT_RX_MASK) - gfar_receive(irq, grp_id); + /* If the device has multiple interrupts, register for + * them. 
Otherwise, only register for the one + */ + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { + /* Install our interrupt handlers for Error, + * Transmit, and Receive + */ + err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0, + gfar_irq(grp, ER)->name, grp); + if (err < 0) { + netif_err(priv, intr, dev, "Can't get IRQ %d\n", + gfar_irq(grp, ER)->irq); - /* Check for transmit completion */ - if (events & IEVENT_TX_MASK) - gfar_transmit(irq, grp_id); + goto err_irq_fail; + } + enable_irq_wake(gfar_irq(grp, ER)->irq); - /* Check for errors */ - if (events & IEVENT_ERR_MASK) - gfar_error(irq, grp_id); + err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0, + gfar_irq(grp, TX)->name, grp); + if (err < 0) { + netif_err(priv, intr, dev, "Can't get IRQ %d\n", + gfar_irq(grp, TX)->irq); + goto tx_irq_fail; + } + err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0, + gfar_irq(grp, RX)->name, grp); + if (err < 0) { + netif_err(priv, intr, dev, "Can't get IRQ %d\n", + gfar_irq(grp, RX)->irq); + goto rx_irq_fail; + } + enable_irq_wake(gfar_irq(grp, RX)->irq); + + } else { + err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0, + gfar_irq(grp, TX)->name, grp); + if (err < 0) { + netif_err(priv, intr, dev, "Can't get IRQ %d\n", + gfar_irq(grp, TX)->irq); + goto err_irq_fail; + } + enable_irq_wake(gfar_irq(grp, TX)->irq); + } + + return 0; + +rx_irq_fail: + free_irq(gfar_irq(grp, TX)->irq, grp); +tx_irq_fail: + free_irq(gfar_irq(grp, ER)->irq, grp); +err_irq_fail: + return err; - return IRQ_HANDLED; } -/* Called every time the controller might need to be made - * aware of new link state. The PHY code conveys this - * information through variables in the phydev structure, and this - * function converts those variables into the appropriate - * register values, and can bring down the device if needed. +static void gfar_free_irq(struct gfar_private *priv) +{ + int i; + + /* Free the IRQs */ + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { + for (i = 0; i < priv->num_grps; i++) + free_grp_irqs(&priv->gfargrp[i]); + } else { + for (i = 0; i < priv->num_grps; i++) + free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq, + &priv->gfargrp[i]); + } +} + +static int gfar_request_irq(struct gfar_private *priv) +{ + int err, i, j; + + for (i = 0; i < priv->num_grps; i++) { + err = register_grp_irqs(&priv->gfargrp[i]); + if (err) { + for (j = 0; j < i; j++) + free_grp_irqs(&priv->gfargrp[j]); + return err; + } + } + + return 0; +} + +/* Called when something needs to use the ethernet device + * Returns 0 for success. 
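+ * The PHY is brought up first, then the group IRQs are requested
+ * and the controller is started via startup_gfar().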
*/ -static void adjust_link(struct net_device *dev) +static int gfar_enet_open(struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); - struct phy_device *phydev = dev->phydev; + int err; - if (unlikely(phydev->link != priv->oldlink || - (phydev->link && (phydev->duplex != priv->oldduplex || - phydev->speed != priv->oldspeed)))) - gfar_update_link_state(priv); + err = init_phy(dev); + if (err) + return err; + + err = gfar_request_irq(priv); + if (err) + return err; + + err = startup_gfar(dev); + if (err) + return err; + + return err; +} + +/* Stops the kernel queue, and halts the controller */ +static int gfar_close(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + + cancel_work_sync(&priv->reset_task); + stop_gfar(dev); + + /* Disconnect from the PHY */ + phy_disconnect(dev->phydev); + + gfar_free_irq(priv); + + return 0; +} + +/* Clears each of the exact match registers to zero, so they + * don't interfere with normal reception + */ +static void gfar_clear_exact_match(struct net_device *dev) +{ + int idx; + static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0}; + + for (idx = 1; idx < GFAR_EM_NUM + 1; idx++) + gfar_set_mac_for_addr(dev, idx, zero_arr); } /* Update the hash table based on the current list of multicast @@ -3495,274 +3145,582 @@ static void gfar_set_multi(struct net_device *dev) } } - -/* Clears each of the exact match registers to zero, so they - * don't interfere with normal reception - */ -static void gfar_clear_exact_match(struct net_device *dev) +void gfar_mac_reset(struct gfar_private *priv) { - int idx; - static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0}; + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 tempval; - for (idx = 1; idx < GFAR_EM_NUM + 1; idx++) - gfar_set_mac_for_addr(dev, idx, zero_arr); -} + /* Reset MAC layer */ + gfar_write(®s->maccfg1, MACCFG1_SOFT_RESET); -/* Set the appropriate hash bit for the given addr */ -/* The algorithm works like so: - * 1) Take the Destination Address (ie the multicast address), and - * do a CRC on it (little endian), and reverse the bits of the - * result. - * 2) Use the 8 most significant bits as a hash into a 256-entry - * table. The table is controlled through 8 32-bit registers: - * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is - * gaddr7. This means that the 3 most significant bits in the - * hash index which gaddr register to use, and the 5 other bits - * indicate which bit (assuming an IBM numbering scheme, which - * for PowerPC (tm) is usually the case) in the register holds - * the entry. 
- */ -static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr) -{ - u32 tempval; - struct gfar_private *priv = netdev_priv(dev); - u32 result = ether_crc(ETH_ALEN, addr); - int width = priv->hash_width; - u8 whichbit = (result >> (32 - width)) & 0x1f; - u8 whichreg = result >> (32 - width + 5); - u32 value = (1 << (31-whichbit)); + /* We need to delay at least 3 TX clocks */ + udelay(3); - tempval = gfar_read(priv->hash_regs[whichreg]); - tempval |= value; - gfar_write(priv->hash_regs[whichreg], tempval); -} + /* the soft reset bit is not self-resetting, so we need to + * clear it before resuming normal operation + */ + gfar_write(®s->maccfg1, 0); + udelay(3); -/* There are multiple MAC Address register pairs on some controllers - * This function sets the numth pair to a given address - */ -static void gfar_set_mac_for_addr(struct net_device *dev, int num, - const u8 *addr) + gfar_rx_offload_en(priv); + + /* Initialize the max receive frame/buffer lengths */ + gfar_write(®s->maxfrm, GFAR_JUMBO_FRAME_SIZE); + gfar_write(®s->mrblr, GFAR_RXB_SIZE); + + /* Initialize the Minimum Frame Length Register */ + gfar_write(®s->minflr, MINFLR_INIT_SETTINGS); + + /* Initialize MACCFG2. */ + tempval = MACCFG2_INIT_SETTINGS; + + /* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1 + * are marked as truncated. Avoid this by MACCFG2[Huge Frame]=1, + * and by checking RxBD[LG] and discarding larger than MAXFRM. + */ + if (gfar_has_errata(priv, GFAR_ERRATA_74)) + tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK; + + gfar_write(®s->maccfg2, tempval); + + /* Clear mac addr hash registers */ + gfar_write(®s->igaddr0, 0); + gfar_write(®s->igaddr1, 0); + gfar_write(®s->igaddr2, 0); + gfar_write(®s->igaddr3, 0); + gfar_write(®s->igaddr4, 0); + gfar_write(®s->igaddr5, 0); + gfar_write(®s->igaddr6, 0); + gfar_write(®s->igaddr7, 0); + + gfar_write(®s->gaddr0, 0); + gfar_write(®s->gaddr1, 0); + gfar_write(®s->gaddr2, 0); + gfar_write(®s->gaddr3, 0); + gfar_write(®s->gaddr4, 0); + gfar_write(®s->gaddr5, 0); + gfar_write(®s->gaddr6, 0); + gfar_write(®s->gaddr7, 0); + + if (priv->extended_hash) + gfar_clear_exact_match(priv->ndev); + + gfar_mac_rx_config(priv); + + gfar_mac_tx_config(priv); + + gfar_set_mac_address(priv->ndev); + + gfar_set_multi(priv->ndev); + + /* clear ievent and imask before configuring coalescing */ + gfar_ints_disable(priv); + + /* Configure the coalescing support */ + gfar_configure_coalescing_all(priv); +} + +static void gfar_hw_init(struct gfar_private *priv) { - struct gfar_private *priv = netdev_priv(dev); struct gfar __iomem *regs = priv->gfargrp[0].regs; - u32 tempval; - u32 __iomem *macptr = ®s->macstnaddr1; + u32 attrs; - macptr += num*2; + /* Stop the DMA engine now, in case it was running before + * (The firmware could have used it, and left it running). + */ + gfar_halt(priv); - /* For a station address of 0x12345678ABCD in transmission - * order (BE), MACnADDR1 is set to 0xCDAB7856 and - * MACnADDR2 is set to 0x34120000. 
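+	/* Soft-reset the MAC and reapply its default settings */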
+ gfar_mac_reset(priv); + + /* Zero out the rmon mib registers if it has them */ + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { + memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib)); + + /* Mask off the CAM interrupts */ + gfar_write(®s->rmon.cam1, 0xffffffff); + gfar_write(®s->rmon.cam2, 0xffffffff); + } + + /* Initialize ECNTRL */ + gfar_write(®s->ecntrl, ECNTRL_INIT_SETTINGS); + + /* Set the extraction length and index */ + attrs = ATTRELI_EL(priv->rx_stash_size) | + ATTRELI_EI(priv->rx_stash_index); + + gfar_write(®s->attreli, attrs); + + /* Start with defaults, and add stashing + * depending on driver parameters */ - tempval = (addr[5] << 24) | (addr[4] << 16) | - (addr[3] << 8) | addr[2]; + attrs = ATTR_INIT_SETTINGS; - gfar_write(macptr, tempval); + if (priv->bd_stash_en) + attrs |= ATTR_BDSTASH; - tempval = (addr[1] << 24) | (addr[0] << 16); + if (priv->rx_stash_size != 0) + attrs |= ATTR_BUFSTASH; - gfar_write(macptr+1, tempval); + gfar_write(®s->attr, attrs); + + /* FIFO configs */ + gfar_write(®s->fifo_tx_thr, DEFAULT_FIFO_TX_THR); + gfar_write(®s->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE); + gfar_write(®s->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF); + + /* Program the interrupt steering regs, only for MG devices */ + if (priv->num_grps > 1) + gfar_write_isrg(priv); } -/* GFAR error interrupt handler */ -static irqreturn_t gfar_error(int irq, void *grp_id) +static const struct net_device_ops gfar_netdev_ops = { + .ndo_open = gfar_enet_open, + .ndo_start_xmit = gfar_start_xmit, + .ndo_stop = gfar_close, + .ndo_change_mtu = gfar_change_mtu, + .ndo_set_features = gfar_set_features, + .ndo_set_rx_mode = gfar_set_multi, + .ndo_tx_timeout = gfar_timeout, + .ndo_do_ioctl = gfar_ioctl, + .ndo_get_stats = gfar_get_stats, + .ndo_change_carrier = fixed_phy_change_carrier, + .ndo_set_mac_address = gfar_set_mac_addr, + .ndo_validate_addr = eth_validate_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = gfar_netpoll, +#endif +}; + +/* Set up the ethernet device structure, private data, + * and anything else we need before we start + */ +static int gfar_probe(struct platform_device *ofdev) { - struct gfar_priv_grp *gfargrp = grp_id; - struct gfar __iomem *regs = gfargrp->regs; - struct gfar_private *priv= gfargrp->priv; - struct net_device *dev = priv->ndev; + struct device_node *np = ofdev->dev.of_node; + struct net_device *dev = NULL; + struct gfar_private *priv = NULL; + int err = 0, i; - /* Save ievent for future reference */ - u32 events = gfar_read(®s->ievent); + err = gfar_of_init(ofdev, &dev); - /* Clear IEVENT */ - gfar_write(®s->ievent, events & IEVENT_ERR_MASK); + if (err) + return err; - /* Magic Packet is not an error. */ - if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && - (events & IEVENT_MAG)) - events &= ~IEVENT_MAG; + priv = netdev_priv(dev); + priv->ndev = dev; + priv->ofdev = ofdev; + priv->dev = &ofdev->dev; + SET_NETDEV_DEV(dev, &ofdev->dev); - /* Hmm... 
*/ - if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv)) - netdev_dbg(dev, - "error interrupt (ievent=0x%08x imask=0x%08x)\n", - events, gfar_read(®s->imask)); + INIT_WORK(&priv->reset_task, gfar_reset_task); - /* Update the error counters */ - if (events & IEVENT_TXE) { - dev->stats.tx_errors++; + platform_set_drvdata(ofdev, priv); - if (events & IEVENT_LC) - dev->stats.tx_window_errors++; - if (events & IEVENT_CRL) - dev->stats.tx_aborted_errors++; - if (events & IEVENT_XFUN) { - netif_dbg(priv, tx_err, dev, - "TX FIFO underrun, packet dropped\n"); - dev->stats.tx_dropped++; - atomic64_inc(&priv->extra_stats.tx_underrun); + gfar_detect_errata(priv); - schedule_work(&priv->reset_task); + /* Set the dev->base_addr to the gfar reg region */ + dev->base_addr = (unsigned long) priv->gfargrp[0].regs; + + /* Fill in the dev structure */ + dev->watchdog_timeo = TX_TIMEOUT; + /* MTU range: 50 - 9586 */ + dev->mtu = 1500; + dev->min_mtu = 50; + dev->max_mtu = GFAR_JUMBO_FRAME_SIZE - ETH_HLEN; + dev->netdev_ops = &gfar_netdev_ops; + dev->ethtool_ops = &gfar_ethtool_ops; + + /* Register for napi ...We are registering NAPI for each grp */ + for (i = 0; i < priv->num_grps; i++) { + if (priv->poll_mode == GFAR_SQ_POLLING) { + netif_napi_add(dev, &priv->gfargrp[i].napi_rx, + gfar_poll_rx_sq, GFAR_DEV_WEIGHT); + netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx, + gfar_poll_tx_sq, 2); + } else { + netif_napi_add(dev, &priv->gfargrp[i].napi_rx, + gfar_poll_rx, GFAR_DEV_WEIGHT); + netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx, + gfar_poll_tx, 2); } - netif_dbg(priv, tx_err, dev, "Transmit Error\n"); } - if (events & IEVENT_BSY) { - dev->stats.rx_over_errors++; - atomic64_inc(&priv->extra_stats.rx_bsy); - netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n", - gfar_read(®s->rstat)); + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { + dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | + NETIF_F_RXCSUM; + dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | + NETIF_F_RXCSUM | NETIF_F_HIGHDMA; } - if (events & IEVENT_BABR) { - dev->stats.rx_errors++; - atomic64_inc(&priv->extra_stats.rx_babr); - netif_dbg(priv, rx_err, dev, "babbling RX error\n"); + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { + dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; + dev->features |= NETIF_F_HW_VLAN_CTAG_RX; } - if (events & IEVENT_EBERR) { - atomic64_inc(&priv->extra_stats.eberr); - netif_dbg(priv, rx_err, dev, "bus error\n"); + + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; + + gfar_init_addr_hash_table(priv); + + /* Insert receive time stamps into padding alignment bytes, and + * plus 2 bytes padding to ensure the cpu alignment. 
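+	 * The first 8 bytes of padding hold the hardware timestamp and
+	 * DEFAULT_PADDING keeps the IP header aligned, hence
+	 * priv->padding = 8 + DEFAULT_PADDING below.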
+ */
+	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
+		priv->padding = 8 + DEFAULT_PADDING;
+
+	if (dev->features & NETIF_F_IP_CSUM ||
+	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
+		dev->needed_headroom = GMAC_FCB_LEN;
+
+	/* Initializing some of the rx/tx queue level parameters */
+	for (i = 0; i < priv->num_tx_queues; i++) {
+		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
+		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
+		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
+		priv->tx_queue[i]->txic = DEFAULT_TXIC;
 	}
-	if (events & IEVENT_RXC)
-		netif_dbg(priv, rx_status, dev, "control frame\n");
 
-	if (events & IEVENT_BABT) {
-		atomic64_inc(&priv->extra_stats.tx_babt);
-		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
+	for (i = 0; i < priv->num_rx_queues; i++) {
+		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
+		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
+		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
 	}
-	return IRQ_HANDLED;
+
+	/* Always enable rx filer if available */
+	priv->rx_filer_enable =
+	    (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0;
+	/* Enable most messages by default */
+	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
+	/* use priority h/w tx queue scheduling for single queue devices */
+	if (priv->num_tx_queues == 1)
+		priv->prio_sched_en = 1;
+
+	set_bit(GFAR_DOWN, &priv->state);
+
+	gfar_hw_init(priv);
+
+	/* Carrier starts down, phylib will bring it up */
+	netif_carrier_off(dev);
+
+	err = register_netdev(dev);
+
+	if (err) {
+		pr_err("%s: Cannot register net device, aborting\n", dev->name);
+		goto register_fail;
+	}
+
+	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET)
+		priv->wol_supported |= GFAR_WOL_MAGIC;
+
+	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) &&
+	    priv->rx_filer_enable)
+		priv->wol_supported |= GFAR_WOL_FILER_UCAST;
+
+	device_set_wakeup_capable(&ofdev->dev, priv->wol_supported);
+
+	/* fill out IRQ number and name fields */
+	for (i = 0; i < priv->num_grps; i++) {
+		struct gfar_priv_grp *grp = &priv->gfargrp[i];
+		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+			sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
+				dev->name, "_g", '0' + i, "_tx");
+			sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
+				dev->name, "_g", '0' + i, "_rx");
+			sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
+				dev->name, "_g", '0' + i, "_er");
+		} else
+			strcpy(gfar_irq(grp, TX)->name, dev->name);
+	}
+
+	/* Initialize the filer table */
+	gfar_init_filer_table(priv);
+
+	/* Print out the device info */
+	netdev_info(dev, "mac: %pM\n", dev->dev_addr);
+
+	/* Even more device info helps when determining which kernel
+	 * provided which set of benchmarks. 
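+ * The NAPI mode and the default BD ring sizes are printed below.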
+ */ + netdev_info(dev, "Running with NAPI enabled\n"); + for (i = 0; i < priv->num_rx_queues; i++) + netdev_info(dev, "RX BD ring size for Q[%d]: %d\n", + i, priv->rx_queue[i]->rx_ring_size); + for (i = 0; i < priv->num_tx_queues; i++) + netdev_info(dev, "TX BD ring size for Q[%d]: %d\n", + i, priv->tx_queue[i]->tx_ring_size); + + return 0; + +register_fail: + if (of_phy_is_fixed_link(np)) + of_phy_deregister_fixed_link(np); + unmap_group_regs(priv); + gfar_free_rx_queues(priv); + gfar_free_tx_queues(priv); + of_node_put(priv->phy_node); + of_node_put(priv->tbi_node); + free_gfar_dev(priv); + return err; } -static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv) +static int gfar_remove(struct platform_device *ofdev) { - struct net_device *ndev = priv->ndev; - struct phy_device *phydev = ndev->phydev; - u32 val = 0; + struct gfar_private *priv = platform_get_drvdata(ofdev); + struct device_node *np = ofdev->dev.of_node; - if (!phydev->duplex) - return val; + of_node_put(priv->phy_node); + of_node_put(priv->tbi_node); - if (!priv->pause_aneg_en) { - if (priv->tx_pause_en) - val |= MACCFG1_TX_FLOW; - if (priv->rx_pause_en) - val |= MACCFG1_RX_FLOW; - } else { - u16 lcl_adv, rmt_adv; - u8 flowctrl; - /* get link partner capabilities */ - rmt_adv = 0; - if (phydev->pause) - rmt_adv = LPA_PAUSE_CAP; - if (phydev->asym_pause) - rmt_adv |= LPA_PAUSE_ASYM; + unregister_netdev(priv->ndev); - lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising); - flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); - if (flowctrl & FLOW_CTRL_TX) - val |= MACCFG1_TX_FLOW; - if (flowctrl & FLOW_CTRL_RX) - val |= MACCFG1_RX_FLOW; + if (of_phy_is_fixed_link(np)) + of_phy_deregister_fixed_link(np); + + unmap_group_regs(priv); + gfar_free_rx_queues(priv); + gfar_free_tx_queues(priv); + free_gfar_dev(priv); + + return 0; +} + +#ifdef CONFIG_PM + +static void __gfar_filer_disable(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 temp; + + temp = gfar_read(®s->rctrl); + temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT); + gfar_write(®s->rctrl, temp); +} + +static void __gfar_filer_enable(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 temp; + + temp = gfar_read(®s->rctrl); + temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT; + gfar_write(®s->rctrl, temp); +} + +/* Filer rules implementing wol capabilities */ +static void gfar_filer_config_wol(struct gfar_private *priv) +{ + unsigned int i; + u32 rqfcr; + + __gfar_filer_disable(priv); + + /* clear the filer table, reject any packet by default */ + rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH; + for (i = 0; i <= MAX_FILER_IDX; i++) + gfar_write_filer(priv, i, rqfcr, 0); + + i = 0; + if (priv->wol_opts & GFAR_WOL_FILER_UCAST) { + /* unicast packet, accept it */ + struct net_device *ndev = priv->ndev; + /* get the default rx queue index */ + u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex; + u32 dest_mac_addr = (ndev->dev_addr[0] << 16) | + (ndev->dev_addr[1] << 8) | + ndev->dev_addr[2]; + + rqfcr = (qindex << 10) | RQFCR_AND | + RQFCR_CMP_EXACT | RQFCR_PID_DAH; + + gfar_write_filer(priv, i++, rqfcr, dest_mac_addr); + + dest_mac_addr = (ndev->dev_addr[3] << 16) | + (ndev->dev_addr[4] << 8) | + ndev->dev_addr[5]; + rqfcr = (qindex << 10) | RQFCR_GPI | + RQFCR_CMP_EXACT | RQFCR_PID_DAL; + gfar_write_filer(priv, i++, rqfcr, dest_mac_addr); } - return val; + __gfar_filer_enable(priv); } -static noinline void gfar_update_link_state(struct gfar_private *priv) +static void gfar_filer_restore_table(struct gfar_private 
*priv) +{ + u32 rqfcr, rqfpr; + unsigned int i; + + __gfar_filer_disable(priv); + + for (i = 0; i <= MAX_FILER_IDX; i++) { + rqfcr = priv->ftp_rqfcr[i]; + rqfpr = priv->ftp_rqfpr[i]; + gfar_write_filer(priv, i, rqfcr, rqfpr); + } + + __gfar_filer_enable(priv); +} + +/* gfar_start() for Rx only and with the FGPI filer interrupt enabled */ +static void gfar_start_wol_filer(struct gfar_private *priv) { struct gfar __iomem *regs = priv->gfargrp[0].regs; - struct net_device *ndev = priv->ndev; - struct phy_device *phydev = ndev->phydev; - struct gfar_priv_rx_q *rx_queue = NULL; - int i; + u32 tempval; + int i = 0; - if (unlikely(test_bit(GFAR_RESETTING, &priv->state))) - return; + /* Enable Rx hw queues */ + gfar_write(®s->rqueue, priv->rqueue); - if (phydev->link) { - u32 tempval1 = gfar_read(®s->maccfg1); - u32 tempval = gfar_read(®s->maccfg2); - u32 ecntrl = gfar_read(®s->ecntrl); - u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW); + /* Initialize DMACTRL to have WWR and WOP */ + tempval = gfar_read(®s->dmactrl); + tempval |= DMACTRL_INIT_SETTINGS; + gfar_write(®s->dmactrl, tempval); - if (phydev->duplex != priv->oldduplex) { - if (!(phydev->duplex)) - tempval &= ~(MACCFG2_FULL_DUPLEX); - else - tempval |= MACCFG2_FULL_DUPLEX; + /* Make sure we aren't stopped */ + tempval = gfar_read(®s->dmactrl); + tempval &= ~DMACTRL_GRS; + gfar_write(®s->dmactrl, tempval); - priv->oldduplex = phydev->duplex; - } + for (i = 0; i < priv->num_grps; i++) { + regs = priv->gfargrp[i].regs; + /* Clear RHLT, so that the DMA starts polling now */ + gfar_write(®s->rstat, priv->gfargrp[i].rstat); + /* enable the Filer General Purpose Interrupt */ + gfar_write(®s->imask, IMASK_FGPI); + } - if (phydev->speed != priv->oldspeed) { - switch (phydev->speed) { - case 1000: - tempval = - ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII); + /* Enable Rx DMA */ + tempval = gfar_read(®s->maccfg1); + tempval |= MACCFG1_RX_EN; + gfar_write(®s->maccfg1, tempval); +} - ecntrl &= ~(ECNTRL_R100); - break; - case 100: - case 10: - tempval = - ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII); +static int gfar_suspend(struct device *dev) +{ + struct gfar_private *priv = dev_get_drvdata(dev); + struct net_device *ndev = priv->ndev; + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 tempval; + u16 wol = priv->wol_opts; - /* Reduced mode distinguishes - * between 10 and 100 - */ - if (phydev->speed == SPEED_100) - ecntrl |= ECNTRL_R100; - else - ecntrl &= ~(ECNTRL_R100); - break; - default: - netif_warn(priv, link, priv->ndev, - "Ack! 
Speed (%d) is not 10/100/1000!\n", - phydev->speed); - break; - } + if (!netif_running(ndev)) + return 0; - priv->oldspeed = phydev->speed; - } + disable_napi(priv); + netif_tx_lock(ndev); + netif_device_detach(ndev); + netif_tx_unlock(ndev); - tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); - tempval1 |= gfar_get_flowctrl_cfg(priv); + gfar_halt(priv); - /* Turn last free buffer recording on */ - if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) { - for (i = 0; i < priv->num_rx_queues; i++) { - u32 bdp_dma; + if (wol & GFAR_WOL_MAGIC) { + /* Enable interrupt on Magic Packet */ + gfar_write(®s->imask, IMASK_MAG); - rx_queue = priv->rx_queue[i]; - bdp_dma = gfar_rxbd_dma_lastfree(rx_queue); - gfar_write(rx_queue->rfbptr, bdp_dma); - } + /* Enable Magic Packet mode */ + tempval = gfar_read(®s->maccfg2); + tempval |= MACCFG2_MPEN; + gfar_write(®s->maccfg2, tempval); - priv->tx_actual_en = 1; - } + /* re-enable the Rx block */ + tempval = gfar_read(®s->maccfg1); + tempval |= MACCFG1_RX_EN; + gfar_write(®s->maccfg1, tempval); - if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval)) - priv->tx_actual_en = 0; + } else if (wol & GFAR_WOL_FILER_UCAST) { + gfar_filer_config_wol(priv); + gfar_start_wol_filer(priv); - gfar_write(®s->maccfg1, tempval1); + } else { + phy_stop(ndev->phydev); + } + + return 0; +} + +static int gfar_resume(struct device *dev) +{ + struct gfar_private *priv = dev_get_drvdata(dev); + struct net_device *ndev = priv->ndev; + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 tempval; + u16 wol = priv->wol_opts; + + if (!netif_running(ndev)) + return 0; + + if (wol & GFAR_WOL_MAGIC) { + /* Disable Magic Packet mode */ + tempval = gfar_read(®s->maccfg2); + tempval &= ~MACCFG2_MPEN; gfar_write(®s->maccfg2, tempval); - gfar_write(®s->ecntrl, ecntrl); - if (!priv->oldlink) - priv->oldlink = 1; + } else if (wol & GFAR_WOL_FILER_UCAST) { + /* need to stop rx only, tx is already down */ + gfar_halt(priv); + gfar_filer_restore_table(priv); - } else if (priv->oldlink) { - priv->oldlink = 0; - priv->oldspeed = 0; - priv->oldduplex = -1; + } else { + phy_start(ndev->phydev); } - if (netif_msg_link(priv)) - phy_print_status(phydev); + gfar_start(priv); + + netif_device_attach(ndev); + enable_napi(priv); + + return 0; +} + +static int gfar_restore(struct device *dev) +{ + struct gfar_private *priv = dev_get_drvdata(dev); + struct net_device *ndev = priv->ndev; + + if (!netif_running(ndev)) { + netif_device_attach(ndev); + + return 0; + } + + gfar_init_bds(ndev); + + gfar_mac_reset(priv); + + gfar_init_tx_rx_base(priv); + + gfar_start(priv); + + priv->oldlink = 0; + priv->oldspeed = 0; + priv->oldduplex = -1; + + if (ndev->phydev) + phy_start(ndev->phydev); + + netif_device_attach(ndev); + enable_napi(priv); + + return 0; } +static const struct dev_pm_ops gfar_pm_ops = { + .suspend = gfar_suspend, + .resume = gfar_resume, + .freeze = gfar_suspend, + .thaw = gfar_resume, + .restore = gfar_restore, +}; + +#define GFAR_PM_OPS (&gfar_pm_ops) + +#else + +#define GFAR_PM_OPS NULL + +#endif + static const struct of_device_id gfar_match[] = { { diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index f2af96349c7b..f472a6dbbe6f 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h @@ -67,8 +67,6 @@ struct ethtool_rx_list { /* Number of bytes to align the rx bufs to */ #define RXBUF_ALIGNMENT 64 -#define PHY_INIT_TIMEOUT 100000 - #define DRV_NAME "gfar-enet" extern const char 
gfar_driver_version[]; @@ -88,10 +86,6 @@ extern const char gfar_driver_version[]; #define GFAR_RX_MAX_RING_SIZE 256 #define GFAR_TX_MAX_RING_SIZE 256 -#define GFAR_MAX_FIFO_THRESHOLD 511 -#define GFAR_MAX_FIFO_STARVE 511 -#define GFAR_MAX_FIFO_STARVE_OFF 511 - #define FBTHR_SHIFT 24 #define DEFAULT_RX_LFC_THR 16 #define DEFAULT_LFC_PTVVAL 4 @@ -109,9 +103,6 @@ extern const char gfar_driver_version[]; #define DEFAULT_FIFO_TX_THR 0x100 #define DEFAULT_FIFO_TX_STARVE 0x40 #define DEFAULT_FIFO_TX_STARVE_OFF 0x80 -#define DEFAULT_BD_STASH 1 -#define DEFAULT_STASH_LENGTH 96 -#define DEFAULT_STASH_INDEX 0 /* The number of Exact Match registers */ #define GFAR_EM_NUM 15 @@ -139,15 +130,6 @@ extern const char gfar_driver_version[]; #define DEFAULT_RX_COALESCE 0 #define DEFAULT_RXCOUNT 0 -#define GFAR_SUPPORTED (SUPPORTED_10baseT_Half \ - | SUPPORTED_10baseT_Full \ - | SUPPORTED_100baseT_Half \ - | SUPPORTED_100baseT_Full \ - | SUPPORTED_Autoneg \ - | SUPPORTED_MII) - -#define GFAR_SUPPORTED_GBIT SUPPORTED_1000baseT_Full - /* TBI register addresses */ #define MII_TBICON 0x11 @@ -185,8 +167,6 @@ extern const char gfar_driver_version[]; #define ECNTRL_REDUCED_MII_MODE 0x00000004 #define ECNTRL_SGMII_MODE 0x00000002 -#define MRBLR_INIT_SETTINGS DEFAULT_RX_BUFFER_SIZE - #define MINFLR_INIT_SETTINGS 0x00000040 /* Tqueue control */ @@ -266,12 +246,6 @@ extern const char gfar_driver_version[]; #define DEFAULT_TXIC mk_ic_value(DEFAULT_TXCOUNT, DEFAULT_TXTIME) #define DEFAULT_RXIC mk_ic_value(DEFAULT_RXCOUNT, DEFAULT_RXTIME) -#define skip_bd(bdp, stride, base, ring_size) ({ \ - typeof(bdp) new_bd = (bdp) + (stride); \ - (new_bd >= (base) + (ring_size)) ? (new_bd - (ring_size)) : new_bd; }) - -#define next_bd(bdp, base, ring_size) skip_bd(bdp, 1, base, ring_size) - #define RCTRL_TS_ENABLE 0x01000000 #define RCTRL_PAL_MASK 0x001f0000 #define RCTRL_LFC 0x00004000 @@ -385,11 +359,6 @@ extern const char gfar_driver_version[]; #define IMASK_RX_DISABLED ((~(IMASK_RX_DEFAULT)) & IMASK_DEFAULT) #define IMASK_TX_DISABLED ((~(IMASK_TX_DEFAULT)) & IMASK_DEFAULT) -/* Fifo management */ -#define FIFO_TX_THR_MASK 0x01ff -#define FIFO_TX_STARVE_MASK 0x01ff -#define FIFO_TX_STARVE_OFF_MASK 0x01ff - /* Attribute fields */ /* This enables rx snooping for buffers and descriptors */ @@ -1326,16 +1295,9 @@ static inline u32 gfar_rxbd_dma_lastfree(struct gfar_priv_rx_q *rxq) return bdp_dma; } -irqreturn_t gfar_receive(int irq, void *dev_id); int startup_gfar(struct net_device *dev); void stop_gfar(struct net_device *dev); -void reset_gfar(struct net_device *dev); void gfar_mac_reset(struct gfar_private *priv); -void gfar_halt(struct gfar_private *priv); -void gfar_start(struct gfar_private *priv); -void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, int enable, - u32 regnum, u32 read); -void gfar_configure_coalescing_all(struct gfar_private *priv); int gfar_set_features(struct net_device *dev, netdev_features_t features); extern const struct ethtool_ops gfar_ethtool_ops; @@ -1348,13 +1310,6 @@ extern const struct ethtool_ops gfar_ethtool_ops; #define RQFCR_PID_PORT_MASK 0xFFFF0000 #define RQFCR_PID_MAC_MASK 0xFF000000 -struct gfar_mask_entry { - unsigned int mask; /* The mask value which is valid form start to end */ - unsigned int start; - unsigned int end; - unsigned int block; /* Same block values indicate depended entries */ -}; - /* Represents a receive filer table entry */ struct gfar_filer_entry { u32 ctrl; diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c 
b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 3433b46b90c1..3c8e4e2efc07 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -45,19 +45,6 @@ #define GFAR_MAX_COAL_USECS 0xffff #define GFAR_MAX_COAL_FRAMES 0xff -static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, - u64 *buf); -static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf); -static int gfar_gcoalesce(struct net_device *dev, - struct ethtool_coalesce *cvals); -static int gfar_scoalesce(struct net_device *dev, - struct ethtool_coalesce *cvals); -static void gfar_gringparam(struct net_device *dev, - struct ethtool_ringparam *rvals); -static int gfar_sringparam(struct net_device *dev, - struct ethtool_ringparam *rvals); -static void gfar_gdrvinfo(struct net_device *dev, - struct ethtool_drvinfo *drvinfo); static const char stat_gstrings[][ETH_GSTRING_LEN] = { /* extra stats */ diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c index 689f18e3100f..90ab7ade44c4 100644 --- a/drivers/net/ethernet/hisilicon/hisi_femac.c +++ b/drivers/net/ethernet/hisilicon/hisi_femac.c @@ -877,7 +877,6 @@ static int hisi_femac_drv_probe(struct platform_device *pdev) ndev->irq = platform_get_irq(pdev, 0); if (ndev->irq <= 0) { - dev_err(dev, "No irq resource\n"); ret = -ENODEV; goto out_disconnect_phy; } diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c index 349970557c52..95a6b0926170 100644 --- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c +++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c @@ -719,7 +719,7 @@ static int hix5hd2_fill_sg_desc(struct hix5hd2_priv *priv, for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - int len = frag->size; + int len = skb_frag_size(frag); addr = skb_frag_dma_map(priv->dev, frag, 0, len, DMA_TO_DEVICE); ret = dma_mapping_error(priv->dev, addr); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index c1eba421ba82..3a14bbc26ea2 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -24,7 +24,7 @@ #include "hns_dsaf_rcb.h" #include "hns_dsaf_misc.h" -const static char *g_dsaf_mode_match[DSAF_MODE_MAX] = { +static const char *g_dsaf_mode_match[DSAF_MODE_MAX] = { [DSAF_MODE_DISABLE_2PORT_64VM] = "2port-64vf", [DSAF_MODE_DISABLE_6PORT_0VM] = "6port-16rss", [DSAF_MODE_DISABLE_6PORT_16VM] = "6port-16vf", diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 2235dd55fab2..a48396dd4ebb 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -245,7 +245,7 @@ static int hns_nic_maybe_stop_tso( int frag_num; struct sk_buff *skb = *out_skb; struct sk_buff *new_skb = NULL; - struct skb_frag_struct *frag; + skb_frag_t *frag; size = skb_headlen(skb); buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE; @@ -309,7 +309,7 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev, struct hnae_ring *ring = ring_data->ring; struct device *dev = ring_to_dev(ring); struct netdev_queue *dev_queue; - struct skb_frag_struct *frag; + skb_frag_t *frag; int buf_num; int seg_num; dma_addr_t dma; @@ -1182,6 +1182,8 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h) if (unlikely(ret)) return -ENODEV; 
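+	/* log details of the attached PHY (driver, id, irq) */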
+ phy_attached_info(phy_dev); + return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index 75329ab775a6..f8a87f8ca983 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -47,6 +47,8 @@ enum HCLGE_MBX_OPCODE { HCLGE_MBX_GET_MEDIA_TYPE, /* (VF -> PF) get media type */ HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf reset status */ + HCLGE_MBX_PUSH_LINK_STATUS, /* (M7 -> PF) get port link status */ + HCLGE_MBX_NCSI_ERROR, /* (M7 -> PF) receive a NCSI error */ }; /* below are per-VF mac-vlan subcodes */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c index 908d4f45c06a..03ca7d925e8e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -46,7 +46,7 @@ void hnae3_set_client_init_flag(struct hnae3_client *client, EXPORT_SYMBOL(hnae3_set_client_init_flag); static int hnae3_get_client_init_flag(struct hnae3_client *client, - struct hnae3_ae_dev *ae_dev) + struct hnae3_ae_dev *ae_dev) { int inited = 0; @@ -104,7 +104,6 @@ int hnae3_register_client(struct hnae3_client *client) { struct hnae3_client *client_tmp; struct hnae3_ae_dev *ae_dev; - int ret = 0; if (!client) return -ENODEV; @@ -123,7 +122,7 @@ int hnae3_register_client(struct hnae3_client *client) /* if the client could not be initialized on current port, for * any error reasons, move on to next available port */ - ret = hnae3_init_client_instance(client, ae_dev); + int ret = hnae3_init_client_instance(client, ae_dev); if (ret) dev_err(&ae_dev->pdev->dev, "match and instantiation failed for port, ret = %d\n", @@ -139,12 +138,28 @@ EXPORT_SYMBOL(hnae3_register_client); void hnae3_unregister_client(struct hnae3_client *client) { + struct hnae3_client *client_tmp; struct hnae3_ae_dev *ae_dev; + bool existed = false; if (!client) return; mutex_lock(&hnae3_common_lock); + + list_for_each_entry(client_tmp, &hnae3_client_list, node) { + if (client_tmp->type == client->type) { + existed = true; + break; + } + } + + if (!existed) { + mutex_unlock(&hnae3_common_lock); + pr_err("client %s does not exist!\n", client->name); + return; + } + /* un-initialize the client on every matched port */ list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) { hnae3_uninit_client_instance(client, ae_dev); @@ -164,7 +179,7 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) const struct pci_device_id *id; struct hnae3_ae_dev *ae_dev; struct hnae3_client *client; - int ret = 0; + int ret; if (!ae_algo) return; @@ -258,7 +273,7 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) const struct pci_device_id *id; struct hnae3_ae_algo *ae_algo; struct hnae3_client *client; - int ret = 0; + int ret; if (!ae_dev) return -ENODEV; diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 48c7b70fc2c4..c4b7bf851a28 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -58,10 +58,10 @@ BIT(HNAE3_DEV_SUPPORT_ROCE_B)) #define hnae3_dev_roce_supported(hdev) \ - hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B) + hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B) #define hnae3_dev_dcb_supported(hdev) \ - hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B) + hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B) #define hnae3_dev_fd_supported(hdev) \ 
hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B) @@ -85,13 +85,18 @@ struct hnae3_queue { void __iomem *io_base; struct hnae3_ae_algo *ae_algo; struct hnae3_handle *handle; - int tqp_index; /* index in a handle */ - u32 buf_size; /* size for hnae_desc->addr, preset by AE */ - u16 tx_desc_num;/* total number of tx desc */ - u16 rx_desc_num;/* total number of rx desc */ + int tqp_index; /* index in a handle */ + u32 buf_size; /* size for hnae_desc->addr, preset by AE */ + u16 tx_desc_num; /* total number of tx desc */ + u16 rx_desc_num; /* total number of rx desc */ }; -/*hnae3 loop mode*/ +struct hns3_mac_stats { + u64 tx_pause_cnt; + u64 rx_pause_cnt; +}; + +/* hnae3 loop mode */ enum hnae3_loop { HNAE3_LOOP_APP, HNAE3_LOOP_SERIAL_SERDES, @@ -141,6 +146,12 @@ enum hnae3_reset_notify_type { HNAE3_RESTORE_CLIENT, }; +enum hnae3_hw_error_type { + HNAE3_PPU_POISON_ERROR, + HNAE3_CMDQ_ECC_ERROR, + HNAE3_IMP_RD_POISON_ERROR, +}; + enum hnae3_reset_type { HNAE3_VF_RESET, HNAE3_VF_FUNC_RESET, @@ -179,6 +190,15 @@ struct hnae3_vector_info { #define HNAE3_RING_GL_RX 0 #define HNAE3_RING_GL_TX 1 +#define HNAE3_FW_VERSION_BYTE3_SHIFT 24 +#define HNAE3_FW_VERSION_BYTE3_MASK GENMASK(31, 24) +#define HNAE3_FW_VERSION_BYTE2_SHIFT 16 +#define HNAE3_FW_VERSION_BYTE2_MASK GENMASK(23, 16) +#define HNAE3_FW_VERSION_BYTE1_SHIFT 8 +#define HNAE3_FW_VERSION_BYTE1_MASK GENMASK(15, 8) +#define HNAE3_FW_VERSION_BYTE0_SHIFT 0 +#define HNAE3_FW_VERSION_BYTE0_MASK GENMASK(7, 0) + struct hnae3_ring_chain_node { struct hnae3_ring_chain_node *next; u32 tqp_index; @@ -196,7 +216,8 @@ struct hnae3_client_ops { int (*setup_tc)(struct hnae3_handle *handle, u8 tc); int (*reset_notify)(struct hnae3_handle *handle, enum hnae3_reset_notify_type type); - enum hnae3_reset_type (*process_hw_error)(struct hnae3_handle *handle); + void (*process_hw_error)(struct hnae3_handle *handle, + enum hnae3_hw_error_type); }; #define HNAE3_CLIENT_NAME_LENGTH 16 @@ -289,6 +310,8 @@ struct hnae3_ae_dev { * Remove multicast address from mac table * update_stats() * Update Old network device statistics + * get_mac_stats() + * get mac pause statistics including tx_cnt and rx_cnt * get_ethtool_stats() * Get ethtool network device statistics * get_strings() @@ -417,8 +440,8 @@ struct hnae3_ae_ops { void (*update_stats)(struct hnae3_handle *handle, struct net_device_stats *net_stats); void (*get_stats)(struct hnae3_handle *handle, u64 *data); - void (*get_mac_pause_stats)(struct hnae3_handle *handle, u64 *tx_cnt, - u64 *rx_cnt); + void (*get_mac_stats)(struct hnae3_handle *handle, + struct hns3_mac_stats *mac_stats); void (*get_strings)(struct hnae3_handle *handle, u32 stringset, u8 *data); int (*get_sset_count)(struct hnae3_handle *handle, int stringset); @@ -605,7 +628,7 @@ struct hnae3_handle { struct pci_dev *pdev; void *priv; struct hnae3_ae_algo *ae_algo; /* the class who provides this handle */ - u64 flags; /* Indicate the capabilities for this handle*/ + u64 flags; /* Indicate the capabilities for this handle */ union { struct net_device *netdev; /* first member */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index a4b937286f55..28961a68e333 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -8,6 +8,7 @@ #include "hns3_enet.h" #define HNS3_DBG_READ_LEN 256 +#define HNS3_DBG_WRITE_LEN 1024 static struct dentry *hns3_dbgfs_root; @@ -38,7 +39,7 @@ static int hns3_dbg_queue_info(struct 
hnae3_handle *h, if (queue_num >= h->kinfo.num_tqps) { dev_err(&h->pdev->dev, - "Queue number(%u) is out of range(%u)\n", queue_num, + "Queue number(%u) is out of range(0-%u)\n", queue_num, h->kinfo.num_tqps - 1); return -EINVAL; } @@ -165,6 +166,7 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf) struct hns3_enet_ring *ring; u32 tx_index, rx_index; u32 q_num, value; + dma_addr_t addr; int cnt; cnt = sscanf(&cmd_buf[8], "%u %u", &q_num, &tx_index); @@ -176,7 +178,7 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf) } if (q_num >= h->kinfo.num_tqps) { - dev_err(dev, "Queue number(%u) is out of range(%u)\n", q_num, + dev_err(dev, "Queue number(%u) is out of range(0-%u)\n", q_num, h->kinfo.num_tqps - 1); return -EINVAL; } @@ -187,14 +189,15 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf) tx_index = (cnt == 1) ? value : tx_index; if (tx_index >= ring->desc_num) { - dev_err(dev, "bd index (%u) is out of range(%u)\n", tx_index, + dev_err(dev, "bd index(%u) is out of range(0-%u)\n", tx_index, ring->desc_num - 1); return -EINVAL; } tx_desc = &ring->desc[tx_index]; + addr = le64_to_cpu(tx_desc->addr); dev_info(dev, "TX Queue Num: %u, BD Index: %u\n", q_num, tx_index); - dev_info(dev, "(TX) addr: 0x%llx\n", tx_desc->addr); + dev_info(dev, "(TX)addr: %pad\n", &addr); dev_info(dev, "(TX)vlan_tag: %u\n", tx_desc->tx.vlan_tag); dev_info(dev, "(TX)send_size: %u\n", tx_desc->tx.send_size); dev_info(dev, "(TX)vlan_tso: %u\n", tx_desc->tx.type_cs_vlan_tso); @@ -216,8 +219,10 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf) rx_index = (cnt == 1) ? value : tx_index; rx_desc = &ring->desc[rx_index]; + addr = le64_to_cpu(rx_desc->addr); dev_info(dev, "RX Queue Num: %u, BD Index: %u\n", q_num, rx_index); - dev_info(dev, "(RX)addr: 0x%llx\n", rx_desc->addr); + dev_info(dev, "(RX)addr: %pad\n", &addr); + dev_info(dev, "(RX)l234_info: %u\n", rx_desc->rx.l234_info); dev_info(dev, "(RX)pkt_len: %u\n", rx_desc->rx.pkt_len); dev_info(dev, "(RX)size: %u\n", rx_desc->rx.size); dev_info(dev, "(RX)rss_hash: %u\n", rx_desc->rx.rss_hash); @@ -237,16 +242,16 @@ static void hns3_dbg_help(struct hnae3_handle *h) char printf_buf[HNS3_DBG_BUF_LEN]; dev_info(&h->pdev->dev, "available commands\n"); - dev_info(&h->pdev->dev, "queue info [number]\n"); + dev_info(&h->pdev->dev, "queue info <number>\n"); dev_info(&h->pdev->dev, "queue map\n"); - dev_info(&h->pdev->dev, "bd info [q_num] <bd index>\n"); + dev_info(&h->pdev->dev, "bd info <q_num> <bd index>\n"); if (!hns3_is_phys_func(h->pdev)) return; dev_info(&h->pdev->dev, "dump fd tcam\n"); dev_info(&h->pdev->dev, "dump tc\n"); - dev_info(&h->pdev->dev, "dump tm map [q_num]\n"); + dev_info(&h->pdev->dev, "dump tm map <q_num>\n"); dev_info(&h->pdev->dev, "dump tm\n"); dev_info(&h->pdev->dev, "dump qos pause cfg\n"); dev_info(&h->pdev->dev, "dump qos pri map\n"); @@ -258,20 +263,20 @@ static void hns3_dbg_help(struct hnae3_handle *h) dev_info(&h->pdev->dev, "dump mac tnl status\n"); memset(printf_buf, 0, HNS3_DBG_BUF_LEN); - strncat(printf_buf, "dump reg [[bios common] [ssu <prt_id>]", + strncat(printf_buf, "dump reg [[bios common] [ssu <port_id>]", HNS3_DBG_BUF_LEN - 1); strncat(printf_buf + strlen(printf_buf), - " [igu egu <prt_id>] [rpu <tc_queue_num>]", + " [igu egu <port_id>] [rpu <tc_queue_num>]", HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1); strncat(printf_buf + strlen(printf_buf), - " [rtc] [ppp] [rcb] [tqp <q_num>]]\n", + " [rtc] [ppp] [rcb] [tqp <queue_num>]]\n", 
HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1); dev_info(&h->pdev->dev, "%s", printf_buf); memset(printf_buf, 0, HNS3_DBG_BUF_LEN); - strncat(printf_buf, "dump reg dcb [port_id] [pri_id] [pg_id]", + strncat(printf_buf, "dump reg dcb <port_id> <pri_id> <pg_id>", HNS3_DBG_BUF_LEN - 1); - strncat(printf_buf + strlen(printf_buf), " [rq_id] [nq_id] [qset_id]\n", + strncat(printf_buf + strlen(printf_buf), " <rq_id> <nq_id> <qset_id>\n", HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1); dev_info(&h->pdev->dev, "%s", printf_buf); } @@ -322,6 +327,9 @@ static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer, test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) return 0; + if (count > HNS3_DBG_WRITE_LEN) + return -ENOSPC; + cmd_buf = kzalloc(count + 1, GFP_KERNEL); if (!cmd_buf) return count; @@ -372,20 +380,11 @@ static const struct file_operations hns3_dbg_cmd_fops = { void hns3_dbg_init(struct hnae3_handle *handle) { const char *name = pci_name(handle->pdev); - struct dentry *pfile; handle->hnae3_dbgfs = debugfs_create_dir(name, hns3_dbgfs_root); - if (!handle->hnae3_dbgfs) - return; - pfile = debugfs_create_file("cmd", 0600, handle->hnae3_dbgfs, handle, - &hns3_dbg_cmd_fops); - if (!pfile) { - debugfs_remove_recursive(handle->hnae3_dbgfs); - handle->hnae3_dbgfs = NULL; - dev_warn(&handle->pdev->dev, "create file for %s fail\n", - name); - } + debugfs_create_file("cmd", 0600, handle->hnae3_dbgfs, handle, + &hns3_dbg_cmd_fops); } void hns3_dbg_uninit(struct hnae3_handle *handle) @@ -397,10 +396,6 @@ void hns3_dbg_uninit(struct hnae3_handle *handle) void hns3_dbg_register_debugfs(const char *debugfs_dir_name) { hns3_dbgfs_root = debugfs_create_dir(debugfs_dir_name, NULL); - if (!hns3_dbgfs_root) { - pr_warn("Register debugfs for %s fail\n", debugfs_dir_name); - return; - } } void hns3_dbg_unregister_debugfs(void) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 310afa708831..616cad0faa21 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -28,6 +28,12 @@ #define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift))) #define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE) +#define hns3_rl_err(fmt, ...) \ + do { \ + if (net_ratelimit()) \ + netdev_err(fmt, ##__VA_ARGS__); \ + } while (0) + static void hns3_clear_all_ring(struct hnae3_handle *h, bool force); static void hns3_remove_hw_addr(struct net_device *netdev); @@ -45,6 +51,9 @@ MODULE_PARM_DESC(debug, " Network interface message level setting"); #define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \ NETIF_MSG_IFDOWN | NETIF_MSG_IFUP) +#define HNS3_INNER_VLAN_TAG 1 +#define HNS3_OUTER_VLAN_TAG 2 + /* hns3_pci_tbl - PCI Device ID Table * * Last entry must be all 0s @@ -220,9 +229,9 @@ static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector, /* initialize the configuration for interrupt coalescing. * 1. GL (Interrupt Gap Limiter) * 2. 
RL (Interrupt Rate Limiter) + * + * Default: enable interrupt coalescing self-adaptive and GL */ - - /* Default: enable interrupt coalescing self-adaptive and GL */ tqp_vector->tx_group.coal.gl_adapt_enable = 1; tqp_vector->rx_group.coal.gl_adapt_enable = 1; @@ -459,6 +468,9 @@ static int hns3_nic_net_open(struct net_device *netdev) h->ae_algo->ops->set_timer_task(priv->ae_handle, true); hns3_config_xps(priv); + + netif_dbg(h, drv, netdev, "net open\n"); + return 0; } @@ -519,6 +531,8 @@ static int hns3_nic_net_stop(struct net_device *netdev) if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state)) return 0; + netif_dbg(h, drv, netdev, "net stop\n"); + if (h->ae_algo->ops->set_timer_task) h->ae_algo->ops->set_timer_task(priv->ae_handle, false); @@ -956,16 +970,16 @@ static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end) hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1U); } -static int hns3_fill_desc_vtags(struct sk_buff *skb, - struct hns3_enet_ring *tx_ring, - u32 *inner_vlan_flag, - u32 *out_vlan_flag, - u16 *inner_vtag, - u16 *out_vtag) +static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring, + struct sk_buff *skb) { -#define HNS3_TX_VLAN_PRIO_SHIFT 13 - struct hnae3_handle *handle = tx_ring->tqp->handle; + struct vlan_ethhdr *vhdr; + int rc; + + if (!(skb->protocol == htons(ETH_P_8021Q) || + skb_vlan_tag_present(skb))) + return 0; /* Since HW limitation, if port based insert VLAN enabled, only one VLAN * header is allowed in skb, otherwise it will cause RAS error. @@ -976,8 +990,7 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb, return -EINVAL; if (skb->protocol == htons(ETH_P_8021Q) && - !(tx_ring->tqp->handle->kinfo.netdev->features & - NETIF_F_HW_VLAN_CTAG_TX)) { + !(handle->kinfo.netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { /* When HW VLAN acceleration is turned off, and the stack * sets the protocol to 802.1q, the driver just need to * set the protocol to the encapsulated ethertype. @@ -987,45 +1000,107 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb, } if (skb_vlan_tag_present(skb)) { - u16 vlan_tag; - - vlan_tag = skb_vlan_tag_get(skb); - vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT; - /* Based on hw strategy, use out_vtag in two layer tag case, * and use inner_vtag in one tag case. 
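+	 * The HNS3_INNER_VLAN_TAG/HNS3_OUTER_VLAN_TAG return codes
+	 * tell the caller which vlan_tag field of the Tx descriptor
+	 * to fill in.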
*/ - if (skb->protocol == htons(ETH_P_8021Q)) { - if (handle->port_base_vlan_state == - HNAE3_PORT_BASE_VLAN_DISABLE){ - hns3_set_field(*out_vlan_flag, - HNS3_TXD_OVLAN_B, 1); - *out_vtag = vlan_tag; - } else { - hns3_set_field(*inner_vlan_flag, - HNS3_TXD_VLAN_B, 1); - *inner_vtag = vlan_tag; - } - } else { - hns3_set_field(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1); - *inner_vtag = vlan_tag; - } - } else if (skb->protocol == htons(ETH_P_8021Q)) { - struct vlan_ethhdr *vhdr; - int rc; + if (skb->protocol == htons(ETH_P_8021Q) && + handle->port_base_vlan_state == + HNAE3_PORT_BASE_VLAN_DISABLE) + rc = HNS3_OUTER_VLAN_TAG; + else + rc = HNS3_INNER_VLAN_TAG; - rc = skb_cow_head(skb, 0); - if (unlikely(rc < 0)) - return rc; - vhdr = (struct vlan_ethhdr *)skb->data; - vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7) - << HNS3_TX_VLAN_PRIO_SHIFT); + skb->protocol = vlan_get_protocol(skb); + return rc; } + rc = skb_cow_head(skb, 0); + if (unlikely(rc < 0)) + return rc; + + vhdr = (struct vlan_ethhdr *)skb->data; + vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority << VLAN_PRIO_SHIFT) + & VLAN_PRIO_MASK); + skb->protocol = vlan_get_protocol(skb); return 0; } +static int hns3_fill_skb_desc(struct hns3_enet_ring *ring, + struct sk_buff *skb, struct hns3_desc *desc) +{ + u32 ol_type_vlan_len_msec = 0; + u32 type_cs_vlan_tso = 0; + u32 paylen = skb->len; + u16 inner_vtag = 0; + u16 out_vtag = 0; + u16 mss = 0; + int ret; + + ret = hns3_handle_vtags(ring, skb); + if (unlikely(ret < 0)) { + u64_stats_update_begin(&ring->syncp); + ring->stats.tx_vlan_err++; + u64_stats_update_end(&ring->syncp); + return ret; + } else if (ret == HNS3_INNER_VLAN_TAG) { + inner_vtag = skb_vlan_tag_get(skb); + inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & + VLAN_PRIO_MASK; + hns3_set_field(type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1); + } else if (ret == HNS3_OUTER_VLAN_TAG) { + out_vtag = skb_vlan_tag_get(skb); + out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & + VLAN_PRIO_MASK; + hns3_set_field(ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B, + 1); + } + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + u8 ol4_proto, il4_proto; + + skb_reset_mac_len(skb); + + ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); + if (unlikely(ret)) { + u64_stats_update_begin(&ring->syncp); + ring->stats.tx_l4_proto_err++; + u64_stats_update_end(&ring->syncp); + return ret; + } + + ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto, + &type_cs_vlan_tso, + &ol_type_vlan_len_msec); + if (unlikely(ret)) { + u64_stats_update_begin(&ring->syncp); + ring->stats.tx_l2l3l4_err++; + u64_stats_update_end(&ring->syncp); + return ret; + } + + ret = hns3_set_tso(skb, &paylen, &mss, + &type_cs_vlan_tso); + if (unlikely(ret)) { + u64_stats_update_begin(&ring->syncp); + ring->stats.tx_tso_err++; + u64_stats_update_end(&ring->syncp); + return ret; + } + } + + /* Set txbd */ + desc->tx.ol_type_vlan_len_msec = + cpu_to_le32(ol_type_vlan_len_msec); + desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso); + desc->tx.paylen = cpu_to_le32(paylen); + desc->tx.mss = cpu_to_le16(mss); + desc->tx.vlan_tag = cpu_to_le16(inner_vtag); + desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag); + + return 0; +} + static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, unsigned int size, int frag_end, enum hns_desc_type type) @@ -1033,65 +1108,29 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; struct hns3_desc *desc = &ring->desc[ring->next_to_use]; struct device *dev = ring_to_dev(ring); - 
struct skb_frag_struct *frag; + skb_frag_t *frag; unsigned int frag_buf_num; int k, sizeoflast; dma_addr_t dma; if (type == DESC_TYPE_SKB) { struct sk_buff *skb = (struct sk_buff *)priv; - u32 ol_type_vlan_len_msec = 0; - u32 type_cs_vlan_tso = 0; - u32 paylen = skb->len; - u16 inner_vtag = 0; - u16 out_vtag = 0; - u16 mss = 0; int ret; - ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso, - &ol_type_vlan_len_msec, - &inner_vtag, &out_vtag); + ret = hns3_fill_skb_desc(ring, skb, desc); if (unlikely(ret)) return ret; - if (skb->ip_summed == CHECKSUM_PARTIAL) { - u8 ol4_proto, il4_proto; - - skb_reset_mac_len(skb); - - ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); - if (unlikely(ret)) - return ret; - - ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto, - &type_cs_vlan_tso, - &ol_type_vlan_len_msec); - if (unlikely(ret)) - return ret; - - ret = hns3_set_tso(skb, &paylen, &mss, - &type_cs_vlan_tso); - if (unlikely(ret)) - return ret; - } - - /* Set txbd */ - desc->tx.ol_type_vlan_len_msec = - cpu_to_le32(ol_type_vlan_len_msec); - desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso); - desc->tx.paylen = cpu_to_le32(paylen); - desc->tx.mss = cpu_to_le16(mss); - desc->tx.vlan_tag = cpu_to_le16(inner_vtag); - desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag); - dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); } else { - frag = (struct skb_frag_struct *)priv; + frag = (skb_frag_t *)priv; dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); } if (unlikely(dma_mapping_error(dev, dma))) { + u64_stats_update_begin(&ring->syncp); ring->stats.sw_err_cnt++; + u64_stats_update_end(&ring->syncp); return -ENOMEM; } @@ -1147,28 +1186,20 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, return 0; } -static int hns3_nic_bd_num(struct sk_buff *skb) +static unsigned int hns3_nic_bd_num(struct sk_buff *skb) { - int size = skb_headlen(skb); - int i, bd_num; + unsigned int bd_num; + int i; /* if the total len is within the max bd limit */ if (likely(skb->len <= HNS3_MAX_BD_SIZE)) return skb_shinfo(skb)->nr_frags + 1; - bd_num = hns3_tx_bd_count(size); + bd_num = hns3_tx_bd_count(skb_headlen(skb)); for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; - int frag_bd_num; - - size = skb_frag_size(frag); - frag_bd_num = hns3_tx_bd_count(size); - - if (unlikely(frag_bd_num > HNS3_MAX_BD_PER_FRAG)) - return -ENOMEM; - - bd_num += frag_bd_num; + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + bd_num += hns3_tx_bd_count(skb_frag_size(frag)); } return bd_num; @@ -1189,7 +1220,7 @@ static unsigned int hns3_gso_hdr_len(struct sk_buff *skb) */ static bool hns3_skb_need_linearized(struct sk_buff *skb) { - int bd_limit = HNS3_MAX_BD_PER_FRAG - 1; + int bd_limit = HNS3_MAX_BD_NUM_NORMAL - 1; unsigned int tot_len = 0; int i; @@ -1219,21 +1250,16 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring, struct sk_buff **out_skb) { struct sk_buff *skb = *out_skb; - int bd_num; + unsigned int bd_num; bd_num = hns3_nic_bd_num(skb); - if (bd_num < 0) - return bd_num; - - if (unlikely(bd_num > HNS3_MAX_BD_PER_FRAG)) { + if (unlikely(bd_num > HNS3_MAX_BD_NUM_NORMAL)) { struct sk_buff *new_skb; - if (skb_is_gso(skb) && !hns3_skb_need_linearized(skb)) + if (skb_is_gso(skb) && bd_num <= HNS3_MAX_BD_NUM_TSO && + !hns3_skb_need_linearized(skb)) goto out; - bd_num = hns3_tx_bd_count(skb->len); - if (unlikely(ring_space(ring) < bd_num)) - return -EBUSY; /* manual split the send packet */ new_skb = skb_copy(skb, GFP_ATOMIC); 
if (!new_skb) @@ -1241,6 +1267,11 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring, dev_kfree_skb_any(skb); *out_skb = new_skb; + bd_num = hns3_nic_bd_num(new_skb); + if ((skb_is_gso(new_skb) && bd_num > HNS3_MAX_BD_NUM_TSO) || + (!skb_is_gso(new_skb) && bd_num > HNS3_MAX_BD_NUM_NORMAL)) + return -ENOMEM; + u64_stats_update_begin(&ring->syncp); ring->stats.tx_copy++; u64_stats_update_end(&ring->syncp); @@ -1290,7 +1321,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) &tx_ring_data(priv, skb->queue_mapping); struct hns3_enet_ring *ring = ring_data->ring; struct netdev_queue *dev_queue; - struct skb_frag_struct *frag; + skb_frag_t *frag; int next_to_use_head; int buf_num; int seg_num; @@ -1314,9 +1345,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) u64_stats_update_end(&ring->syncp); } - if (net_ratelimit()) - netdev_err(netdev, "xmit error: %d!\n", buf_num); - + hns3_rl_err(netdev, "xmit error: %d!\n", buf_num); goto out_err_tx_ok; } @@ -1482,7 +1511,15 @@ static void hns3_nic_get_stats64(struct net_device *netdev, tx_bytes += ring->stats.tx_bytes; tx_pkts += ring->stats.tx_pkts; tx_drop += ring->stats.sw_err_cnt; + tx_drop += ring->stats.tx_vlan_err; + tx_drop += ring->stats.tx_l4_proto_err; + tx_drop += ring->stats.tx_l2l3l4_err; + tx_drop += ring->stats.tx_tso_err; tx_errors += ring->stats.sw_err_cnt; + tx_errors += ring->stats.tx_vlan_err; + tx_errors += ring->stats.tx_l4_proto_err; + tx_errors += ring->stats.tx_l2l3l4_err; + tx_errors += ring->stats.tx_tso_err; } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); /* fetch the rx stats */ @@ -1550,6 +1587,8 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data) h = hns3_get_handle(netdev); kinfo = &h->kinfo; + netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc); + return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ? 
kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP; } @@ -1593,6 +1632,10 @@ static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, struct hnae3_handle *h = hns3_get_handle(netdev); int ret = -EIO; + netif_dbg(h, drv, netdev, + "set vf vlan: vf=%d, vlan=%u, qos=%u, vlan_proto=%u\n", + vf, vlan, qos, vlan_proto); + if (h->ae_algo->ops->set_vf_vlan_filter) ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan, qos, vlan_proto); @@ -1611,6 +1654,9 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) if (!h->ae_algo->ops->set_mtu) return -EOPNOTSUPP; + netif_dbg(h, drv, netdev, + "change mtu from %u to %d\n", netdev->mtu, new_mtu); + ret = h->ae_algo->ops->set_mtu(h, new_mtu); if (ret) netdev_err(netdev, "failed to change MTU in hardware %d\n", @@ -1680,15 +1726,12 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) /* When mac received many pause frames continuous, it's unable to send * packets, which may cause tx timeout */ - if (h->ae_algo->ops->update_stats && - h->ae_algo->ops->get_mac_pause_stats) { - u64 tx_pause_cnt, rx_pause_cnt; + if (h->ae_algo->ops->get_mac_stats) { + struct hns3_mac_stats mac_stats; - h->ae_algo->ops->update_stats(h, &ndev->stats); - h->ae_algo->ops->get_mac_pause_stats(h, &tx_pause_cnt, - &rx_pause_cnt); + h->ae_algo->ops->get_mac_stats(h, &mac_stats); netdev_info(ndev, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n", - tx_pause_cnt, rx_pause_cnt); + mac_stats.tx_pause_cnt, mac_stats.rx_pause_cnt); } hw_head = readl_relaxed(tx_ring->tqp->io_base + @@ -1963,7 +2006,8 @@ static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev) ops = ae_dev->ops; /* request the reset */ - if (ops->reset_event) { + if (ops->reset_event && ops->get_reset_level && + ops->set_default_reset_request) { if (ae_dev->hw_err_reset_req) { reset_type = ops->get_reset_level(ae_dev, &ae_dev->hw_err_reset_req); @@ -2067,7 +2111,7 @@ static void hns3_set_default_feature(struct net_device *netdev) static int hns3_alloc_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) { - unsigned int order = hnae3_page_order(ring); + unsigned int order = hns3_page_order(ring); struct page *p; p = dev_alloc_pages(order); @@ -2078,7 +2122,7 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring, cb->page_offset = 0; cb->reuse_flag = 0; cb->buf = page_address(p); - cb->length = hnae3_page_size(ring); + cb->length = hns3_page_size(ring); cb->type = DESC_TYPE_PAGE; return 0; @@ -2357,8 +2401,9 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, ring->stats.sw_err_cnt++; u64_stats_update_end(&ring->syncp); - netdev_err(ring->tqp->handle->kinfo.netdev, - "hnae reserve buffer map failed.\n"); + hns3_rl_err(ring->tqp_vector->napi.dev, + "alloc rx buffer failed: %d\n", + ret); break; } hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); @@ -2381,7 +2426,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i, { struct hns3_desc *desc = &ring->desc[ring->next_to_clean]; int size = le16_to_cpu(desc->rx.size); - u32 truesize = hnae3_buf_size(ring); + u32 truesize = hns3_buf_size(ring); skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len, size - pull_len, truesize); @@ -2396,7 +2441,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i, /* Move offset up to the next cache line */ desc_cb->page_offset += truesize; - if (desc_cb->page_offset + truesize <= hnae3_page_size(ring)) { + if (desc_cb->page_offset + truesize <= hns3_page_size(ring)) { desc_cb->reuse_flag = 1; /* Bump ref count on page 
before it is given */ get_page(desc_cb->priv); @@ -2443,9 +2488,9 @@ static int hns3_gro_complete(struct sk_buff *skb, u32 l234info) th->check = ~tcp_v6_check(skb->len - depth, &iph->saddr, &iph->daddr, 0); } else { - netdev_err(skb->dev, - "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n", - be16_to_cpu(type), depth); + hns3_rl_err(skb->dev, + "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n", + be16_to_cpu(type), depth); return -EFAULT; } @@ -2587,7 +2632,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length, ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE); skb = ring->skb; if (unlikely(!skb)) { - netdev_err(netdev, "alloc rx skb fail\n"); + hns3_rl_err(netdev, "alloc rx skb fail\n"); u64_stats_update_begin(&ring->syncp); ring->stats.sw_err_cnt++; @@ -2662,8 +2707,8 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc, new_skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE); if (unlikely(!new_skb)) { - netdev_err(ring->tqp->handle->kinfo.netdev, - "alloc rx skb frag fail\n"); + hns3_rl_err(ring->tqp_vector->napi.dev, + "alloc rx fraglist skb fail\n"); return -ENXIO; } ring->frag_num = 0; @@ -2678,7 +2723,7 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc, } if (ring->tail_skb) { - head_skb->truesize += hnae3_buf_size(ring); + head_skb->truesize += hns3_buf_size(ring); head_skb->data_len += le16_to_cpu(desc->rx.size); head_skb->len += le16_to_cpu(desc->rx.size); skb = ring->tail_skb; @@ -2895,24 +2940,22 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget, void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *)) { #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16 - int recv_pkts, recv_bds, clean_count, err; int unused_count = hns3_desc_unused(ring); struct sk_buff *skb = ring->skb; - int num; + int recv_pkts = 0; + int recv_bds = 0; + int err, num; num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG); rmb(); /* Make sure num taken effect before the other data is touched */ - recv_pkts = 0, recv_bds = 0, clean_count = 0; num -= unused_count; unused_count -= ring->pending_buf; while (recv_pkts < budget && recv_bds < num) { /* Reuse or realloc buffers */ - if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) { - hns3_nic_alloc_rx_buffers(ring, - clean_count + unused_count); - clean_count = 0; + if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) { + hns3_nic_alloc_rx_buffers(ring, unused_count); unused_count = hns3_desc_unused(ring) - ring->pending_buf; } @@ -2926,7 +2969,7 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget, goto out; } else if (unlikely(err)) { /* Do jump the err */ recv_bds += ring->pending_buf; - clean_count += ring->pending_buf; + unused_count += ring->pending_buf; ring->skb = NULL; ring->pending_buf = 0; continue; @@ -2934,7 +2977,7 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget, rx_fn(ring, skb); recv_bds += ring->pending_buf; - clean_count += ring->pending_buf; + unused_count += ring->pending_buf; ring->skb = NULL; ring->pending_buf = 0; @@ -2943,8 +2986,8 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget, out: /* Make all data has been write before submit */ - if (clean_count + unused_count > 0) - hns3_nic_alloc_rx_buffers(ring, clean_count + unused_count); + if (unused_count > 0) + hns3_nic_alloc_rx_buffers(ring, unused_count); return recv_pkts; } @@ -3574,7 +3617,7 @@ out: return ret; } -static void hns3_fini_ring(struct hns3_enet_ring *ring) 
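/* Editor's aside (illustration only, not part of the diff): the
 * hns3_clean_rx_ring() rework above drops the separate clean_count and
 * replenishes RX buffers whenever at least RCB_NOF_ALLOC_RX_BUFF_ONCE
 * descriptors sit unused. A toy stand-alone model of that batching, with
 * made-up numbers:
 */
#include <stdio.h>

#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16

int main(void)
{
	unsigned int unused_count = 3;	/* descriptors without a fresh buffer */
	unsigned int budget = 40;	/* packets we may process this poll */
	unsigned int recv_pkts;

	for (recv_pkts = 0; recv_pkts < budget; recv_pkts++) {
		/* refill in batches instead of once per packet */
		if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
			printf("refill %u buffers\n", unused_count);
			unused_count = 0;
		}
		unused_count++;	/* consuming a packet frees one descriptor */
	}

	if (unused_count)
		printf("final refill of %u buffers\n", unused_count);
	return 0;
}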
+void hns3_fini_ring(struct hns3_enet_ring *ring) { hns3_free_desc(ring); devm_kfree(ring_to_dev(ring), ring->desc_cb); @@ -4165,8 +4208,8 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h) static void hns3_store_coal(struct hns3_nic_priv *priv) { /* ethtool only support setting and querying one coal - * configuation for now, so save the vector 0' coal - * configuation here in order to restore it. + * configuration for now, so save the vector 0' coal + * configuration here in order to restore it. */ memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal, sizeof(struct hns3_enet_coalesce)); @@ -4368,6 +4411,30 @@ static int hns3_reset_notify(struct hnae3_handle *handle, return ret; } +static int hns3_change_channels(struct hnae3_handle *handle, u32 new_tqp_num, + bool rxfh_configured) +{ + int ret; + + ret = handle->ae_algo->ops->set_channels(handle, new_tqp_num, + rxfh_configured); + if (ret) { + dev_err(&handle->pdev->dev, + "Change tqp num(%u) fail.\n", new_tqp_num); + return ret; + } + + ret = hns3_reset_notify(handle, HNAE3_INIT_CLIENT); + if (ret) + return ret; + + ret = hns3_reset_notify(handle, HNAE3_UP_CLIENT); + if (ret) + hns3_reset_notify(handle, HNAE3_UNINIT_CLIENT); + + return ret; +} + int hns3_set_channels(struct net_device *netdev, struct ethtool_channels *ch) { @@ -4378,6 +4445,9 @@ int hns3_set_channels(struct net_device *netdev, u16 org_tqp_num; int ret; + if (hns3_nic_resetting(netdev)) + return -EBUSY; + if (ch->rx_count || ch->tx_count) return -EINVAL; @@ -4392,6 +4462,10 @@ int hns3_set_channels(struct net_device *netdev, if (kinfo->rss_size == new_tqp_num) return 0; + netif_dbg(h, drv, netdev, + "set channels: tqp_num=%u, rxfh=%d\n", + new_tqp_num, rxfh_configured); + ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT); if (ret) return ret; @@ -4401,24 +4475,46 @@ int hns3_set_channels(struct net_device *netdev, return ret; org_tqp_num = h->kinfo.num_tqps; - ret = h->ae_algo->ops->set_channels(h, new_tqp_num, rxfh_configured); + ret = hns3_change_channels(h, new_tqp_num, rxfh_configured); if (ret) { - ret = h->ae_algo->ops->set_channels(h, org_tqp_num, - rxfh_configured); - if (ret) { - /* If revert to old tqp failed, fatal error occurred */ - dev_err(&netdev->dev, - "Revert to old tqp num fail, ret=%d", ret); - return ret; + int ret1; + + netdev_warn(netdev, + "Change channels fail, revert to old value\n"); + ret1 = hns3_change_channels(h, org_tqp_num, rxfh_configured); + if (ret1) { + netdev_err(netdev, + "revert to old channel fail\n"); + return ret1; } - dev_info(&netdev->dev, - "Change tqp num fail, Revert to old tqp num"); - } - ret = hns3_reset_notify(h, HNAE3_INIT_CLIENT); - if (ret) + return ret; + } + + return 0; +} + +static const struct hns3_hw_error_info hns3_hw_err[] = { + { .type = HNAE3_PPU_POISON_ERROR, + .msg = "PPU poison" }, + { .type = HNAE3_CMDQ_ECC_ERROR, + .msg = "IMP CMDQ error" }, + { .type = HNAE3_IMP_RD_POISON_ERROR, + .msg = "IMP RD poison" }, +}; - return hns3_reset_notify(h, HNAE3_UP_CLIENT); +static void hns3_process_hw_error(struct hnae3_handle *handle, + enum hnae3_hw_error_type type) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(hns3_hw_err); i++) { + if (hns3_hw_err[i].type == type) { + dev_err(&handle->pdev->dev, "Detected %s!\n", + hns3_hw_err[i].msg); + break; + } + } } static const struct hnae3_client_ops client_ops = { @@ -4427,6 +4523,7 @@ static const struct hnae3_client_ops client_ops = { .link_status_change = hns3_link_status_change, .setup_tc = hns3_client_setup_tc, .reset_notify = hns3_reset_notify, + .process_hw_error = 
hns3_process_hw_error, }; /* hns3_init_module - Driver registration routine diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 848b866761df..2110fa3b4479 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -75,7 +75,7 @@ enum hns3_nic_state { #define HNS3_TX_TIMEOUT (5 * HZ) #define HNS3_RING_NAME_LEN 16 #define HNS3_BUFFER_SIZE_2048 2048 -#define HNS3_RING_MAX_PENDING 32768 +#define HNS3_RING_MAX_PENDING 32760 #define HNS3_RING_MIN_PENDING 24 #define HNS3_RING_BD_MULTIPLE 8 /* max frame size of mac */ @@ -195,7 +195,8 @@ enum hns3_nic_state { #define HNS3_VECTOR_INITED 1 #define HNS3_MAX_BD_SIZE 65535 -#define HNS3_MAX_BD_PER_FRAG 8 +#define HNS3_MAX_BD_NUM_NORMAL 8 +#define HNS3_MAX_BD_NUM_TSO 63 #define HNS3_MAX_BD_PER_PKT MAX_SKB_FRAGS #define HNS3_VECTOR_GL0_OFFSET 0x100 @@ -301,7 +302,7 @@ struct hns3_desc_cb { dma_addr_t dma; /* dma address of this desc */ void *buf; /* cpu addr for a desc */ - /* priv data for the desc, e.g. skb when use with ip stack*/ + /* priv data for the desc, e.g. skb when use with ip stack */ void *priv; u32 page_offset; u32 length; /* length of the buffer */ @@ -324,11 +325,11 @@ enum hns3_pkt_l3type { HNS3_L3_TYPE_MAC_PAUSE, HNS3_L3_TYPE_PFC_PAUSE,/* 0x9*/ - /* reserved for 0xA~0xB*/ + /* reserved for 0xA~0xB */ HNS3_L3_TYPE_CNM = 0xc, - /* reserved for 0xD~0xE*/ + /* reserved for 0xD~0xE */ HNS3_L3_TYPE_PARSE_FAIL = 0xf /* must be last */ }; @@ -353,7 +354,7 @@ enum hns3_pkt_ol3type { HNS3_OL3_TYPE_IPV4_OPT = 4, HNS3_OL3_TYPE_IPV6_EXT, - /* reserved for 0x6~0xE*/ + /* reserved for 0x6~0xE */ HNS3_OL3_TYPE_PARSE_FAIL = 0xf /* must be last */ }; @@ -377,6 +378,10 @@ struct ring_stats { u64 restart_queue; u64 tx_busy; u64 tx_copy; + u64 tx_vlan_err; + u64 tx_l4_proto_err; + u64 tx_l2l3l4_err; + u64 tx_tso_err; }; struct { u64 rx_pkts; @@ -547,6 +552,11 @@ union l4_hdr_info { unsigned char *hdr; }; +struct hns3_hw_error_info { + enum hnae3_hw_error_type type; + const char *msg; +}; + static inline int ring_space(struct hns3_enet_ring *ring) { /* This smp_load_acquire() pairs with smp_store_release() in @@ -608,9 +618,18 @@ static inline bool hns3_nic_resetting(struct net_device *netdev) #define tx_ring_data(priv, idx) ((priv)->ring_data[idx]) -#define hnae3_buf_size(_ring) ((_ring)->buf_size) -#define hnae3_page_order(_ring) (get_order(hnae3_buf_size(_ring))) -#define hnae3_page_size(_ring) (PAGE_SIZE << (u32)hnae3_page_order(_ring)) +#define hns3_buf_size(_ring) ((_ring)->buf_size) + +static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring->buf_size > (PAGE_SIZE / 2)) + return 1; +#endif + return 0; +} + +#define hns3_page_size(_ring) (PAGE_SIZE << hns3_page_order(_ring)) /* iterator for handling rings in ring group */ #define hns3_for_each_ring(pos, head) \ @@ -633,6 +652,7 @@ void hns3_clean_tx_ring(struct hns3_enet_ring *ring); int hns3_init_all_ring(struct hns3_nic_priv *priv); int hns3_uninit_all_ring(struct hns3_nic_priv *priv); int hns3_nic_reset_all_ring(struct hnae3_handle *h); +void hns3_fini_ring(struct hns3_enet_ring *ring); netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev); bool hns3_is_phys_func(struct pci_dev *pdev); int hns3_clean_rx_ring( diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 5bff98a9b0dc..680c3508876d 100644 --- 
a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -30,6 +30,10 @@ static const struct hns3_stats hns3_txq_stats[] = { HNS3_TQP_STAT("wake", restart_queue), HNS3_TQP_STAT("busy", tx_busy), HNS3_TQP_STAT("copy", tx_copy), + HNS3_TQP_STAT("vlan_err", tx_vlan_err), + HNS3_TQP_STAT("l4_proto_err", tx_l4_proto_err), + HNS3_TQP_STAT("l2l3l4_err", tx_l2l3l4_err), + HNS3_TQP_STAT("tso_err", tx_tso_err), }; #define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats) @@ -55,7 +59,7 @@ static const struct hns3_stats hns3_rxq_stats[] = { #define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT) -#define HNS3_SELF_TEST_TYPE_NUM 3 +#define HNS3_SELF_TEST_TYPE_NUM 4 #define HNS3_NIC_LB_TEST_PKT_NUM 1 #define HNS3_NIC_LB_TEST_RING_ID 0 #define HNS3_NIC_LB_TEST_PACKET_SIZE 128 @@ -85,6 +89,7 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en) case HNAE3_LOOP_SERIAL_SERDES: case HNAE3_LOOP_PARALLEL_SERDES: case HNAE3_LOOP_APP: + case HNAE3_LOOP_PHY: ret = h->ae_algo->ops->set_loopback(h, loop, en); break; default: @@ -92,7 +97,7 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en) break; } - if (ret) + if (ret || h->pdev->revision >= 0x21) return ret; if (en) { @@ -139,7 +144,10 @@ static int hns3_lp_down(struct net_device *ndev, enum hnae3_loop loop_mode) static void hns3_lp_setup_skb(struct sk_buff *skb) { +#define HNS3_NIC_LB_DST_MAC_ADDR 0x1f + struct net_device *ndev = skb->dev; + struct hnae3_handle *handle; unsigned char *packet; struct ethhdr *ethh; unsigned int i; @@ -155,7 +163,9 @@ static void hns3_lp_setup_skb(struct sk_buff *skb) * before the packet reaches mac or serdes, which will defect * the purpose of mac or serdes selftest. 
*/ - ethh->h_dest[5] += 0x1f; + handle = hns3_get_handle(ndev); + if (handle->pdev->revision == 0x20) + ethh->h_dest[5] += HNS3_NIC_LB_DST_MAC_ADDR; eth_zero_addr(ethh->h_source); ethh->h_proto = htons(ETH_P_ARP); skb_reset_mac_header(skb); @@ -311,6 +321,8 @@ static void hns3_self_test(struct net_device *ndev, if (eth_test->flags != ETH_TEST_FL_OFFLINE) return; + netif_dbg(h, drv, ndev, "self test start"); + st_param[HNAE3_LOOP_APP][0] = HNAE3_LOOP_APP; st_param[HNAE3_LOOP_APP][1] = h->flags & HNAE3_SUPPORT_APP_LOOPBACK; @@ -324,6 +336,10 @@ static void hns3_self_test(struct net_device *ndev, st_param[HNAE3_LOOP_PARALLEL_SERDES][1] = h->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK; + st_param[HNAE3_LOOP_PHY][0] = HNAE3_LOOP_PHY; + st_param[HNAE3_LOOP_PHY][1] = + h->flags & HNAE3_SUPPORT_PHY_LOOPBACK; + if (if_running) ndev->netdev_ops->ndo_stop(ndev); @@ -374,6 +390,8 @@ static void hns3_self_test(struct net_device *ndev, if (if_running) ndev->netdev_ops->ndo_open(ndev); + + netif_dbg(h, drv, ndev, "self test end\n"); } static int hns3_get_sset_count(struct net_device *netdev, int stringset) @@ -527,6 +545,7 @@ static void hns3_get_drvinfo(struct net_device *netdev, { struct hns3_nic_priv *priv = netdev_priv(netdev); struct hnae3_handle *h = priv->ae_handle; + u32 fw_version; if (!h->ae_algo->ops->get_fw_version) { netdev_err(netdev, "could not get fw version!\n"); @@ -545,8 +564,18 @@ static void hns3_get_drvinfo(struct net_device *netdev, sizeof(drvinfo->bus_info)); drvinfo->bus_info[ETHTOOL_BUSINFO_LEN - 1] = '\0'; - snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x", - priv->ae_handle->ae_algo->ops->get_fw_version(h)); + fw_version = priv->ae_handle->ae_algo->ops->get_fw_version(h); + + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%lu.%lu.%lu.%lu", + hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE3_MASK, + HNAE3_FW_VERSION_BYTE3_SHIFT), + hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE2_MASK, + HNAE3_FW_VERSION_BYTE2_SHIFT), + hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE1_MASK, + HNAE3_FW_VERSION_BYTE1_SHIFT), + hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE0_MASK, + HNAE3_FW_VERSION_BYTE0_SHIFT)); } static u32 hns3_get_link(struct net_device *netdev) @@ -593,6 +622,10 @@ static int hns3_set_pauseparam(struct net_device *netdev, { struct hnae3_handle *h = hns3_get_handle(netdev); + netif_dbg(h, drv, netdev, + "set pauseparam: autoneg=%u, rx:%u, tx:%u\n", + param->autoneg, param->rx_pause, param->tx_pause); + if (h->ae_algo->ops->set_pauseparam) return h->ae_algo->ops->set_pauseparam(h, param->autoneg, param->rx_pause, @@ -612,7 +645,7 @@ static void hns3_get_ksettings(struct hnae3_handle *h, &cmd->base.speed, &cmd->base.duplex); - /* 2.get link mode*/ + /* 2.get link mode */ if (ops->get_link_mode) ops->get_link_mode(h, cmd->link_modes.supported, @@ -681,7 +714,7 @@ static int hns3_get_link_ksettings(struct net_device *netdev, return 0; } -static int hns3_check_ksettings_param(struct net_device *netdev, +static int hns3_check_ksettings_param(const struct net_device *netdev, const struct ethtool_link_ksettings *cmd) { struct hnae3_handle *handle = hns3_get_handle(netdev); @@ -693,6 +726,12 @@ static int hns3_check_ksettings_param(struct net_device *netdev, u8 duplex; int ret; + /* hw doesn't support use specified speed and duplex to negotiate, + * unnecessary to check them when autoneg on. 
+ */ + if (cmd->base.autoneg) + return 0; + if (ops->get_ksettings_an_result) { ops->get_ksettings_an_result(handle, &autoneg, &speed, &duplex); if (cmd->base.autoneg == autoneg && cmd->base.speed == speed && @@ -726,12 +765,17 @@ static int hns3_set_link_ksettings(struct net_device *netdev, { struct hnae3_handle *handle = hns3_get_handle(netdev); const struct hnae3_ae_ops *ops = handle->ae_algo->ops; - int ret = 0; + int ret; /* Chip don't support this mode. */ if (cmd->base.speed == SPEED_1000 && cmd->base.duplex == DUPLEX_HALF) return -EINVAL; + netif_dbg(handle, drv, netdev, + "set link(%s): autoneg=%u, speed=%u, duplex=%u\n", + netdev->phydev ? "phy" : "mac", + cmd->base.autoneg, cmd->base.speed, cmd->base.duplex); + /* Only support ksettings_set for netdev with phy attached for now */ if (netdev->phydev) return phy_ethtool_ksettings_set(netdev->phydev, cmd); @@ -749,6 +793,15 @@ static int hns3_set_link_ksettings(struct net_device *netdev, return ret; } + /* hw doesn't support use specified speed and duplex to negotiate, + * ignore them when autoneg on. + */ + if (cmd->base.autoneg) { + netdev_info(netdev, + "autoneg is on, ignore the speed and duplex\n"); + return 0; + } + if (ops->cfg_mac_speed_dup_h) ret = ops->cfg_mac_speed_dup_h(handle, cmd->base.speed, cmd->base.duplex); @@ -843,8 +896,8 @@ static int hns3_get_rxnfc(struct net_device *netdev, } } -static int hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv, - u32 tx_desc_num, u32 rx_desc_num) +static void hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv, + u32 tx_desc_num, u32 rx_desc_num) { struct hnae3_handle *h = priv->ae_handle; int i; @@ -857,21 +910,31 @@ static int hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv, priv->ring_data[i + h->kinfo.num_tqps].ring->desc_num = rx_desc_num; } - - return hns3_init_all_ring(priv); } -static int hns3_set_ringparam(struct net_device *ndev, - struct ethtool_ringparam *param) +static struct hns3_enet_ring *hns3_backup_ringparam(struct hns3_nic_priv *priv) { - struct hns3_nic_priv *priv = netdev_priv(ndev); - struct hnae3_handle *h = priv->ae_handle; - bool if_running = netif_running(ndev); - u32 old_tx_desc_num, new_tx_desc_num; - u32 old_rx_desc_num, new_rx_desc_num; - int queue_num = h->kinfo.num_tqps; - int ret; + struct hnae3_handle *handle = priv->ae_handle; + struct hns3_enet_ring *tmp_rings; + int i; + + tmp_rings = kcalloc(handle->kinfo.num_tqps * 2, + sizeof(struct hns3_enet_ring), GFP_KERNEL); + if (!tmp_rings) + return NULL; + + for (i = 0; i < handle->kinfo.num_tqps * 2; i++) { + memcpy(&tmp_rings[i], priv->ring_data[i].ring, + sizeof(struct hns3_enet_ring)); + tmp_rings[i].skb = NULL; + } + + return tmp_rings; +} +static int hns3_check_ringparam(struct net_device *ndev, + struct ethtool_ringparam *param) +{ if (hns3_nic_resetting(ndev)) return -EBUSY; @@ -887,6 +950,25 @@ static int hns3_set_ringparam(struct net_device *ndev, return -EINVAL; } + return 0; +} + +static int hns3_set_ringparam(struct net_device *ndev, + struct ethtool_ringparam *param) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = priv->ae_handle; + struct hns3_enet_ring *tmp_rings; + bool if_running = netif_running(ndev); + u32 old_tx_desc_num, new_tx_desc_num; + u32 old_rx_desc_num, new_rx_desc_num; + u16 queue_num = h->kinfo.num_tqps; + int ret, i; + + ret = hns3_check_ringparam(ndev, param); + if (ret) + return ret; + /* Hardware requires that its descriptors must be multiple of eight */ new_tx_desc_num = ALIGN(param->tx_pending, HNS3_RING_BD_MULTIPLE); 
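/* Editor's aside (illustration only, not part of the diff): ALIGN() above
 * rounds the requested ring depth up to the next multiple of
 * HNS3_RING_BD_MULTIPLE (8), since the hardware only accepts descriptor
 * counts that are multiples of eight. A small sketch of that rounding,
 * assuming the usual power-of-two ALIGN() definition; the sample depths
 * are made up:
 */
#include <stdio.h>

#define HNS3_RING_BD_MULTIPLE 8U
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int pending[] = { 24, 100, 513 };
	unsigned int i;

	for (i = 0; i < sizeof(pending) / sizeof(pending[0]); i++)
		printf("%u -> %u\n", pending[i],
		       ALIGN_UP(pending[i], HNS3_RING_BD_MULTIPLE));
	/* prints: 24 -> 24, 100 -> 104, 513 -> 520 */
	return 0;
}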
new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE); @@ -896,6 +978,13 @@ static int hns3_set_ringparam(struct net_device *ndev, old_rx_desc_num == new_rx_desc_num) return 0; + tmp_rings = hns3_backup_ringparam(priv); + if (!tmp_rings) { + netdev_err(ndev, + "backup ring param failed by allocating memory fail\n"); + return -ENOMEM; + } + netdev_info(ndev, "Changing Tx/Rx ring depth from %d/%d to %d/%d\n", old_tx_desc_num, old_rx_desc_num, @@ -904,22 +993,24 @@ static int hns3_set_ringparam(struct net_device *ndev, if (if_running) ndev->netdev_ops->ndo_stop(ndev); - ret = hns3_uninit_all_ring(priv); - if (ret) - return ret; - - ret = hns3_change_all_ring_bd_num(priv, new_tx_desc_num, - new_rx_desc_num); + hns3_change_all_ring_bd_num(priv, new_tx_desc_num, new_rx_desc_num); + ret = hns3_init_all_ring(priv); if (ret) { - ret = hns3_change_all_ring_bd_num(priv, old_tx_desc_num, - old_rx_desc_num); - if (ret) { - netdev_err(ndev, - "Revert to old bd num fail, ret=%d.\n", ret); - return ret; - } + netdev_err(ndev, "Change bd num fail, revert to old value(%d)\n", + ret); + + hns3_change_all_ring_bd_num(priv, old_tx_desc_num, + old_rx_desc_num); + for (i = 0; i < h->kinfo.num_tqps * 2; i++) + memcpy(priv->ring_data[i].ring, &tmp_rings[i], + sizeof(struct hns3_enet_ring)); + } else { + for (i = 0; i < h->kinfo.num_tqps * 2; i++) + hns3_fini_ring(&tmp_rings[i]); } + kfree(tmp_rings); + if (if_running) ret = ndev->netdev_ops->ndo_open(ndev); @@ -973,6 +1064,9 @@ static int hns3_nway_reset(struct net_device *netdev) return -EINVAL; } + netif_dbg(handle, drv, netdev, + "nway reset (using %s)\n", phy ? "phy" : "mac"); + if (phy) return genphy_restart_aneg(phy); @@ -1297,6 +1391,9 @@ static int hns3_set_fecparam(struct net_device *netdev, if (!ops->set_fec) return -EOPNOTSUPP; fec_mode = eth_to_loc_fec(fec->fec); + + netif_dbg(handle, drv, netdev, "set fecparam: mode=%u\n", fec_mode); + return ops->set_fec(handle, fec_mode); } @@ -1315,6 +1412,7 @@ static const struct ethtool_ops hns3vf_ethtool_ops = { .set_rxfh = hns3_set_rss, .get_link_ksettings = hns3_get_link_ksettings, .get_channels = hns3_get_channels, + .set_channels = hns3_set_channels, .get_coalesce = hns3_get_coalesce, .set_coalesce = hns3_set_coalesce, .get_regs_len = hns3_get_regs_len, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index 22f6acd45d9a..ecf58cfd253d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -103,14 +103,17 @@ static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring) dma_addr_t dma = ring->desc_dma_addr; struct hclge_dev *hdev = ring->dev; struct hclge_hw *hw = &hdev->hw; + u32 reg_val; if (ring->ring_type == HCLGE_TYPE_CSQ) { hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG, lower_32_bits(dma)); hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG, upper_32_bits(dma)); - hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, - ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S); + reg_val = hclge_read_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG); + reg_val &= HCLGE_NIC_SW_RST_RDY; + reg_val |= ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S; + hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val); hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0); hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0); } else { @@ -383,6 +386,23 @@ err_csq: return ret; } +static int hclge_firmware_compat_config(struct hclge_dev *hdev) +{ + struct hclge_firmware_compat_cmd *req; + struct hclge_desc desc; + u32 
compat = 0; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_M7_COMPAT_CFG, false); + + req = (struct hclge_firmware_compat_cmd *)desc.data; + + hnae3_set_bit(compat, HCLGE_LINK_EVENT_REPORT_EN_B, 1); + hnae3_set_bit(compat, HCLGE_NCSI_ERROR_REPORT_EN_B, 1); + req->compat = cpu_to_le32(compat); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + int hclge_cmd_init(struct hclge_dev *hdev) { u32 version; @@ -419,7 +439,24 @@ int hclge_cmd_init(struct hclge_dev *hdev) } hdev->fw_version = version; - dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version); + dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n", + hnae3_get_field(version, HNAE3_FW_VERSION_BYTE3_MASK, + HNAE3_FW_VERSION_BYTE3_SHIFT), + hnae3_get_field(version, HNAE3_FW_VERSION_BYTE2_MASK, + HNAE3_FW_VERSION_BYTE2_SHIFT), + hnae3_get_field(version, HNAE3_FW_VERSION_BYTE1_MASK, + HNAE3_FW_VERSION_BYTE1_SHIFT), + hnae3_get_field(version, HNAE3_FW_VERSION_BYTE0_MASK, + HNAE3_FW_VERSION_BYTE0_SHIFT)); + + /* ask the firmware to enable some features, driver can work without + * it. + */ + ret = hclge_firmware_compat_config(hdev); + if (ret) + dev_warn(&hdev->pdev->dev, + "Firmware compatible features not enabled(%d).\n", + ret); return 0; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 96840d8f3e24..4821fe08b5e4 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -86,6 +86,8 @@ enum hclge_opcode_type { HCLGE_OPC_QUERY_PF_RSRC = 0x0023, HCLGE_OPC_QUERY_VF_RSRC = 0x0024, HCLGE_OPC_GET_CFG_PARAM = 0x0025, + HCLGE_OPC_PF_RST_DONE = 0x0026, + HCLGE_OPC_QUERY_VF_RST_RDY = 0x0027, HCLGE_OPC_STATS_64_BIT = 0x0030, HCLGE_OPC_STATS_32_BIT = 0x0031, @@ -221,6 +223,9 @@ enum hclge_opcode_type { HCLGE_OPC_MAC_ETHTYPE_ADD = 0x1010, HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011, + /* MAC VLAN commands */ + HCLGE_OPC_MAC_VLAN_SWITCH_PARAM = 0x1033, + /* VLAN commands */ HCLGE_OPC_VLAN_FILTER_CTRL = 0x1100, HCLGE_OPC_VLAN_FILTER_PF_CFG = 0x1101, @@ -257,6 +262,7 @@ enum hclge_opcode_type { /* M7 stats command */ HCLGE_OPC_M7_STATS_BD = 0x7012, HCLGE_OPC_M7_STATS_INFO = 0x7013, + HCLGE_OPC_M7_COMPAT_CFG = 0x701A, /* SFP command */ HCLGE_OPC_GET_SFP_INFO = 0x7104, @@ -586,6 +592,12 @@ struct hclge_config_mac_mode_cmd { u8 rsv[20]; }; +struct hclge_pf_rst_sync_cmd { +#define HCLGE_PF_RST_ALL_VF_RDY_B 0 + u8 all_vf_ready; + u8 rsv[23]; +}; + #define HCLGE_CFG_SPEED_S 0 #define HCLGE_CFG_SPEED_M GENMASK(5, 0) @@ -762,6 +774,31 @@ struct hclge_vlan_filter_vf_cfg_cmd { u8 vf_bitmap[16]; }; +#define HCLGE_SWITCH_ANTI_SPOOF_B 0U +#define HCLGE_SWITCH_ALW_LPBK_B 1U +#define HCLGE_SWITCH_ALW_LCL_LPBK_B 2U +#define HCLGE_SWITCH_ALW_DST_OVRD_B 3U +#define HCLGE_SWITCH_NO_MASK 0x0 +#define HCLGE_SWITCH_ANTI_SPOOF_MASK 0xFE +#define HCLGE_SWITCH_ALW_LPBK_MASK 0xFD +#define HCLGE_SWITCH_ALW_LCL_LPBK_MASK 0xFB +#define HCLGE_SWITCH_LW_DST_OVRD_MASK 0xF7 + +struct hclge_mac_vlan_switch_cmd { + u8 roce_sel; + u8 rsv1[3]; + __le32 func_id; + u8 switch_param; + u8 rsv2[3]; + u8 param_mask; + u8 rsv3[11]; +}; + +enum hclge_mac_vlan_cfg_sel { + HCLGE_MAC_VLAN_NIC_SEL = 0, + HCLGE_MAC_VLAN_ROCE_SEL, +}; + #define HCLGE_ACCEPT_TAG1_B 0 #define HCLGE_ACCEPT_UNTAG1_B 1 #define HCLGE_PORT_INS_TAG1_EN_B 2 @@ -827,7 +864,7 @@ struct hclge_mac_ethertype_idx_rd_cmd { u8 flags; u8 resp_code; __le16 vlan_tag; - u8 mac_add[6]; + u8 mac_addr[6]; __le16 index; __le16 ethter_type; __le16 egress_port; @@ 
-877,6 +914,13 @@ struct hclge_reset_cmd { u8 rsv[22]; }; +#define HCLGE_PF_RESET_DONE_BIT BIT(0) + +struct hclge_pf_rst_done_cmd { + u8 pf_rst_done; + u8 rsv[23]; +}; + #define HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B BIT(0) #define HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B BIT(2) #define HCLGE_CMD_SERDES_DONE_B BIT(0) @@ -906,8 +950,11 @@ struct hclge_serdes_lb_cmd { #define HCLGE_NIC_CRQ_DEPTH_REG 0x27020 #define HCLGE_NIC_CRQ_TAIL_REG 0x27024 #define HCLGE_NIC_CRQ_HEAD_REG 0x27028 -#define HCLGE_NIC_CMQ_EN_B 16 -#define HCLGE_NIC_CMQ_ENABLE BIT(HCLGE_NIC_CMQ_EN_B) + +/* this bit indicates that the driver is ready for hardware reset */ +#define HCLGE_NIC_SW_RST_RDY_B 16 +#define HCLGE_NIC_SW_RST_RDY BIT(HCLGE_NIC_SW_RST_RDY_B) + #define HCLGE_NIC_CMQ_DESC_NUM 1024 #define HCLGE_NIC_CMQ_DESC_NUM_S 3 @@ -1009,6 +1056,13 @@ struct hclge_query_ppu_pf_other_int_dfx_cmd { u8 rsv[4]; }; +#define HCLGE_LINK_EVENT_REPORT_EN_B 0 +#define HCLGE_NCSI_ERROR_REPORT_EN_B 1 +struct hclge_firmware_compat_cmd { + __le32 compat; + u8 rsv[20]; +}; + int hclge_cmd_init(struct hclge_dev *hdev); static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value) { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c index bac4ce13f6ae..c063301d6060 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c @@ -198,9 +198,32 @@ static int hclge_client_setup_tc(struct hclge_dev *hdev) return 0; } +static int hclge_notify_down_uinit(struct hclge_dev *hdev) +{ + int ret; + + ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); + if (ret) + return ret; + + return hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); +} + +static int hclge_notify_init_up(struct hclge_dev *hdev) +{ + int ret; + + ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT); + if (ret) + return ret; + + return hclge_notify_client(hdev, HNAE3_UP_CLIENT); +} + static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets) { struct hclge_vport *vport = hclge_get_vport(h); + struct net_device *netdev = h->kinfo.netdev; struct hclge_dev *hdev = vport->back; bool map_changed = false; u8 num_tc = 0; @@ -215,11 +238,9 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets) return ret; if (map_changed) { - ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); - if (ret) - return ret; + netif_dbg(h, drv, netdev, "set ets\n"); - ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); + ret = hclge_notify_down_uinit(hdev); if (ret) return ret; } @@ -239,11 +260,7 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets) if (ret) goto err_out; - ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT); - if (ret) - return ret; - - ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT); + ret = hclge_notify_init_up(hdev); if (ret) return ret; } @@ -254,10 +271,8 @@ err_out: if (!map_changed) return ret; - if (hclge_notify_client(hdev, HNAE3_INIT_CLIENT)) - return ret; + hclge_notify_init_up(hdev); - hclge_notify_client(hdev, HNAE3_UP_CLIENT); return ret; } @@ -300,6 +315,7 @@ static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc) static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc) { struct hclge_vport *vport = hclge_get_vport(h); + struct net_device *netdev = h->kinfo.netdev; struct hclge_dev *hdev = vport->back; u8 i, j, pfc_map, *prio_tc; @@ -325,6 +341,10 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc) 
hdev->tm_info.hw_pfc_map = pfc_map; hdev->tm_info.pfc_en = pfc->pfc_en; + netif_dbg(h, drv, netdev, + "set pfc: pfc_en=%x, pfc_map=%x, num_tc=%u\n", + pfc->pfc_en, pfc_map, hdev->tm_info.num_tc); + hclge_tm_pfc_info_update(hdev); return hclge_pause_setup_hw(hdev, false); @@ -345,8 +365,11 @@ static u8 hclge_getdcbx(struct hnae3_handle *h) static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode) { struct hclge_vport *vport = hclge_get_vport(h); + struct net_device *netdev = h->kinfo.netdev; struct hclge_dev *hdev = vport->back; + netif_dbg(h, drv, netdev, "set dcbx: mode=%u\n", mode); + /* No support for LLD_MANAGED modes or CEE */ if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || (mode & DCB_CAP_DCBX_VER_CEE) || @@ -372,11 +395,7 @@ static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc) if (ret) return -EINVAL; - ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); - if (ret) - return ret; - - ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); + ret = hclge_notify_down_uinit(hdev); if (ret) return ret; @@ -398,17 +417,11 @@ static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc) else hdev->flag &= ~HCLGE_FLAG_MQPRIO_ENABLE; - ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT); - if (ret) - return ret; - - return hclge_notify_client(hdev, HNAE3_UP_CLIENT); + return hclge_notify_init_up(hdev); err_out: - if (hclge_notify_client(hdev, HNAE3_INIT_CLIENT)) - return ret; + hclge_notify_init_up(hdev); - hclge_notify_client(hdev, HNAE3_UP_CLIENT); return ret; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c index ab625c757a95..d0128d792717 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@ -4,32 +4,92 @@ #include <linux/device.h> #include "hclge_debugfs.h" -#include "hclge_cmd.h" #include "hclge_main.h" #include "hclge_tm.h" #include "hnae3.h" +static struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = { + { .reg_type = "bios common", + .dfx_msg = &hclge_dbg_bios_common_reg[0], + .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_bios_common_reg), + .offset = HCLGE_DBG_DFX_BIOS_OFFSET, + .cmd = HCLGE_OPC_DFX_BIOS_COMMON_REG } }, + { .reg_type = "ssu", + .dfx_msg = &hclge_dbg_ssu_reg_0[0], + .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_0), + .offset = HCLGE_DBG_DFX_SSU_0_OFFSET, + .cmd = HCLGE_OPC_DFX_SSU_REG_0 } }, + { .reg_type = "ssu", + .dfx_msg = &hclge_dbg_ssu_reg_1[0], + .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_1), + .offset = HCLGE_DBG_DFX_SSU_1_OFFSET, + .cmd = HCLGE_OPC_DFX_SSU_REG_1 } }, + { .reg_type = "ssu", + .dfx_msg = &hclge_dbg_ssu_reg_2[0], + .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_2), + .offset = HCLGE_DBG_DFX_SSU_2_OFFSET, + .cmd = HCLGE_OPC_DFX_SSU_REG_2 } }, + { .reg_type = "igu egu", + .dfx_msg = &hclge_dbg_igu_egu_reg[0], + .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_igu_egu_reg), + .offset = HCLGE_DBG_DFX_IGU_OFFSET, + .cmd = HCLGE_OPC_DFX_IGU_EGU_REG } }, + { .reg_type = "rpu", + .dfx_msg = &hclge_dbg_rpu_reg_0[0], + .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_0), + .offset = HCLGE_DBG_DFX_RPU_0_OFFSET, + .cmd = HCLGE_OPC_DFX_RPU_REG_0 } }, + { .reg_type = "rpu", + .dfx_msg = &hclge_dbg_rpu_reg_1[0], + .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_1), + .offset = HCLGE_DBG_DFX_RPU_1_OFFSET, + .cmd = HCLGE_OPC_DFX_RPU_REG_1 } }, + { .reg_type = "ncsi", + .dfx_msg = &hclge_dbg_ncsi_reg[0], + .reg_msg = { .msg_num = 
ARRAY_SIZE(hclge_dbg_ncsi_reg), + .offset = HCLGE_DBG_DFX_NCSI_OFFSET, + .cmd = HCLGE_OPC_DFX_NCSI_REG } }, + { .reg_type = "rtc", + .dfx_msg = &hclge_dbg_rtc_reg[0], + .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rtc_reg), + .offset = HCLGE_DBG_DFX_RTC_OFFSET, + .cmd = HCLGE_OPC_DFX_RTC_REG } }, + { .reg_type = "ppp", + .dfx_msg = &hclge_dbg_ppp_reg[0], + .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ppp_reg), + .offset = HCLGE_DBG_DFX_PPP_OFFSET, + .cmd = HCLGE_OPC_DFX_PPP_REG } }, + { .reg_type = "rcb", + .dfx_msg = &hclge_dbg_rcb_reg[0], + .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rcb_reg), + .offset = HCLGE_DBG_DFX_RCB_OFFSET, + .cmd = HCLGE_OPC_DFX_RCB_REG } }, + { .reg_type = "tqp", + .dfx_msg = &hclge_dbg_tqp_reg[0], + .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_tqp_reg), + .offset = HCLGE_DBG_DFX_TQP_OFFSET, + .cmd = HCLGE_OPC_DFX_TQP_REG } }, +}; + static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset) { - struct hclge_desc desc[4]; - int ret; +#define HCLGE_GET_DFX_REG_TYPE_CNT 4 - hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true); - desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); - hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true); - desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); - hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true); - desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); - hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true); + struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT]; + int entries_per_desc; + int index; + int ret; - ret = hclge_cmd_send(&hdev->hw, desc, 4); - if (ret != HCLGE_CMD_EXEC_SUCCESS) { + ret = hclge_query_bd_num_cmd_send(hdev, desc); + if (ret) { dev_err(&hdev->pdev->dev, - "get dfx bdnum fail, status is %d.\n", ret); + "get dfx bdnum fail, ret = %d\n", ret); return ret; } - return (int)desc[offset / 6].data[offset % 6]; + entries_per_desc = ARRAY_SIZE(desc[0].data); + index = offset % entries_per_desc; + return (int)desc[offset / entries_per_desc].data[index]; } static int hclge_dbg_cmd_send(struct hclge_dev *hdev, @@ -50,35 +110,40 @@ static int hclge_dbg_cmd_send(struct hclge_dev *hdev, } ret = hclge_cmd_send(&hdev->hw, desc_src, bd_num); - if (ret) { + if (ret) dev_err(&hdev->pdev->dev, - "read reg cmd send fail, status is %d.\n", ret); - return ret; - } - + "cmd(0x%x) send fail, ret = %d\n", cmd, ret); return ret; } static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev, - struct hclge_dbg_dfx_message *dfx_message, - const char *cmd_buf, int msg_num, - int offset, enum hclge_opcode_type cmd) + struct hclge_dbg_reg_type_info *reg_info, + const char *cmd_buf) { -#define BD_DATA_NUM 6 +#define IDX_OFFSET 1 + const char *s = &cmd_buf[strlen(reg_info->reg_type) + IDX_OFFSET]; + struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg; + struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg; struct hclge_desc *desc_src; struct hclge_desc *desc; + int entries_per_desc; int bd_num, buf_len; + int index = 0; + int min_num; int ret, i; - int index; - int max; - ret = kstrtouint(cmd_buf, 10, &index); - index = (ret != 0) ? 0 : index; + if (*s) { + ret = kstrtouint(s, 0, &index); + index = (ret != 0) ?
0 : index; + } - bd_num = hclge_dbg_get_dfx_bd_num(hdev, offset); - if (bd_num <= 0) + bd_num = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset); + if (bd_num <= 0) { + dev_err(&hdev->pdev->dev, "get cmd(%d) bd num(%d) failed\n", + reg_msg->offset, bd_num); return; + } buf_len = sizeof(struct hclge_desc) * bd_num; desc_src = kzalloc(buf_len, GFP_KERNEL); @@ -88,22 +153,23 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev, } desc = desc_src; - ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, cmd); - if (ret != HCLGE_CMD_EXEC_SUCCESS) { + ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, reg_msg->cmd); + if (ret) { kfree(desc_src); return; } - max = (bd_num * BD_DATA_NUM) <= msg_num ? - (bd_num * BD_DATA_NUM) : msg_num; + entries_per_desc = ARRAY_SIZE(desc->data); + min_num = min_t(int, bd_num * entries_per_desc, reg_msg->msg_num); desc = desc_src; - for (i = 0; i < max; i++) { - ((i > 0) && ((i % BD_DATA_NUM) == 0)) ? desc++ : desc; + for (i = 0; i < min_num; i++) { + if (i > 0 && (i % entries_per_desc) == 0) + desc++; if (dfx_message->flag) dev_info(&hdev->pdev->dev, "%s: 0x%x\n", dfx_message->message, - desc->data[i % BD_DATA_NUM]); + desc->data[i % entries_per_desc]); dfx_message++; } @@ -213,95 +279,25 @@ static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf) static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, const char *cmd_buf) { - int msg_num; - - if (strncmp(&cmd_buf[9], "bios common", 11) == 0) { - msg_num = sizeof(hclge_dbg_bios_common_reg) / - sizeof(struct hclge_dbg_dfx_message); - hclge_dbg_dump_reg_common(hdev, hclge_dbg_bios_common_reg, - &cmd_buf[21], msg_num, - HCLGE_DBG_DFX_BIOS_OFFSET, - HCLGE_OPC_DFX_BIOS_COMMON_REG); - } else if (strncmp(&cmd_buf[9], "ssu", 3) == 0) { - msg_num = sizeof(hclge_dbg_ssu_reg_0) / - sizeof(struct hclge_dbg_dfx_message); - hclge_dbg_dump_reg_common(hdev, hclge_dbg_ssu_reg_0, - &cmd_buf[13], msg_num, - HCLGE_DBG_DFX_SSU_0_OFFSET, - HCLGE_OPC_DFX_SSU_REG_0); - - msg_num = sizeof(hclge_dbg_ssu_reg_1) / - sizeof(struct hclge_dbg_dfx_message); - hclge_dbg_dump_reg_common(hdev, hclge_dbg_ssu_reg_1, - &cmd_buf[13], msg_num, - HCLGE_DBG_DFX_SSU_1_OFFSET, - HCLGE_OPC_DFX_SSU_REG_1); - - msg_num = sizeof(hclge_dbg_ssu_reg_2) / - sizeof(struct hclge_dbg_dfx_message); - hclge_dbg_dump_reg_common(hdev, hclge_dbg_ssu_reg_2, - &cmd_buf[13], msg_num, - HCLGE_DBG_DFX_SSU_2_OFFSET, - HCLGE_OPC_DFX_SSU_REG_2); - } else if (strncmp(&cmd_buf[9], "igu egu", 7) == 0) { - msg_num = sizeof(hclge_dbg_igu_egu_reg) / - sizeof(struct hclge_dbg_dfx_message); - hclge_dbg_dump_reg_common(hdev, hclge_dbg_igu_egu_reg, - &cmd_buf[17], msg_num, - HCLGE_DBG_DFX_IGU_OFFSET, - HCLGE_OPC_DFX_IGU_EGU_REG); - } else if (strncmp(&cmd_buf[9], "rpu", 3) == 0) { - msg_num = sizeof(hclge_dbg_rpu_reg_0) / - sizeof(struct hclge_dbg_dfx_message); - hclge_dbg_dump_reg_common(hdev, hclge_dbg_rpu_reg_0, - &cmd_buf[13], msg_num, - HCLGE_DBG_DFX_RPU_0_OFFSET, - HCLGE_OPC_DFX_RPU_REG_0); - - msg_num = sizeof(hclge_dbg_rpu_reg_1) / - sizeof(struct hclge_dbg_dfx_message); - hclge_dbg_dump_reg_common(hdev, hclge_dbg_rpu_reg_1, - &cmd_buf[13], msg_num, - HCLGE_DBG_DFX_RPU_1_OFFSET, - HCLGE_OPC_DFX_RPU_REG_1); - } else if (strncmp(&cmd_buf[9], "ncsi", 4) == 0) { - msg_num = sizeof(hclge_dbg_ncsi_reg) / - sizeof(struct hclge_dbg_dfx_message); - hclge_dbg_dump_reg_common(hdev, hclge_dbg_ncsi_reg, - &cmd_buf[14], msg_num, - HCLGE_DBG_DFX_NCSI_OFFSET, - HCLGE_OPC_DFX_NCSI_REG); - } else if (strncmp(&cmd_buf[9], "rtc", 3) == 0) { - msg_num = 
sizeof(hclge_dbg_rtc_reg) / - sizeof(struct hclge_dbg_dfx_message); - hclge_dbg_dump_reg_common(hdev, hclge_dbg_rtc_reg, - &cmd_buf[13], msg_num, - HCLGE_DBG_DFX_RTC_OFFSET, - HCLGE_OPC_DFX_RTC_REG); - } else if (strncmp(&cmd_buf[9], "ppp", 3) == 0) { - msg_num = sizeof(hclge_dbg_ppp_reg) / - sizeof(struct hclge_dbg_dfx_message); - hclge_dbg_dump_reg_common(hdev, hclge_dbg_ppp_reg, - &cmd_buf[13], msg_num, - HCLGE_DBG_DFX_PPP_OFFSET, - HCLGE_OPC_DFX_PPP_REG); - } else if (strncmp(&cmd_buf[9], "rcb", 3) == 0) { - msg_num = sizeof(hclge_dbg_rcb_reg) / - sizeof(struct hclge_dbg_dfx_message); - hclge_dbg_dump_reg_common(hdev, hclge_dbg_rcb_reg, - &cmd_buf[13], msg_num, - HCLGE_DBG_DFX_RCB_OFFSET, - HCLGE_OPC_DFX_RCB_REG); - } else if (strncmp(&cmd_buf[9], "tqp", 3) == 0) { - msg_num = sizeof(hclge_dbg_tqp_reg) / - sizeof(struct hclge_dbg_dfx_message); - hclge_dbg_dump_reg_common(hdev, hclge_dbg_tqp_reg, - &cmd_buf[13], msg_num, - HCLGE_DBG_DFX_TQP_OFFSET, - HCLGE_OPC_DFX_TQP_REG); - } else if (strncmp(&cmd_buf[9], "dcb", 3) == 0) { - hclge_dbg_dump_dcb(hdev, &cmd_buf[13]); - } else { + struct hclge_dbg_reg_type_info *reg_info; + bool has_dump = false; + int i; + + for (i = 0; i < ARRAY_SIZE(hclge_dbg_reg_info); i++) { + reg_info = &hclge_dbg_reg_info[i]; + if (!strncmp(cmd_buf, reg_info->reg_type, + strlen(reg_info->reg_type))) { + hclge_dbg_dump_reg_common(hdev, reg_info, cmd_buf); + has_dump = true; + } + } + + if (strncmp(cmd_buf, "dcb", 3) == 0) { + hclge_dbg_dump_dcb(hdev, &cmd_buf[sizeof("dcb")]); + has_dump = true; + } + + if (!has_dump) { dev_info(&hdev->pdev->dev, "unknown command\n"); return; } @@ -325,11 +321,17 @@ static void hclge_dbg_dump_tc(struct hclge_dev *hdev) struct hclge_desc desc; int i, ret; + if (!hnae3_dev_dcb_supported(hdev)) { + dev_info(&hdev->pdev->dev, + "Only DCB-supported dev supports tc\n"); + return; + } + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { - dev_err(&hdev->pdev->dev, "dump tc fail, status is %d.\n", ret); + dev_err(&hdev->pdev->dev, "dump tc fail, ret = %d\n", ret); return; } @@ -409,6 +411,12 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev) dev_info(&hdev->pdev->dev, "QS_SCH qs_id: %u\n", desc.data[0]); + if (!hnae3_dev_dcb_supported(hdev)) { + dev_info(&hdev->pdev->dev, + "Only DCB-supported dev supports tm mapping\n"); + return; + } + cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING; hclge_cmd_setup_basic_desc(&desc, cmd, true); ret = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -425,7 +433,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev) return; err_tm_pg_cmd_send: - dev_err(&hdev->pdev->dev, "dump tm_pg fail(0x%x), status is %d\n", + dev_err(&hdev->pdev->dev, "dump tm_pg fail(0x%x), ret = %d\n", cmd, ret); } @@ -537,7 +545,7 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev) return; err_tm_cmd_send: - dev_err(&hdev->pdev->dev, "dump tm fail(0x%x), status is %d\n", + dev_err(&hdev->pdev->dev, "dump tm fail(0x%x), ret = %d\n", cmd, ret); } @@ -556,7 +564,7 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev, int pri_id, ret; u32 i; - ret = kstrtouint(&cmd_buf[12], 10, &queue_id); + ret = kstrtouint(cmd_buf, 0, &queue_id); queue_id = (ret != 0) ? 
0 : queue_id; cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK; @@ -590,6 +598,12 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev, dev_info(&hdev->pdev->dev, "%04d | %04d | %02d | %02d\n", queue_id, qset_id, pri_id, tc_id); + if (!hnae3_dev_dcb_supported(hdev)) { + dev_info(&hdev->pdev->dev, + "Only DCB-supported dev supports tm mapping\n"); + return; + } + cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING; bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data; for (group_id = 0; group_id < 32; group_id++) { @@ -620,7 +634,7 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev, return; err_tm_map_cmd_send: - dev_err(&hdev->pdev->dev, "dump tqp map fail(0x%x), status is %d\n", + dev_err(&hdev->pdev->dev, "dump tqp map fail(0x%x), ret = %d\n", cmd, ret); } @@ -634,7 +648,7 @@ static void hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev) ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { - dev_err(&hdev->pdev->dev, "dump checksum fail, status is %d.\n", + dev_err(&hdev->pdev->dev, "dump checksum fail, ret = %d\n", ret); return; } @@ -658,7 +672,7 @@ static void hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev) ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(&hdev->pdev->dev, - "dump qos pri map fail, status is %d.\n", ret); + "dump qos pri map fail, ret = %d\n", ret); return; } @@ -715,6 +729,34 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev) dev_info(&hdev->pdev->dev, "rx_share_buf: 0x%x\n", rx_buf_cmd->shared_buf); + cmd = HCLGE_OPC_RX_COM_WL_ALLOC; + hclge_cmd_setup_basic_desc(desc, cmd, true); + ret = hclge_cmd_send(&hdev->hw, desc, 1); + if (ret) + goto err_qos_cmd_send; + + rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data; + dev_info(&hdev->pdev->dev, "\n"); + dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n", + rx_com_wl->com_wl.high, rx_com_wl->com_wl.low); + + cmd = HCLGE_OPC_RX_GBL_PKT_CNT; + hclge_cmd_setup_basic_desc(desc, cmd, true); + ret = hclge_cmd_send(&hdev->hw, desc, 1); + if (ret) + goto err_qos_cmd_send; + + rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data; + dev_info(&hdev->pdev->dev, + "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n", + rx_packet_cnt->com_wl.high, rx_packet_cnt->com_wl.low); + dev_info(&hdev->pdev->dev, "\n"); + + if (!hnae3_dev_dcb_supported(hdev)) { + dev_info(&hdev->pdev->dev, + "Only DCB-supported dev supports rx priv wl\n"); + return; + } cmd = HCLGE_OPC_RX_PRIV_WL_ALLOC; hclge_cmd_setup_basic_desc(&desc[0], cmd, true); desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); @@ -723,7 +765,6 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev) if (ret) goto err_qos_cmd_send; - dev_info(&hdev->pdev->dev, "\n"); rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data; for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) dev_info(&hdev->pdev->dev, @@ -733,7 +774,8 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev) rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data; for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) dev_info(&hdev->pdev->dev, - "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i + 4, + "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", + i + HCLGE_TC_NUM_ONE_DESC, rx_priv_wl->tc_wl[i].high, rx_priv_wl->tc_wl[i].low); cmd = HCLGE_OPC_RX_COM_THRD_ALLOC; @@ -755,37 +797,15 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev) rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data; for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) dev_info(&hdev->pdev->dev, - "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i + 4, + "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", + i + 
HCLGE_TC_NUM_ONE_DESC, rx_com_thrd->com_thrd[i].high, rx_com_thrd->com_thrd[i].low); - - cmd = HCLGE_OPC_RX_COM_WL_ALLOC; - hclge_cmd_setup_basic_desc(desc, cmd, true); - ret = hclge_cmd_send(&hdev->hw, desc, 1); - if (ret) - goto err_qos_cmd_send; - - rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data; - dev_info(&hdev->pdev->dev, "\n"); - dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n", - rx_com_wl->com_wl.high, rx_com_wl->com_wl.low); - - cmd = HCLGE_OPC_RX_GBL_PKT_CNT; - hclge_cmd_setup_basic_desc(desc, cmd, true); - ret = hclge_cmd_send(&hdev->hw, desc, 1); - if (ret) - goto err_qos_cmd_send; - - rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data; - dev_info(&hdev->pdev->dev, - "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n", - rx_packet_cnt->com_wl.high, rx_packet_cnt->com_wl.low); - return; err_qos_cmd_send: dev_err(&hdev->pdev->dev, - "dump qos buf cfg fail(0x%x), status is %d\n", cmd, ret); + "dump qos buf cfg fail(0x%x), ret = %d\n", cmd, ret); } static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev) @@ -825,9 +845,9 @@ static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev) memset(printf_buf, 0, HCLGE_DBG_BUF_LEN); snprintf(printf_buf, HCLGE_DBG_BUF_LEN, "%02u |%02x:%02x:%02x:%02x:%02x:%02x|", - req0->index, req0->mac_add[0], req0->mac_add[1], - req0->mac_add[2], req0->mac_add[3], req0->mac_add[4], - req0->mac_add[5]); + req0->index, req0->mac_addr[0], req0->mac_addr[1], + req0->mac_addr[2], req0->mac_addr[3], + req0->mac_addr[4], req0->mac_addr[5]); snprintf(printf_buf + strlen(printf_buf), HCLGE_DBG_BUF_LEN - strlen(printf_buf), @@ -883,14 +903,17 @@ static void hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage, dev_info(&hdev->pdev->dev, " read result tcam key %s(%u):\n", sel_x ? "x" : "y", loc); + /* tcam_data0 ~ tcam_data1 */ req = (u32 *)req1->tcam_data; for (i = 0; i < 2; i++) dev_info(&hdev->pdev->dev, "%08x\n", *req++); + /* tcam_data2 ~ tcam_data7 */ req = (u32 *)req2->tcam_data; for (i = 0; i < 6; i++) dev_info(&hdev->pdev->dev, "%08x\n", *req++); + /* tcam_data8 ~ tcam_data12 */ req = (u32 *)req3->tcam_data; for (i = 0; i < 5; i++) dev_info(&hdev->pdev->dev, "%08x\n", *req++); @@ -908,25 +931,39 @@ static void hclge_dbg_fd_tcam(struct hclge_dev *hdev) static void hclge_dbg_dump_rst_info(struct hclge_dev *hdev) { - dev_info(&hdev->pdev->dev, "PF reset count: %d\n", + dev_info(&hdev->pdev->dev, "PF reset count: %u\n", hdev->rst_stats.pf_rst_cnt); - dev_info(&hdev->pdev->dev, "FLR reset count: %d\n", + dev_info(&hdev->pdev->dev, "FLR reset count: %u\n", hdev->rst_stats.flr_rst_cnt); - dev_info(&hdev->pdev->dev, "CORE reset count: %d\n", - hdev->rst_stats.core_rst_cnt); - dev_info(&hdev->pdev->dev, "GLOBAL reset count: %d\n", + dev_info(&hdev->pdev->dev, "GLOBAL reset count: %u\n", hdev->rst_stats.global_rst_cnt); - dev_info(&hdev->pdev->dev, "IMP reset count: %d\n", + dev_info(&hdev->pdev->dev, "IMP reset count: %u\n", hdev->rst_stats.imp_rst_cnt); - dev_info(&hdev->pdev->dev, "reset done count: %d\n", + dev_info(&hdev->pdev->dev, "reset done count: %u\n", hdev->rst_stats.reset_done_cnt); - dev_info(&hdev->pdev->dev, "HW reset done count: %d\n", + dev_info(&hdev->pdev->dev, "HW reset done count: %u\n", hdev->rst_stats.hw_reset_done_cnt); - dev_info(&hdev->pdev->dev, "reset count: %d\n", + dev_info(&hdev->pdev->dev, "reset count: %u\n", + hdev->rst_stats.reset_cnt); + dev_info(&hdev->pdev->dev, "reset fail count: %u\n", + hdev->rst_stats.reset_fail_cnt); +
dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n", + hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_REG_BASE)); + dev_info(&hdev->pdev->dev, "reset interrupt source: 0x%x\n", + hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG)); + dev_info(&hdev->pdev->dev, "reset interrupt status: 0x%x\n", + hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS)); + dev_info(&hdev->pdev->dev, "hardware reset status: 0x%x\n", + hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG)); + dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n", + hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG)); + dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n", + hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING)); } -void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev) +static void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev) { struct hclge_desc *desc_src, *desc_tmp; struct hclge_get_m7_bd_cmd *req; @@ -940,7 +977,7 @@ void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev) ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(&hdev->pdev->dev, - "get firmware statistics bd number failed, ret=%d\n", + "get firmware statistics bd number failed, ret = %d\n", ret); return; } @@ -961,7 +998,7 @@ void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev) if (ret) { kfree(desc_src); dev_err(&hdev->pdev->dev, - "get firmware statistics failed, ret=%d\n", ret); + "get firmware statistics failed, ret = %d\n", ret); return; } @@ -981,6 +1018,33 @@ void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev) kfree(desc_src); } +#define HCLGE_CMD_NCL_CONFIG_BD_NUM 5 + +static void hclge_ncl_config_data_print(struct hclge_dev *hdev, + struct hclge_desc *desc, int *offset, + int *length) +{ +#define HCLGE_CMD_DATA_NUM 6 + + int i; + int j; + + for (i = 0; i < HCLGE_CMD_NCL_CONFIG_BD_NUM; i++) { + for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) { + if (i == 0 && j == 0) + continue; + + dev_info(&hdev->pdev->dev, "0x%04x | 0x%08x\n", + *offset, + le32_to_cpu(desc[i].data[j])); + *offset += sizeof(u32); + *length -= sizeof(u32); + if (*length <= 0) + return; + } + } +} + /* hclge_dbg_dump_ncl_config: print specified range of NCL_CONFIG file * @hdev: pointer to struct hclge_dev * @cmd_buf: string that contains offset and length @@ -990,17 +1054,13 @@ static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, { #define HCLGE_MAX_NCL_CONFIG_OFFSET 4096 #define HCLGE_MAX_NCL_CONFIG_LENGTH (20 + 24 * 4) -#define HCLGE_CMD_DATA_NUM 6 - struct hclge_desc desc[5]; - u32 byte_offset; - int bd_num = 5; + struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM]; + int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM; int offset; int length; int data0; int ret; - int i; - int j; ret = sscanf(cmd_buf, "%x %x", &offset, &length); if (ret != 2 || offset >= HCLGE_MAX_NCL_CONFIG_OFFSET || @@ -1026,22 +1086,7 @@ static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, if (ret) return; - byte_offset = offset; - for (i = 0; i < bd_num; i++) { - for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) { - if (i == 0 && j == 0) - continue; - - dev_info(&hdev->pdev->dev, "0x%04x | 0x%08x\n", - byte_offset, - le32_to_cpu(desc[i].data[j])); - byte_offset += sizeof(u32); - length -= sizeof(u32); - if (length <= 0) - return; - } - } - offset += HCLGE_MAX_NCL_CONFIG_LENGTH; + hclge_ncl_config_data_print(hdev, desc, &offset, &length); } } @@ -1067,6 +1112,9 @@ static void hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev) int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf) { +#define DUMP_REG "dump reg" +#define DUMP_TM_MAP "dump tm map" + struct 
hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; @@ -1074,8 +1122,8 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf) hclge_dbg_fd_tcam(hdev); } else if (strncmp(cmd_buf, "dump tc", 7) == 0) { hclge_dbg_dump_tc(hdev); - } else if (strncmp(cmd_buf, "dump tm map", 11) == 0) { - hclge_dbg_dump_tm_map(hdev, cmd_buf); + } else if (strncmp(cmd_buf, DUMP_TM_MAP, strlen(DUMP_TM_MAP)) == 0) { + hclge_dbg_dump_tm_map(hdev, &cmd_buf[sizeof(DUMP_TM_MAP)]); } else if (strncmp(cmd_buf, "dump tm", 7) == 0) { hclge_dbg_dump_tm(hdev); } else if (strncmp(cmd_buf, "dump qos pause cfg", 18) == 0) { @@ -1086,8 +1134,8 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf) hclge_dbg_dump_qos_buf_cfg(hdev); } else if (strncmp(cmd_buf, "dump mng tbl", 12) == 0) { hclge_dbg_dump_mng_table(hdev); - } else if (strncmp(cmd_buf, "dump reg", 8) == 0) { - hclge_dbg_dump_reg_cmd(hdev, cmd_buf); + } else if (strncmp(cmd_buf, DUMP_REG, strlen(DUMP_REG)) == 0) { + hclge_dbg_dump_reg_cmd(hdev, &cmd_buf[sizeof(DUMP_REG)]); } else if (strncmp(cmd_buf, "dump reset info", 15) == 0) { hclge_dbg_dump_rst_info(hdev); } else if (strncmp(cmd_buf, "dump m7 info", 12) == 0) { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h index d055fda41775..38b79321c4c4 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h @@ -4,6 +4,9 @@ #ifndef __HCLGE_DEBUGFS_H #define __HCLGE_DEBUGFS_H +#include <linux/etherdevice.h> +#include "hclge_cmd.h" + #define HCLGE_DBG_BUF_LEN 256 #define HCLGE_DBG_MNG_TBL_MAX 64 @@ -63,9 +66,23 @@ struct hclge_dbg_bitmap_cmd { }; }; +struct hclge_dbg_reg_common_msg { + int msg_num; + int offset; + enum hclge_opcode_type cmd; +}; + +#define HCLGE_DBG_MAX_DFX_MSG_LEN 60 struct hclge_dbg_dfx_message { int flag; - char message[60]; + char message[HCLGE_DBG_MAX_DFX_MSG_LEN]; +}; + +#define HCLGE_DBG_MAC_REG_TYPE_LEN 32 +struct hclge_dbg_reg_type_info { + const char *reg_type; + struct hclge_dbg_dfx_message *dfx_msg; + struct hclge_dbg_reg_common_msg reg_msg; }; #pragma pack() diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c index 85a3b062371d..87dece0e745d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c @@ -637,8 +637,8 @@ static void hclge_log_error(struct device *dev, char *reg, { while (err->msg) { if (err->int_msk & err_sts) { - dev_warn(dev, "%s %s found [error status=0x%x]\n", - reg, err->msg, err_sts); + dev_err(dev, "%s %s found [error status=0x%x]\n", + reg, err->msg, err_sts); if (err->reset_level && err->reset_level != HNAE3_NONE_RESET) set_bit(err->reset_level, reset_requests); @@ -652,16 +652,11 @@ static void hclge_log_error(struct device *dev, char *reg, * @desc: descriptor for describing the command * @cmd: command opcode * @flag: flag for extended command structure - * @w_num: offset for setting the read interrupt type. - * @int_type: select which type of the interrupt for which the error - * info will be read(RAS-CE/RAS-NFE/RAS-FE etc). 
* * This function query the error info from hw register/s using command */ static int hclge_cmd_query_error(struct hclge_dev *hdev, - struct hclge_desc *desc, u32 cmd, - u16 flag, u8 w_num, - enum hclge_err_int_type int_type) + struct hclge_desc *desc, u32 cmd, u16 flag) { struct device *dev = &hdev->pdev->dev; int desc_num = 1; @@ -673,8 +668,6 @@ static int hclge_cmd_query_error(struct hclge_dev *hdev, hclge_cmd_setup_basic_desc(&desc[1], cmd, true); desc_num = 2; } - if (w_num) - desc[0].data[w_num] = cpu_to_le32(int_type); ret = hclge_cmd_send(&hdev->hw, &desc[0], desc_num); if (ret) @@ -872,8 +865,7 @@ static int hclge_config_tm_hw_err_int(struct hclge_dev *hdev, bool en) } /* configure TM QCN hw errors */ - ret = hclge_cmd_query_error(hdev, &desc, HCLGE_TM_QCN_MEM_INT_CFG, - 0, 0, 0); + ret = hclge_cmd_query_error(hdev, &desc, HCLGE_TM_QCN_MEM_INT_CFG, 0); if (ret) { dev_err(dev, "fail(%d) to read TM QCN CFG status\n", ret); return ret; @@ -938,32 +930,44 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd, /* configure PPU error interrupts */ if (cmd == HCLGE_PPU_MPF_ECC_INT_CMD) { hclge_cmd_setup_basic_desc(&desc[0], cmd, false); - desc[0].flag |= HCLGE_CMD_FLAG_NEXT; + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); hclge_cmd_setup_basic_desc(&desc[1], cmd, false); if (en) { - desc[0].data[0] = HCLGE_PPU_MPF_ABNORMAL_INT0_EN; - desc[0].data[1] = HCLGE_PPU_MPF_ABNORMAL_INT1_EN; - desc[1].data[3] = HCLGE_PPU_MPF_ABNORMAL_INT3_EN; - desc[1].data[4] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN; + desc[0].data[0] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT0_EN); + desc[0].data[1] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT1_EN); + desc[1].data[3] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT3_EN); + desc[1].data[4] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN); } - desc[1].data[0] = HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK; - desc[1].data[1] = HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK; - desc[1].data[2] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK; - desc[1].data[3] |= HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK; + desc[1].data[0] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK); + desc[1].data[1] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK); + desc[1].data[2] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK); + desc[1].data[3] |= + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK); desc_num = 2; } else if (cmd == HCLGE_PPU_MPF_OTHER_INT_CMD) { hclge_cmd_setup_basic_desc(&desc[0], cmd, false); if (en) - desc[0].data[0] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN2; + desc[0].data[0] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN2); - desc[0].data[2] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN2_MASK; + desc[0].data[2] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN2_MASK); } else if (cmd == HCLGE_PPU_PF_OTHER_INT_CMD) { hclge_cmd_setup_basic_desc(&desc[0], cmd, false); if (en) - desc[0].data[0] = HCLGE_PPU_PF_ABNORMAL_INT_EN; + desc[0].data[0] = + cpu_to_le32(HCLGE_PPU_PF_ABNORMAL_INT_EN); - desc[0].data[2] = HCLGE_PPU_PF_ABNORMAL_INT_EN_MASK; + desc[0].data[2] = + cpu_to_le32(HCLGE_PPU_PF_ABNORMAL_INT_EN_MASK); } else { dev_err(dev, "Invalid cmd to configure PPU error interrupts\n"); return -EINVAL; @@ -1171,8 +1175,8 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev, status = le32_to_cpu(*(desc_data + 3)) & BIT(0); if (status) { - dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_mem32_ecc_mbit_err found [error status=0x%x]\n", - status); + dev_err(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_mem32_ecc_mbit_err found [error status=0x%x]\n", + status); set_bit(HNAE3_GLOBAL_RESET, &ae_dev->hw_err_reset_req); } 
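The descriptor writes in the PPU interrupt-configuration hunk above are all wrapped in cpu_to_le32() because the command-queue descriptors are little-endian on the wire: a raw store of a host-order u32 only happens to produce the right bytes on little-endian CPUs, and once the fields are annotated __le32 a sparse build (make C=1) flags every unconverted access. A minimal sketch of the pattern, assuming a cut-down stand-in descriptor (demo_desc and its helpers are illustrative only, not the driver's real types; the real layout lives in hclge_cmd.h):

#include <linux/types.h>
#include <asm/byteorder.h>

/* cut-down stand-in for a little-endian firmware command descriptor */
struct demo_desc {
	__le16 opcode;
	__le16 flag;
	__le32 data[6];
};

static void demo_fill_desc(struct demo_desc *desc, u32 en_bits, u32 en_mask)
{
	/* "desc->data[0] = en_bits;" would store host byte order, which
	 * matches the wire format only on little-endian hosts
	 */
	desc->data[0] = cpu_to_le32(en_bits);

	/* OR-ing two __le32 values stays correct because | commutes with
	 * the byte swap; mixing in an unconverted host-order word would not
	 */
	desc->data[1] |= cpu_to_le32(en_mask);
}

static bool demo_bit_is_set(const struct demo_desc *desc, u32 mask)
{
	/* reads convert back to host order before any arithmetic or tests */
	return le32_to_cpu(desc->data[1]) & mask;
}

The same reasoning applies to the cpu_to_le16() conversions on desc->flag elsewhere in this patch: every multi-byte field shared with the firmware goes through a cpu_to_leXX()/leXX_to_cpu() pair.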
@@ -1208,8 +1212,8 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev, desc_data = (__le32 *)&desc[5]; status = le32_to_cpu(*(desc_data + 1)); if (status) { - dev_warn(dev, "PPU_MPF_ABNORMAL_INT_ST1 %s found\n", - "rpu_rx_pkt_ecc_mbit_err"); + dev_err(dev, + "PPU_MPF_ABNORMAL_INT_ST1 rpu_rx_pkt_ecc_mbit_err found\n"); set_bit(HNAE3_GLOBAL_RESET, &ae_dev->hw_err_reset_req); } @@ -1321,10 +1325,12 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev, /* log PPU(RCB) errors */ desc_data = (__le32 *)&desc[3]; status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_RAS_MASK; - if (status) + if (status) { hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST0", &hclge_ppu_pf_abnormal_int[0], status, &ae_dev->hw_err_reset_req); + hclge_report_hw_error(hdev, HNAE3_PPU_POISON_ERROR); + } /* clear all PF RAS errors */ hclge_cmd_reuse_desc(&desc[0], false); @@ -1387,17 +1393,17 @@ static int hclge_log_rocee_axi_error(struct hclge_dev *hdev) return ret; } - dev_info(dev, "AXI1: %08X %08X %08X %08X %08X %08X\n", - le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]), - le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]), - le32_to_cpu(desc[0].data[4]), le32_to_cpu(desc[0].data[5])); - dev_info(dev, "AXI2: %08X %08X %08X %08X %08X %08X\n", - le32_to_cpu(desc[1].data[0]), le32_to_cpu(desc[1].data[1]), - le32_to_cpu(desc[1].data[2]), le32_to_cpu(desc[1].data[3]), - le32_to_cpu(desc[1].data[4]), le32_to_cpu(desc[1].data[5])); - dev_info(dev, "AXI3: %08X %08X %08X %08X\n", - le32_to_cpu(desc[2].data[0]), le32_to_cpu(desc[2].data[1]), - le32_to_cpu(desc[2].data[2]), le32_to_cpu(desc[2].data[3])); + dev_err(dev, "AXI1: %08X %08X %08X %08X %08X %08X\n", + le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]), + le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]), + le32_to_cpu(desc[0].data[4]), le32_to_cpu(desc[0].data[5])); + dev_err(dev, "AXI2: %08X %08X %08X %08X %08X %08X\n", + le32_to_cpu(desc[1].data[0]), le32_to_cpu(desc[1].data[1]), + le32_to_cpu(desc[1].data[2]), le32_to_cpu(desc[1].data[3]), + le32_to_cpu(desc[1].data[4]), le32_to_cpu(desc[1].data[5])); + dev_err(dev, "AXI3: %08X %08X %08X %08X\n", + le32_to_cpu(desc[2].data[0]), le32_to_cpu(desc[2].data[1]), + le32_to_cpu(desc[2].data[2]), le32_to_cpu(desc[2].data[3])); return 0; } @@ -1410,18 +1416,18 @@ static int hclge_log_rocee_ecc_error(struct hclge_dev *hdev) ret = hclge_cmd_query_error(hdev, &desc[0], HCLGE_QUERY_ROCEE_ECC_RAS_INFO_CMD, - HCLGE_CMD_FLAG_NEXT, 0, 0); + HCLGE_CMD_FLAG_NEXT); if (ret) { dev_err(dev, "failed(%d) to query ROCEE ECC error sts\n", ret); return ret; } - dev_info(dev, "ECC1: %08X %08X %08X %08X %08X %08X\n", - le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]), - le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]), - le32_to_cpu(desc[0].data[4]), le32_to_cpu(desc[0].data[5])); - dev_info(dev, "ECC2: %08X %08X %08X\n", le32_to_cpu(desc[1].data[0]), - le32_to_cpu(desc[1].data[1]), le32_to_cpu(desc[1].data[2])); + dev_err(dev, "ECC1: %08X %08X %08X %08X %08X %08X\n", + le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]), + le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]), + le32_to_cpu(desc[0].data[4]), le32_to_cpu(desc[0].data[5])); + dev_err(dev, "ECC2: %08X %08X %08X\n", le32_to_cpu(desc[1].data[0]), + le32_to_cpu(desc[1].data[1]), le32_to_cpu(desc[1].data[2])); return 0; } @@ -1434,7 +1440,7 @@ static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev) /* read overflow error status */ ret = hclge_cmd_query_error(hdev, &desc[0], 
HCLGE_ROCEE_PF_RAS_INT_CMD, - 0, 0, 0); + 0); if (ret) { dev_err(dev, "failed(%d) to query ROCEE OVF error sts\n", ret); return ret; @@ -1450,9 +1456,9 @@ static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev) le32_to_cpu(desc[0].data[0]); while (err->msg) { if (err->int_msk == err_sts) { - dev_warn(dev, "%s [error status=0x%x] found\n", - err->msg, - le32_to_cpu(desc[0].data[0])); + dev_err(dev, "%s [error status=0x%x] found\n", + err->msg, + le32_to_cpu(desc[0].data[0])); break; } err++; @@ -1460,13 +1466,13 @@ static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev) } if (le32_to_cpu(desc[0].data[1]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) { - dev_warn(dev, "ROCEE TSP OVF [error status=0x%x] found\n", - le32_to_cpu(desc[0].data[1])); + dev_err(dev, "ROCEE TSP OVF [error status=0x%x] found\n", + le32_to_cpu(desc[0].data[1])); } if (le32_to_cpu(desc[0].data[2]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) { - dev_warn(dev, "ROCEE SCC OVF [error status=0x%x] found\n", - le32_to_cpu(desc[0].data[2])); + dev_err(dev, "ROCEE SCC OVF [error status=0x%x] found\n", + le32_to_cpu(desc[0].data[2])); } return 0; @@ -1483,8 +1489,7 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev) /* read RAS error interrupt status */ ret = hclge_cmd_query_error(hdev, &desc[0], - HCLGE_QUERY_CLEAR_ROCEE_RAS_INT, - 0, 0, 0); + HCLGE_QUERY_CLEAR_ROCEE_RAS_INT, 0); if (ret) { dev_err(dev, "failed(%d) to query ROCEE RAS INT SRC\n", ret); /* reset everything for now */ @@ -1495,10 +1500,10 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev) if (status & HCLGE_ROCEE_AXI_ERR_INT_MASK) { if (status & HCLGE_ROCEE_RERR_INT_MASK) - dev_warn(dev, "ROCEE RAS AXI rresp error\n"); + dev_err(dev, "ROCEE RAS AXI rresp error\n"); if (status & HCLGE_ROCEE_BERR_INT_MASK) - dev_warn(dev, "ROCEE RAS AXI bresp error\n"); + dev_err(dev, "ROCEE RAS AXI bresp error\n"); reset_type = HNAE3_FUNC_RESET; @@ -1508,7 +1513,7 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev) } if (status & HCLGE_ROCEE_ECC_INT_MASK) { - dev_warn(dev, "ROCEE RAS 2bit ECC error\n"); + dev_err(dev, "ROCEE RAS 2bit ECC error\n"); reset_type = HNAE3_GLOBAL_RESET; ret = hclge_log_rocee_ecc_error(hdev); @@ -1566,8 +1571,8 @@ int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en) static void hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev) { - enum hnae3_reset_type reset_type = HNAE3_NONE_RESET; struct hclge_dev *hdev = ae_dev->priv; + enum hnae3_reset_type reset_type; if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || hdev->pdev->revision < 0x21) @@ -1649,16 +1654,16 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev) /* Handling Non-fatal HNS RAS errors */ if (status & HCLGE_RAS_REG_NFE_MASK) { - dev_warn(dev, - "HNS Non-Fatal RAS error(status=0x%x) identified\n", - status); + dev_err(dev, + "HNS Non-Fatal RAS error(status=0x%x) identified\n", + status); hclge_handle_all_ras_errors(hdev); } /* Handling Non-fatal Rocee RAS errors */ if (hdev->pdev->revision >= 0x21 && status & HCLGE_RAS_REG_ROCEE_ERR_MASK) { - dev_warn(dev, "ROCEE Non-Fatal RAS error identified\n"); + dev_err(dev, "ROCEE Non-Fatal RAS error identified\n"); hclge_handle_rocee_ras_error(ae_dev); } @@ -1737,8 +1742,8 @@ static void hclge_handle_over_8bd_err(struct hclge_dev *hdev, return; } - dev_warn(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vf_id(%d), queue_id(%d)\n", - vf_id, q_id); + dev_err(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vf_id(%u), queue_id(%u)\n", + vf_id, q_id); if (vf_id) { if (vf_id >= 
hdev->num_alloc_vport) { @@ -1755,8 +1760,8 @@ static void hclge_handle_over_8bd_err(struct hclge_dev *hdev, ret = hclge_inform_reset_assert_to_vf(&hdev->vport[vf_id]); if (ret) - dev_warn(dev, "inform reset to vf(%d) failed %d!\n", - hdev->vport->vport_id, ret); + dev_err(dev, "inform reset to vf(%u) failed %d!\n", + hdev->vport->vport_id, ret); } else { set_bit(HNAE3_FUNC_RESET, reset_requests); } @@ -1802,8 +1807,8 @@ static int hclge_handle_mpf_msix_error(struct hclge_dev *hdev, status = le32_to_cpu(*(desc_data + 2)) & HCLGE_PPU_MPF_INT_ST2_MSIX_MASK; if (status) - dev_warn(dev, "PPU_MPF_ABNORMAL_INT_ST2 rx_q_search_miss found [dfx status=0x%x\n]", - status); + dev_err(dev, "PPU_MPF_ABNORMAL_INT_ST2 rx_q_search_miss found [dfx status=0x%x\n]", + status); /* clear all main PF MSIx errors */ ret = hclge_clear_hw_msix_error(hdev, desc, true, mpf_bd_num); @@ -1997,7 +2002,7 @@ void hclge_handle_all_hns_hw_errors(struct hnae3_ae_dev *ae_dev) /* Handle Non-fatal HNS RAS errors */ if (status & HCLGE_RAS_REG_NFE_MASK) { - dev_warn(dev, "HNS hw error(RAS) identified during init\n"); + dev_err(dev, "HNS hw error(RAS) identified during init\n"); hclge_handle_all_ras_errors(hdev); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h index 7ea8bb28a0cb..876fd81ad2f1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h @@ -5,6 +5,7 @@ #define __HCLGE_ERR_H #include "hclge_main.h" +#include "hnae3.h" #define HCLGE_MPF_RAS_INT_MIN_BD_NUM 10 #define HCLGE_PF_RAS_INT_MIN_BD_NUM 4 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 3fde5471e1c0..fd7f94372ff0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -35,6 +35,25 @@ #define BUF_RESERVE_PERCENT 90 #define HCLGE_RESET_MAX_FAIL_CNT 5 +#define HCLGE_RESET_SYNC_TIME 100 +#define HCLGE_PF_RESET_SYNC_TIME 20 +#define HCLGE_PF_RESET_SYNC_CNT 1500 + +/* Get DFX BD number offset */ +#define HCLGE_DFX_BIOS_BD_OFFSET 1 +#define HCLGE_DFX_SSU_0_BD_OFFSET 2 +#define HCLGE_DFX_SSU_1_BD_OFFSET 3 +#define HCLGE_DFX_IGU_BD_OFFSET 4 +#define HCLGE_DFX_RPU_0_BD_OFFSET 5 +#define HCLGE_DFX_RPU_1_BD_OFFSET 6 +#define HCLGE_DFX_NCSI_BD_OFFSET 7 +#define HCLGE_DFX_RTC_BD_OFFSET 8 +#define HCLGE_DFX_PPP_BD_OFFSET 9 +#define HCLGE_DFX_RCB_BD_OFFSET 10 +#define HCLGE_DFX_TQP_BD_OFFSET 11 +#define HCLGE_DFX_SSU_2_BD_OFFSET 12 + +#define HCLGE_LINK_STATUS_MS 10 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps); static int hclge_init_vlan_config(struct hclge_dev *hdev); @@ -47,6 +66,7 @@ static void hclge_rfs_filter_expire(struct hclge_dev *hdev); static void hclge_clear_arfs_rules(struct hnae3_handle *handle); static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev, unsigned long *addr); +static int hclge_set_default_loopback(struct hclge_dev *hdev); static struct hnae3_ae_algo ae_algo; @@ -317,6 +337,80 @@ static const u8 hclge_hash_key[] = { 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA }; +static const u32 hclge_dfx_bd_offset_list[] = { + HCLGE_DFX_BIOS_BD_OFFSET, + HCLGE_DFX_SSU_0_BD_OFFSET, + HCLGE_DFX_SSU_1_BD_OFFSET, + HCLGE_DFX_IGU_BD_OFFSET, + HCLGE_DFX_RPU_0_BD_OFFSET, + HCLGE_DFX_RPU_1_BD_OFFSET, + HCLGE_DFX_NCSI_BD_OFFSET, + HCLGE_DFX_RTC_BD_OFFSET, + HCLGE_DFX_PPP_BD_OFFSET, + HCLGE_DFX_RCB_BD_OFFSET, + 
HCLGE_DFX_TQP_BD_OFFSET, + HCLGE_DFX_SSU_2_BD_OFFSET +}; + +static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = { + HCLGE_OPC_DFX_BIOS_COMMON_REG, + HCLGE_OPC_DFX_SSU_REG_0, + HCLGE_OPC_DFX_SSU_REG_1, + HCLGE_OPC_DFX_IGU_EGU_REG, + HCLGE_OPC_DFX_RPU_REG_0, + HCLGE_OPC_DFX_RPU_REG_1, + HCLGE_OPC_DFX_NCSI_REG, + HCLGE_OPC_DFX_RTC_REG, + HCLGE_OPC_DFX_PPP_REG, + HCLGE_OPC_DFX_RCB_REG, + HCLGE_OPC_DFX_TQP_REG, + HCLGE_OPC_DFX_SSU_REG_2 +}; + +static const struct key_info meta_data_key_info[] = { + { PACKET_TYPE_ID, 6}, + { IP_FRAGEMENT, 1}, + { ROCE_TYPE, 1}, + { NEXT_KEY, 5}, + { VLAN_NUMBER, 2}, + { SRC_VPORT, 12}, + { DST_VPORT, 12}, + { TUNNEL_PACKET, 1}, +}; + +static const struct key_info tuple_key_info[] = { + { OUTER_DST_MAC, 48}, + { OUTER_SRC_MAC, 48}, + { OUTER_VLAN_TAG_FST, 16}, + { OUTER_VLAN_TAG_SEC, 16}, + { OUTER_ETH_TYPE, 16}, + { OUTER_L2_RSV, 16}, + { OUTER_IP_TOS, 8}, + { OUTER_IP_PROTO, 8}, + { OUTER_SRC_IP, 32}, + { OUTER_DST_IP, 32}, + { OUTER_L3_RSV, 16}, + { OUTER_SRC_PORT, 16}, + { OUTER_DST_PORT, 16}, + { OUTER_L4_RSV, 32}, + { OUTER_TUN_VNI, 24}, + { OUTER_TUN_FLOW_ID, 8}, + { INNER_DST_MAC, 48}, + { INNER_SRC_MAC, 48}, + { INNER_VLAN_TAG_FST, 16}, + { INNER_VLAN_TAG_SEC, 16}, + { INNER_ETH_TYPE, 16}, + { INNER_L2_RSV, 16}, + { INNER_IP_TOS, 8}, + { INNER_IP_PROTO, 8}, + { INNER_SRC_IP, 32}, + { INNER_DST_IP, 32}, + { INNER_L3_RSV, 16}, + { INNER_SRC_PORT, 16}, + { INNER_DST_PORT, 16}, + { INNER_L4_RSV, 32}, +}; + static int hclge_mac_update_stats_defective(struct hclge_dev *hdev) { #define HCLGE_MAC_CMD_NUM 21 @@ -364,9 +458,13 @@ static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num) u16 i, k, n; int ret; - desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL); + /* This may be called inside atomic sections, + * so GFP_ATOMIC is more suitable here + */ + desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC); if (!desc) return -ENOMEM; + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true); ret = hclge_cmd_send(&hdev->hw, desc, desc_num); if (ret) { @@ -647,6 +745,12 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) count += 2; handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK; handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK; + + if (hdev->hw.mac.phydev) { + count += 1; + handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK; + } + } else if (stringset == ETH_SS_STATS) { count = ARRAY_SIZE(g_mac_stats_string) + hclge_tqps_get_sset_count(handle, stringset); @@ -702,14 +806,16 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data) p = hclge_tqps_get_stats(handle, p); } -static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt, - u64 *rx_cnt) +static void hclge_get_mac_stat(struct hnae3_handle *handle, + struct hns3_mac_stats *mac_stats) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - *tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num; - *rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num; + hclge_update_stats(handle, NULL); + + mac_stats->tx_pause_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num; + mac_stats->rx_pause_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num; } static int hclge_parse_func_status(struct hclge_dev *hdev, @@ -1075,6 +1181,7 @@ static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability) else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE) hclge_parse_backplane_link_mode(hdev, speed_ability); } + static void hclge_parse_cfg(struct 
hclge_cfg *cfg, struct hclge_desc *desc) { struct hclge_cfg_param_cmd *req; @@ -1270,6 +1377,12 @@ static int hclge_configure(struct hclge_dev *hdev) hclge_init_kdump_kernel_config(hdev); + /* Set the init affinity based on pci func number */ + i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev))); + i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0; + cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)), + &hdev->affinity_mask); + return ret; } @@ -2487,6 +2600,10 @@ static int hclge_mac_init(struct hclge_dev *hdev) return ret; } + ret = hclge_set_default_loopback(hdev); + if (ret) + return ret; + ret = hclge_buffer_alloc(hdev); if (ret) dev_err(&hdev->pdev->dev, @@ -2499,22 +2616,29 @@ static void hclge_mbx_task_schedule(struct hclge_dev *hdev) { if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) && !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) - schedule_work(&hdev->mbx_service_task); + queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq, + &hdev->mbx_service_task); } static void hclge_reset_task_schedule(struct hclge_dev *hdev) { if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) - schedule_work(&hdev->rst_service_task); + queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq, + &hdev->rst_service_task); } -static void hclge_task_schedule(struct hclge_dev *hdev) +void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time) { if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) && !test_bit(HCLGE_STATE_REMOVING, &hdev->state) && - !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) - (void)schedule_work(&hdev->service_task); + !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) { + hdev->hw_stats.stats_timer++; + hdev->fd_arfs_expire_timer++; + mod_delayed_work_on(cpumask_first(&hdev->affinity_mask), + system_wq, &hdev->service_task, + delay_time); + } } static int hclge_get_mac_link_status(struct hclge_dev *hdev) @@ -2729,25 +2853,6 @@ static int hclge_get_status(struct hnae3_handle *handle) return hdev->hw.mac.link; } -static void hclge_service_timer(struct timer_list *t) -{ - struct hclge_dev *hdev = from_timer(hdev, t, service_timer); - - mod_timer(&hdev->service_timer, jiffies + HZ); - hdev->hw_stats.stats_timer++; - hdev->fd_arfs_expire_timer++; - hclge_task_schedule(hdev); -} - -static void hclge_service_complete(struct hclge_dev *hdev) -{ - WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)); - - /* Flush memory before next watchdog */ - smp_mb__before_atomic(); - clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); -} - static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) { u32 rst_src_reg, cmdq_src_reg, msix_src_reg; @@ -2763,9 +2868,9 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) * defer the processing of the mailbox events. Since, we would have not * cleared RX CMDQ event this time we would receive again another * interrupt from H/W just for the mailbox. + * + * check for vector0 reset event sources */ - - /* check for vector0 reset event sources */ if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) { dev_info(&hdev->pdev->dev, "IMP reset interrupt\n"); set_bit(HNAE3_IMP_RESET, &hdev->reset_pending); @@ -2882,10 +2987,15 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data) break; } - /* clear the source of interrupt if it is not cause by reset */ + hclge_clear_event_cause(hdev, event_cause, clearval); + + /* Enable interrupt if it is not caused by reset. 
And when + * clearval is equal to 0, it means interrupt status may be + * cleared by hardware before driver reads status register. + * For this case, vector0 interrupt also should be enabled. + */ if (!clearval || event_cause == HCLGE_VECTOR0_EVENT_MBX) { - hclge_clear_event_cause(hdev, event_cause, clearval); hclge_enable_vector(&hdev->misc_vector, true); } @@ -2918,6 +3028,36 @@ static void hclge_get_misc_vector(struct hclge_dev *hdev) hdev->num_msi_used += 1; } +static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify, + const cpumask_t *mask) +{ + struct hclge_dev *hdev = container_of(notify, struct hclge_dev, + affinity_notify); + + cpumask_copy(&hdev->affinity_mask, mask); +} + +static void hclge_irq_affinity_release(struct kref *ref) +{ +} + +static void hclge_misc_affinity_setup(struct hclge_dev *hdev) +{ + irq_set_affinity_hint(hdev->misc_vector.vector_irq, + &hdev->affinity_mask); + + hdev->affinity_notify.notify = hclge_irq_affinity_notify; + hdev->affinity_notify.release = hclge_irq_affinity_release; + irq_set_affinity_notifier(hdev->misc_vector.vector_irq, + &hdev->affinity_notify); +} + +static void hclge_misc_affinity_teardown(struct hclge_dev *hdev) +{ + irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL); + irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL); +} + static int hclge_misc_irq_init(struct hclge_dev *hdev) { int ret; @@ -3105,6 +3245,71 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset) return 0; } +static int hclge_func_reset_sync_vf(struct hclge_dev *hdev) +{ + struct hclge_pf_rst_sync_cmd *req; + struct hclge_desc desc; + int cnt = 0; + int ret; + + req = (struct hclge_pf_rst_sync_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true); + + do { + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + /* for compatibility with old firmware, wait + * 100 ms for VF to stop IO + */ + if (ret == -EOPNOTSUPP) { + msleep(HCLGE_RESET_SYNC_TIME); + return 0; + } else if (ret) { + dev_err(&hdev->pdev->dev, "sync with VF fail %d!\n", + ret); + return ret; + } else if (req->all_vf_ready) { + return 0; + } + msleep(HCLGE_PF_RESET_SYNC_TIME); + hclge_cmd_reuse_desc(&desc, true); + } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT); + + dev_err(&hdev->pdev->dev, "sync with VF timeout!\n"); + return -ETIME; +} + +void hclge_report_hw_error(struct hclge_dev *hdev, + enum hnae3_hw_error_type type) +{ + struct hnae3_client *client = hdev->nic_client; + u16 i; + + if (!client || !client->ops->process_hw_error || + !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state)) + return; + + for (i = 0; i < hdev->num_vmdq_vport + 1; i++) + client->ops->process_hw_error(&hdev->vport[i].nic, type); +} + +static void hclge_handle_imp_error(struct hclge_dev *hdev) +{ + u32 reg_val; + + reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); + if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) { + hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR); + reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B); + hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val); + } + + if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) { + hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR); + reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B); + hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val); + } +} + int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) { struct hclge_desc desc; @@ -3229,7 +3434,13 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev) if (!clearval) return; - hclge_write_dev(&hdev->hw, 
HCLGE_MISC_RESET_STS_REG, clearval); + /* For revision 0x20, the reset interrupt source + * can only be cleared after the hardware reset is done + */ + if (hdev->pdev->revision == 0x20) + hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, + clearval); + hclge_enable_vector(&hdev->misc_vector, true); } @@ -3250,19 +3461,33 @@ static int hclge_reset_prepare_down(struct hclge_dev *hdev) return ret; } -static int hclge_reset_prepare_wait(struct hclge_dev *hdev) +static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable) { -#define HCLGE_RESET_SYNC_TIME 100 + u32 reg_val; + reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG); + if (enable) + reg_val |= HCLGE_NIC_SW_RST_RDY; + else + reg_val &= ~HCLGE_NIC_SW_RST_RDY; + + hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val); +} + +static int hclge_reset_prepare_wait(struct hclge_dev *hdev) +{ u32 reg_val; int ret = 0; switch (hdev->reset_type) { case HNAE3_FUNC_RESET: - /* There is no mechanism for PF to know if VF has stopped IO - * for now, just wait 100 ms for VF to stop IO + /* to confirm whether all running VFs are ready + * before requesting PF reset */ - msleep(HCLGE_RESET_SYNC_TIME); + ret = hclge_func_reset_sync_vf(hdev); + if (ret) + return ret; + ret = hclge_func_reset_cmd(hdev, 0); if (ret) { dev_err(&hdev->pdev->dev, @@ -3279,15 +3504,19 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev) hdev->rst_stats.pf_rst_cnt++; break; case HNAE3_FLR_RESET: - /* There is no mechanism for PF to know if VF has stopped IO - * for now, just wait 100 ms for VF to stop IO + /* to confirm whether all running VFs are ready + * before requesting PF reset */ - msleep(HCLGE_RESET_SYNC_TIME); + ret = hclge_func_reset_sync_vf(hdev); + if (ret) + return ret; + set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); set_bit(HNAE3_FLR_DOWN, &hdev->flr_state); hdev->rst_stats.flr_rst_cnt++; break; case HNAE3_IMP_RESET: + hclge_handle_imp_error(hdev); reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val); @@ -3298,14 +3527,13 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev) /* inform hardware that preparatory work is done */ msleep(HCLGE_RESET_SYNC_TIME); - hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, - HCLGE_NIC_CMQ_ENABLE); + hclge_reset_handshake(hdev, true); dev_info(&hdev->pdev->dev, "prepare wait ok\n"); return ret; } -static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout) +static bool hclge_reset_err_handle(struct hclge_dev *hdev) { #define MAX_RESET_FAIL_CNT 5 @@ -3313,36 +3541,42 @@ dev_info(&hdev->pdev->dev, "Reset pending %lu\n", hdev->reset_pending); return true; - } else if ((hdev->reset_type != HNAE3_IMP_RESET) && - (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) & - BIT(HCLGE_IMP_RESET_BIT))) { + } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) & + HCLGE_RESET_INT_M) { dev_info(&hdev->pdev->dev, - "reset failed because IMP Reset is pending\n"); + "reset failed because new reset interrupt\n"); hclge_clear_reset_cause(hdev); return false; - } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) { - hdev->reset_fail_cnt++; - if (is_timeout) { - set_bit(hdev->reset_type, &hdev->reset_pending); - dev_info(&hdev->pdev->dev, - "re-schedule to wait for hw reset done\n"); - return true; - } - - dev_info(&hdev->pdev->dev, "Upgrade reset level\n"); - hclge_clear_reset_cause(hdev); - 
set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request); - mod_timer(&hdev->reset_timer, - jiffies + HCLGE_RESET_INTERVAL); - - return false; + } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) { + hdev->rst_stats.reset_fail_cnt++; + set_bit(hdev->reset_type, &hdev->reset_pending); + dev_info(&hdev->pdev->dev, + "re-schedule reset task(%d)\n", + hdev->rst_stats.reset_fail_cnt); + return true; } hclge_clear_reset_cause(hdev); + + /* recover the handshake status when reset fails */ + hclge_reset_handshake(hdev, true); + dev_err(&hdev->pdev->dev, "Reset fail!\n"); return false; } +static int hclge_set_rst_done(struct hclge_dev *hdev) +{ + struct hclge_pf_rst_done_cmd *req; + struct hclge_desc desc; + + req = (struct hclge_pf_rst_done_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false); + req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + static int hclge_reset_prepare_up(struct hclge_dev *hdev) { int ret = 0; @@ -3353,10 +3587,18 @@ case HNAE3_FLR_RESET: ret = hclge_set_all_vf_rst(hdev, false); break; + case HNAE3_GLOBAL_RESET: + /* fall through */ + case HNAE3_IMP_RESET: + ret = hclge_set_rst_done(hdev); + break; default: break; } + /* clear up the handshake status after re-initialization is done */ + hclge_reset_handshake(hdev, false); + return ret; } @@ -3382,7 +3624,7 @@ static int hclge_reset_stack(struct hclge_dev *hdev) static void hclge_reset(struct hclge_dev *hdev) { struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); - bool is_timeout = false; + enum hnae3_reset_type reset_level; int ret; /* Initialize ae_dev reset status as well, in case enet layer wants to @@ -3410,10 +3652,8 @@ static void hclge_reset(struct hclge_dev *hdev) if (ret) goto err_reset; - if (hclge_reset_wait(hdev)) { - is_timeout = true; + if (hclge_reset_wait(hdev)) goto err_reset; - } hdev->rst_stats.hw_reset_done_cnt++; @@ -3439,7 +3679,8 @@ static void hclge_reset(struct hclge_dev *hdev) /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1 * times */ - if (ret && hdev->reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1) + if (ret && + hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1) goto err_reset; rtnl_lock(); @@ -3455,17 +3696,25 @@ static void hclge_reset(struct hclge_dev *hdev) goto err_reset; hdev->last_reset_time = jiffies; - hdev->reset_fail_cnt = 0; + hdev->rst_stats.reset_fail_cnt = 0; hdev->rst_stats.reset_done_cnt++; ae_dev->reset_type = HNAE3_NONE_RESET; - del_timer(&hdev->reset_timer); + + /* if default_reset_request has a higher level reset request, + * it should be handled as soon as possible. since some errors + * need this kind of reset to be fixed. 
+ */ + reset_level = hclge_get_reset_level(ae_dev, + &hdev->default_reset_request); + if (reset_level != HNAE3_NONE_RESET) + set_bit(reset_level, &hdev->reset_request); return; err_reset_lock: rtnl_unlock(); err_reset: - if (hclge_reset_err_handle(hdev, is_timeout)) + if (hclge_reset_err_handle(hdev)) hclge_reset_task_schedule(hdev); } @@ -3493,16 +3742,17 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle) handle = &hdev->vport[0].nic; if (time_before(jiffies, (hdev->last_reset_time + HCLGE_RESET_INTERVAL))) { + mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL); return; - else if (hdev->default_reset_request) + } else if (hdev->default_reset_request) hdev->reset_level = hclge_get_reset_level(ae_dev, &hdev->default_reset_request); else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) hdev->reset_level = HNAE3_FUNC_RESET; - dev_info(&hdev->pdev->dev, "received reset event , reset type is %d", + dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n", hdev->reset_level); /* request reset & schedule reset task */ @@ -3525,6 +3775,12 @@ static void hclge_reset_timer(struct timer_list *t) { struct hclge_dev *hdev = from_timer(hdev, t, reset_timer); + /* if default_reset_request has no value, it means that this reset + * request has already been handled, so just return here + */ + if (!hdev->default_reset_request) + return; + dev_info(&hdev->pdev->dev, "triggering reset in reset timer\n"); hclge_reset_event(hdev->pdev, NULL); @@ -3606,7 +3862,9 @@ static void hclge_update_vport_alive(struct hclge_dev *hdev) static void hclge_service_task(struct work_struct *work) { struct hclge_dev *hdev = - container_of(work, struct hclge_dev, service_task); + container_of(work, struct hclge_dev, service_task.work); + + clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) { hclge_update_stats_for_all(hdev); @@ -3621,7 +3879,8 @@ hclge_rfs_filter_expire(hdev); hdev->fd_arfs_expire_timer = 0; } - hclge_service_complete(hdev); + + hclge_task_schedule(hdev, round_jiffies_relative(HZ)); } struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle) @@ -4197,8 +4456,8 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport, struct hclge_dev *hdev = vport->back; struct hnae3_ring_chain_node *node; struct hclge_desc desc; - struct hclge_ctrl_vector_chain_cmd *req - = (struct hclge_ctrl_vector_chain_cmd *)desc.data; + struct hclge_ctrl_vector_chain_cmd *req = + (struct hclge_ctrl_vector_chain_cmd *)desc.data; enum hclge_cmd_status status; enum hclge_opcode_type op; u16 tqp_type_and_id; @@ -5808,7 +6067,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id, return -ENOSPC; } - rule = kzalloc(sizeof(*rule), GFP_KERNEL); + rule = kzalloc(sizeof(*rule), GFP_ATOMIC); if (!rule) { spin_unlock_bh(&hdev->fd_rule_lock); @@ -5921,7 +6180,7 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable) bool clear; hdev->fd_en = enable; - clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE ? 
true : false; + clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE; if (!enable) hclge_del_all_fd_entries(handle, clear); else @@ -5959,6 +6218,89 @@ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) "mac enable fail, ret =%d.\n", ret); } +static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid, + u8 switch_param, u8 param_mask) +{ + struct hclge_mac_vlan_switch_cmd *req; + struct hclge_desc desc; + u32 func_id; + int ret; + + func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0); + req = (struct hclge_mac_vlan_switch_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM, + false); + req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL; + req->func_id = cpu_to_le32(func_id); + req->switch_param = switch_param; + req->param_mask = param_mask; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "set mac vlan switch parameter fail, ret = %d\n", ret); + return ret; +} + +static void hclge_phy_link_status_wait(struct hclge_dev *hdev, + int link_ret) +{ +#define HCLGE_PHY_LINK_STATUS_NUM 200 + + struct phy_device *phydev = hdev->hw.mac.phydev; + int i = 0; + int ret; + + do { + ret = phy_read_status(phydev); + if (ret) { + dev_err(&hdev->pdev->dev, + "phy update link status fail, ret = %d\n", ret); + return; + } + + if (phydev->link == link_ret) + break; + + msleep(HCLGE_LINK_STATUS_MS); + } while (++i < HCLGE_PHY_LINK_STATUS_NUM); +} + +static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret) +{ +#define HCLGE_MAC_LINK_STATUS_NUM 100 + + int i = 0; + int ret; + + do { + ret = hclge_get_mac_link_status(hdev); + if (ret < 0) + return ret; + else if (ret == link_ret) + return 0; + + msleep(HCLGE_LINK_STATUS_MS); + } while (++i < HCLGE_MAC_LINK_STATUS_NUM); + return -EBUSY; +} + +static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en, + bool is_phy) +{ +#define HCLGE_LINK_STATUS_DOWN 0 +#define HCLGE_LINK_STATUS_UP 1 + + int link_ret; + + link_ret = en ? 
HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN; + + if (is_phy) + hclge_phy_link_status_wait(hdev, link_ret); + + return hclge_mac_link_status_wait(hdev, link_ret); +} + static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en) { struct hclge_config_mac_mode_cmd *req; @@ -5995,20 +6337,14 @@ static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en) return ret; } -static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en, +static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en, enum hnae3_loop loop_mode) { #define HCLGE_SERDES_RETRY_MS 10 #define HCLGE_SERDES_RETRY_NUM 100 -#define HCLGE_MAC_LINK_STATUS_MS 10 -#define HCLGE_MAC_LINK_STATUS_NUM 100 -#define HCLGE_MAC_LINK_STATUS_DOWN 0 -#define HCLGE_MAC_LINK_STATUS_UP 1 - struct hclge_serdes_lb_cmd *req; struct hclge_desc desc; - int mac_link_ret = 0; int ret, i = 0; u8 loop_mode_b; @@ -6031,10 +6367,8 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en, if (en) { req->enable = loop_mode_b; req->mask = loop_mode_b; - mac_link_ret = HCLGE_MAC_LINK_STATUS_UP; } else { req->mask = loop_mode_b; - mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN; } ret = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -6064,21 +6398,84 @@ dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n"); return -EIO; } + return ret; +} + +static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en, + enum hnae3_loop loop_mode) +{ + int ret; + + ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode); + if (ret) + return ret; hclge_cfg_mac_mode(hdev, en); - i = 0; - do { - /* serdes Internal loopback, independent of the network cable.*/ - msleep(HCLGE_MAC_LINK_STATUS_MS); - ret = hclge_get_mac_link_status(hdev); - if (ret == mac_link_ret) - return 0; - } while (++i < HCLGE_MAC_LINK_STATUS_NUM); + ret = hclge_mac_phy_link_status_wait(hdev, en, false); + if (ret) + dev_err(&hdev->pdev->dev, + "serdes loopback config mac mode timeout\n"); - dev_err(&hdev->pdev->dev, "config mac mode timeout\n"); + return ret; +} - return -EBUSY; +static int hclge_enable_phy_loopback(struct hclge_dev *hdev, + struct phy_device *phydev) +{ + int ret; + + if (!phydev->suspended) { + ret = phy_suspend(phydev); + if (ret) + return ret; + } + + ret = phy_resume(phydev); + if (ret) + return ret; + + return phy_loopback(phydev, true); +} + +static int hclge_disable_phy_loopback(struct hclge_dev *hdev, + struct phy_device *phydev) +{ + int ret; + + ret = phy_loopback(phydev, false); + if (ret) + return ret; + + return phy_suspend(phydev); +} + +static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en) +{ + struct phy_device *phydev = hdev->hw.mac.phydev; + int ret; + + if (!phydev) + return -ENOTSUPP; + + if (en) + ret = hclge_enable_phy_loopback(hdev, phydev); + else + ret = hclge_disable_phy_loopback(hdev, phydev); + if (ret) { + dev_err(&hdev->pdev->dev, + "set phy loopback fail, ret = %d\n", ret); + return ret; + } + + hclge_cfg_mac_mode(hdev, en); + + ret = hclge_mac_phy_link_status_wait(hdev, en, true); + if (ret) + dev_err(&hdev->pdev->dev, + "phy loopback config mac mode timeout\n"); + + return ret; } static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id, @@ -6110,6 +6507,20 @@ static int hclge_set_loopback(struct hnae3_handle *handle, struct hclge_dev *hdev = vport->back; int i, ret; + /* Loopback can be enabled in three places: SSU, MAC, and serdes. 
By + * default, SSU loopback is enabled, so if the SMAC and the DMAC are + * the same, the packets are looped back in the SSU. If SSU loopback + * is disabled, packets can reach MAC even if SMAC is the same as DMAC. + */ + if (hdev->pdev->revision >= 0x21) { + u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B); + + ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param, + HCLGE_SWITCH_ALW_LPBK_MASK); + if (ret) + return ret; + } + switch (loop_mode) { case HNAE3_LOOP_APP: ret = hclge_set_app_loopback(hdev, en); @@ -6118,6 +6529,9 @@ static int hclge_set_loopback(struct hnae3_handle *handle, case HNAE3_LOOP_PARALLEL_SERDES: ret = hclge_set_serdes_loopback(hdev, en, loop_mode); break; + case HNAE3_LOOP_PHY: + ret = hclge_set_phy_loopback(hdev, en); + break; default: ret = -ENOTSUPP; dev_err(&hdev->pdev->dev, @@ -6138,6 +6552,22 @@ static int hclge_set_loopback(struct hnae3_handle *handle, return 0; } +static int hclge_set_default_loopback(struct hclge_dev *hdev) +{ + int ret; + + ret = hclge_set_app_loopback(hdev, false); + if (ret) + return ret; + + ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES); + if (ret) + return ret; + + return hclge_cfg_serdes_loopback(hdev, false, + HNAE3_LOOP_PARALLEL_SERDES); +} + static void hclge_reset_tqp_stats(struct hnae3_handle *handle) { struct hclge_vport *vport = hclge_get_vport(handle); @@ -6160,10 +6590,13 @@ static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable) struct hclge_dev *hdev = vport->back; if (enable) { - mod_timer(&hdev->service_timer, jiffies + HZ); + hclge_task_schedule(hdev, round_jiffies_relative(HZ)); } else { - del_timer_sync(&hdev->service_timer); - cancel_work_sync(&hdev->service_task); + /* Set the DOWN flag here to disable the service to be + * scheduled again + */ + set_bit(HCLGE_STATE_DOWN, &hdev->state); + cancel_delayed_work_sync(&hdev->service_task); clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); } } @@ -6202,12 +6635,15 @@ static void hclge_ae_stop(struct hnae3_handle *handle) if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && hdev->reset_type != HNAE3_FUNC_RESET) { hclge_mac_stop_phy(hdev); + hclge_update_link_status(hdev); return; } for (i = 0; i < handle->kinfo.num_tqps; i++) hclge_reset_tqp(handle, i); + hclge_config_mac_tnl_int(hdev, false); + /* Mac disable */ hclge_cfg_mac_mode(hdev, false); @@ -6249,7 +6685,6 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, enum hclge_mac_vlan_tbl_opcode op) { struct hclge_dev *hdev = vport->back; - int return_status = -EIO; if (cmdq_resp) { dev_err(&hdev->pdev->dev, @@ -6260,52 +6695,53 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, if (op == HCLGE_MAC_VLAN_ADD) { if ((!resp_code) || (resp_code == 1)) { - return_status = 0; + return 0; } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) { - return_status = -ENOSPC; dev_err(&hdev->pdev->dev, "add mac addr failed for uc_overflow.\n"); + return -ENOSPC; } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) { - return_status = -ENOSPC; dev_err(&hdev->pdev->dev, "add mac addr failed for mc_overflow.\n"); - } else { - dev_err(&hdev->pdev->dev, - "add mac addr failed for undefined, code=%d.\n", - resp_code); + return -ENOSPC; } + + dev_err(&hdev->pdev->dev, + "add mac addr failed for undefined, code=%u.\n", + resp_code); + return -EIO; } else if (op == HCLGE_MAC_VLAN_REMOVE) { if (!resp_code) { - return_status = 0; + return 0; } else if (resp_code == 1) { - return_status = -ENOENT; dev_dbg(&hdev->pdev->dev, "remove mac addr failed for 
miss.\n"); - } else { - dev_err(&hdev->pdev->dev, - "remove mac addr failed for undefined, code=%d.\n", - resp_code); + return -ENOENT; } + + dev_err(&hdev->pdev->dev, + "remove mac addr failed for undefined, code=%u.\n", + resp_code); + return -EIO; } else if (op == HCLGE_MAC_VLAN_LKUP) { if (!resp_code) { - return_status = 0; + return 0; } else if (resp_code == 1) { - return_status = -ENOENT; dev_dbg(&hdev->pdev->dev, "lookup mac addr failed for miss.\n"); - } else { - dev_err(&hdev->pdev->dev, - "lookup mac addr failed for undefined, code=%d.\n", - resp_code); + return -ENOENT; } - } else { - return_status = -EINVAL; + dev_err(&hdev->pdev->dev, - "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n", - op); + "lookup mac addr failed for undefined, code=%u.\n", + resp_code); + return -EIO; } - return return_status; + dev_err(&hdev->pdev->dev, + "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op); + + return -EINVAL; } static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr) @@ -7019,7 +7455,7 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p, is_broadcast_ether_addr(new_addr) || is_multicast_ether_addr(new_addr)) { dev_err(&hdev->pdev->dev, - "Change uc mac err! invalid mac:%p.\n", + "Change uc mac err! invalid mac:%pM.\n", new_addr); return -EINVAL; } @@ -7124,7 +7560,7 @@ static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable) } static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid, - bool is_kill, u16 vlan, u8 qos, + bool is_kill, u16 vlan, __be16 proto) { #define HCLGE_MAX_VF_BYTES 16 @@ -7235,7 +7671,7 @@ static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto, } static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, - u16 vport_id, u16 vlan_id, u8 qos, + u16 vport_id, u16 vlan_id, bool is_kill) { u16 vport_idx, vport_num = 0; @@ -7245,7 +7681,7 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, return 0; ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id, - 0, proto); + proto); if (ret) { dev_err(&hdev->pdev->dev, "Set %d vport vlan filter config fail, ret =%d.\n", @@ -7289,6 +7725,7 @@ static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) struct hclge_vport_vtag_tx_cfg_cmd *req; struct hclge_dev *hdev = vport->back; struct hclge_desc desc; + u16 bmap_index; int status; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false); @@ -7311,8 +7748,10 @@ static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0); req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; - req->vf_bitmap[req->vf_offset] = - 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); + bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD / + HCLGE_VF_NUM_PER_BYTE; + req->vf_bitmap[bmap_index] = + 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); status = hclge_cmd_send(&hdev->hw, &desc, 1); if (status) @@ -7329,6 +7768,7 @@ static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport) struct hclge_vport_vtag_rx_cfg_cmd *req; struct hclge_dev *hdev = vport->back; struct hclge_desc desc; + u16 bmap_index; int status; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false); @@ -7344,8 +7784,10 @@ static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport) vcfg->vlan2_vlan_prionly ? 
1 : 0); req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; - req->vf_bitmap[req->vf_offset] = - 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); + bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD / + HCLGE_VF_NUM_PER_BYTE; + req->vf_bitmap[bmap_index] = + 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); status = hclge_cmd_send(&hdev->hw, &desc, 1); if (status) @@ -7532,7 +7974,7 @@ static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport) if (!vlan->hd_tbl_status) { ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), vport->vport_id, - vlan->vlan_id, 0, false); + vlan->vlan_id, false); if (ret) { dev_err(&hdev->pdev->dev, "restore vport vlan list failed, ret=%d\n", @@ -7558,7 +8000,7 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), vport->vport_id, - vlan_id, 0, + vlan_id, true); list_del(&vlan->node); @@ -7578,7 +8020,7 @@ void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list) hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), vport->vport_id, - vlan->vlan_id, 0, + vlan->vlan_id, true); vlan->hd_tbl_status = false; @@ -7611,7 +8053,7 @@ static void hclge_restore_vlan_table(struct hnae3_handle *handle) struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_vport_vlan_cfg *vlan, *tmp; struct hclge_dev *hdev = vport->back; - u16 vlan_proto, qos; + u16 vlan_proto; u16 state, vlan_id; int i; @@ -7620,12 +8062,11 @@ static void hclge_restore_vlan_table(struct hnae3_handle *handle) vport = &hdev->vport[i]; vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto; vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag; - qos = vport->port_base_vlan_cfg.vlan_info.qos; state = vport->port_base_vlan_cfg.state; if (state != HNAE3_PORT_BASE_VLAN_DISABLE) { hclge_set_vlan_filter_hw(hdev, htons(vlan_proto), - vport->vport_id, vlan_id, qos, + vport->vport_id, vlan_id, false); continue; } @@ -7635,7 +8076,7 @@ static void hclge_restore_vlan_table(struct hnae3_handle *handle) hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), vport->vport_id, - vlan->vlan_id, 0, + vlan->vlan_id, false); } } @@ -7675,12 +8116,12 @@ static int hclge_update_vlan_filter_entries(struct hclge_vport *vport, htons(new_info->vlan_proto), vport->vport_id, new_info->vlan_tag, - new_info->qos, false); + false); } ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto), vport->vport_id, old_info->vlan_tag, - old_info->qos, true); + true); if (ret) return ret; @@ -7707,7 +8148,7 @@ int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state, htons(vlan_info->vlan_proto), vport->vport_id, vlan_info->vlan_tag, - vlan_info->qos, false); + false); if (ret) return ret; @@ -7716,7 +8157,7 @@ int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state, htons(old_vlan_info->vlan_proto), vport->vport_id, old_vlan_info->vlan_tag, - old_vlan_info->qos, true); + true); if (ret) return ret; @@ -7829,7 +8270,7 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, return -EBUSY; } - /* When port base vlan enabled, we use port base vlan as the vlan + /* when port base vlan enabled, we use port base vlan as the vlan * filter entry. In this case, we don't update vlan filter table * when user add new vlan or remove exist vlan, just update the vport * vlan list. 
The vlan id in vlan list will be written in vlan filter @@ -7837,7 +8278,7 @@ */ if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) { ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, - vlan_id, 0, is_kill); + vlan_id, is_kill); writen_to_tbl = true; } @@ -7848,7 +8289,7 @@ hclge_add_vport_vlan_table(vport, vlan_id, writen_to_tbl); } else if (is_kill) { - /* When remove hw vlan filter failed, record the vlan id, + /* when remove hw vlan filter failed, record the vlan id, * and try to remove it from hw later, to be consistent * with stack */ @@ -7873,7 +8314,7 @@ static void hclge_sync_vlan_filter(struct hclge_dev *hdev) while (vlan_id != VLAN_N_VID) { ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), vport->vport_id, vlan_id, - 0, true); + true); if (ret && ret != -EINVAL) return; @@ -8044,11 +8485,12 @@ int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) } while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { - /* Wait for tqp hw reset */ - msleep(20); reset_status = hclge_get_reset_status(hdev, queue_gid); if (reset_status) break; + + /* Wait for tqp hw reset */ + usleep_range(1000, 1200); } if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { @@ -8082,11 +8524,12 @@ void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id) } while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { - /* Wait for tqp hw reset */ - msleep(20); reset_status = hclge_get_reset_status(hdev, queue_gid); if (reset_status) break; + + /* Wait for tqp hw reset */ + usleep_range(1000, 1200); } if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { @@ -8122,28 +8565,15 @@ static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) { int ret; - if (rx_en && tx_en) - hdev->fc_mode_last_time = HCLGE_FC_FULL; - else if (rx_en && !tx_en) - hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE; - else if (!rx_en && tx_en) - hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE; - else - hdev->fc_mode_last_time = HCLGE_FC_NONE; - if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) return 0; ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en); - if (ret) { - dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n", - ret); - return ret; - } - - hdev->tm_info.fc_mode = hdev->fc_mode_last_time; + if (ret) + dev_err(&hdev->pdev->dev, + "configure pauseparam error, ret = %d.\n", ret); - return 0; + return ret; } int hclge_cfg_flowctrl(struct hclge_dev *hdev) @@ -8208,6 +8638,21 @@ static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg, } } +static void hclge_record_user_pauseparam(struct hclge_dev *hdev, + u32 rx_en, u32 tx_en) +{ + if (rx_en && tx_en) + hdev->fc_mode_last_time = HCLGE_FC_FULL; + else if (rx_en && !tx_en) + hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE; + else if (!rx_en && tx_en) + hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE; + else + hdev->fc_mode_last_time = HCLGE_FC_NONE; + + hdev->tm_info.fc_mode = hdev->fc_mode_last_time; +} + static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg, u32 rx_en, u32 tx_en) { @@ -8233,6 +8678,8 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg, hclge_set_flowctrl_adv(hdev, rx_en, tx_en); + hclge_record_user_pauseparam(hdev, rx_en, tx_en); + if (!auto_neg) return hclge_cfg_pauseparam(hdev, rx_en, tx_en); @@ -8481,7 +8928,7 @@ static int hclge_init_client_instance(struct hnae3_client *client, } } - return ret; + return 0; clear_nic: 
hdev->nic_client = NULL; @@ -8602,12 +9049,10 @@ static void hclge_state_uninit(struct hclge_dev *hdev) set_bit(HCLGE_STATE_DOWN, &hdev->state); set_bit(HCLGE_STATE_REMOVING, &hdev->state); - if (hdev->service_timer.function) - del_timer_sync(&hdev->service_timer); if (hdev->reset_timer.function) del_timer_sync(&hdev->reset_timer); - if (hdev->service_task.func) - cancel_work_sync(&hdev->service_task); + if (hdev->service_task.work.func) + cancel_delayed_work_sync(&hdev->service_task); if (hdev->rst_service_task.func) cancel_work_sync(&hdev->rst_service_task); if (hdev->mbx_service_task.func) @@ -8812,12 +9257,16 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) hclge_dcb_ops_set(hdev); - timer_setup(&hdev->service_timer, hclge_service_timer, 0); timer_setup(&hdev->reset_timer, hclge_reset_timer, 0); - INIT_WORK(&hdev->service_task, hclge_service_task); + INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task); INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task); INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task); + /* Setup affinity after service timer setup because add_timer_on + * is called in affinity notify. + */ + hclge_misc_affinity_setup(hdev); + hclge_clear_all_event_cause(hdev); hclge_clear_resetting_state(hdev); @@ -8842,7 +9291,9 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) hclge_state_init(hdev); hdev->last_reset_time = jiffies; - pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME); + dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n", + HCLGE_DRIVER_NAME); + return 0; err_mdiobus_unreg: @@ -8979,6 +9430,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) struct hclge_dev *hdev = ae_dev->priv; struct hclge_mac *mac = &hdev->hw.mac; + hclge_misc_affinity_teardown(hdev); hclge_state_uninit(hdev); if (mac->phydev) @@ -9238,106 +9690,314 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num, } #define MAX_SEPARATE_NUM 4 -#define SEPARATOR_VALUE 0xFFFFFFFF +#define SEPARATOR_VALUE 0xFDFCFBFA #define REG_NUM_PER_LINE 4 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32)) +#define REG_SEPARATOR_LINE 1 +#define REG_NUM_REMAIN_MASK 3 +#define BD_LIST_MAX_NUM 30 -static int hclge_get_regs_len(struct hnae3_handle *handle) +int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc) { - int cmdq_lines, common_lines, ring_lines, tqp_intr_lines; - struct hnae3_knic_private_info *kinfo = &handle->kinfo; - struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; - u32 regs_num_32_bit, regs_num_64_bit; + /*prepare 4 commands to query DFX BD number*/ + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true); + desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true); + desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true); + + return hclge_cmd_send(&hdev->hw, desc, 4); +} + +static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev, + int *bd_num_list, + u32 type_num) +{ +#define HCLGE_DFX_REG_BD_NUM 4 + + u32 entries_per_desc, desc_index, index, offset, i; + struct hclge_desc desc[HCLGE_DFX_REG_BD_NUM]; int ret; - ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit); + ret = hclge_query_bd_num_cmd_send(hdev, desc); if (ret) { dev_err(&hdev->pdev->dev, - "Get 
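[Note] These hunks fold the old service_timer/service_task pair into a single delayed_work, so one object carries both the delay and the execution context, and teardown shrinks to a single cancel_delayed_work_sync(). A minimal sketch of the idiom, with a hypothetical my_dev:

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

struct my_dev {
        struct delayed_work service_task;
};

static void my_service_task(struct work_struct *work)
{
        struct my_dev *d = container_of(work, struct my_dev,
                                        service_task.work);

        /* ... periodic service work ... */

        /* self re-arming replaces the standalone struct timer_list */
        schedule_delayed_work(&d->service_task,
                              round_jiffies_relative(HZ));
}

/* init:     INIT_DELAYED_WORK(&d->service_task, my_service_task);
 *           schedule_delayed_work(&d->service_task, 0);
 * teardown: cancel_delayed_work_sync(&d->service_task);
 *           (previously del_timer_sync() plus cancel_work_sync()) */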
register number failed, ret = %d.\n", ret); - return -EOPNOTSUPP; + "Get dfx bd num fail, status is %d.\n", ret); + return ret; } - cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1; - common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1; - ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1; - tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1; + entries_per_desc = ARRAY_SIZE(desc[0].data); + for (i = 0; i < type_num; i++) { + offset = hclge_dfx_bd_offset_list[i]; + index = offset % entries_per_desc; + desc_index = offset / entries_per_desc; + bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]); + } - return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps + - tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE + - regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64); + return ret; } -static void hclge_get_regs(struct hnae3_handle *handle, u32 *version, - void *data) +static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev, + struct hclge_desc *desc_src, int bd_num, + enum hclge_opcode_type cmd) { - struct hnae3_knic_private_info *kinfo = &handle->kinfo; - struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; - u32 regs_num_32_bit, regs_num_64_bit; - int i, j, reg_um, separator_num; + struct hclge_desc *desc = desc_src; + int i, ret; + + hclge_cmd_setup_basic_desc(desc, cmd, true); + for (i = 0; i < bd_num - 1; i++) { + desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + desc++; + hclge_cmd_setup_basic_desc(desc, cmd, true); + } + + desc = desc_src; + ret = hclge_cmd_send(&hdev->hw, desc, bd_num); + if (ret) + dev_err(&hdev->pdev->dev, + "Query dfx reg cmd(0x%x) send fail, status is %d.\n", + cmd, ret); + + return ret; +} + +static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num, + void *data) +{ + int entries_per_desc, reg_num, separator_num, desc_index, index, i; + struct hclge_desc *desc = desc_src; u32 *reg = data; + + entries_per_desc = ARRAY_SIZE(desc->data); + reg_num = entries_per_desc * bd_num; + separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK); + for (i = 0; i < reg_num; i++) { + index = i % entries_per_desc; + desc_index = i / entries_per_desc; + *reg++ = le32_to_cpu(desc[desc_index].data[index]); + } + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + + return reg_num + separator_num; +} + +static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len) +{ + u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list); + int data_len_per_desc, data_len, bd_num, i; + int bd_num_list[BD_LIST_MAX_NUM]; int ret; - *version = hdev->fw_version; + ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get dfx reg bd num fail, status is %d.\n", ret); + return ret; + } - ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit); + data_len_per_desc = FIELD_SIZEOF(struct hclge_desc, data); + *len = 0; + for (i = 0; i < dfx_reg_type_num; i++) { + bd_num = bd_num_list[i]; + data_len = data_len_per_desc * bd_num; + *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE; + } + + return ret; +} + +static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data) +{ + u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list); + int bd_num, bd_num_max, buf_len, i; + int bd_num_list[BD_LIST_MAX_NUM]; + struct hclge_desc *desc_src; + u32 *reg = data; + int ret; + + ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num); if (ret) { 
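[Note] hclge_dfx_reg_fetch_data() above pads each dump section with separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK) filler words. Because the mask is 3, the padded total is always a multiple of four and at least one SEPARATOR_VALUE is always emitted (an exact multiple of four gets a whole separator line, matching the REG_SEPARATOR_LINE accounting). The same arithmetic as a small userspace check:

#include <stdio.h>

#define REG_NUM_PER_LINE    4
#define REG_NUM_REMAIN_MASK 3

int main(void)
{
        for (int reg_num = 4; reg_num <= 8; reg_num++) {
                int sep = REG_NUM_PER_LINE -
                          (reg_num & REG_NUM_REMAIN_MASK);

                /* total is a multiple of 4 and sep is never 0 */
                printf("reg_num=%d separators=%d total=%d\n",
                       reg_num, sep, reg_num + sep);
        }

        return 0;
}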
dev_err(&hdev->pdev->dev, - "Get register number failed, ret = %d.\n", ret); - return; + "Get dfx reg bd num fail, status is %d.\n", ret); + return ret; } + bd_num_max = bd_num_list[0]; + for (i = 1; i < dfx_reg_type_num; i++) + bd_num_max = max_t(int, bd_num_max, bd_num_list[i]); + + buf_len = sizeof(*desc_src) * bd_num_max; + desc_src = kzalloc(buf_len, GFP_KERNEL); + if (!desc_src) { + dev_err(&hdev->pdev->dev, "%s kzalloc failed\n", __func__); + return -ENOMEM; + } + + for (i = 0; i < dfx_reg_type_num; i++) { + bd_num = bd_num_list[i]; + ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num, + hclge_dfx_reg_opcode_list[i]); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get dfx reg fail, status is %d.\n", ret); + break; + } + + reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg); + } + + kfree(desc_src); + return ret; +} + +static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data, + struct hnae3_knic_private_info *kinfo) +{ +#define HCLGE_RING_REG_OFFSET 0x200 +#define HCLGE_RING_INT_REG_OFFSET 0x4 + + int i, j, reg_num, separator_num; + int data_num_sum; + u32 *reg = data; + /* fetching per-PF registers valus from PF PCIe register space */ - reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32); - separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; - for (i = 0; i < reg_um; i++) + reg_num = ARRAY_SIZE(cmdq_reg_addr_list); + separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK); + for (i = 0; i < reg_num; i++) *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); for (i = 0; i < separator_num; i++) *reg++ = SEPARATOR_VALUE; + data_num_sum = reg_num + separator_num; - reg_um = sizeof(common_reg_addr_list) / sizeof(u32); - separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; - for (i = 0; i < reg_um; i++) + reg_num = ARRAY_SIZE(common_reg_addr_list); + separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK); + for (i = 0; i < reg_num; i++) *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]); for (i = 0; i < separator_num; i++) *reg++ = SEPARATOR_VALUE; + data_num_sum += reg_num + separator_num; - reg_um = sizeof(ring_reg_addr_list) / sizeof(u32); - separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; + reg_num = ARRAY_SIZE(ring_reg_addr_list); + separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK); for (j = 0; j < kinfo->num_tqps; j++) { - for (i = 0; i < reg_um; i++) + for (i = 0; i < reg_num; i++) *reg++ = hclge_read_dev(&hdev->hw, ring_reg_addr_list[i] + - 0x200 * j); + HCLGE_RING_REG_OFFSET * j); for (i = 0; i < separator_num; i++) *reg++ = SEPARATOR_VALUE; } + data_num_sum += (reg_num + separator_num) * kinfo->num_tqps; - reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32); - separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; + reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list); + separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK); for (j = 0; j < hdev->num_msi_used - 1; j++) { - for (i = 0; i < reg_um; i++) + for (i = 0; i < reg_num; i++) *reg++ = hclge_read_dev(&hdev->hw, tqp_intr_reg_addr_list[i] + - 4 * j); + HCLGE_RING_INT_REG_OFFSET * j); for (i = 0; i < separator_num; i++) *reg++ = SEPARATOR_VALUE; } + data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1); + + return data_num_sum; +} + +static int hclge_get_regs_len(struct hnae3_handle *handle) +{ + int cmdq_lines, common_lines, ring_lines, tqp_intr_lines; + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + 
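[Note] hclge_fetch_pf_reg() below also replaces the open-coded sizeof(list) / sizeof(u32) with ARRAY_SIZE(), which derives the count from the array's own element type and refuses to compile when handed a plain pointer. In miniature, with a demo list:

#include <linux/kernel.h>   /* ARRAY_SIZE() */
#include <linux/types.h>

static const u32 demo_reg_list[] = { 0x27000, 0x27004, 0x27008 };

static unsigned int demo_reg_count(void)
{
        /* 3, and still 3 if the element type ever changes;
         * sizeof(demo_reg_list) / sizeof(u32) would silently rot */
        return ARRAY_SIZE(demo_reg_list);
}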
int regs_num_32_bit, regs_num_64_bit, dfx_regs_len; + int regs_lines_32_bit, regs_lines_64_bit; + int ret; + + ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get register number failed, ret = %d.\n", ret); + return ret; + } + + ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get dfx reg len failed, ret = %d.\n", ret); + return ret; + } + + cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + + REG_SEPARATOR_LINE; + common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + + REG_SEPARATOR_LINE; + ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + + REG_SEPARATOR_LINE; + tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + + REG_SEPARATOR_LINE; + regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE + + REG_SEPARATOR_LINE; + regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE + + REG_SEPARATOR_LINE; + + return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps + + tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit + + regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len; +} + +static void hclge_get_regs(struct hnae3_handle *handle, u32 *version, + void *data) +{ + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u32 regs_num_32_bit, regs_num_64_bit; + int i, reg_num, separator_num, ret; + u32 *reg = data; + + *version = hdev->fw_version; + + ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get register number failed, ret = %d.\n", ret); + return; + } + + reg += hclge_fetch_pf_reg(hdev, reg, kinfo); - /* fetching PF common registers values from firmware */ ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg); if (ret) { dev_err(&hdev->pdev->dev, "Get 32 bit register failed, ret = %d.\n", ret); return; } + reg_num = regs_num_32_bit; + reg += reg_num; + separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; - reg += regs_num_32_bit; ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg); - if (ret) + if (ret) { dev_err(&hdev->pdev->dev, "Get 64 bit register failed, ret = %d.\n", ret); + return; + } + reg_num = regs_num_64_bit * 2; + reg += reg_num; + separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + + ret = hclge_get_dfx_reg(hdev, reg); + if (ret) + dev_err(&hdev->pdev->dev, + "Get dfx register failed, ret = %d.\n", ret); } static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status) @@ -9452,7 +10112,7 @@ static const struct hnae3_ae_ops hclge_ops = { .set_mtu = hclge_set_mtu, .reset_queue = hclge_reset_tqp, .get_stats = hclge_get_stats, - .get_mac_pause_stats = hclge_get_mac_pause_stat, + .get_mac_stats = hclge_get_mac_stat, .update_stats = hclge_update_stats, .get_strings = hclge_get_strings, .get_sset_count = hclge_get_sset_count, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 6a12285f4c76..3e9574a9e22d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -119,7 +119,7 @@ #define HCLGE_DEFAULT_UMV_SPACE_PER_PF \ (HCLGE_UMV_TBL_SIZE / HCLGE_MAX_PF_NUM) -#define HCLGE_TQP_RESET_TRY_TIMES 10 
+#define HCLGE_TQP_RESET_TRY_TIMES 200 #define HCLGE_PHY_PAGE_MDIX 0 #define HCLGE_PHY_PAGE_COPPER 0 @@ -148,6 +148,8 @@ enum HLCGE_PORT_TYPE { NETWORK_PORT }; +#define PF_VPORT_ID 0 + #define HCLGE_PF_ID_S 0 #define HCLGE_PF_ID_M GENMASK(2, 0) #define HCLGE_VF_ID_S 3 @@ -164,6 +166,7 @@ enum HLCGE_PORT_TYPE { #define HCLGE_GLOBAL_RESET_BIT 0 #define HCLGE_CORE_RESET_BIT 1 #define HCLGE_IMP_RESET_BIT 2 +#define HCLGE_RESET_INT_M GENMASK(2, 0) #define HCLGE_FUN_RST_ING 0x20C00 #define HCLGE_FUN_RST_ING_B 0 @@ -178,6 +181,8 @@ enum HLCGE_PORT_TYPE { #define HCLGE_VECTOR0_RX_CMDQ_INT_B 1 #define HCLGE_VECTOR0_IMP_RESET_INT_B 1 +#define HCLGE_VECTOR0_IMP_CMDQ_ERR_B 4U +#define HCLGE_VECTOR0_IMP_RD_POISON_B 5U #define HCLGE_MAC_DEFAULT_FRAME \ (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN) @@ -302,6 +307,13 @@ enum hclge_fc_mode { HCLGE_FC_DEFAULT }; +enum hclge_link_fail_code { + HCLGE_LF_NORMAL, + HCLGE_LF_REF_CLOCK_LOST, + HCLGE_LF_XSFP_TX_DISABLE, + HCLGE_LF_XSFP_ABSENT, +}; + #define HCLGE_PG_NUM 4 #define HCLGE_SCH_MODE_SP 0 #define HCLGE_SCH_MODE_DWRR 1 @@ -532,50 +544,6 @@ struct key_info { u8 key_length; /* use bit as unit */ }; -static const struct key_info meta_data_key_info[] = { - { PACKET_TYPE_ID, 6}, - { IP_FRAGEMENT, 1}, - { ROCE_TYPE, 1}, - { NEXT_KEY, 5}, - { VLAN_NUMBER, 2}, - { SRC_VPORT, 12}, - { DST_VPORT, 12}, - { TUNNEL_PACKET, 1}, -}; - -static const struct key_info tuple_key_info[] = { - { OUTER_DST_MAC, 48}, - { OUTER_SRC_MAC, 48}, - { OUTER_VLAN_TAG_FST, 16}, - { OUTER_VLAN_TAG_SEC, 16}, - { OUTER_ETH_TYPE, 16}, - { OUTER_L2_RSV, 16}, - { OUTER_IP_TOS, 8}, - { OUTER_IP_PROTO, 8}, - { OUTER_SRC_IP, 32}, - { OUTER_DST_IP, 32}, - { OUTER_L3_RSV, 16}, - { OUTER_SRC_PORT, 16}, - { OUTER_DST_PORT, 16}, - { OUTER_L4_RSV, 32}, - { OUTER_TUN_VNI, 24}, - { OUTER_TUN_FLOW_ID, 8}, - { INNER_DST_MAC, 48}, - { INNER_SRC_MAC, 48}, - { INNER_VLAN_TAG_FST, 16}, - { INNER_VLAN_TAG_SEC, 16}, - { INNER_ETH_TYPE, 16}, - { INNER_L2_RSV, 16}, - { INNER_IP_TOS, 8}, - { INNER_IP_PROTO, 8}, - { INNER_SRC_IP, 32}, - { INNER_DST_IP, 32}, - { INNER_L3_RSV, 16}, - { INNER_SRC_PORT, 16}, - { INNER_DST_PORT, 16}, - { INNER_L4_RSV, 32}, -}; - #define MAX_KEY_LENGTH 400 #define MAX_KEY_DWORDS DIV_ROUND_UP(MAX_KEY_LENGTH / 8, 4) #define MAX_KEY_BYTES (MAX_KEY_DWORDS * 4) @@ -691,6 +659,7 @@ struct hclge_rst_stats { u32 global_rst_cnt; /* the number of GLOBAL */ u32 imp_rst_cnt; /* the number of IMP reset */ u32 reset_cnt; /* the number of reset */ + u32 reset_fail_cnt; /* the number of reset fail */ }; /* time and register status when mac tunnel interruption occur */ @@ -757,7 +726,6 @@ struct hclge_dev { unsigned long reset_request; /* reset has been requested */ unsigned long reset_pending; /* client rst is pending to be served */ struct hclge_rst_stats rst_stats; - u32 reset_fail_cnt; u32 fw_version; u16 num_vmdq_vport; /* Num vmdq vport this PF has set up */ u16 num_tqps; /* Num task queue pairs of this PF */ @@ -806,9 +774,8 @@ struct hclge_dev { u16 adminq_work_limit; /* Num of admin receive queue desc to process */ unsigned long service_timer_period; unsigned long service_timer_previous; - struct timer_list service_timer; struct timer_list reset_timer; - struct work_struct service_task; + struct delayed_work service_task; struct work_struct rst_service_task; struct work_struct mbx_service_task; @@ -864,6 +831,10 @@ struct hclge_dev { DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats, HCLGE_MAC_TNL_LOG_SIZE); + + /* affinity mask and notify for misc interrupt */ + cpumask_t 
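[Note] The new HCLGE_RESET_INT_M above groups the global/core/IMP reset bits (bits 2:0) into one GENMASK(), so "is any reset pending?" becomes a single masked test instead of three BIT() checks. A sketch using demo stand-ins for those defines:

#include <linux/bits.h>     /* GENMASK(), BIT() */
#include <linux/types.h>

#define DEMO_GLOBAL_RESET_BIT  0
#define DEMO_CORE_RESET_BIT    1
#define DEMO_IMP_RESET_BIT     2
#define DEMO_RESET_INT_M       GENMASK(2, 0)

static bool demo_reset_pending(u32 misc_int_status)
{
        /* one test covers all three sources; individual bits can
         * still be inspected after, e.g. BIT(DEMO_IMP_RESET_BIT) */
        return misc_int_status & DEMO_RESET_INT_M;
}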
affinity_mask; + struct irq_affinity_notify affinity_notify; }; /* VPort level vlan tag configuration for TX direction */ @@ -990,7 +961,6 @@ int hclge_buffer_alloc(struct hclge_dev *hdev); int hclge_rss_init_hw(struct hclge_dev *hdev); void hclge_rss_indir_init_cfg(struct hclge_dev *hdev); -int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport); void hclge_mbx_handler(struct hclge_dev *hdev); int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id); void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id); @@ -1018,4 +988,9 @@ int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state, int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid, u16 state, u16 vlan_tag, u16 qos, u16 vlan_proto); +void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time); +int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, + struct hclge_desc *desc); +void hclge_report_hw_error(struct hclge_dev *hdev, + enum hnae3_hw_error_type type); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index 690b9990215c..f5da28a60d00 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -479,7 +479,7 @@ static void hclge_mbx_reset_vf_queue(struct hclge_vport *vport, hclge_reset_vf_queue(vport, queue_id); - /* send response msg to VF after queue reset complete*/ + /* send response msg to VF after queue reset complete */ hclge_gen_resp_to_vf(vport, mbx_req, 0, NULL, 0); } @@ -545,6 +545,36 @@ static int hclge_get_rss_key(struct hclge_vport *vport, HCLGE_RSS_MBX_RESP_LEN); } +static void hclge_link_fail_parse(struct hclge_dev *hdev, u8 link_fail_code) +{ + switch (link_fail_code) { + case HCLGE_LF_REF_CLOCK_LOST: + dev_warn(&hdev->pdev->dev, "Reference clock lost!\n"); + break; + case HCLGE_LF_XSFP_TX_DISABLE: + dev_warn(&hdev->pdev->dev, "SFP tx is disabled!\n"); + break; + case HCLGE_LF_XSFP_ABSENT: + dev_warn(&hdev->pdev->dev, "SFP is absent!\n"); + break; + default: + break; + } +} + +static void hclge_handle_link_change_event(struct hclge_dev *hdev, + struct hclge_mbx_vf_to_pf_cmd *req) +{ +#define LINK_STATUS_OFFSET 1 +#define LINK_FAIL_CODE_OFFSET 2 + + clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); + hclge_task_schedule(hdev, 0); + + if (!req->msg[LINK_STATUS_OFFSET]) + hclge_link_fail_parse(hdev, req->msg[LINK_FAIL_CODE_OFFSET]); +} + static bool hclge_cmd_crq_empty(struct hclge_hw *hw) { u32 tail = hclge_read_dev(hw, HCLGE_NIC_CRQ_TAIL_REG); @@ -552,6 +582,15 @@ static bool hclge_cmd_crq_empty(struct hclge_hw *hw) return tail == hw->cmq.crq.next_to_use; } +static void hclge_handle_ncsi_error(struct hclge_dev *hdev) +{ + struct hnae3_ae_dev *ae_dev = hdev->ae_dev; + + ae_dev->ops->set_default_reset_request(ae_dev, HNAE3_GLOBAL_RESET); + dev_warn(&hdev->pdev->dev, "requesting reset due to NCSI error\n"); + ae_dev->ops->reset_event(hdev->pdev, NULL); +} + void hclge_mbx_handler(struct hclge_dev *hdev) { struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq; @@ -707,6 +746,12 @@ void hclge_mbx_handler(struct hclge_dev *hdev) "PF fail(%d) to media type for VF\n", ret); break; + case HCLGE_MBX_PUSH_LINK_STATUS: + hclge_handle_link_change_event(hdev, req); + break; + case HCLGE_MBX_NCSI_ERROR: + hclge_handle_ncsi_error(hdev); + break; default: dev_err(&hdev->pdev->dev, "un-supported mailbox message, code = %d\n", diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c 
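[Note] The cpumask_t/irq_affinity_notify pair just added to struct hclge_dev backs the hclge_misc_affinity_setup() call introduced in the init path earlier in this diff; its body is not shown here, but setups of this kind normally register an affinity notifier on the misc vector. A sketch under that assumption, with hypothetical my_dev naming:

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>

struct my_dev {
        cpumask_t affinity_mask;
        struct irq_affinity_notify affinity_notify;
};

static void my_affinity_notify(struct irq_affinity_notify *notify,
                               const cpumask_t *mask)
{
        struct my_dev *d = container_of(notify, struct my_dev,
                                        affinity_notify);

        cpumask_copy(&d->affinity_mask, mask);
        /* re-target timers/work to the new CPU set here; the init
         * comment above orders this after timer setup because
         * add_timer_on() runs from this callback */
}

static void my_affinity_release(struct kref *ref)
{
        /* nothing dynamically allocated in this sketch */
}

static void my_affinity_setup(struct my_dev *d, unsigned int irq)
{
        d->affinity_notify.notify = my_affinity_notify;
        d->affinity_notify.release = my_affinity_release;
        irq_set_affinity_notifier(irq, &d->affinity_notify);
}

/* teardown counterpart: irq_set_affinity_notifier(irq, NULL); */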
b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c index abb1b438564e..dc4dfd4602ab 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -231,6 +231,8 @@ int hclge_mac_connect_phy(struct hnae3_handle *handle) linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->advertising); + phy_attached_info(phydev); + return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 3f41fa2bc414..9f0e35f27789 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -81,16 +81,13 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level, return 0; } else if (ir_calc > ir) { /* Increasing the denominator to select ir_s value */ - while (ir_calc > ir) { + while (ir_calc >= ir && ir) { ir_s_calc++; ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc)); } - if (ir_calc == ir) - *ir_b = 126; - else - *ir_b = (ir * tick * (1 << ir_s_calc) + - (DIVISOR_CLK >> 1)) / DIVISOR_CLK; + *ir_b = (ir * tick * (1 << ir_s_calc) + (DIVISOR_CLK >> 1)) / + DIVISOR_CLK; } else { /* Increasing the numerator to select ir_u value */ u32 numerator; @@ -104,7 +101,7 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level, if (ir_calc == ir) { *ir_b = 126; } else { - u32 denominator = (DIVISOR_CLK * (1 << --ir_u_calc)); + u32 denominator = DIVISOR_CLK * (1 << --ir_u_calc); *ir_b = (ir * tick + (denominator >> 1)) / denominator; } } @@ -404,8 +401,8 @@ static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev) { struct hclge_port_shapping_cmd *shap_cfg_cmd; struct hclge_desc desc; - u32 shapping_para = 0; u8 ir_u, ir_b, ir_s; + u32 shapping_para; int ret; ret = hclge_shaper_para_calc(hdev->hw.mac.speed, @@ -650,12 +647,8 @@ static void hclge_pfc_info_init(struct hclge_dev *hdev) } } -static int hclge_tm_schd_info_init(struct hclge_dev *hdev) +static void hclge_tm_schd_info_init(struct hclge_dev *hdev) { - if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) && - (hdev->tm_info.num_pg != 1)) - return -EINVAL; - hclge_tm_pg_info_init(hdev); hclge_tm_tc_info_init(hdev); @@ -663,8 +656,6 @@ static int hclge_tm_schd_info_init(struct hclge_dev *hdev) hclge_tm_vport_info_update(hdev); hclge_pfc_info_init(hdev); - - return 0; } static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev) @@ -1428,15 +1419,15 @@ int hclge_tm_init_hw(struct hclge_dev *hdev, bool init) int hclge_tm_schd_init(struct hclge_dev *hdev) { - int ret; - /* fc_mode is HCLGE_FC_FULL on reset */ hdev->tm_info.fc_mode = HCLGE_FC_FULL; hdev->fc_mode_last_time = hdev->tm_info.fc_mode; - ret = hclge_tm_schd_info_init(hdev); - if (ret) - return ret; + if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE && + hdev->tm_info.num_pg != 1) + return -EINVAL; + + hclge_tm_schd_info_init(hdev); return hclge_tm_init_hw(hdev, true); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c index 652b796044e3..d5d1cc5d1b6e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c @@ -43,7 +43,7 @@ static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw) { struct hclgevf_dev *hdev = container_of(hw, struct hclgevf_dev, hw); struct hclgevf_cmq_ring *csq = &hw->cmq.csq; - int clean = 0; + int clean; u32 head; head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG); @@ -74,7 +74,7 @@ static bool 
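[Note] The hclge_tm shaper hunks above standardize on add-half-the-divisor rounding: (n + (d >> 1)) / d rounds an unsigned quotient to the nearest integer instead of truncating, the same idiom the kernel wraps as DIV_ROUND_CLOSEST(). A two-case userspace check:

#include <stdio.h>

static unsigned int div_round_closest(unsigned int n, unsigned int d)
{
        return (n + (d >> 1)) / d; /* bias by d/2, then truncate */
}

int main(void)
{
        /* 7/4 = 1.75 -> 2 and 9/4 = 2.25 -> 2;
         * plain truncation would give 1 and 2 */
        printf("%u %u\n", div_round_closest(7, 4),
               div_round_closest(9, 4));
        return 0;
}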
hclgevf_cmd_csq_done(struct hclgevf_hw *hw) static bool hclgevf_is_special_opcode(u16 opcode) { - u16 spec_opcode[] = {0x30, 0x31, 0x32}; + static const u16 spec_opcode[] = {0x30, 0x31, 0x32}; int i; for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) { @@ -97,7 +97,9 @@ static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring) reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1); hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val); - reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S); + reg_val = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG); + reg_val &= HCLGEVF_NIC_SW_RST_RDY; + reg_val |= (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S); hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val); hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0); @@ -405,7 +407,15 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev) } hdev->fw_version = version; - dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version); + dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n", + hnae3_get_field(version, HNAE3_FW_VERSION_BYTE3_MASK, + HNAE3_FW_VERSION_BYTE3_SHIFT), + hnae3_get_field(version, HNAE3_FW_VERSION_BYTE2_MASK, + HNAE3_FW_VERSION_BYTE2_SHIFT), + hnae3_get_field(version, HNAE3_FW_VERSION_BYTE1_MASK, + HNAE3_FW_VERSION_BYTE1_SHIFT), + hnae3_get_field(version, HNAE3_FW_VERSION_BYTE0_MASK, + HNAE3_FW_VERSION_BYTE0_SHIFT)); return 0; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h index 127a434a56f3..f830eef02e5c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h @@ -244,8 +244,11 @@ struct hclgevf_cfg_tx_queue_pointer_cmd { #define HCLGEVF_NIC_CRQ_DEPTH_REG 0x27020 #define HCLGEVF_NIC_CRQ_TAIL_REG 0x27024 #define HCLGEVF_NIC_CRQ_HEAD_REG 0x27028 -#define HCLGEVF_NIC_CMQ_EN_B 16 -#define HCLGEVF_NIC_CMQ_ENABLE BIT(HCLGEVF_NIC_CMQ_EN_B) + +/* this bit indicates that the driver is ready for hardware reset */ +#define HCLGEVF_NIC_SW_RST_RDY_B 16 +#define HCLGEVF_NIC_SW_RST_RDY BIT(HCLGEVF_NIC_SW_RST_RDY_B) + #define HCLGEVF_NIC_CMQ_DESC_NUM 1024 #define HCLGEVF_NIC_CMQ_DESC_NUM_S 3 #define HCLGEVF_NIC_CMDQ_INT_SRC_REG 0x27100 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index a13a0e101c3b..e3090b3dab1d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -743,7 +743,7 @@ static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, } static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, - const u8 *key, const u8 hfunc) + const u8 *key, const u8 hfunc) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; @@ -1269,7 +1269,7 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, HCLGE_MBX_VLAN_FILTER, msg_data, HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0); - /* When remove hw vlan filter failed, record the vlan id, + /* when remove hw vlan filter failed, record the vlan id, * and try to remove it from hw later, to be consistence * with stack. 
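[Note] The firmware-version hunk below swaps the raw %08x dump for four dotted fields extracted with the HNAE3_FW_VERSION_BYTE* masks and shifts. Assuming those masks select the four successive bytes of the word (their definitions sit outside this diff), the decode reduces to plain shift-and-mask:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t version = 0x01020304; /* hypothetical packed value */

        /* byte3.byte2.byte1.byte0 -> "1.2.3.4" */
        printf("%u.%u.%u.%u\n",
               (version >> 24) & 0xff, (version >> 16) & 0xff,
               (version >> 8) & 0xff, version & 0xff);
        return 0;
}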
*/ @@ -1396,19 +1396,22 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev) u32 val; int ret; - /* wait to check the hardware reset completion status */ - val = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); - dev_info(&hdev->pdev->dev, "checking vf resetting status: %x\n", val); - if (hdev->reset_type == HNAE3_FLR_RESET) return hclgevf_flr_poll_timeout(hdev, HCLGEVF_RESET_WAIT_US, HCLGEVF_RESET_WAIT_CNT); - - ret = readl_poll_timeout(hdev->hw.io_base + HCLGEVF_RST_ING, val, - !(val & HCLGEVF_RST_ING_BITS), - HCLGEVF_RESET_WAIT_US, - HCLGEVF_RESET_WAIT_TIMEOUT_US); + else if (hdev->reset_type == HNAE3_VF_RESET) + ret = readl_poll_timeout(hdev->hw.io_base + + HCLGEVF_VF_RST_ING, val, + !(val & HCLGEVF_VF_RST_ING_BIT), + HCLGEVF_RESET_WAIT_US, + HCLGEVF_RESET_WAIT_TIMEOUT_US); + else + ret = readl_poll_timeout(hdev->hw.io_base + + HCLGEVF_RST_ING, val, + !(val & HCLGEVF_RST_ING_BITS), + HCLGEVF_RESET_WAIT_US, + HCLGEVF_RESET_WAIT_TIMEOUT_US); /* hardware completion status should be available by this time */ if (ret) { @@ -1426,6 +1429,20 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev) return 0; } +static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable) +{ + u32 reg_val; + + reg_val = hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG); + if (enable) + reg_val |= HCLGEVF_NIC_SW_RST_RDY; + else + reg_val &= ~HCLGEVF_NIC_SW_RST_RDY; + + hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG, + reg_val); +} + static int hclgevf_reset_stack(struct hclgevf_dev *hdev) { int ret; @@ -1448,7 +1465,14 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev) if (ret) return ret; - return hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT); + ret = hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT); + if (ret) + return ret; + + /* clear handshake status with IMP */ + hclgevf_reset_handshake(hdev, false); + + return 0; } static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) @@ -1474,8 +1498,7 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); /* inform hardware that preparatory work is done */ msleep(HCLGEVF_RESET_SYNC_TIME); - hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG, - HCLGEVF_NIC_CMQ_ENABLE); + hclgevf_reset_handshake(hdev, true); dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n", hdev->reset_type, ret); @@ -1484,6 +1507,8 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev) { + /* recover handshake status with IMP when reset fail */ + hclgevf_reset_handshake(hdev, true); hdev->rst_stats.rst_fail_cnt++; dev_err(&hdev->pdev->dev, "failed to reset VF(%d)\n", hdev->rst_stats.rst_fail_cnt); @@ -1494,9 +1519,6 @@ static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev) if (hclgevf_is_reset_pending(hdev)) { set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); hclgevf_reset_task_schedule(hdev); - } else { - hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG, - HCLGEVF_NIC_CMQ_ENABLE); } } @@ -1539,7 +1561,7 @@ static int hclgevf_reset(struct hclgevf_dev *hdev) rtnl_lock(); - /* now, re-initialize the nic client and ae device*/ + /* now, re-initialize the nic client and ae device */ ret = hclgevf_reset_stack(hdev); if (ret) { dev_err(&hdev->pdev->dev, "failed to reset VF stack\n"); @@ -1762,9 +1784,8 @@ static void hclgevf_reset_service_task(struct work_struct *work) * 1b and 2. cases but we will not get any intimation about 1a * from PF as cmdq would be in unreliable state i.e. 
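[Note] hclgevf_reset_wait() above now chooses the busy register by reset type, but both branches lean on readl_poll_timeout(), which re-reads a register until a condition holds or a timeout lapses. A sketch of the helper's shape; the register offset, bit, and timings here are placeholders (the driver's HCLGEVF_RESET_WAIT_* constants are defined outside this hunk):

#include <linux/bits.h>
#include <linux/iopoll.h>
#include <linux/types.h>

#define DEMO_RST_STATUS_REG  0x07008     /* placeholder offset */
#define DEMO_RST_ING_BIT     BIT(16)     /* placeholder busy bit */

static int demo_reset_wait(void __iomem *io_base)
{
        u32 val;

        /* re-read every 500 us until the busy bit clears;
         * returns -ETIMEDOUT after 5 s */
        return readl_poll_timeout(io_base + DEMO_RST_STATUS_REG, val,
                                  !(val & DEMO_RST_ING_BIT),
                                  500, 5000000);
}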
mailbox * communication between PF and VF would be broken. - */ - - /* if we are never geting into pending state it means either: + * + * if we are never geting into pending state it means either: * 1. PF is not receiving our request which could be due to IMP * reset * 2. PF is screwed @@ -1867,29 +1888,45 @@ static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr) static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev, u32 *clearval) { - u32 cmdq_src_reg, rst_ing_reg; + u32 val, cmdq_stat_reg, rst_ing_reg; /* fetch the events from their corresponding regs */ - cmdq_src_reg = hclgevf_read_dev(&hdev->hw, - HCLGEVF_VECTOR0_CMDQ_SRC_REG); + cmdq_stat_reg = hclgevf_read_dev(&hdev->hw, + HCLGEVF_VECTOR0_CMDQ_STAT_REG); - if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_src_reg) { + if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) { rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); dev_info(&hdev->pdev->dev, "receive reset interrupt 0x%x!\n", rst_ing_reg); set_bit(HNAE3_VF_RESET, &hdev->reset_pending); set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); - cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B); - *clearval = cmdq_src_reg; + *clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B); hdev->rst_stats.vf_rst_cnt++; + /* set up VF hardware reset status, its PF will clear + * this status when PF has initialized done. + */ + val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING); + hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING, + val | HCLGEVF_VF_RST_ING_BIT); return HCLGEVF_VECTOR0_EVENT_RST; } /* check for vector0 mailbox(=CMDQ RX) event source */ - if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { - cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B); - *clearval = cmdq_src_reg; + if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) { + /* for revision 0x21, clearing interrupt is writing bit 0 + * to the clear register, writing bit 1 means to keep the + * old value. + * for revision 0x20, the clear register is a read & write + * register, so we should just write 0 to the bit we are + * handling, and keep other bits as cmdq_stat_reg. 
+ */ + if (hdev->pdev->revision >= 0x21) + *clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B); + else + *clearval = cmdq_stat_reg & + ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B); + return HCLGEVF_VECTOR0_EVENT_MBX; } @@ -2023,9 +2060,10 @@ static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en) static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) { struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; - int i, ret; + int ret; + u32 i; - rss_cfg->rss_size = hdev->rss_size_max; + rss_cfg->rss_size = hdev->nic.kinfo.rss_size; if (hdev->pdev->revision >= 0x21) { rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE; @@ -2062,13 +2100,13 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) /* Initialize RSS indirect table */ for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) - rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max; + rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size; ret = hclgevf_set_rss_indir_table(hdev); if (ret) return ret; - return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max); + return hclgevf_set_rss_tc_mode(hdev, rss_cfg->rss_size); } static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) @@ -2265,7 +2303,7 @@ static void hclgevf_uninit_msi(struct hclgevf_dev *hdev) static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev) { - int ret = 0; + int ret; hclgevf_get_misc_vector(hdev); @@ -2695,7 +2733,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) } hdev->last_reset_time = jiffies; - pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME); + dev_info(&hdev->pdev->dev, "finished initializing %s driver\n", + HCLGEVF_DRIVER_NAME); return 0; @@ -2797,6 +2836,77 @@ static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle, *max_rss_size = hdev->rss_size_max; } +static void hclgevf_update_rss_size(struct hnae3_handle *handle, + u32 new_tqps_num) +{ + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + u16 max_rss_size; + + kinfo->req_rss_size = new_tqps_num; + + max_rss_size = min_t(u16, hdev->rss_size_max, + hdev->num_tqps / kinfo->num_tc); + + /* Use the user's configuration when it is not larger than + * max_rss_size, otherwise, use the maximum specification value. 
+ */ + if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size && + kinfo->req_rss_size <= max_rss_size) + kinfo->rss_size = kinfo->req_rss_size; + else if (kinfo->rss_size > max_rss_size || + (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) + kinfo->rss_size = max_rss_size; + + kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size; +} + +static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, + bool rxfh_configured) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + u16 cur_rss_size = kinfo->rss_size; + u16 cur_tqps = kinfo->num_tqps; + u32 *rss_indir; + unsigned int i; + int ret; + + hclgevf_update_rss_size(handle, new_tqps_num); + + ret = hclgevf_set_rss_tc_mode(hdev, kinfo->rss_size); + if (ret) + return ret; + + /* RSS indirection table has been configuared by user */ + if (rxfh_configured) + goto out; + + /* Reinitializes the rss indirect table according to the new RSS size */ + rss_indir = kcalloc(HCLGEVF_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL); + if (!rss_indir) + return -ENOMEM; + + for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) + rss_indir[i] = i % kinfo->rss_size; + + ret = hclgevf_set_rss(handle, rss_indir, NULL, 0); + if (ret) + dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", + ret); + + kfree(rss_indir); + +out: + if (!ret) + dev_info(&hdev->pdev->dev, + "Channels changed, rss_size from %u to %u, tqps from %u to %u", + cur_rss_size, kinfo->rss_size, + cur_tqps, kinfo->rss_size * kinfo->num_tc); + + return ret; +} + static int hclgevf_get_status(struct hnae3_handle *handle) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); @@ -3004,6 +3114,7 @@ static const struct hnae3_ae_ops hclgevf_ops = { .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag, .reset_event = hclgevf_reset_event, .set_default_reset_request = hclgevf_set_def_reset_request, + .set_channels = hclgevf_set_channels, .get_channels = hclgevf_get_channels, .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info, .get_regs_len = hclgevf_get_regs_len, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index 5a9e30998a8f..bdde3afc286b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -87,6 +87,8 @@ /* Vector0 interrupt CMDQ event source register(RW) */ #define HCLGEVF_VECTOR0_CMDQ_SRC_REG 0x27100 +/* Vector0 interrupt CMDQ event status register(RO) */ +#define HCLGEVF_VECTOR0_CMDQ_STAT_REG 0x27104 /* CMDQ register bits for RX event(=MBX event) */ #define HCLGEVF_VECTOR0_RX_CMDQ_INT_B 1 /* RST register bits for RESET event */ @@ -103,6 +105,9 @@ (HCLGEVF_FUN_RST_ING_BIT | HCLGEVF_GLOBAL_RST_ING_BIT | \ HCLGEVF_CORE_RST_ING_BIT | HCLGEVF_IMP_RST_ING_BIT) +#define HCLGEVF_VF_RST_ING 0x07008 +#define HCLGEVF_VF_RST_ING_BIT BIT(16) + #define HCLGEVF_RSS_IND_TBL_SIZE 512 #define HCLGEVF_RSS_SET_BITMAP_MSK 0xffff #define HCLGEVF_RSS_KEY_SIZE 40 @@ -120,7 +125,7 @@ #define HCLGEVF_S_IP_BIT BIT(3) #define HCLGEVF_V_TAG_BIT BIT(4) -#define HCLGEVF_STATS_TIMER_INTERVAL (36) +#define HCLGEVF_STATS_TIMER_INTERVAL 36U enum hclgevf_evt_cause { HCLGEVF_VECTOR0_EVENT_RST, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c index 6a96987bd8f0..a108191c9e50 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c +++ 
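[Note] hclgevf_set_channels() above rebuilds the indirection table as rss_indir[i] = i % rss_size, round-robining the hash buckets over the active queues, and skips the rebuild when the user has pinned a custom table (rxfh_configured). The fill reduced to its core, with a demo table size mirroring HCLGEVF_RSS_IND_TBL_SIZE:

#include <linux/slab.h>
#include <linux/types.h>

#define DEMO_RSS_IND_TBL_SIZE 512

static u32 *demo_build_indir(u16 rss_size)
{
        u32 *tbl = kcalloc(DEMO_RSS_IND_TBL_SIZE, sizeof(u32),
                           GFP_KERNEL);
        unsigned int i;

        if (!tbl)
                return NULL;

        /* bucket i -> queue i % rss_size, so every queue ends up
         * with 512 / rss_size buckets, give or take one */
        for (i = 0; i < DEMO_RSS_IND_TBL_SIZE; i++)
                tbl[i] = i % rss_size;

        return tbl;
}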
b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -277,9 +277,9 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) switch (msg_q[0]) { case HCLGE_MBX_LINK_STAT_CHANGE: - link_status = le16_to_cpu(msg_q[1]); + link_status = msg_q[1]; memcpy(&speed, &msg_q[2], sizeof(speed)); - duplex = (u8)le16_to_cpu(msg_q[4]); + duplex = (u8)msg_q[4]; /* update upper layer with new link link status */ hclgevf_update_link_status(hdev, link_status); @@ -287,7 +287,7 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) break; case HCLGE_MBX_LINK_STAT_MODE: - idx = (u8)le16_to_cpu(msg_q[1]); + idx = (u8)msg_q[1]; if (idx) memcpy(&hdev->hw.mac.supported, &msg_q[2], sizeof(unsigned long)); @@ -301,14 +301,14 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) * has been completely reset. After this stack should * eventually be re-initialized. */ - reset_type = le16_to_cpu(msg_q[1]); + reset_type = (enum hnae3_reset_type)msg_q[1]; set_bit(reset_type, &hdev->reset_pending); set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); hclgevf_reset_task_schedule(hdev); break; case HCLGE_MBX_PUSH_VLAN_INFO: - state = le16_to_cpu(msg_q[1]); + state = msg_q[1]; vlan_info = &msg_q[1]; hclgevf_update_port_base_vlan_info(hdev, state, (u8 *)vlan_info, 8); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c index 9c78251f9c39..0e13d1c7e474 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c @@ -136,7 +136,7 @@ static int tx_map_skb(struct hinic_dev *nic_dev, struct sk_buff *skb, struct hinic_hwdev *hwdev = nic_dev->hwdev; struct hinic_hwif *hwif = hwdev->hwif; struct pci_dev *pdev = hwif->pdev; - struct skb_frag_struct *frag; + skb_frag_t *frag; dma_addr_t dma_addr; int i, j; diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index cca71ba7a74a..13e30eba5349 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -1577,20 +1577,16 @@ static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr) ehea_destroy_eq(pr->eq); for (i = 0; i < pr->rq1_skba.len; i++) - if (pr->rq1_skba.arr[i]) - dev_kfree_skb(pr->rq1_skba.arr[i]); + dev_kfree_skb(pr->rq1_skba.arr[i]); for (i = 0; i < pr->rq2_skba.len; i++) - if (pr->rq2_skba.arr[i]) - dev_kfree_skb(pr->rq2_skba.arr[i]); + dev_kfree_skb(pr->rq2_skba.arr[i]); for (i = 0; i < pr->rq3_skba.len; i++) - if (pr->rq3_skba.arr[i]) - dev_kfree_skb(pr->rq3_skba.arr[i]); + dev_kfree_skb(pr->rq3_skba.arr[i]); for (i = 0; i < pr->sq_skba.len; i++) - if (pr->sq_skba.arr[i]) - dev_kfree_skb(pr->sq_skba.arr[i]); + dev_kfree_skb(pr->sq_skba.arr[i]); vfree(pr->rq1_skba.arr); vfree(pr->rq2_skba.arr); diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 395dde444483..9e43c9ace9c2 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -1549,7 +1549,7 @@ emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev) ctrl); /* skb fragments */ for (i = 0; i < nr_frags; ++i) { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; len = skb_frag_size(frag); if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF)) diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 5cb55ea671e3..2e5172f61564 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ 
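[Note] The ehea cleanup above, like the e1000/e1000e hunks that follow, drops the if (skb) guards because kfree_skb() and dev_kfree_skb() already return immediately on NULL, the same contract as kfree(NULL). So the unguarded form is equivalent:

#include <linux/skbuff.h>

static void demo_free_slot(struct sk_buff **slot)
{
        dev_kfree_skb(*slot);   /* no-op when *slot is NULL */
        *slot = NULL;           /* guard against a later double free */
}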
b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1485,7 +1485,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) memcpy(dst + cur, page_address(skb_frag_page(frag)) + - frag->page_offset, skb_frag_size(frag)); + skb_frag_off(frag), skb_frag_size(frag)); cur += skb_frag_size(frag); } } else { diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index a41008523c98..71d3d8854d8f 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c @@ -937,8 +937,7 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter) txdr->buffer_info[i].dma, txdr->buffer_info[i].length, DMA_TO_DEVICE); - if (txdr->buffer_info[i].skb) - dev_kfree_skb(txdr->buffer_info[i].skb); + dev_kfree_skb(txdr->buffer_info[i].skb); } } diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index f703fa58458e..86493fea56e4 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -2889,9 +2889,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter, } for (f = 0; f < nr_frags; f++) { - const struct skb_frag_struct *frag; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; - frag = &skb_shinfo(skb)->frags[f]; len = skb_frag_size(frag); offset = 0; @@ -4176,8 +4175,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, /* an error means any chain goes out the window * too */ - if (rx_ring->rx_skb_top) - dev_kfree_skb(rx_ring->rx_skb_top); + dev_kfree_skb(rx_ring->rx_skb_top); rx_ring->rx_skb_top = NULL; goto next_desc; } diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 08342698386d..de8c5818a305 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -1126,8 +1126,7 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter) buffer_info->dma, buffer_info->length, DMA_TO_DEVICE); - if (buffer_info->skb) - dev_kfree_skb(buffer_info->skb); + dev_kfree_skb(buffer_info->skb); } } @@ -1139,8 +1138,7 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter) dma_unmap_single(&pdev->dev, buffer_info->dma, 2048, DMA_FROM_DEVICE); - if (buffer_info->skb) - dev_kfree_skb(buffer_info->skb); + dev_kfree_skb(buffer_info->skb); } } diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 395b05701480..a1fab77b2096 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1429,6 +1429,16 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) else phy_reg |= 0xFA; e1e_wphy_locked(hw, I217_PLL_CLOCK_GATE_REG, phy_reg); + + if (speed == SPEED_1000) { + hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL, + &phy_reg); + + phy_reg |= HV_PM_CTRL_K1_CLK_REQ; + + hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL, + phy_reg); + } } hw->phy.ops.release(hw); diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index eb09c755fa17..1502895eb45d 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -210,7 +210,7 @@ /* PHY Power Management Control */ #define HV_PM_CTRL PHY_REG(770, 17) -#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100 +#define HV_PM_CTRL_K1_CLK_REQ 0x200 #define HV_PM_CTRL_K1_ENABLE 0x4000 #define I217_PLL_CLOCK_GATE_REG 
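[Note] The ibmvnic, hinic, and emac changes above belong to the tree-wide move from struct skb_frag_struct with raw page_offset/size fields to the opaque skb_frag_t plus accessor helpers. The accessor-based walk, as a sketch (assumes the frag pages are lowmem so page_address() is valid):

#include <linux/mm.h>
#include <linux/skbuff.h>

static void demo_walk_frags(const struct sk_buff *skb)
{
        unsigned int i;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                /* skb_frag_off()/skb_frag_size() replace direct
                 * frag->page_offset / frag->size field access */
                void *va = page_address(skb_frag_page(frag)) +
                           skb_frag_off(frag);

                /* ... copy skb_frag_size(frag) bytes from va ... */
                (void)va;
        }
}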
PHY_REG(772, 28) diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index e4baa13b3cda..d7d56e42a6aa 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -1780,8 +1780,8 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data) } /* guard against interrupt when we're going down */ if (!test_bit(__E1000_DOWN, &adapter->state)) - queue_delayed_work(adapter->e1000_workqueue, - &adapter->watchdog_task, 1); + mod_delayed_work(adapter->e1000_workqueue, + &adapter->watchdog_task, HZ); } /* Reset on uncorrectable ECC error */ @@ -1861,8 +1861,8 @@ static irqreturn_t e1000_intr(int __always_unused irq, void *data) } /* guard against interrupt when we're going down */ if (!test_bit(__E1000_DOWN, &adapter->state)) - queue_delayed_work(adapter->e1000_workqueue, - &adapter->watchdog_task, 1); + mod_delayed_work(adapter->e1000_workqueue, + &adapter->watchdog_task, HZ); } /* Reset on uncorrectable ECC error */ @@ -1907,8 +1907,8 @@ static irqreturn_t e1000_msix_other(int __always_unused irq, void *data) hw->mac.get_link_status = true; /* guard against interrupt when we're going down */ if (!test_bit(__E1000_DOWN, &adapter->state)) - queue_delayed_work(adapter->e1000_workqueue, - &adapter->watchdog_task, 1); + mod_delayed_work(adapter->e1000_workqueue, + &adapter->watchdog_task, HZ); } if (!test_bit(__E1000_DOWN, &adapter->state)) @@ -5579,9 +5579,8 @@ static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb, } for (f = 0; f < nr_frags; f++) { - const struct skb_frag_struct *frag; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; - frag = &skb_shinfo(skb)->frags[f]; len = skb_frag_size(frag); offset = 0; @@ -6297,7 +6296,7 @@ fl_out: static int e1000e_pm_freeze(struct device *dev) { - struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); + struct net_device *netdev = dev_get_drvdata(dev); struct e1000_adapter *adapter = netdev_priv(netdev); netif_device_detach(netdev); @@ -6630,7 +6629,7 @@ static int __e1000_resume(struct pci_dev *pdev) #ifdef CONFIG_PM_SLEEP static int e1000e_pm_thaw(struct device *dev) { - struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); + struct net_device *netdev = dev_get_drvdata(dev); struct e1000_adapter *adapter = netdev_priv(netdev); e1000e_set_interrupt_capability(adapter); @@ -6679,8 +6678,7 @@ static int e1000e_pm_resume(struct device *dev) static int e1000e_pm_runtime_idle(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct net_device *netdev = pci_get_drvdata(pdev); + struct net_device *netdev = dev_get_drvdata(dev); struct e1000_adapter *adapter = netdev_priv(netdev); u16 eee_lp; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index 7d42582ed48d..b14441944b4b 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2019 Intel Corporation. */ #ifndef _FM10K_H_ #define _FM10K_H_ @@ -177,14 +177,10 @@ static inline struct netdev_queue *txring_txq(const struct fm10k_ring *ring) #define MIN_Q_VECTORS 1 enum fm10k_non_q_vectors { FM10K_MBX_VECTOR, -#define NON_Q_VECTORS_VF NON_Q_VECTORS_PF - NON_Q_VECTORS_PF + NON_Q_VECTORS }; -#define NON_Q_VECTORS(hw) (((hw)->mac.type == fm10k_mac_pf) ? 
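[Note] The e1000e watchdog hunks above change behavior, not just the delay: queue_delayed_work() is a no-op when the work is already pending, leaving the old 1-jiffy expiry in place, whereas mod_delayed_work() re-arms the timer to the new delay either way, so a burst of link interrupts keeps deferring the watchdog a full HZ rather than letting it fire almost immediately. In sketch form:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void demo_kick_watchdog(struct workqueue_struct *wq,
                               struct delayed_work *watchdog)
{
        /* pending or not, the watchdog now fires HZ jiffies from now */
        mod_delayed_work(wq, watchdog, HZ);
}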
\ - NON_Q_VECTORS_PF : \ - NON_Q_VECTORS_VF) -#define MIN_MSIX_COUNT(hw) (MIN_Q_VECTORS + NON_Q_VECTORS(hw)) +#define MIN_MSIX_COUNT(hw) (MIN_Q_VECTORS + NON_Q_VECTORS) struct fm10k_q_vector { struct fm10k_intfc *interface; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c index 20768ac7f17e..c45315472245 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2019 Intel Corporation. */ #include "fm10k.h" @@ -36,7 +36,7 @@ static int fm10k_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets) static int fm10k_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets) { u8 num_tc = 0; - int i, err; + int i; /* verify type and determine num_tcs needed */ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { @@ -57,7 +57,7 @@ static int fm10k_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets) /* update TC hardware mapping if necessary */ if (num_tc != netdev_get_num_tc(dev)) { - err = fm10k_setup_tc(dev, num_tc); + int err = fm10k_setup_tc(dev, num_tc); if (err) return err; } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c index dca104121c05..1d27b2fb23af 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c @@ -160,8 +160,6 @@ void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector) snprintf(name, sizeof(name), "q_vector.%03d", q_vector->v_idx); q_vector->dbg_q_vector = debugfs_create_dir(name, interface->dbg_intfc); - if (!q_vector->dbg_q_vector) - return; /* Generate a file for each rx ring in the q_vector */ for (i = 0; i < q_vector->tx.count; i++) { diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 4895dd83dd08..c681d2d28107 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2019 Intel Corporation. 
*/ #include <linux/vmalloc.h> @@ -222,7 +222,6 @@ static void __fm10k_add_ethtool_stats(u64 **data, void *pointer, const unsigned int size) { unsigned int i; - char *p; if (!pointer) { /* memory is not zero allocated so we have to clear it */ @@ -232,7 +231,7 @@ static void __fm10k_add_ethtool_stats(u64 **data, void *pointer, } for (i = 0; i < size; i++) { - p = (char *)pointer + stats[i].stat_offset; + char *p = (char *)pointer + stats[i].stat_offset; switch (stats[i].sizeof_stat) { case sizeof(u64): @@ -651,7 +650,6 @@ static int fm10k_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) { struct fm10k_intfc *interface = netdev_priv(dev); - struct fm10k_q_vector *qv; u16 tx_itr, rx_itr; int i; @@ -677,7 +675,8 @@ static int fm10k_set_coalesce(struct net_device *dev, /* update q_vectors */ for (i = 0; i < interface->num_q_vectors; i++) { - qv = interface->q_vector[i]; + struct fm10k_q_vector *qv = interface->q_vector[i]; + qv->tx.itr = tx_itr; qv->rx.itr = rx_itr; } @@ -1115,13 +1114,12 @@ static void fm10k_get_channels(struct net_device *dev, struct ethtool_channels *ch) { struct fm10k_intfc *interface = netdev_priv(dev); - struct fm10k_hw *hw = &interface->hw; /* report maximum channels */ ch->max_combined = fm10k_max_channels(dev); /* report info for other vector */ - ch->max_other = NON_Q_VECTORS(hw); + ch->max_other = NON_Q_VECTORS; ch->other_count = ch->max_other; /* record RSS queues */ @@ -1133,14 +1131,13 @@ static int fm10k_set_channels(struct net_device *dev, { struct fm10k_intfc *interface = netdev_priv(dev); unsigned int count = ch->combined_count; - struct fm10k_hw *hw = &interface->hw; /* verify they are not requesting separate vectors */ if (!count || ch->rx_count || ch->tx_count) return -EINVAL; /* verify other_count has not changed */ - if (ch->other_count != NON_Q_VECTORS(hw)) + if (ch->other_count != NON_Q_VECTORS) return -EINVAL; /* verify the number of channels does not exceed hardware limits */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c index 8de77155f2e7..afe1fafd2447 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2019 Intel Corporation. */ #include "fm10k.h" #include "fm10k_vf.h" @@ -426,7 +426,7 @@ static s32 fm10k_iov_alloc_data(struct pci_dev *pdev, int num_vfs) struct fm10k_iov_data *iov_data = interface->iov_data; struct fm10k_hw *hw = &interface->hw; size_t size; - int i, err; + int i; /* return error if iov_data is already populated */ if (iov_data) @@ -452,6 +452,7 @@ static s32 fm10k_iov_alloc_data(struct pci_dev *pdev, int num_vfs) /* loop through vf_info structures initializing each entry */ for (i = 0; i < num_vfs; i++) { struct fm10k_vf_info *vf_info = &iov_data->vf_info[i]; + int err; /* Record VF VSI value */ vf_info->vsi = i + 1; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 90270b4a1682..2be9222510e7 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2019 Intel Corporation. 
*/ #include <linux/types.h> #include <linux/module.h> @@ -17,7 +17,7 @@ const char fm10k_driver_version[] = DRV_VERSION; char fm10k_driver_name[] = "fm10k"; static const char fm10k_driver_string[] = DRV_SUMMARY; static const char fm10k_copyright[] = - "Copyright(c) 2013 - 2018 Intel Corporation."; + "Copyright(c) 2013 - 2019 Intel Corporation."; MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_DESCRIPTION(DRV_SUMMARY); @@ -315,7 +315,7 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring, /* prefetch first cache line of first page */ prefetch(page_addr); #if L1_CACHE_BYTES < 128 - prefetch(page_addr + L1_CACHE_BYTES); + prefetch((void *)((u8 *)page_addr + L1_CACHE_BYTES)); #endif /* allocate a skb to store the frags */ @@ -946,7 +946,7 @@ static void fm10k_tx_map(struct fm10k_ring *tx_ring, struct sk_buff *skb = first->skb; struct fm10k_tx_buffer *tx_buffer; struct fm10k_tx_desc *tx_desc; - struct skb_frag_struct *frag; + skb_frag_t *frag; unsigned char *data; dma_addr_t dma; unsigned int data_len, size; @@ -1073,8 +1073,11 @@ netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb, * + 2 desc gap to keep tail from touching head * otherwise try next time */ - for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) - count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; + + count += TXD_USE_COUNT(skb_frag_size(frag)); + } if (fm10k_maybe_stop_tx(tx_ring, count + 3)) { tx_ring->tx_stats.tx_busy++; @@ -1823,7 +1826,7 @@ static int fm10k_init_msix_capability(struct fm10k_intfc *interface) v_budget = min_t(u16, v_budget, num_online_cpus()); /* account for vectors not related to queues */ - v_budget += NON_Q_VECTORS(hw); + v_budget += NON_Q_VECTORS; /* At the same time, hardware can only support a maximum of * hw.mac->max_msix_vectors vectors. With features @@ -1855,7 +1858,7 @@ static int fm10k_init_msix_capability(struct fm10k_intfc *interface) } /* record the number of queues available for q_vectors */ - interface->num_q_vectors = v_budget - NON_Q_VECTORS(hw); + interface->num_q_vectors = v_budget - NON_Q_VECTORS; return 0; } @@ -1869,7 +1872,7 @@ static int fm10k_init_msix_capability(struct fm10k_intfc *interface) static bool fm10k_cache_ring_qos(struct fm10k_intfc *interface) { struct net_device *dev = interface->netdev; - int pc, offset, rss_i, i, q_idx; + int pc, offset, rss_i, i; u16 pc_stride = interface->ring_feature[RING_F_QOS].mask + 1; u8 num_pcs = netdev_get_num_tc(dev); @@ -1879,7 +1882,8 @@ static bool fm10k_cache_ring_qos(struct fm10k_intfc *interface) rss_i = interface->ring_feature[RING_F_RSS].indices; for (pc = 0, offset = 0; pc < num_pcs; pc++, offset += rss_i) { - q_idx = pc; + int q_idx = pc; + for (i = 0; i < rss_i; i++) { interface->tx_ring[offset + i]->reg_idx = q_idx; interface->tx_ring[offset + i]->qos_pc = pc; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c index 21021fe4f1c3..75e51f91036c 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2019 Intel Corporation. 
*/ #include "fm10k_common.h" @@ -297,13 +297,14 @@ static u16 fm10k_mbx_validate_msg_size(struct fm10k_mbx_info *mbx, u16 len) { struct fm10k_mbx_fifo *fifo = &mbx->rx; u16 total_len = 0, msg_len; - u32 *msg; /* length should include previous amounts pushed */ len += mbx->pushed; /* offset in message is based off of current message size */ do { + u32 *msg; + msg = fifo->buffer + fm10k_fifo_tail_offset(fifo, total_len); msg_len = FM10K_TLV_DWORD_LEN(*msg); total_len += msg_len; @@ -1920,7 +1921,6 @@ static void fm10k_sm_mbx_transmit(struct fm10k_hw *hw, /* reduce length by 1 to convert to a mask */ u16 mbmem_len = mbx->mbmem_len - 1; u16 tail_len, len = 0; - u32 *msg; /* push head behind tail */ if (mbx->tail < head) @@ -1930,6 +1930,8 @@ static void fm10k_sm_mbx_transmit(struct fm10k_hw *hw, /* determine msg aligned offset for end of buffer */ do { + u32 *msg; + msg = fifo->buffer + fm10k_fifo_head_offset(fifo, len); tail_len = len; len += FM10K_TLV_DWORD_LEN(*msg); @@ -2132,7 +2134,8 @@ fifo_err: * DWORDs, not bytes. Any invalid values will cause the mailbox to return * error. **/ -s32 fm10k_sm_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx, +s32 fm10k_sm_mbx_init(struct fm10k_hw __always_unused *hw, + struct fm10k_mbx_info *mbx, const struct fm10k_msg_data *msg_data) { mbx->mbx_reg = FM10K_GMBX; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 538a8467f434..09f7a246e134 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2019 Intel Corporation. */ #include "fm10k.h" #include <linux/vmalloc.h> @@ -54,7 +54,7 @@ err: **/ static int fm10k_setup_all_tx_resources(struct fm10k_intfc *interface) { - int i, err = 0; + int i, err; for (i = 0; i < interface->num_tx_queues; i++) { err = fm10k_setup_tx_resources(interface->tx_ring[i]); @@ -121,7 +121,7 @@ err: **/ static int fm10k_setup_all_rx_resources(struct fm10k_intfc *interface) { - int i, err = 0; + int i, err; for (i = 0; i < interface->num_rx_queues; i++) { err = fm10k_setup_rx_resources(interface->rx_ring[i]); @@ -169,7 +169,6 @@ void fm10k_unmap_and_free_tx_resource(struct fm10k_ring *ring, **/ static void fm10k_clean_tx_ring(struct fm10k_ring *tx_ring) { - struct fm10k_tx_buffer *tx_buffer; unsigned long size; u16 i; @@ -179,7 +178,8 @@ static void fm10k_clean_tx_ring(struct fm10k_ring *tx_ring) /* Free all the Tx ring sk_buffs */ for (i = 0; i < tx_ring->count; i++) { - tx_buffer = &tx_ring->tx_buffer[i]; + struct fm10k_tx_buffer *tx_buffer = &tx_ring->tx_buffer[i]; + fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer); } @@ -253,8 +253,7 @@ static void fm10k_clean_rx_ring(struct fm10k_ring *rx_ring) if (!rx_ring->rx_buffer) return; - if (rx_ring->skb) - dev_kfree_skb(rx_ring->skb); + dev_kfree_skb(rx_ring->skb); rx_ring->skb = NULL; /* Free all the Rx ring sk_buffs */ @@ -871,7 +870,7 @@ static int fm10k_uc_vlan_unsync(struct net_device *netdev, u16 glort = interface->glort; u16 vid = interface->vid; bool set = !!(vid / VLAN_N_VID); - int err = -EHOSTDOWN; + int err; /* drop any leading bits on the VLAN ID */ vid &= VLAN_N_VID - 1; @@ -891,7 +890,7 @@ static int fm10k_mc_vlan_unsync(struct net_device *netdev, u16 glort = interface->glort; u16 vid = interface->vid; bool set = !!(vid / VLAN_N_VID); - int err = -EHOSTDOWN; + int err; /* drop any leading bits on the VLAN ID */ 
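/* An aside on the encoding decoded here (illustrative values only,
 * with VLAN_N_VID == 4096): interface->vid packs a set/clear flag
 * above the 12-bit VLAN ID, so vid == 4096 + 5 means "set VLAN 5"
 * while vid == 5 means "clear VLAN 5"; set = !!(vid / VLAN_N_VID)
 * recovers the flag, and the mask below recovers the ID.
 */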
vid &= VLAN_N_VID - 1; @@ -1444,11 +1443,11 @@ static int __fm10k_setup_tc(struct net_device *dev, enum tc_setup_type type, static void fm10k_assign_l2_accel(struct fm10k_intfc *interface, struct fm10k_l2_accel *l2_accel) { - struct fm10k_ring *ring; int i; for (i = 0; i < interface->num_rx_queues; i++) { - ring = interface->rx_ring[i]; + struct fm10k_ring *ring = interface->rx_ring[i]; + rcu_assign_pointer(ring->l2_accel, l2_accel); } @@ -1463,7 +1462,7 @@ static void *fm10k_dfwd_add_station(struct net_device *dev, struct fm10k_l2_accel *old_l2_accel = NULL; struct fm10k_dglort_cfg dglort = { 0 }; struct fm10k_hw *hw = &interface->hw; - int size = 0, i; + int size, i; u16 vid, glort; /* The hardware supported by fm10k only filters on the destination MAC diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index e49fb51d3613..bb236fa44048 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2019 Intel Corporation. */ #include <linux/module.h> #include <linux/interrupt.h> @@ -344,7 +344,6 @@ static void fm10k_detach_subtask(struct fm10k_intfc *interface) struct net_device *netdev = interface->netdev; u32 __iomem *hw_addr; u32 value; - int err; /* do nothing if netdev is still present or hw_addr is set */ if (netif_device_present(netdev) || interface->hw.hw_addr) @@ -362,6 +361,8 @@ static void fm10k_detach_subtask(struct fm10k_intfc *interface) hw_addr = READ_ONCE(interface->uc_addr); value = readl(hw_addr); if (~value) { + int err; + /* Make sure the reset was initiated because we detached, * otherwise we might race with a different reset flow. 
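(Aside on the "if (~value)" test above, as a hedged sketch: MMIO reads from a PCI device that has been surprise-removed conventionally return all ones, so a register read with any zero bit shows the device is still responding:

	value == 0xffffffff  ->  ~value == 0  ->  device gone
	value == 0x00800000  ->  ~value != 0  ->  device present)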
*/ @@ -697,8 +698,6 @@ static void fm10k_watchdog_subtask(struct fm10k_intfc *interface) */ static void fm10k_check_hang_subtask(struct fm10k_intfc *interface) { - int i; - /* If we're down or resetting, just bail */ if (test_bit(__FM10K_DOWN, interface->state) || test_bit(__FM10K_RESETTING, interface->state)) @@ -710,6 +709,8 @@ static void fm10k_check_hang_subtask(struct fm10k_intfc *interface) interface->next_tx_hang_check = jiffies + (2 * HZ); if (netif_carrier_ok(interface->netdev)) { + int i; + /* Force detection of hung controller */ for (i = 0; i < interface->num_tx_queues; i++) set_check_for_tx_hang(interface->tx_ring[i]); @@ -897,7 +898,7 @@ static void fm10k_configure_tx_ring(struct fm10k_intfc *interface, /* Map interrupt */ if (ring->q_vector) { - txint = ring->q_vector->v_idx + NON_Q_VECTORS(hw); + txint = ring->q_vector->v_idx + NON_Q_VECTORS; txint |= FM10K_INT_MAP_TIMER0; } @@ -1036,7 +1037,7 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface, /* Map interrupt */ if (ring->q_vector) { - rxint = ring->q_vector->v_idx + NON_Q_VECTORS(hw); + rxint = ring->q_vector->v_idx + NON_Q_VECTORS; rxint |= FM10K_INT_MAP_TIMER1; } @@ -1719,10 +1720,9 @@ int fm10k_mbx_request_irq(struct fm10k_intfc *interface) void fm10k_qv_free_irq(struct fm10k_intfc *interface) { int vector = interface->num_q_vectors; - struct fm10k_hw *hw = &interface->hw; struct msix_entry *entry; - entry = &interface->msix_entries[NON_Q_VECTORS(hw) + vector]; + entry = &interface->msix_entries[NON_Q_VECTORS + vector]; while (vector) { struct fm10k_q_vector *q_vector; @@ -1759,7 +1759,7 @@ int fm10k_qv_request_irq(struct fm10k_intfc *interface) unsigned int ri = 0, ti = 0; int vector, err; - entry = &interface->msix_entries[NON_Q_VECTORS(hw)]; + entry = &interface->msix_entries[NON_Q_VECTORS]; for (vector = 0; vector < interface->num_q_vectors; vector++) { struct fm10k_q_vector *q_vector = interface->q_vector[vector]; @@ -2339,7 +2339,7 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface) /* Restart the MAC/VLAN request queue in-case of outstanding events */ fm10k_macvlan_schedule(interface); - return err; + return 0; } /** @@ -2352,7 +2352,7 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface) **/ static int __maybe_unused fm10k_resume(struct device *dev) { - struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev)); + struct fm10k_intfc *interface = dev_get_drvdata(dev); struct net_device *netdev = interface->netdev; struct fm10k_hw *hw = &interface->hw; int err; @@ -2379,7 +2379,7 @@ static int __maybe_unused fm10k_resume(struct device *dev) **/ static int __maybe_unused fm10k_suspend(struct device *dev) { - struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev)); + struct fm10k_intfc *interface = dev_get_drvdata(dev); struct net_device *netdev = interface->netdev; netif_device_detach(netdev); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index cb4d02629b86..be07bfdb0bb4 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2019 Intel Corporation. */ #include "fm10k_pf.h" #include "fm10k_vf.h" @@ -1152,7 +1152,7 @@ static void fm10k_iov_update_stats_pf(struct fm10k_hw *hw, * assumption is that in this case it is acceptable to just directly * hand off the message from the VF to the underlying shared code. 
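The fm10k suspend/resume hunks above (and the matching i40e ones later in this diff) drop a to_pci_dev() round-trip. That is safe because pci_get_drvdata() is itself a thin wrapper over the generic device drvdata; its definition in include/linux/pci.h is essentially:

static inline void *pci_get_drvdata(struct pci_dev *pdev)
{
	return dev_get_drvdata(&pdev->dev);
}

so a dev_pm_ops callback, which is handed the struct device * directly, can call dev_get_drvdata(dev) with no detour through the PCI device.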
**/ -s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results, +s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 __always_unused **results, struct fm10k_mbx_info *mbx) { struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx; @@ -1352,7 +1352,6 @@ s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results, struct fm10k_mbx_info *mbx) { struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx; - u32 *result; s32 err = 0; u32 msg[2]; u8 mode = 0; @@ -1362,7 +1361,7 @@ s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results, return FM10K_ERR_PARAM; if (!!results[FM10K_LPORT_STATE_MSG_XCAST_MODE]) { - result = results[FM10K_LPORT_STATE_MSG_XCAST_MODE]; + u32 *result = results[FM10K_LPORT_STATE_MSG_XCAST_MODE]; /* XCAST mode update requested */ err = fm10k_tlv_attr_get_u8(result, &mode); @@ -1566,7 +1565,7 @@ static s32 fm10k_get_fault_pf(struct fm10k_hw *hw, int type, /* read remaining fields */ fault->address = fm10k_read_reg(hw, type + FM10K_FAULT_ADDR_HI); fault->address <<= 32; - fault->address = fm10k_read_reg(hw, type + FM10K_FAULT_ADDR_LO); + fault->address |= fm10k_read_reg(hw, type + FM10K_FAULT_ADDR_LO); fault->specinfo = fm10k_read_reg(hw, type + FM10K_FAULT_SPECINFO); /* clear valid bit to allow for next error */ @@ -1642,7 +1641,7 @@ const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[] = { * switch API. **/ s32 fm10k_msg_lport_map_pf(struct fm10k_hw *hw, u32 **results, - struct fm10k_mbx_info *mbx) + struct fm10k_mbx_info __always_unused *mbx) { u16 glort, mask; u32 dglort_map; @@ -1685,7 +1684,7 @@ const struct fm10k_tlv_attr fm10k_update_pvid_msg_attr[] = { * This handler configures the default VLAN for the PF **/ static s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *hw, u32 **results, - struct fm10k_mbx_info *mbx) + struct fm10k_mbx_info __always_unused *mbx) { u16 glort, pvid; u32 pvid_update; @@ -1746,7 +1745,7 @@ const struct fm10k_tlv_attr fm10k_err_msg_attr[] = { * messages that the PF has sent. **/ s32 fm10k_msg_err_pf(struct fm10k_hw *hw, u32 **results, - struct fm10k_mbx_info *mbx) + struct fm10k_mbx_info __always_unused *mbx) { struct fm10k_swapi_error err_msg; s32 err; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c index 2a7a40bf2b1c..21eff0895a7a 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2019 Intel Corporation. */ #include "fm10k_tlv.h" @@ -472,7 +472,7 @@ static s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results, const struct fm10k_tlv_attr *tlv_attr) { u32 i, attr_id, offset = 0; - s32 err = 0; + s32 err; u16 len; /* verify pointers are not NULL */ @@ -587,8 +587,9 @@ s32 fm10k_tlv_msg_parse(struct fm10k_hw *hw, u32 *msg, * a minimum it just indicates that the message requested was * unimplemented. 
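The fm10k_get_fault_pf hunk above is a genuine bug fix rather than a cleanup: assigning the low register overwrote the high half read one line earlier, truncating every reported fault address to 32 bits. A minimal, self-contained illustration of the corrected composition:

#include <stdint.h>

static uint64_t compose_addr(uint32_t hi, uint32_t lo)
{
	uint64_t addr = hi;

	addr <<= 32;	/* high word into bits 63:32 */
	addr |= lo;	/* '=' here would discard the high word */
	return addr;
}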
**/ -s32 fm10k_tlv_msg_error(struct fm10k_hw *hw, u32 **results, - struct fm10k_mbx_info *mbx) +s32 fm10k_tlv_msg_error(struct fm10k_hw __always_unused *hw, + u32 __always_unused **results, + struct fm10k_mbx_info __always_unused *mbx) { return FM10K_NOT_IMPLEMENTED; } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h index 9fb9fca375e3..15ac1c7885bc 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2019 Intel Corporation. */ #ifndef _FM10K_TYPE_H_ #define _FM10K_TYPE_H_ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c index a8519c1f0406..dc8ccd378ec9 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2019 Intel Corporation. */ #include "fm10k_vf.h" @@ -198,7 +198,7 @@ static s32 fm10k_update_vlan_vf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set) * This function should determine the MAC address for the VF **/ s32 fm10k_msg_mac_vlan_vf(struct fm10k_hw *hw, u32 **results, - struct fm10k_mbx_info *mbx) + struct fm10k_mbx_info __always_unused *mbx) { u8 perm_addr[ETH_ALEN]; u16 vid; @@ -267,8 +267,10 @@ static s32 fm10k_read_mac_addr_vf(struct fm10k_hw *hw) * This function is used to add or remove unicast MAC addresses for * the VF. **/ -static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort, - const u8 *mac, u16 vid, bool add, u8 flags) +static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, + u16 __always_unused glort, + const u8 *mac, u16 vid, bool add, + u8 __always_unused flags) { struct fm10k_mbx_info *mbx = &hw->mbx; u32 msg[7]; @@ -309,7 +311,8 @@ static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort, * This function is used to add or remove multicast MAC addresses for * the VF. **/ -static s32 fm10k_update_mc_addr_vf(struct fm10k_hw *hw, u16 glort, +static s32 fm10k_update_mc_addr_vf(struct fm10k_hw *hw, + u16 __always_unused glort, const u8 *mac, u16 vid, bool add) { struct fm10k_mbx_info *mbx = &hw->mbx; @@ -373,7 +376,7 @@ const struct fm10k_tlv_attr fm10k_lport_state_msg_attr[] = { * are ready to bring up the interface. **/ s32 fm10k_msg_lport_state_vf(struct fm10k_hw *hw, u32 **results, - struct fm10k_mbx_info *mbx) + struct fm10k_mbx_info __always_unused *mbx) { hw->mac.dglort_map = !results[FM10K_LPORT_STATE_MSG_READY] ? FM10K_DGLORTMAP_NONE : FM10K_DGLORTMAP_ZERO; @@ -392,8 +395,9 @@ s32 fm10k_msg_lport_state_vf(struct fm10k_hw *hw, u32 **results, * enabled we can add filters, if it is disabled all filters for this * logical port are flushed. **/ -static s32 fm10k_update_lport_state_vf(struct fm10k_hw *hw, u16 glort, - u16 count, bool enable) +static s32 fm10k_update_lport_state_vf(struct fm10k_hw *hw, + u16 __always_unused glort, + u16 __always_unused count, bool enable) { struct fm10k_mbx_info *mbx = &hw->mbx; u32 msg[2]; @@ -420,7 +424,8 @@ static s32 fm10k_update_lport_state_vf(struct fm10k_hw *hw, u16 glort, * so that it can enable either multicast, multicast promiscuous, or * promiscuous mode of operation. 
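The __always_unused annotations spread through these handlers keep the shared mailbox-callback signature intact while telling the compiler the parameter is deliberately ignored; in include/linux/compiler_attributes.h the macro is just the unused attribute:

#define __always_unused __attribute__((__unused__))

which silences -Wunused-parameter without changing the function type, so the handlers can still be stored in the common fm10k_msg_data tables.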
**/ -static s32 fm10k_update_xcast_mode_vf(struct fm10k_hw *hw, u16 glort, u8 mode) +static s32 fm10k_update_xcast_mode_vf(struct fm10k_hw *hw, + u16 __always_unused glort, u8 mode) { struct fm10k_mbx_info *mbx = &hw->mbx; u32 msg[3]; @@ -475,7 +480,7 @@ static void fm10k_rebind_hw_stats_vf(struct fm10k_hw *hw, * that information to then populate a DGLORTMAP/DEC entry and the queues * to which it has been assigned. **/ -static s32 fm10k_configure_dglort_map_vf(struct fm10k_hw *hw, +static s32 fm10k_configure_dglort_map_vf(struct fm10k_hw __always_unused *hw, struct fm10k_dglort_cfg *dglort) { /* verify the dglort pointer */ diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 84bd06901014..2af9f6308f84 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -131,7 +131,6 @@ enum i40e_state_t { __I40E_PF_RESET_REQUESTED, __I40E_CORE_RESET_REQUESTED, __I40E_GLOBAL_RESET_REQUESTED, - __I40E_EMP_RESET_REQUESTED, __I40E_EMP_RESET_INTR_RECEIVED, __I40E_SUSPENDED, __I40E_PTP_TX_IN_PROGRESS, @@ -244,11 +243,11 @@ struct i40e_fdir_filter { u32 fd_id; }; -#define I40E_CLOUD_FIELD_OMAC 0x01 -#define I40E_CLOUD_FIELD_IMAC 0x02 -#define I40E_CLOUD_FIELD_IVLAN 0x04 -#define I40E_CLOUD_FIELD_TEN_ID 0x08 -#define I40E_CLOUD_FIELD_IIP 0x10 +#define I40E_CLOUD_FIELD_OMAC BIT(0) +#define I40E_CLOUD_FIELD_IMAC BIT(1) +#define I40E_CLOUD_FIELD_IVLAN BIT(2) +#define I40E_CLOUD_FIELD_TEN_ID BIT(3) +#define I40E_CLOUD_FIELD_IIP BIT(4) #define I40E_CLOUD_FILTER_FLAGS_OMAC I40E_CLOUD_FIELD_OMAC #define I40E_CLOUD_FILTER_FLAGS_IMAC I40E_CLOUD_FIELD_IMAC @@ -1021,6 +1020,7 @@ i40e_find_vsi_by_type(struct i40e_pf *pf, u16 type) return NULL; } void i40e_update_stats(struct i40e_vsi *vsi); +void i40e_update_veb_stats(struct i40e_veb *veb); void i40e_update_eth_stats(struct i40e_vsi *vsi); struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi); int i40e_fetch_switch_configuration(struct i40e_pf *pf, diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 814acbe79ffd..72c04881d290 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -610,8 +610,10 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw) if (hw->aq.api_maj_ver > 1 || (hw->aq.api_maj_ver == 1 && - hw->aq.api_min_ver >= 8)) + hw->aq.api_min_ver >= 8)) { hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT; + hw->flags |= I40E_HW_FLAG_DROP_MODE; + } if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) { ret_code = I40E_ERR_FIRMWARE_API_VERSION; diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 6536023fa074..530613f31527 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -11,8 +11,8 @@ */ #define I40E_FW_API_VERSION_MAJOR 0x0001 -#define I40E_FW_API_VERSION_MINOR_X722 0x0008 -#define I40E_FW_API_VERSION_MINOR_X710 0x0008 +#define I40E_FW_API_VERSION_MINOR_X722 0x0009 +#define I40E_FW_API_VERSION_MINOR_X710 0x0009 #define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? 
\ I40E_FW_API_VERSION_MINOR_X710 : \ @@ -1382,7 +1382,7 @@ struct i40e_aqc_cloud_filters_element_data { #define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \ I40E_AQC_ADD_CLOUD_FILTER_SHIFT) /* 0x0000 reserved */ -#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001 +/* 0x0001 reserved */ /* 0x0002 reserved */ #define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003 #define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004 @@ -1394,6 +1394,9 @@ struct i40e_aqc_cloud_filters_element_data { #define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A #define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B #define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C +/* 0x000D reserved */ +/* 0x000E reserved */ +/* 0x000F reserved */ /* 0x0010 to 0x0017 is for custom filters */ #define I40E_AQC_ADD_CLOUD_FILTER_IP_PORT 0x0010 /* Dest IP + L4 Port */ #define I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT 0x0011 /* Dest MAC + L4 Port */ @@ -2051,20 +2054,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config); struct i40e_aq_set_mac_config { __le16 max_frame_size; u8 params; -#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04 -#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78 -#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3 -#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8 -#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7 -#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5 -#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4 -#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3 -#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2 -#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1 +#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04 +#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78 +#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3 +#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8 +#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7 +#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5 +#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4 +#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3 +#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2 +#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1 +#define I40E_AQ_SET_MAC_CONFIG_DROP_BLOCKING_PACKET_EN 0x80 u8 tx_timer_priority; /* bitmap */ __le16 tx_timer_value; __le16 fc_refresh_threshold; diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 906cf68d3453..d37c6e0e5f08 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ +#include "i40e.h" #include "i40e_type.h" #include "i40e_adminq.h" #include "i40e_prototype.h" @@ -13,7 +14,7 @@ * This function sets the mac type of the adapter based on the * vendor ID and device ID stored in the hw structure. 
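The I40E_CLOUD_FIELD_* hunk above trades hand-written hex flags for BIT() with no change in value, only in intent: BIT(3) is still 0x08. The kernel macro is essentially:

#define BIT(nr) (1UL << (nr))

so single-bit flags read as bit positions rather than magic numbers, and overlapping or multi-bit definitions stand out in review.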
**/ -static i40e_status i40e_set_mac_type(struct i40e_hw *hw) +i40e_status i40e_set_mac_type(struct i40e_hw *hw) { i40e_status status = 0; @@ -1577,19 +1578,22 @@ i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw, status = i40e_asq_send_command(hw, &desc, abilities, abilities_size, cmd_details); - if (status) - break; - - if (hw->aq.asq_last_status == I40E_AQ_RC_EIO) { + switch (hw->aq.asq_last_status) { + case I40E_AQ_RC_EIO: status = I40E_ERR_UNKNOWN_PHY; break; - } else if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) { + case I40E_AQ_RC_EAGAIN: usleep_range(1000, 2000); total_delay++; status = I40E_ERR_TIMEOUT; + break; + /* also covers I40E_AQ_RC_OK */ + default: + break; } - } while ((hw->aq.asq_last_status != I40E_AQ_RC_OK) && - (total_delay < max_delay)); + + } while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) && + (total_delay < max_delay)); if (status) return status; @@ -1643,25 +1647,15 @@ enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw, return status; } -/** - * i40e_set_fc - * @hw: pointer to the hw struct - * @aq_failures: buffer to return AdminQ failure information - * @atomic_restart: whether to enable atomic link restart - * - * Set the requested flow control mode using set_phy_config. - **/ -enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, - bool atomic_restart) +static noinline_for_stack enum i40e_status_code +i40e_set_fc_status(struct i40e_hw *hw, + struct i40e_aq_get_phy_abilities_resp *abilities, + bool atomic_restart) { - enum i40e_fc_mode fc_mode = hw->fc.requested_mode; - struct i40e_aq_get_phy_abilities_resp abilities; struct i40e_aq_set_phy_config config; - enum i40e_status_code status; + enum i40e_fc_mode fc_mode = hw->fc.requested_mode; u8 pause_mask = 0x0; - *aq_failures = 0x0; - switch (fc_mode) { case I40E_FC_FULL: pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX; @@ -1677,6 +1671,48 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, break; } + memset(&config, 0, sizeof(struct i40e_aq_set_phy_config)); + /* clear the old pause settings */ + config.abilities = abilities->abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) & + ~(I40E_AQ_PHY_FLAG_PAUSE_RX); + /* set the new abilities */ + config.abilities |= pause_mask; + /* If the abilities have changed, then set the new config */ + if (config.abilities == abilities->abilities) + return 0; + + /* Auto restart link so settings take effect */ + if (atomic_restart) + config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; + /* Copy over all the old settings */ + config.phy_type = abilities->phy_type; + config.phy_type_ext = abilities->phy_type_ext; + config.link_speed = abilities->link_speed; + config.eee_capability = abilities->eee_capability; + config.eeer = abilities->eeer_val; + config.low_power_ctrl = abilities->d3_lpan; + config.fec_config = abilities->fec_cfg_curr_mod_ext_info & + I40E_AQ_PHY_FEC_CONFIG_MASK; + + return i40e_aq_set_phy_config(hw, &config, NULL); +} + +/** + * i40e_set_fc + * @hw: pointer to the hw struct + * @aq_failures: buffer to return AdminQ failure information + * @atomic_restart: whether to enable atomic link restart + * + * Set the requested flow control mode using set_phy_config. 
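The i40e_set_fc refactor above is a stack-footprint fix: the config-building work, with its local struct i40e_aq_set_phy_config, moves into a helper tagged noinline_for_stack so those locals stay out of the caller's frame. The annotation, from include/linux/compiler.h, is nothing more than:

#define noinline_for_stack noinline

a documented alias that records why inlining is being suppressed.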
+ **/ +enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, + bool atomic_restart) +{ + struct i40e_aq_get_phy_abilities_resp abilities; + enum i40e_status_code status; + + *aq_failures = 0x0; + /* Get the current phy config */ status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL); @@ -1685,31 +1721,10 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, return status; } - memset(&config, 0, sizeof(struct i40e_aq_set_phy_config)); - /* clear the old pause settings */ - config.abilities = abilities.abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) & - ~(I40E_AQ_PHY_FLAG_PAUSE_RX); - /* set the new abilities */ - config.abilities |= pause_mask; - /* If the abilities have changed, then set the new config */ - if (config.abilities != abilities.abilities) { - /* Auto restart link so settings take effect */ - if (atomic_restart) - config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; - /* Copy over all the old settings */ - config.phy_type = abilities.phy_type; - config.phy_type_ext = abilities.phy_type_ext; - config.link_speed = abilities.link_speed; - config.eee_capability = abilities.eee_capability; - config.eeer = abilities.eeer_val; - config.low_power_ctrl = abilities.d3_lpan; - config.fec_config = abilities.fec_cfg_curr_mod_ext_info & - I40E_AQ_PHY_FEC_CONFIG_MASK; - status = i40e_aq_set_phy_config(hw, &config, NULL); + status = i40e_set_fc_status(hw, &abilities, atomic_restart); + if (status) + *aq_failures |= I40E_SET_FC_AQ_FAIL_SET; - if (status) - *aq_failures |= I40E_SET_FC_AQ_FAIL_SET; - } /* Update the link info */ status = i40e_update_link_info(hw); if (status) { @@ -2537,7 +2552,7 @@ i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up) * i40e_updatelink_status - update status of the HW network link * @hw: pointer to the hw struct **/ -i40e_status i40e_update_link_info(struct i40e_hw *hw) +noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw) { struct i40e_aq_get_phy_abilities_resp abilities; i40e_status status = 0; diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c index 292eeb3def10..200a1cb3b536 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c @@ -877,7 +877,23 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change) return I40E_NOT_SUPPORTED; /* Read LLDP NVM area */ - ret = i40e_read_lldp_cfg(hw, &lldp_cfg); + if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) { + u8 offset = 0; + + if (hw->mac.type == I40E_MAC_XL710) + offset = I40E_LLDP_CURRENT_STATUS_XL710_OFFSET; + else if (hw->mac.type == I40E_MAC_X722) + offset = I40E_LLDP_CURRENT_STATUS_X722_OFFSET; + else + return I40E_NOT_SUPPORTED; + + ret = i40e_read_nvm_module_data(hw, + I40E_SR_EMP_SR_SETTINGS_PTR, + offset, 1, + &lldp_cfg.adminstatus); + } else { + ret = i40e_read_lldp_cfg(hw, &lldp_cfg); + } if (ret) return I40E_ERR_NOT_READY; diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h index ddb48ae7cce4..2a80c5daa376 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h @@ -30,6 +30,8 @@ #define I40E_CEE_SUBTYPE_APP_PRI 4 #define I40E_CEE_MAX_FEAT_TYPE 3 +#define I40E_LLDP_CURRENT_STATUS_XL710_OFFSET 0x2B +#define I40E_LLDP_CURRENT_STATUS_X722_OFFSET 0x31 /* Defines for LLDP TLV header */ #define I40E_LLDP_TLV_LEN_SHIFT 0 #define I40E_LLDP_TLV_LEN_MASK (0x01FF << I40E_LLDP_TLV_LEN_SHIFT) diff --git 
a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 55d20acfcf70..99ea543dd245 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -1125,10 +1125,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp, dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n"); i40e_do_reset_safe(pf, BIT(__I40E_GLOBAL_RESET_REQUESTED)); - } else if (strncmp(cmd_buf, "empr", 4) == 0) { - dev_info(&pf->pdev->dev, "debugfs: forcing EMPR\n"); - i40e_do_reset_safe(pf, BIT(__I40E_EMP_RESET_REQUESTED)); - } else if (strncmp(cmd_buf, "read", 4) == 0) { u32 address; u32 value; @@ -1732,29 +1728,15 @@ static const struct file_operations i40e_dbg_netdev_ops_fops = { **/ void i40e_dbg_pf_init(struct i40e_pf *pf) { - struct dentry *pfile; const char *name = pci_name(pf->pdev); - const struct device *dev = &pf->pdev->dev; pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root); - if (!pf->i40e_dbg_pf) - return; - - pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf, - &i40e_dbg_command_fops); - if (!pfile) - goto create_failed; - pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf, - &i40e_dbg_netdev_ops_fops); - if (!pfile) - goto create_failed; + debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf, + &i40e_dbg_command_fops); - return; - -create_failed: - dev_info(dev, "debugfs dir/file for %s failed\n", name); - debugfs_remove_recursive(pf->i40e_dbg_pf); + debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf, + &i40e_dbg_netdev_ops_fops); } /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 527eb52c5401..41e1240acaea 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -711,6 +711,35 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, } /** + * i40e_get_settings_link_up_fec - Get the FEC mode encoding from mask + * @req_fec_info: mask request FEC info + * @ks: ethtool ksettings to fill in + **/ +static void i40e_get_settings_link_up_fec(u8 req_fec_info, + struct ethtool_link_ksettings *ks) +{ + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); + + if (I40E_AQ_SET_FEC_REQUEST_RS & req_fec_info) { + ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); + } else if (I40E_AQ_SET_FEC_REQUEST_KR & req_fec_info) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + FEC_BASER); + } else { + ethtool_link_ksettings_add_link_mode(ks, advertising, + FEC_NONE); + if (I40E_AQ_SET_FEC_AUTO & req_fec_info) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + FEC_RS); + ethtool_link_ksettings_add_link_mode(ks, advertising, + FEC_BASER); + } + } +} + +/** * i40e_get_settings_link_up - Get the Link settings for when link is up * @hw: hw structure * @ks: ethtool ksettings to fill in @@ -769,13 +798,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, 25000baseSR_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseSR_Full); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); - ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE); - ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); - 
ethtool_link_ksettings_add_link_mode(ks, advertising, - FEC_BASER); + i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks); ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseSR_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, @@ -892,9 +915,6 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, 40000baseKR4_Full); ethtool_link_ksettings_add_link_mode(ks, supported, 25000baseKR_Full); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); ethtool_link_ksettings_add_link_mode(ks, supported, 20000baseKR2_Full); ethtool_link_ksettings_add_link_mode(ks, supported, @@ -908,10 +928,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, 40000baseKR4_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseKR_Full); - ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE); - ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); - ethtool_link_ksettings_add_link_mode(ks, advertising, - FEC_BASER); + i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks); ethtool_link_ksettings_add_link_mode(ks, advertising, 20000baseKR2_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, @@ -929,13 +946,8 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, 25000baseCR_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseCR_Full); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); - ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE); - ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); - ethtool_link_ksettings_add_link_mode(ks, advertising, - FEC_BASER); + i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks); + break; case I40E_PHY_TYPE_25GBASE_AOC: case I40E_PHY_TYPE_25GBASE_ACC: @@ -945,13 +957,8 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, 25000baseCR_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseCR_Full); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); - ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE); - ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); - ethtool_link_ksettings_add_link_mode(ks, advertising, - FEC_BASER); + i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks); + ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseCR_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, @@ -2250,7 +2257,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; - struct i40e_veb *veb = pf->veb[pf->lan_veb]; + struct i40e_veb *veb = NULL; unsigned int i; bool veb_stats; u64 *p = data; @@ -2273,8 +2280,14 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, goto check_data_pointer; veb_stats = ((pf->lan_veb != I40E_NO_VEB) && + (pf->lan_veb < I40E_MAX_VEB) && (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)); + if (veb_stats) { + veb = pf->veb[pf->lan_veb]; + i40e_update_veb_stats(veb); + } + /* If veb stats aren't enabled, pass NULL instead of the veb so that * we initialize stats to zero and update the data pointer * 
intelligently @@ -2329,7 +2342,7 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data) } if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) - return; + goto check_data_pointer; i40e_add_stat_strings(&data, i40e_gstrings_veb_stats); @@ -2341,6 +2354,7 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data) for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) i40e_add_stat_strings(&data, i40e_gstrings_pfc_stats, i); +check_data_pointer: WARN_ONCE(data - p != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN, "stat strings count mismatch!"); } @@ -5123,6 +5137,12 @@ static int i40e_get_module_info(struct net_device *netdev, /* Module is not SFF-8472 compliant */ modinfo->type = ETH_MODULE_SFF_8079; modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } else if (!(sff8472_swap & I40E_MODULE_SFF_DDM_IMPLEMENTED)) { + /* Module is SFF-8472 compliant but doesn't implement + * Digital Diagnostic Monitoring (DDM). + */ + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; } else { modinfo->type = ETH_MODULE_SFF_8472; modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c index 19ce93d7fd0a..163ee8c6311c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ +#include "i40e.h" #include "i40e_osdep.h" #include "i40e_register.h" #include "i40e_status.h" diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c index 994011c38fb4..be24d42280d8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ +#include "i40e.h" #include "i40e_osdep.h" #include "i40e_register.h" #include "i40e_type.h" @@ -963,7 +964,7 @@ static i40e_status i40e_set_hmc_context(u8 *context_bytes, /** * i40e_hmc_get_object_va - retrieves an object's virtual address - * @hmc_info: pointer to i40e_hmc_info struct + * @hw: the hardware struct, from which we obtain the i40e_hmc_info pointer * @object_base: pointer to u64 to get the va * @rsrc_type: the hmc resource type * @obj_idx: hmc object index @@ -972,16 +973,16 @@ static i40e_status i40e_set_hmc_context(u8 *context_bytes, * base pointer. This function is used for LAN Queue contexts. 
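The i40e_get_ethtool_stats change above also closes a potential out-of-bounds read: pf->veb[pf->lan_veb] used to be indexed at declaration time, before lan_veb had been checked. The corrected shape, sketched:

struct i40e_veb *veb = NULL;
bool veb_stats = (pf->lan_veb != I40E_NO_VEB) &&
		 (pf->lan_veb < I40E_MAX_VEB) &&	/* bounds check first */
		 (pf->flags & I40E_FLAG_VEB_STATS_ENABLED);

if (veb_stats)
	veb = pf->veb[pf->lan_veb];	/* index only when known valid */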
**/ static -i40e_status i40e_hmc_get_object_va(struct i40e_hmc_info *hmc_info, - u8 **object_base, - enum i40e_hmc_lan_rsrc_type rsrc_type, - u32 obj_idx) +i40e_status i40e_hmc_get_object_va(struct i40e_hw *hw, u8 **object_base, + enum i40e_hmc_lan_rsrc_type rsrc_type, + u32 obj_idx) { + struct i40e_hmc_info *hmc_info = &hw->hmc; u32 obj_offset_in_sd, obj_offset_in_pd; - i40e_status ret_code = 0; struct i40e_hmc_sd_entry *sd_entry; struct i40e_hmc_pd_entry *pd_entry; u32 pd_idx, pd_lmt, rel_pd_idx; + i40e_status ret_code = 0; u64 obj_offset_in_fpm; u32 sd_idx, sd_lmt; @@ -1047,7 +1048,7 @@ i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw, i40e_status err; u8 *context_bytes; - err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes, + err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue); if (err < 0) return err; @@ -1068,7 +1069,7 @@ i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw, i40e_status err; u8 *context_bytes; - err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes, + err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue); if (err < 0) return err; @@ -1088,7 +1089,7 @@ i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw, i40e_status err; u8 *context_bytes; - err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes, + err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue); if (err < 0) return err; @@ -1109,7 +1110,7 @@ i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw, i40e_status err; u8 *context_bytes; - err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes, + err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue); if (err < 0) return err; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 9ebbe3da61bb..6031223eafab 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -73,6 +73,7 @@ static const struct pci_device_id i40e_pci_tbl[] = { {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_BC), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_SFP), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_B), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0}, @@ -534,6 +535,10 @@ void i40e_pf_reset_stats(struct i40e_pf *pf) sizeof(pf->veb[i]->stats)); memset(&pf->veb[i]->stats_offsets, 0, sizeof(pf->veb[i]->stats_offsets)); + memset(&pf->veb[i]->tc_stats, 0, + sizeof(pf->veb[i]->tc_stats)); + memset(&pf->veb[i]->tc_stats_offsets, 0, + sizeof(pf->veb[i]->tc_stats_offsets)); pf->veb[i]->stat_offsets_loaded = false; } } @@ -677,7 +682,7 @@ void i40e_update_eth_stats(struct i40e_vsi *vsi) * i40e_update_veb_stats - Update Switch component statistics * @veb: the VEB being updated **/ -static void i40e_update_veb_stats(struct i40e_veb *veb) +void i40e_update_veb_stats(struct i40e_veb *veb) { struct i40e_pf *pf = veb->pf; struct i40e_hw *hw = &pf->hw; @@ -2530,6 +2535,10 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) vsi_name, i40e_stat_str(hw, aq_ret), i40e_aq_str(hw, hw->aq.asq_last_status)); + } else { + dev_info(&pf->pdev->dev, "%s is %s allmulti mode.\n", + vsi->netdev->name, + cur_multipromisc ? 
"entering" : "leaving"); } } @@ -2583,6 +2592,10 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf) return; if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state)) return; + if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) { + set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state); + return; + } for (v = 0; v < pf->num_alloc_vsi; v++) { if (pf->vsi[v] && @@ -2597,6 +2610,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf) } } } + clear_bit(__I40E_VF_DISABLE, pf->state); } /** @@ -3360,7 +3374,7 @@ static int i40e_vsi_configure_tx(struct i40e_vsi *vsi) for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) err = i40e_configure_tx_ring(vsi->tx_rings[i]); - if (!i40e_enabled_xdp_vsi(vsi)) + if (err || !i40e_enabled_xdp_vsi(vsi)) return err; for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) @@ -6412,50 +6426,6 @@ static int i40e_resume_port_tx(struct i40e_pf *pf) } /** - * i40e_update_dcb_config - * @hw: pointer to the HW struct - * @enable_mib_change: enable MIB change event - * - * Update DCB configuration from the firmware - **/ -static enum i40e_status_code -i40e_update_dcb_config(struct i40e_hw *hw, bool enable_mib_change) -{ - struct i40e_lldp_variables lldp_cfg; - i40e_status ret; - - if (!hw->func_caps.dcb) - return I40E_NOT_SUPPORTED; - - /* Read LLDP NVM area */ - ret = i40e_read_lldp_cfg(hw, &lldp_cfg); - if (ret) - return I40E_ERR_NOT_READY; - - /* Get DCBX status */ - ret = i40e_get_dcbx_status(hw, &hw->dcbx_status); - if (ret) - return ret; - - /* Check the DCBX Status */ - if (hw->dcbx_status == I40E_DCBX_STATUS_DONE || - hw->dcbx_status == I40E_DCBX_STATUS_IN_PROGRESS) { - /* Get current DCBX configuration */ - ret = i40e_get_dcb_config(hw); - if (ret) - return ret; - } else if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) { - return I40E_ERR_NOT_READY; - } - - /* Configure the LLDP MIB change event */ - if (enable_mib_change) - ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL); - - return ret; -} - -/** * i40e_init_pf_dcb - Initialize DCB configuration * @pf: PF being configured * @@ -6477,7 +6447,7 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf) goto out; } - err = i40e_update_dcb_config(hw, true); + err = i40e_init_dcb(hw, true); if (!err) { /* Device/Function is not DCBX capable */ if ((!hw->func_caps.dcb) || @@ -6599,19 +6569,19 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) } if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) { - req_fec = ", Requested FEC: None"; - fec = ", FEC: None"; - an = ", Autoneg: False"; + req_fec = "None"; + fec = "None"; + an = "False"; if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED) - an = ", Autoneg: True"; + an = "True"; if (pf->hw.phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA) - fec = ", FEC: CL74 FC-FEC/BASE-R"; + fec = "CL74 FC-FEC/BASE-R"; else if (pf->hw.phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA) - fec = ", FEC: CL108 RS-FEC"; + fec = "CL108 RS-FEC"; /* 'CL108 RS-FEC' should be displayed when RS is requested, or * both RS and FC are requested @@ -6620,14 +6590,19 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) { if (vsi->back->hw.phy.link_info.req_fec_info & I40E_AQ_REQUEST_FEC_RS) - req_fec = ", Requested FEC: CL108 RS-FEC"; + req_fec = "CL108 RS-FEC"; else - req_fec = ", Requested FEC: CL74 FC-FEC/BASE-R"; + req_fec = "CL74 FC-FEC/BASE-R"; } + netdev_info(vsi->netdev, + "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n", + 
speed, req_fec, fec, an, fc); + } else { + netdev_info(vsi->netdev, + "NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n", + speed, fc); } - netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex%s%s%s, Flow Control: %s\n", - speed, req_fec, fec, an, fc); } /** @@ -8486,6 +8461,11 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired) dev_dbg(&pf->pdev->dev, "PFR requested\n"); i40e_handle_reset_warning(pf, lock_acquired); + dev_info(&pf->pdev->dev, + pf->flags & I40E_FLAG_DISABLE_FW_LLDP ? + "FW LLDP is disabled\n" : + "FW LLDP is enabled\n"); + } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) { int v; @@ -12561,7 +12541,8 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, if (need_reset && prog) for (i = 0; i < vsi->num_queue_pairs; i++) if (vsi->xdp_rings[i]->xsk_umem) - (void)i40e_xsk_async_xmit(vsi->netdev, i); + (void)i40e_xsk_wakeup(vsi->netdev, i, + XDP_WAKEUP_RX); return 0; } @@ -12883,7 +12864,7 @@ static const struct net_device_ops i40e_netdev_ops = { .ndo_bridge_setlink = i40e_ndo_bridge_setlink, .ndo_bpf = i40e_xdp, .ndo_xdp_xmit = i40e_xdp_xmit, - .ndo_xsk_async_xmit = i40e_xsk_async_xmit, + .ndo_xsk_wakeup = i40e_xsk_wakeup, .ndo_dfwd_add_station = i40e_fwd_add, .ndo_dfwd_del_station = i40e_fwd_del, }; @@ -14569,9 +14550,20 @@ void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags) **/ static bool i40e_check_recovery_mode(struct i40e_pf *pf) { - u32 val = rd32(&pf->hw, I40E_GL_FWSTS); - - if (val & I40E_GL_FWSTS_FWS1B_MASK) { + u32 val = rd32(&pf->hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK; + bool is_recovery_mode = false; + + if (pf->hw.mac.type == I40E_MAC_XL710) + is_recovery_mode = + val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK || + val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK || + val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_TRANSITION_MASK || + val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK; + if (pf->hw.mac.type == I40E_MAC_X722) + is_recovery_mode = + val == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK || + val == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK; + if (is_recovery_mode) { dev_notice(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n"); dev_notice(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n"); set_bit(__I40E_RECOVERY_MODE, pf->state); @@ -14585,6 +14577,51 @@ static bool i40e_check_recovery_mode(struct i40e_pf *pf) } /** + * i40e_pf_loop_reset - perform reset in a loop. + * @pf: board private structure + * + * This function is useful when a NIC is about to enter recovery mode. + * When a NIC's internal data structures are corrupted the NIC's + * firmware is going to enter recovery mode. + * Right after a POR it takes about 7 minutes for firmware to enter + * recovery mode. Until that time a NIC is in some kind of intermediate + * state. After that time period the NIC almost surely enters + * recovery mode. The only way for a driver to detect intermediate + * state is to issue a series of pf-resets and check a return value. + * If a PF reset returns success then the firmware could be in recovery + * mode so the caller of this code needs to check for recovery mode + * if this function returns success. There is a little chance that + * firmware will hang in intermediate state forever. + * Since waiting 7 minutes is quite a lot of time this function waits + * 10 seconds and then gives up by returning an error. + * + * Return 0 on success, negative on failure. 
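For the retry loop that follows: the 10-second budget mentioned above comes straight from its constants, roughly 1000 attempts (MAX_CNT) times a 10 ms sleep after each failed attempt (MSECS), i.e. about 1000 * 10 ms = 10 s before the function gives up and reports an error.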
+ **/ +static i40e_status i40e_pf_loop_reset(struct i40e_pf *pf) +{ + const unsigned short MAX_CNT = 1000; + const unsigned short MSECS = 10; + struct i40e_hw *hw = &pf->hw; + i40e_status ret; + int cnt; + + for (cnt = 0; cnt < MAX_CNT; ++cnt) { + ret = i40e_pf_reset(hw); + if (!ret) + break; + msleep(MSECS); + } + + if (cnt == MAX_CNT) { + dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret); + return ret; + } + + pf->pfr_count++; + return ret; +} + +/** * i40e_init_recovery_mode - initialize subsystems needed in recovery mode * @pf: board private structure * @hw: ptr to the hardware info @@ -14812,14 +14849,22 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* Reset here to make sure all is clean and to define PF 'n' */ i40e_clear_hw(hw); - if (!i40e_check_recovery_mode(pf)) { - err = i40e_pf_reset(hw); - if (err) { - dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err); - goto err_pf_reset; - } - pf->pfr_count++; + + err = i40e_set_mac_type(hw); + if (err) { + dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n", + err); + goto err_pf_reset; } + + err = i40e_pf_loop_reset(pf); + if (err) { + dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err); + goto err_pf_reset; + } + + i40e_check_recovery_mode(pf); + hw->aq.num_arq_entries = I40E_AQ_LEN; hw->aq.num_asq_entries = I40E_AQ_LEN; hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE; @@ -15605,8 +15650,7 @@ static void i40e_shutdown(struct pci_dev *pdev) **/ static int __maybe_unused i40e_suspend(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_pf *pf = dev_get_drvdata(dev); struct i40e_hw *hw = &pf->hw; /* If we're already suspended, then there is nothing to do */ @@ -15656,8 +15700,7 @@ static int __maybe_unused i40e_suspend(struct device *dev) **/ static int __maybe_unused i40e_resume(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_pf *pf = dev_get_drvdata(dev); int err; /* If we're not suspended, then there is nothing to do */ @@ -15674,7 +15717,7 @@ static int __maybe_unused i40e_resume(struct device *dev) */ err = i40e_restore_interrupt_scheme(pf); if (err) { - dev_err(&pdev->dev, "Cannot restore interrupt scheme: %d\n", + dev_err(dev, "Cannot restore interrupt scheme: %d\n", err); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index c508b75c3c09..e4d8d20baf3b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -322,6 +322,77 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, } /** + * i40e_read_nvm_module_data - Reads NVM Buffer to specified memory location + * @hw: pointer to the HW structure + * @module_ptr: Pointer to module in words with respect to NVM beginning + * @offset: offset in words from module start + * @words_data_size: Words to read from NVM + * @data_ptr: Pointer to memory location where resulting buffer will be stored + **/ +i40e_status i40e_read_nvm_module_data(struct i40e_hw *hw, + u8 module_ptr, u16 offset, + u16 words_data_size, + u16 *data_ptr) +{ + i40e_status status; + u16 ptr_value = 0; + u32 flat_offset; + + if (module_ptr != 0) { + status = i40e_read_nvm_word(hw, module_ptr, &ptr_value); + if (status) { + i40e_debug(hw, I40E_DEBUG_ALL, + "Reading nvm word failed.Error code: %d.\n", + status); + return I40E_ERR_NVM; + } + } +#define I40E_NVM_INVALID_PTR_VAL 0x7FFF +#define 
I40E_NVM_INVALID_VAL 0xFFFF + + /* Pointer not initialized */ + if (ptr_value == I40E_NVM_INVALID_PTR_VAL || + ptr_value == I40E_NVM_INVALID_VAL) + return I40E_ERR_BAD_PTR; + + /* Check whether the module is in SR mapped area or outside */ + if (ptr_value & I40E_PTR_TYPE) { + /* Pointer points outside of the Shared RAM mapped area */ + ptr_value &= ~I40E_PTR_TYPE; + + /* PtrValue in 4kB units, need to convert to words */ + ptr_value /= 2; + flat_offset = ((u32)ptr_value * 0x1000) + (u32)offset; + status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (!status) { + status = i40e_aq_read_nvm(hw, 0, 2 * flat_offset, + 2 * words_data_size, + data_ptr, true, NULL); + i40e_release_nvm(hw); + if (status) { + i40e_debug(hw, I40E_DEBUG_ALL, + "Reading nvm aq failed.Error code: %d.\n", + status); + return I40E_ERR_NVM; + } + } else { + return I40E_ERR_NVM; + } + } else { + /* Read from the Shadow RAM */ + status = i40e_read_nvm_buffer(hw, ptr_value + offset, + &words_data_size, data_ptr); + if (status) { + i40e_debug(hw, I40E_DEBUG_ALL, + "Reading nvm buffer failed.Error code: %d.\n", + status); + } + } + + return status; +} + +/** * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). @@ -430,6 +501,36 @@ static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw, } /** + * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquire lock if necessary + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). + * @words: (in) number of words to read; (out) number of words actually read + * @data: words read from the Shadow RAM + * + * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd() + * method. The buffer read is preceded by the NVM ownership take + * and followed by the release. + **/ +i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, + u16 *words, u16 *data) +{ + i40e_status ret_code = 0; + + if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) { + ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (!ret_code) { + ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, + data); + i40e_release_nvm(hw); + } + } else { + ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data); + } + + return ret_code; +} + +/** * i40e_write_nvm_aq - Writes Shadow RAM. * @hw: pointer to the HW structure. * @module_pointer: module pointer location in words from the NVM beginning diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h index a07574bff550..c302ef2524f8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h +++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h @@ -18,7 +18,10 @@ * actual OS primitives */ -#define hw_dbg(hw, S, A...) do {} while (0) +#define hw_dbg(hw, S, A...) 
\ +do { \ + dev_dbg(&((struct i40e_pf *)hw->back)->pdev->dev, S, ##A); \ +} while (0) #define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg))) #define rd32(a, reg) readl((a)->hw_addr + (reg)) diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index eac88bcc6c06..5250441bf75b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -315,6 +315,12 @@ i40e_status i40e_acquire_nvm(struct i40e_hw *hw, void i40e_release_nvm(struct i40e_hw *hw); i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, u16 *data); +i40e_status i40e_read_nvm_module_data(struct i40e_hw *hw, + u8 module_ptr, u16 offset, + u16 words_data_size, + u16 *data_ptr); +i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, + u16 *words, u16 *data); i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw); i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw, u16 *checksum); @@ -326,6 +332,8 @@ void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode, void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw); void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status); +i40e_status i40e_set_mac_type(struct i40e_hw *hw); + extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[]; static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 11394a52e21c..9bf1ad4319f5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -725,7 +725,7 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf) pf->tstamp_config.tx_type = HWTSTAMP_TX_OFF; /* Set the previous "reset" time to the current Kernel clock time */ - pf->ptp_prev_hw_time = ktime_to_timespec64(ktime_get_real()); + ktime_get_real_ts64(&pf->ptp_prev_hw_time); pf->ptp_reset_start = ktime_get(); return 0; diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h index 52e3680c57f8..d35d690ca10f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_register.h +++ b/drivers/net/ethernet/intel/i40e/i40e_register.h @@ -58,7 +58,7 @@ #define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30 #define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT) #define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31 -#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQENABLE_SHIFT) +#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ARQLEN_ARQENABLE_SHIFT) #define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */ #define I40E_PF_ARQT_ARQT_SHIFT 0 #define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT) @@ -81,7 +81,7 @@ #define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30 #define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT) #define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31 -#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT) +#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ATQLEN_ATQENABLE_SHIFT) #define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */ #define I40E_PF_ATQT_ATQT_SHIFT 0 #define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT) @@ -108,7 +108,7 @@ #define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30 #define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT) #define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31 -#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQENABLE_SHIFT) +#define 
I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN_ARQENABLE_SHIFT) #define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ #define I40E_VF_ARQT_MAX_INDEX 127 #define I40E_VF_ARQT_ARQT_SHIFT 0 @@ -136,7 +136,7 @@ #define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30 #define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT) #define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31 -#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQENABLE_SHIFT) +#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN_ATQENABLE_SHIFT) #define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ #define I40E_VF_ATQT_MAX_INDEX 127 #define I40E_VF_ATQT_ATQT_SHIFT 0 @@ -259,7 +259,7 @@ #define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30 #define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) #define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31 -#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) +#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1u, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) #define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */ #define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0 #define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT) @@ -363,6 +363,12 @@ #define I40E_GL_FWSTS_FWRI_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWRI_SHIFT) #define I40E_GL_FWSTS_FWS1B_SHIFT 16 #define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT) +#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK I40E_MASK(0x30, I40E_GL_FWSTS_FWS1B_SHIFT) +#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK I40E_MASK(0x31, I40E_GL_FWSTS_FWS1B_SHIFT) +#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_TRANSITION_MASK I40E_MASK(0x32, I40E_GL_FWSTS_FWS1B_SHIFT) +#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK I40E_MASK(0x33, I40E_GL_FWSTS_FWS1B_SHIFT) +#define I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK I40E_MASK(0xB, I40E_GL_FWSTS_FWS1B_SHIFT) +#define I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK I40E_MASK(0xC, I40E_GL_FWSTS_FWS1B_SHIFT) #define I40E_GLGEN_CLKSTAT 0x000B8184 /* Reset: POR */ #define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0 #define I40E_GLGEN_CLKSTAT_CLKMODE_MASK I40E_MASK(0x1, I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT) @@ -503,7 +509,7 @@ #define I40E_GLGEN_MSCA_MDICMD_SHIFT 30 #define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT) #define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31 -#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT) +#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1u, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT) #define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ #define I40E_GLGEN_MSRWD_MAX_INDEX 3 #define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0 @@ -1242,14 +1248,14 @@ #define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30 #define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT) #define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31 -#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT) +#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1u, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT) #define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */ #define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0 #define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT) #define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16 #define I40E_PFLAN_QALLOC_LASTQ_MASK I40E_MASK(0x7FF, 
I40E_PFLAN_QALLOC_LASTQ_SHIFT) #define I40E_PFLAN_QALLOC_VALID_SHIFT 31 -#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_VALID_SHIFT) +#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PFLAN_QALLOC_VALID_SHIFT) #define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */ #define I40E_QRX_ENA_MAX_INDEX 1535 #define I40E_QRX_ENA_QENA_REQ_SHIFT 0 @@ -1658,7 +1664,7 @@ #define I40E_GLNVM_SRCTL_START_SHIFT 30 #define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT) #define I40E_GLNVM_SRCTL_DONE_SHIFT 31 -#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_DONE_SHIFT) +#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1u, I40E_GLNVM_SRCTL_DONE_SHIFT) #define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */ #define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0 #define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT) @@ -3025,7 +3031,7 @@ #define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8 #define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT) #define I40E_PF_VT_PFALLOC_VALID_SHIFT 31 -#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VALID_SHIFT) +#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PF_VT_PFALLOC_VALID_SHIFT) #define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ #define I40E_VP_MDET_RX_MAX_INDEX 127 #define I40E_VP_MDET_RX_VALID_SHIFT 0 @@ -3161,7 +3167,7 @@ #define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30 #define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT) #define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31 -#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT) +#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN1_ARQENABLE_SHIFT) #define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */ #define I40E_VF_ARQT1_ARQT_SHIFT 0 #define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT) @@ -3184,7 +3190,7 @@ #define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30 #define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT) #define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31 -#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT) +#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN1_ATQENABLE_SHIFT) #define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */ #define I40E_VF_ATQT1_ATQT_SHIFT 0 #define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 2a2fe3ec7926..e3f29dc8b290 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -3262,7 +3262,7 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) **/ bool __i40e_chk_linearize(struct sk_buff *skb) { - const struct skb_frag_struct *frag, *stale; + const skb_frag_t *frag, *stale; int nr_frags, sum; /* no need to check if number of frags is less than 7 */ @@ -3306,7 +3306,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb) * descriptor associated with the fragment. 
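 * (-(skb_frag_off(stale)) & (I40E_MAX_READ_REQ_SIZE - 1), used below as align_pad, is the byte count from the fragment's start offset up to the next I40E_MAX_READ_REQ_SIZE-aligned boundary.)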
*/ if (stale_size > I40E_MAX_DATA_PER_TXD) { - int align_pad = -(stale->page_offset) & + int align_pad = -(skb_frag_off(stale)) & (I40E_MAX_READ_REQ_SIZE - 1); sum -= align_pad; @@ -3349,7 +3349,7 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, { unsigned int data_len = skb->data_len; unsigned int size = skb_headlen(skb); - struct skb_frag_struct *frag; + skb_frag_t *frag; struct i40e_tx_buffer *tx_bi; struct i40e_tx_desc *tx_desc; u16 i = tx_ring->next_to_use; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 100e92d2982f..36d37f31a287 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -521,7 +521,7 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring) **/ static inline int i40e_xmit_descriptor_count(struct sk_buff *skb) { - const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; unsigned int nr_frags = skb_shinfo(skb)->nr_frags; int count = 0, size = skb_headlen(skb); diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index 8f43aa47c263..b43ec94a0f29 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -443,6 +443,7 @@ struct i40e_nvm_access { #define I40E_MODULE_SFF_8472_COMP 0x5E #define I40E_MODULE_SFF_8472_SWAP 0x5C #define I40E_MODULE_SFF_ADDR_MODE 0x04 +#define I40E_MODULE_SFF_DDM_IMPLEMENTED 0x40 #define I40E_MODULE_TYPE_QSFP_PLUS 0x0D #define I40E_MODULE_TYPE_QSFP28 0x11 #define I40E_MODULE_QSFP_MAX_LEN 640 @@ -623,6 +624,7 @@ struct i40e_hw { #define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3) #define I40E_HW_FLAG_FW_LLDP_STOPPABLE BIT_ULL(4) #define I40E_HW_FLAG_FW_LLDP_PERSISTENT BIT_ULL(5) +#define I40E_HW_FLAG_DROP_MODE BIT_ULL(7) u64 flags; /* Used in set switch config AQ command */ @@ -1316,6 +1318,7 @@ struct i40e_hw_port_stats { #define I40E_SR_VPD_PTR 0x2F #define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E #define I40E_SR_SW_CHECKSUM_WORD 0x3F +#define I40E_SR_EMP_SR_SETTINGS_PTR 0x48 /* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */ #define I40E_SR_VPD_MODULE_MAX_SIZE 1024 diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 02b09a8ad54c..3d2440838822 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -55,7 +55,12 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf) pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; pfe.severity = PF_EVENT_SEVERITY_INFO; - if (vf->link_forced) { + + /* Always report link is down if the VF queues aren't enabled */ + if (!vf->queues_enabled) { + pfe.event_data.link_event.link_status = false; + pfe.event_data.link_event.link_speed = 0; + } else if (vf->link_forced) { pfe.event_data.link_event.link_status = vf->link_up; pfe.event_data.link_event.link_speed = (vf->link_up ? 
VIRTCHNL_LINK_SPEED_40GB : 0); @@ -65,6 +70,7 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf) pfe.event_data.link_event.link_speed = i40e_virtchnl_link_speed(ls->link_speed); } + i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe, sizeof(pfe), NULL); } @@ -2037,30 +2043,33 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg) alluni = true; aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti, alluni); - if (!aq_ret) { - if (allmulti) { + if (aq_ret) + goto err_out; + + if (allmulti) { + if (!test_and_set_bit(I40E_VF_STATE_MC_PROMISC, + &vf->vf_states)) dev_info(&pf->pdev->dev, "VF %d successfully set multicast promiscuous mode\n", vf->vf_id); - set_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states); - } else { - dev_info(&pf->pdev->dev, - "VF %d successfully unset multicast promiscuous mode\n", - vf->vf_id); - clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states); - } - if (alluni) { + } else if (test_and_clear_bit(I40E_VF_STATE_MC_PROMISC, + &vf->vf_states)) + dev_info(&pf->pdev->dev, + "VF %d successfully unset multicast promiscuous mode\n", + vf->vf_id); + + if (alluni) { + if (!test_and_set_bit(I40E_VF_STATE_UC_PROMISC, + &vf->vf_states)) dev_info(&pf->pdev->dev, "VF %d successfully set unicast promiscuous mode\n", vf->vf_id); - set_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states); - } else { - dev_info(&pf->pdev->dev, - "VF %d successfully unset unicast promiscuous mode\n", - vf->vf_id); - clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states); - } - } + } else if (test_and_clear_bit(I40E_VF_STATE_UC_PROMISC, + &vf->vf_states)) + dev_info(&pf->pdev->dev, + "VF %d successfully unset unicast promiscuous mode\n", + vf->vf_id); + err_out: /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, @@ -2153,7 +2162,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) * VF does not know about these additional VSIs and all * it cares about is its own queues. PF configures these queues * to its appropriate VSIs based on TC mapping - **/ + */ if (vf->adq_enabled) { if (idx >= ARRAY_SIZE(vf->ch)) { aq_ret = I40E_ERR_NO_AVAILABLE_VSI; @@ -2364,6 +2373,8 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg) } } + vf->queues_enabled = true; + error_param: /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, @@ -2385,6 +2396,9 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg) struct i40e_pf *pf = vf->pf; i40e_status aq_ret = 0; + /* Immediately mark queues as disabled */ + vf->queues_enabled = false; + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { aq_ret = I40E_ERR_PARAM; goto error_param; @@ -3953,10 +3967,15 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) /* When the VF is resetting wait until it is done. * It can take up to 200 milliseconds, * but wait for up to 300 milliseconds to be safe. + * If the VF is indeed in reset, the vsi pointer has + * to point to the newly loaded VSI under pf->vsi[id]. 
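+ * (the old VSI may have been freed and re-created while the reset was in progress, hence the re-read of pf->vsi[] below once the VF reaches the INIT state)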
*/ for (i = 0; i < 15; i++) { - if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) + if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { + if (i > 0) + vsi = pf->vsi[vf->lan_vsi_idx]; break; + } msleep(20); } if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { @@ -4244,7 +4263,8 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, if (min_tx_rate) { dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n", min_tx_rate, vf_id); - return -EINVAL; + ret = -EINVAL; + goto error; } vf = &pf->vf[vf_id]; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index f65cc0c16550..7164b9bb294f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -99,6 +99,7 @@ struct i40e_vf { unsigned int tx_rate; /* Tx bandwidth limit in Mbps */ bool link_forced; bool link_up; /* only valid if VF link is forced */ + bool queues_enabled; /* true if the VF queues are enabled */ bool spoofchk; u16 num_mac; u16 num_vlan; diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index 32bad014d76c..b1c3227ae4ab 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -116,7 +116,7 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem, return err; /* Kick start the NAPI context so that receiving will start */ - err = i40e_xsk_async_xmit(vsi->netdev, qid); + err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX); if (err) return err; } @@ -157,6 +157,11 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid) err = i40e_queue_pair_enable(vsi, qid); if (err) return err; + + /* Kick start the NAPI context so that receiving will start */ + err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX); + if (err) + return err; } return 0; @@ -190,9 +195,11 @@ int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem, **/ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp) { + struct xdp_umem *umem = rx_ring->xsk_umem; int err, result = I40E_XDP_PASS; struct i40e_ring *xdp_ring; struct bpf_prog *xdp_prog; + u64 offset; u32 act; rcu_read_lock(); @@ -201,7 +208,10 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp) */ xdp_prog = READ_ONCE(rx_ring->xdp_prog); act = bpf_prog_run_xdp(xdp_prog, xdp); - xdp->handle += xdp->data - xdp->data_hard_start; + offset = xdp->data - xdp->data_hard_start; + + xdp->handle = xsk_umem_adjust_offset(umem, xdp->handle, offset); + switch (act) { case XDP_PASS: break; @@ -262,7 +272,7 @@ static bool i40e_alloc_buffer_zc(struct i40e_ring *rx_ring, bi->addr = xdp_umem_get_data(umem, handle); bi->addr += hr; - bi->handle = handle + umem->headroom; + bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom); xsk_umem_discard_addr(umem); return true; @@ -299,7 +309,7 @@ static bool i40e_alloc_buffer_slow_zc(struct i40e_ring *rx_ring, bi->addr = xdp_umem_get_data(umem, handle); bi->addr += hr; - bi->handle = handle + umem->headroom; + bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom); xsk_umem_discard_addr_rq(umem); return true; @@ -420,8 +430,6 @@ static void i40e_reuse_rx_buffer_zc(struct i40e_ring *rx_ring, struct i40e_rx_buffer *old_bi) { struct i40e_rx_buffer *new_bi = &rx_ring->rx_bi[rx_ring->next_to_alloc]; - unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask; - u64 hr = 
rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM; u16 nta = rx_ring->next_to_alloc; /* update, and store next to alloc */ @@ -429,14 +437,9 @@ static void i40e_reuse_rx_buffer_zc(struct i40e_ring *rx_ring, rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; /* transfer page from old buffer to new buffer */ - new_bi->dma = old_bi->dma & mask; - new_bi->dma += hr; - - new_bi->addr = (void *)((unsigned long)old_bi->addr & mask); - new_bi->addr += hr; - - new_bi->handle = old_bi->handle & mask; - new_bi->handle += rx_ring->xsk_umem->headroom; + new_bi->dma = old_bi->dma; + new_bi->addr = old_bi->addr; + new_bi->handle = old_bi->handle; old_bi->addr = NULL; } @@ -471,7 +474,8 @@ void i40e_zca_free(struct zero_copy_allocator *alloc, unsigned long handle) bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle); bi->addr += hr; - bi->handle = (u64)handle + rx_ring->xsk_umem->headroom; + bi->handle = xsk_umem_adjust_offset(rx_ring->xsk_umem, (u64)handle, + rx_ring->xsk_umem->headroom); } /** @@ -626,6 +630,15 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget) i40e_finalize_xdp_rx(rx_ring, xdp_xmit); i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets); + + if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) { + if (failure || rx_ring->next_to_clean == rx_ring->next_to_use) + xsk_set_rx_need_wakeup(rx_ring->xsk_umem); + else + xsk_clear_rx_need_wakeup(rx_ring->xsk_umem); + + return (int)total_rx_packets; + } return failure ? budget : (int)total_rx_packets; } @@ -681,6 +694,8 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget) i40e_xdp_ring_update_tail(xdp_ring); xsk_umem_consume_tx_done(xdp_ring->xsk_umem); + if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem)) + xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem); } return !!budget && work_done; @@ -759,19 +774,27 @@ bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, i40e_update_tx_stats(tx_ring, completed_frames, total_bytes); out_xmit: + if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem)) { + if (tx_ring->next_to_clean == tx_ring->next_to_use) + xsk_set_tx_need_wakeup(tx_ring->xsk_umem); + else + xsk_clear_tx_need_wakeup(tx_ring->xsk_umem); + } + xmit_done = i40e_xmit_zc(tx_ring, budget); return work_done && xmit_done; } /** - * i40e_xsk_async_xmit - Implements the ndo_xsk_async_xmit + * i40e_xsk_wakeup - Implements the ndo_xsk_wakeup * @dev: the netdevice * @queue_id: queue id to wake up + * @flags: ignored in our case since we have Rx and Tx in the same NAPI. * * Returns <0 for errors, 0 otherwise. 
**/ -int i40e_xsk_async_xmit(struct net_device *dev, u32 queue_id) +int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) { struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h index 8cc0a2e7d9a2..9ed59c14eb55 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h @@ -18,6 +18,6 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget); bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring, int napi_budget); -int i40e_xsk_async_xmit(struct net_device *dev, u32 queue_id); +int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags); #endif /* _I40E_XSK_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index 9fc635d816d2..29de3ae96ef2 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -253,7 +253,6 @@ struct iavf_adapter { #define IAVF_FLAG_RESET_PENDING BIT(4) #define IAVF_FLAG_RESET_NEEDED BIT(5) #define IAVF_FLAG_WB_ON_ITR_CAPABLE BIT(6) -#define IAVF_FLAG_ADDR_SET_BY_PF BIT(8) #define IAVF_FLAG_SERVICE_CLIENT_REQUESTED BIT(9) #define IAVF_FLAG_CLIENT_NEEDS_OPEN BIT(10) #define IAVF_FLAG_CLIENT_NEEDS_CLOSE BIT(11) diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 9d2b50964a08..8f310e520b06 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -143,28 +143,6 @@ enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw, } /** - * iavf_debug_d - OS dependent version of debug printing - * @hw: pointer to the HW structure - * @mask: debug level mask - * @fmt_str: printf-type format description - **/ -void iavf_debug_d(void *hw, u32 mask, char *fmt_str, ...) -{ - char buf[512]; - va_list argptr; - - if (!(mask & ((struct iavf_hw *)hw)->debug_mask)) - return; - - va_start(argptr, fmt_str); - vsnprintf(buf, sizeof(buf), fmt_str, argptr); - va_end(argptr); - - /* the debug string is already formatted with a newline */ - pr_info("%s", buf); -} - -/** * iavf_schedule_reset - Set the flags and schedule a reset event * @adapter: board private structure **/ @@ -812,9 +790,6 @@ static int iavf_set_mac(struct net_device *netdev, void *p) if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) return 0; - if (adapter->flags & IAVF_FLAG_ADDR_SET_BY_PF) - return -EPERM; - spin_lock_bh(&adapter->mac_vlan_list_lock); f = iavf_find_filter(adapter, hw->mac.addr); @@ -829,7 +804,6 @@ static int iavf_set_mac(struct net_device *netdev, void *p) if (f) { ether_addr_copy(hw->mac.addr, addr->sa_data); - ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); } return (f == NULL) ? 
-ENOMEM : 0; @@ -1833,7 +1807,6 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter) eth_hw_addr_random(netdev); ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); } else { - adapter->flags |= IAVF_FLAG_ADDR_SET_BY_PF; ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); } diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c index 0cca1b589b56..7a30d5d5ef53 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c @@ -2161,7 +2161,7 @@ static void iavf_create_tx_ctx(struct iavf_ring *tx_ring, **/ bool __iavf_chk_linearize(struct sk_buff *skb) { - const struct skb_frag_struct *frag, *stale; + const skb_frag_t *frag, *stale; int nr_frags, sum; /* no need to check if number of frags is less than 7 */ @@ -2205,7 +2205,7 @@ bool __iavf_chk_linearize(struct sk_buff *skb) * descriptor associated with the fragment. */ if (stale_size > IAVF_MAX_DATA_PER_TXD) { - int align_pad = -(stale->page_offset) & + int align_pad = -(skb_frag_off(stale)) & (IAVF_MAX_READ_REQ_SIZE - 1); sum -= align_pad; @@ -2269,7 +2269,7 @@ static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb, { unsigned int data_len = skb->data_len; unsigned int size = skb_headlen(skb); - struct skb_frag_struct *frag; + skb_frag_t *frag; struct iavf_tx_buffer *tx_bi; struct iavf_tx_desc *tx_desc; u16 i = tx_ring->next_to_use; diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h index 71e7d090f8db..dd3348f9da9d 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h @@ -462,7 +462,7 @@ bool __iavf_chk_linearize(struct sk_buff *skb); **/ static inline int iavf_xmit_descriptor_count(struct sk_buff *skb) { - const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; unsigned int nr_frags = skb_shinfo(skb)->nr_frags; int count = 0, size = skb_headlen(skb); diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index d49d58a6de80..c46770eba320 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -1252,6 +1252,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, case VIRTCHNL_OP_ADD_ETH_ADDR: dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n", iavf_stat_str(&adapter->hw, v_retval)); + /* restore administratively set MAC address */ + ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); break; case VIRTCHNL_OP_DEL_VLAN: dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n", @@ -1319,6 +1321,11 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, } } switch (v_opcode) { + case VIRTCHNL_OP_ADD_ETH_ADDR: { + if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr)) + ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); + } + break; case VIRTCHNL_OP_GET_STATS: { struct iavf_eth_stats *stats = (struct iavf_eth_stats *)msg; diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile index 2d140ba83781..9edde960b4f2 100644 --- a/drivers/net/ethernet/intel/ice/Makefile +++ b/drivers/net/ethernet/intel/ice/Makefile @@ -15,6 +15,7 @@ ice-y := ice_main.o \ ice_sched.o \ ice_lib.o \ ice_txrx.o \ + ice_flex_pipe.o \ ice_ethtool.o ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o 
ice_sriov.o ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_lib.o diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 9ee6b55553c0..45e100666049 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -8,6 +8,7 @@ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/firmware.h> #include <linux/netdevice.h> #include <linux/compiler.h> #include <linux/etherdevice.h> @@ -29,6 +30,7 @@ #include <linux/sctp.h> #include <linux/ipv6.h> #include <linux/if_bridge.h> +#include <linux/ctype.h> #include <linux/avf/virtchnl.h> #include <net/ipv6.h> #include "ice_devids.h" @@ -47,33 +49,16 @@ extern const char ice_drv_ver[]; #define ICE_MIN_NUM_DESC 64 #define ICE_MAX_NUM_DESC 8160 #define ICE_DFLT_MIN_RX_DESC 512 -/* if the default number of Rx descriptors between ICE_MAX_NUM_DESC and the - * number of descriptors to fill up an entire page is greater than or equal to - * ICE_DFLT_MIN_RX_DESC set it based on page size, otherwise set it to - * ICE_DFLT_MIN_RX_DESC - */ -#define ICE_DFLT_NUM_RX_DESC \ - min_t(u16, ICE_MAX_NUM_DESC, \ - max_t(u16, ALIGN(PAGE_SIZE / sizeof(union ice_32byte_rx_desc), \ - ICE_REQ_DESC_MULTIPLE), \ - ICE_DFLT_MIN_RX_DESC)) -/* set default number of Tx descriptors to the minimum between ICE_MAX_NUM_DESC - * and the number of descriptors to fill up an entire page - */ -#define ICE_DFLT_NUM_TX_DESC min_t(u16, ICE_MAX_NUM_DESC, \ - ALIGN(PAGE_SIZE / \ - sizeof(struct ice_tx_desc), \ - ICE_REQ_DESC_MULTIPLE)) +#define ICE_DFLT_NUM_TX_DESC 256 +#define ICE_DFLT_NUM_RX_DESC 2048 #define ICE_DFLT_TRAFFIC_CLASS BIT(0) #define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16) -#define ICE_ETHTOOL_FWVER_LEN 32 #define ICE_AQ_LEN 64 -#define ICE_MBXQ_LEN 64 +#define ICE_MBXSQ_LEN 64 +#define ICE_MBXRQ_LEN 512 #define ICE_MIN_MSIX 2 #define ICE_NO_VSI 0xffff -#define ICE_MAX_TXQS 2048 -#define ICE_MAX_RXQS 2048 #define ICE_VSI_MAP_CONTIG 0 #define ICE_VSI_MAP_SCATTER 1 #define ICE_MAX_SCATTER_TXQS 16 @@ -86,16 +71,6 @@ extern const char ice_drv_ver[]; #define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1) #define ICE_INVAL_Q_INDEX 0xffff #define ICE_INVAL_VFID 256 -#define ICE_MAX_VF_COUNT 256 -#define ICE_MAX_QS_PER_VF 256 -#define ICE_MIN_QS_PER_VF 1 -#define ICE_DFLT_QS_PER_VF 4 -#define ICE_NONQ_VECS_VF 1 -#define ICE_MAX_SCATTER_QS_PER_VF 16 -#define ICE_MAX_BASE_QS_PER_VF 16 -#define ICE_MAX_INTR_PER_VF 65 -#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1) -#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1) #define ICE_MAX_RESET_WAIT 20 @@ -220,6 +195,7 @@ enum ice_state { __ICE_CFG_BUSY, __ICE_SERVICE_SCHED, __ICE_SERVICE_DIS, + __ICE_OICR_INTR_DIS, /* Global OICR interrupt disabled */ __ICE_STATE_NBITS /* must be last */ }; @@ -257,9 +233,6 @@ struct ice_vsi { u16 vsi_num; /* HW (absolute) index of this VSI */ u16 idx; /* software index in pf->vsi[] */ - /* Interrupt thresholds */ - u16 work_lmt; - s16 vf_id; /* VF ID for SR-IOV VSIs */ u16 ethtype; /* Ethernet protocol for pause frame */ @@ -292,8 +265,8 @@ struct ice_vsi { /* queue information */ u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */ u8 rx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */ - u16 txq_map[ICE_MAX_TXQS]; /* index in pf->avail_txqs */ - u16 rxq_map[ICE_MAX_RXQS]; /* index in pf->avail_rxqs */ + u16 *txq_map; /* index in pf->avail_txqs */ + u16 *rxq_map; /* index in pf->avail_rxqs */ u16 alloc_txq; /* Allocated Tx queues */ u16 num_txq; /* Used Tx queues */ u16 alloc_rxq; /* Allocated Rx queues */ 
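The ice.h hunk above turns txq_map/rxq_map from fixed ICE_MAX_TXQS/ICE_MAX_RXQS arrays into bare pointers, so each VSI now has to size its maps at setup time. A minimal sketch of such an allocation follows; ice_vsi_alloc_q_maps() is a hypothetical helper name, not part of this patch:

	/* hypothetical helper: size the queue maps for one VSI */
	static int ice_vsi_alloc_q_maps(struct ice_pf *pf, struct ice_vsi *vsi)
	{
		/* one entry per allocated queue; entries index pf->avail_txqs/rxqs */
		vsi->txq_map = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
					    sizeof(*vsi->txq_map), GFP_KERNEL);
		vsi->rxq_map = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
					    sizeof(*vsi->rxq_map), GFP_KERNEL);
		if (!vsi->txq_map || !vsi->rxq_map)
			return -ENOMEM;
		return 0;
	}

The pf->avail_txqs/avail_rxqs bitmaps in the next hunk follow the same pattern, e.g. a bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL) sized from the reported capabilities instead of a compile-time maximum.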
@@ -329,15 +302,16 @@ struct ice_q_vector { } ____cacheline_internodealigned_in_smp; enum ice_pf_flags { - ICE_FLAG_MSIX_ENA, ICE_FLAG_FLTR_SYNC, ICE_FLAG_RSS_ENA, ICE_FLAG_SRIOV_ENA, ICE_FLAG_SRIOV_CAPABLE, ICE_FLAG_DCB_CAPABLE, ICE_FLAG_DCB_ENA, + ICE_FLAG_ADV_FEATURES, ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, - ICE_FLAG_ENABLE_FW_LLDP, + ICE_FLAG_NO_MEDIA, + ICE_FLAG_FW_LLDP_AGENT, ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */ ICE_PF_FLAGS_NBITS /* must be last */ }; @@ -363,9 +337,9 @@ struct ice_pf { u16 num_vf_qps; /* num queue pairs per VF */ u16 num_vf_msix; /* num vectors per VF */ DECLARE_BITMAP(state, __ICE_STATE_NBITS); - DECLARE_BITMAP(avail_txqs, ICE_MAX_TXQS); - DECLARE_BITMAP(avail_rxqs, ICE_MAX_RXQS); DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS); + unsigned long *avail_txqs; /* bitmap to track PF Tx queue usage */ + unsigned long *avail_rxqs; /* bitmap to track PF Rx queue usage */ unsigned long serv_tmr_period; unsigned long serv_tmr_prev; struct timer_list serv_tmr; @@ -376,11 +350,11 @@ struct ice_pf { u32 hw_csum_rx_error; u32 oicr_idx; /* Other interrupt cause MSIX vector index */ u32 num_avail_sw_msix; /* remaining MSIX SW vectors left unclaimed */ + u16 max_pf_txqs; /* Total Tx queues PF wide */ + u16 max_pf_rxqs; /* Total Rx queues PF wide */ u32 num_lan_msix; /* Total MSIX vectors for base driver */ u16 num_lan_tx; /* num LAN Tx queues setup */ u16 num_lan_rx; /* num LAN Rx queues setup */ - u16 q_left_tx; /* remaining num Tx queues left unclaimed */ - u16 q_left_rx; /* remaining num Rx queues left unclaimed */ u16 next_vsi; /* Next free slot in pf->vsi[] - 0-based! */ u16 num_alloc_vsi; u16 corer_count; /* Core reset count */ @@ -433,21 +407,26 @@ ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi, } /** - * ice_find_vsi_by_type - Find and return VSI of a given type - * @pf: PF to search for VSI - * @type: Value indicating type of VSI we are looking for + * ice_netdev_to_pf - Retrieve the PF struct associated with a netdev + * @netdev: pointer to the netdev struct */ -static inline struct ice_vsi * -ice_find_vsi_by_type(struct ice_pf *pf, enum ice_vsi_type type) +static inline struct ice_pf *ice_netdev_to_pf(struct net_device *netdev) { - int i; + struct ice_netdev_priv *np = netdev_priv(netdev); - for (i = 0; i < pf->num_alloc_vsi; i++) { - struct ice_vsi *vsi = pf->vsi[i]; + return np->vsi->back; +} - if (vsi && vsi->type == type) - return vsi; - } +/** + * ice_get_main_vsi - Get the PF VSI + * @pf: PF instance + * + * returns pf->vsi[0], which by definition is the PF VSI + */ +static inline struct ice_vsi *ice_get_main_vsi(struct ice_pf *pf) +{ + if (pf->vsi) + return pf->vsi[0]; return NULL; } @@ -455,6 +434,11 @@ ice_find_vsi_by_type(struct ice_pf *pf, enum ice_vsi_type type) int ice_vsi_setup_tx_rings(struct ice_vsi *vsi); int ice_vsi_setup_rx_rings(struct ice_vsi *vsi); void ice_set_ethtool_ops(struct net_device *netdev); +void ice_set_ethtool_safe_mode_ops(struct net_device *netdev); +u16 ice_get_avail_txq_count(struct ice_pf *pf); +u16 ice_get_avail_rxq_count(struct ice_pf *pf); +void ice_update_vsi_stats(struct ice_vsi *vsi); +void ice_update_pf_stats(struct ice_pf *pf); int ice_up(struct ice_vsi *vsi); int ice_down(struct ice_vsi *vsi); int ice_vsi_cfg(struct ice_vsi *vsi); diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 765e3c2ed045..023e3d2fee5f 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -33,11 
+33,22 @@ struct ice_aqc_get_ver { u8 api_patch; }; +/* Send driver version (indirect 0x0002) */ +struct ice_aqc_driver_ver { + u8 major_ver; + u8 minor_ver; + u8 build_ver; + u8 subbuild_ver; + u8 reserved[4]; + __le32 addr_high; + __le32 addr_low; +}; + /* Queue Shutdown (direct 0x0003) */ struct ice_aqc_q_shutdown { - __le32 driver_unloading; + u8 driver_unloading; #define ICE_AQC_DRIVER_UNLOADING BIT(0) - u8 reserved[12]; + u8 reserved[15]; }; /* Request resource ownership (direct 0x0008) @@ -91,6 +102,7 @@ struct ice_aqc_list_caps_elem { #define ICE_AQC_CAPS_SRIOV 0x0012 #define ICE_AQC_CAPS_VF 0x0013 #define ICE_AQC_CAPS_VSI 0x0017 +#define ICE_AQC_CAPS_DCB 0x0018 #define ICE_AQC_CAPS_RSS 0x0040 #define ICE_AQC_CAPS_RXQS 0x0041 #define ICE_AQC_CAPS_TXQS 0x0042 @@ -1518,6 +1530,56 @@ struct ice_aqc_get_clear_fw_log { __le32 addr_low; }; +/* Download Package (indirect 0x0C40) */ +/* Also used for Update Package (indirect 0x0C42) */ +struct ice_aqc_download_pkg { + u8 flags; +#define ICE_AQC_DOWNLOAD_PKG_LAST_BUF 0x01 + u8 reserved[3]; + __le32 reserved1; + __le32 addr_high; + __le32 addr_low; +}; + +struct ice_aqc_download_pkg_resp { + __le32 error_offset; + __le32 error_info; + __le32 addr_high; + __le32 addr_low; +}; + +/* Get Package Info List (indirect 0x0C43) */ +struct ice_aqc_get_pkg_info_list { + __le32 reserved1; + __le32 reserved2; + __le32 addr_high; + __le32 addr_low; +}; + +/* Version format for packages */ +struct ice_pkg_ver { + u8 major; + u8 minor; + u8 update; + u8 draft; +}; + +#define ICE_PKG_NAME_SIZE 32 + +struct ice_aqc_get_pkg_info { + struct ice_pkg_ver ver; + char name[ICE_PKG_NAME_SIZE]; + u8 is_in_nvm; + u8 is_active; + u8 is_active_at_boot; + u8 is_modified; +}; + +/* Get Package Info List response buffer format (0x0C43) */ +struct ice_aqc_get_pkg_info_resp { + __le32 count; + struct ice_aqc_get_pkg_info pkg_info[1]; +}; /** * struct ice_aq_desc - Admin Queue (AQ) descriptor * @flags: ICE_AQ_FLAG_* flags @@ -1546,6 +1608,7 @@ struct ice_aq_desc { u8 raw[16]; struct ice_aqc_generic generic; struct ice_aqc_get_ver get_ver; + struct ice_aqc_driver_ver driver_ver; struct ice_aqc_q_shutdown q_shutdown; struct ice_aqc_req_res res_owner; struct ice_aqc_manage_mac_read mac_read; @@ -1579,6 +1642,7 @@ struct ice_aq_desc { struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res; struct ice_aqc_fw_logging fw_logging; struct ice_aqc_get_clear_fw_log get_clear_fw_log; + struct ice_aqc_download_pkg download_pkg; struct ice_aqc_set_mac_lb set_mac_lb; struct ice_aqc_alloc_free_res_cmd sw_res_ctrl; struct ice_aqc_set_event_mask set_event_mask; @@ -1610,12 +1674,19 @@ enum ice_aq_err { ICE_AQ_RC_EBUSY = 12, /* Device or resource busy */ ICE_AQ_RC_EEXIST = 13, /* Object already exists */ ICE_AQ_RC_ENOSPC = 16, /* No space left or allocation failure */ + ICE_AQ_RC_ENOSYS = 17, /* Function not implemented */ + ICE_AQ_RC_ENOSEC = 24, /* Missing security manifest */ + ICE_AQ_RC_EBADSIG = 25, /* Bad RSA signature */ + ICE_AQ_RC_ESVN = 26, /* SVN number prohibits this package */ + ICE_AQ_RC_EBADMAN = 27, /* Manifest hash mismatch */ + ICE_AQ_RC_EBADBUF = 28, /* Buffer hash mismatches manifest */ }; /* Admin Queue command opcodes */ enum ice_adminq_opc { /* AQ commands */ ice_aqc_opc_get_ver = 0x0001, + ice_aqc_opc_driver_ver = 0x0002, ice_aqc_opc_q_shutdown = 0x0003, /* resource ownership */ @@ -1697,6 +1768,10 @@ enum ice_adminq_opc { ice_aqc_opc_add_txqs = 0x0C30, ice_aqc_opc_dis_txqs = 0x0C31, + /* package commands */ + ice_aqc_opc_download_pkg = 0x0C40, + 
ice_aqc_opc_get_pkg_info_list = 0x0C43, + /* debug commands */ ice_aqc_opc_fw_logging = 0xFF09, ice_aqc_opc_fw_logging_info = 0xFF10, diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 2e0731c1e1a3..3a6b3950eb0e 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -263,21 +263,23 @@ enum ice_status ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, struct ice_link_status *link, struct ice_sq_cd *cd) { - struct ice_link_status *hw_link_info_old, *hw_link_info; struct ice_aqc_get_link_status_data link_data = { 0 }; struct ice_aqc_get_link_status *resp; + struct ice_link_status *li_old, *li; enum ice_media_type *hw_media_type; struct ice_fc_info *hw_fc_info; bool tx_pause, rx_pause; struct ice_aq_desc desc; enum ice_status status; + struct ice_hw *hw; u16 cmd_flags; if (!pi) return ICE_ERR_PARAM; - hw_link_info_old = &pi->phy.link_info_old; + hw = pi->hw; + li_old = &pi->phy.link_info_old; hw_media_type = &pi->phy.media_type; - hw_link_info = &pi->phy.link_info; + li = &pi->phy.link_info; hw_fc_info = &pi->fc; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status); @@ -286,27 +288,27 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, resp->cmd_flags = cpu_to_le16(cmd_flags); resp->lport_num = pi->lport; - status = ice_aq_send_cmd(pi->hw, &desc, &link_data, sizeof(link_data), - cd); + status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd); if (status) return status; /* save off old link status information */ - *hw_link_info_old = *hw_link_info; + *li_old = *li; /* update current link status information */ - hw_link_info->link_speed = le16_to_cpu(link_data.link_speed); - hw_link_info->phy_type_low = le64_to_cpu(link_data.phy_type_low); - hw_link_info->phy_type_high = le64_to_cpu(link_data.phy_type_high); + li->link_speed = le16_to_cpu(link_data.link_speed); + li->phy_type_low = le64_to_cpu(link_data.phy_type_low); + li->phy_type_high = le64_to_cpu(link_data.phy_type_high); *hw_media_type = ice_get_media_type(pi); - hw_link_info->link_info = link_data.link_info; - hw_link_info->an_info = link_data.an_info; - hw_link_info->ext_info = link_data.ext_info; - hw_link_info->max_frame_size = le16_to_cpu(link_data.max_frame_size); - hw_link_info->fec_info = link_data.cfg & ICE_AQ_FEC_MASK; - hw_link_info->topo_media_conflict = link_data.topo_media_conflict; - hw_link_info->pacing = link_data.cfg & ICE_AQ_CFG_PACING_M; + li->link_info = link_data.link_info; + li->an_info = link_data.an_info; + li->ext_info = link_data.ext_info; + li->max_frame_size = le16_to_cpu(link_data.max_frame_size); + li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK; + li->topo_media_conflict = link_data.topo_media_conflict; + li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M | + ICE_AQ_CFG_PACING_TYPE_M); /* update fc info */ tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX); @@ -320,12 +322,24 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, else hw_fc_info->current_mode = ICE_FC_NONE; - hw_link_info->lse_ena = - !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED)); + li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED)); + + ice_debug(hw, ICE_DBG_LINK, "link_speed = 0x%x\n", li->link_speed); + ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n", + (unsigned long long)li->phy_type_low); + ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n", + (unsigned long long)li->phy_type_high); + ice_debug(hw, ICE_DBG_LINK, "media_type 
= 0x%x\n", *hw_media_type); + ice_debug(hw, ICE_DBG_LINK, "link_info = 0x%x\n", li->link_info); + ice_debug(hw, ICE_DBG_LINK, "an_info = 0x%x\n", li->an_info); + ice_debug(hw, ICE_DBG_LINK, "ext_info = 0x%x\n", li->ext_info); + ice_debug(hw, ICE_DBG_LINK, "lse_ena = 0x%x\n", li->lse_ena); + ice_debug(hw, ICE_DBG_LINK, "max_frame = 0x%x\n", li->max_frame_size); + ice_debug(hw, ICE_DBG_LINK, "pacing = 0x%x\n", li->pacing); /* save link status information */ if (link) - *link = *hw_link_info; + *link = *li; /* flag cleared so calling functions don't call AQ again */ pi->phy.get_link_info = false; @@ -715,6 +729,29 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw) } /** + * ice_get_nvm_version - get cached NVM version data + * @hw: pointer to the hardware structure + * @oem_ver: 8 bit NVM version + * @oem_build: 16 bit NVM build number + * @oem_patch: 8 bit NVM patch number + * @ver_hi: high 16 bits of the NVM version + * @ver_lo: low 16 bits of the NVM version + */ +void +ice_get_nvm_version(struct ice_hw *hw, u8 *oem_ver, u16 *oem_build, + u8 *oem_patch, u8 *ver_hi, u8 *ver_lo) +{ + struct ice_nvm_info *nvm = &hw->nvm; + + *oem_ver = (u8)((nvm->oem_ver & ICE_OEM_VER_MASK) >> ICE_OEM_VER_SHIFT); + *oem_patch = (u8)(nvm->oem_ver & ICE_OEM_VER_PATCH_MASK); + *oem_build = (u16)((nvm->oem_ver & ICE_OEM_VER_BUILD_MASK) >> + ICE_OEM_VER_BUILD_SHIFT); + *ver_hi = (nvm->ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT; + *ver_lo = (nvm->ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT; +} + +/** * ice_init_hw - main hardware initialization routine * @hw: pointer to the hardware structure */ @@ -740,7 +777,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw) ice_get_itr_intrl_gran(hw); - status = ice_init_all_ctrlq(hw); + status = ice_create_all_ctrlq(hw); if (status) goto err_unroll_cqinit; @@ -845,7 +882,9 @@ enum ice_status ice_init_hw(struct ice_hw *hw) ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC); ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC_2); - + status = ice_init_hw_tbls(hw); + if (status) + goto err_unroll_fltr_mgmt_struct; return 0; err_unroll_fltr_mgmt_struct: @@ -855,7 +894,7 @@ err_unroll_sched: err_unroll_alloc: devm_kfree(ice_hw_to_dev(hw), hw->port_info); err_unroll_cqinit: - ice_shutdown_all_ctrlq(hw); + ice_destroy_all_ctrlq(hw); return status; } @@ -873,6 +912,8 @@ void ice_deinit_hw(struct ice_hw *hw) ice_sched_cleanup_all(hw); ice_sched_clear_agg(hw); + ice_free_seg(hw); + ice_free_hw_tbls(hw); if (hw->port_info) { devm_kfree(ice_hw_to_dev(hw), hw->port_info); @@ -881,7 +922,7 @@ void ice_deinit_hw(struct ice_hw *hw) /* Attempt to disable FW logging before shutting down control queues */ ice_cfg_fw_log(hw, false); - ice_shutdown_all_ctrlq(hw); + ice_destroy_all_ctrlq(hw); /* Clear VSI contexts if not already cleared */ ice_clear_all_vsi_ctx(hw); @@ -1078,6 +1119,7 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = { ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena, 1, 195), ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196), ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198), + ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201), { 0 } }; @@ -1088,7 +1130,8 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = { * @rxq_index: the index of the Rx queue * * Converts rxq context from sparse to dense structure and then writes - * it to HW register space + * it to HW register space and enables the hardware to prefetch descriptors + * instead of only fetching them on demand */ enum ice_status ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, @@ -1096,6 +1139,11 @@ 
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, { u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 }; + if (!rlan_ctx) + return ICE_ERR_BAD_PTR; + + rlan_ctx->prefena = 1; + ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info); return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index); } @@ -1111,6 +1159,7 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = { ICE_CTX_STORE(ice_tlan_ctx, vmvf_type, 2, 78), ICE_CTX_STORE(ice_tlan_ctx, src_vsi, 10, 80), ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena, 1, 90), + ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag, 1, 91), ICE_CTX_STORE(ice_tlan_ctx, alt_vlan, 1, 92), ICE_CTX_STORE(ice_tlan_ctx, cpuid, 8, 93), ICE_CTX_STORE(ice_tlan_ctx, wb_mode, 1, 101), @@ -1129,7 +1178,7 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = { ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165), ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166), ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168), - ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 110, 171), + ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171), { 0 } }; @@ -1185,6 +1234,12 @@ ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc, void *buf, /* FW Admin Queue command wrappers */ +/* Software lock/mutex that is meant to be held while the Global Config Lock + * in firmware is acquired by the software to prevent most (but not all) types + * of AQ commands from being sent to FW + */ +DEFINE_MUTEX(ice_global_cfg_lock_sw); + /** * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue * @hw: pointer to the HW struct @@ -1199,7 +1254,38 @@ enum ice_status ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd) { - return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd); + struct ice_aqc_req_res *cmd = &desc->params.res_owner; + bool lock_acquired = false; + enum ice_status status; + + /* When a package download is in process (i.e. when the firmware's + * Global Configuration Lock resource is held), only the Download + * Package, Get Version, Get Package Info List and Release Resource + * (with resource ID set to Global Config Lock) AdminQ commands are + * allowed; all others must block until the package download completes + * and the Global Config Lock is released. See also + * ice_acquire_global_cfg_lock(). 
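+ * (The ice_global_cfg_lock_sw mutex defined above implements that blocking in software: the download path holds it while the firmware lock is owned, and the default case below takes it before any other command is sent.)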
+ */ + switch (le16_to_cpu(desc->opcode)) { + case ice_aqc_opc_download_pkg: + case ice_aqc_opc_get_pkg_info_list: + case ice_aqc_opc_get_ver: + break; + case ice_aqc_opc_release_res: + if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK) + break; + /* fall-through */ + default: + mutex_lock(&ice_global_cfg_lock_sw); + lock_acquired = true; + break; + } + + status = ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd); + if (lock_acquired) + mutex_unlock(&ice_global_cfg_lock_sw); + + return status; } /** @@ -1237,6 +1323,43 @@ enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd) } /** + * ice_aq_send_driver_ver + * @hw: pointer to the HW struct + * @dv: driver's major, minor version + * @cd: pointer to command details structure or NULL + * + * Send the driver version (0x0002) to the firmware + */ +enum ice_status +ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv, + struct ice_sq_cd *cd) +{ + struct ice_aqc_driver_ver *cmd; + struct ice_aq_desc desc; + u16 len; + + cmd = &desc.params.driver_ver; + + if (!dv) + return ICE_ERR_PARAM; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver); + + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + cmd->major_ver = dv->major_ver; + cmd->minor_ver = dv->minor_ver; + cmd->build_ver = dv->build_ver; + cmd->subbuild_ver = dv->subbuild_ver; + + len = 0; + while (len < sizeof(dv->driver_string) && + isascii(dv->driver_string[len]) && dv->driver_string[len]) + len++; + + return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd); +} + +/** * ice_aq_q_shutdown * @hw: pointer to the HW struct * @unloading: is the driver unloading itself @@ -1254,7 +1377,7 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading) ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown); if (unloading) - cmd->driver_unloading = cpu_to_le32(ICE_AQC_DRIVER_UNLOADING); + cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING; return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); } @@ -1529,29 +1652,29 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count, case ICE_AQC_CAPS_VALID_FUNCTIONS: caps->valid_functions = number; ice_debug(hw, ICE_DBG_INIT, - "%s: valid functions = %d\n", prefix, + "%s: valid_functions (bitmap) = %d\n", prefix, caps->valid_functions); break; case ICE_AQC_CAPS_SRIOV: caps->sr_iov_1_1 = (number == 1); ice_debug(hw, ICE_DBG_INIT, - "%s: SR-IOV = %d\n", prefix, + "%s: sr_iov_1_1 = %d\n", prefix, caps->sr_iov_1_1); break; case ICE_AQC_CAPS_VF: if (dev_p) { dev_p->num_vfs_exposed = number; ice_debug(hw, ICE_DBG_INIT, - "%s: VFs exposed = %d\n", prefix, + "%s: num_vfs_exposed = %d\n", prefix, dev_p->num_vfs_exposed); } else if (func_p) { func_p->num_allocd_vfs = number; func_p->vf_base_id = logical_id; ice_debug(hw, ICE_DBG_INIT, - "%s: VFs allocated = %d\n", prefix, + "%s: num_allocd_vfs = %d\n", prefix, func_p->num_allocd_vfs); ice_debug(hw, ICE_DBG_INIT, - "%s: VF base_id = %d\n", prefix, + "%s: vf_base_id = %d\n", prefix, func_p->vf_base_id); } break; @@ -1559,63 +1682,75 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count, if (dev_p) { dev_p->num_vsi_allocd_to_host = number; ice_debug(hw, ICE_DBG_INIT, - "%s: num VSI alloc to host = %d\n", + "%s: num_vsi_allocd_to_host = %d\n", prefix, dev_p->num_vsi_allocd_to_host); } else if (func_p) { func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI); ice_debug(hw, ICE_DBG_INIT, - "%s: num guaranteed VSI (fw) = %d\n", + "%s: guar_num_vsi (fw) = %d\n", prefix, number); ice_debug(hw, ICE_DBG_INIT, - "%s: num guaranteed VSI = 
%d\n", + "%s: guar_num_vsi = %d\n", prefix, func_p->guar_num_vsi); } break; + case ICE_AQC_CAPS_DCB: + caps->dcb = (number == 1); + caps->active_tc_bitmap = logical_id; + caps->maxtc = phys_id; + ice_debug(hw, ICE_DBG_INIT, + "%s: dcb = %d\n", prefix, caps->dcb); + ice_debug(hw, ICE_DBG_INIT, + "%s: active_tc_bitmap = %d\n", prefix, + caps->active_tc_bitmap); + ice_debug(hw, ICE_DBG_INIT, + "%s: maxtc = %d\n", prefix, caps->maxtc); + break; case ICE_AQC_CAPS_RSS: caps->rss_table_size = number; caps->rss_table_entry_width = logical_id; ice_debug(hw, ICE_DBG_INIT, - "%s: RSS table size = %d\n", prefix, + "%s: rss_table_size = %d\n", prefix, caps->rss_table_size); ice_debug(hw, ICE_DBG_INIT, - "%s: RSS table width = %d\n", prefix, + "%s: rss_table_entry_width = %d\n", prefix, caps->rss_table_entry_width); break; case ICE_AQC_CAPS_RXQS: caps->num_rxq = number; caps->rxq_first_id = phys_id; ice_debug(hw, ICE_DBG_INIT, - "%s: num Rx queues = %d\n", prefix, + "%s: num_rxq = %d\n", prefix, caps->num_rxq); ice_debug(hw, ICE_DBG_INIT, - "%s: Rx first queue ID = %d\n", prefix, + "%s: rxq_first_id = %d\n", prefix, caps->rxq_first_id); break; case ICE_AQC_CAPS_TXQS: caps->num_txq = number; caps->txq_first_id = phys_id; ice_debug(hw, ICE_DBG_INIT, - "%s: num Tx queues = %d\n", prefix, + "%s: num_txq = %d\n", prefix, caps->num_txq); ice_debug(hw, ICE_DBG_INIT, - "%s: Tx first queue ID = %d\n", prefix, + "%s: txq_first_id = %d\n", prefix, caps->txq_first_id); break; case ICE_AQC_CAPS_MSIX: caps->num_msix_vectors = number; caps->msix_vector_first_id = phys_id; ice_debug(hw, ICE_DBG_INIT, - "%s: MSIX vector count = %d\n", prefix, + "%s: num_msix_vectors = %d\n", prefix, caps->num_msix_vectors); ice_debug(hw, ICE_DBG_INIT, - "%s: MSIX first vector index = %d\n", prefix, + "%s: msix_vector_first_id = %d\n", prefix, caps->msix_vector_first_id); break; case ICE_AQC_CAPS_MAX_MTU: caps->max_mtu = number; - ice_debug(hw, ICE_DBG_INIT, "%s: max MTU = %d\n", + ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n", prefix, caps->max_mtu); break; default: @@ -1712,6 +1847,75 @@ ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc) } /** + * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode + * @hw: pointer to the hardware structure + */ +void ice_set_safe_mode_caps(struct ice_hw *hw) +{ + struct ice_hw_func_caps *func_caps = &hw->func_caps; + struct ice_hw_dev_caps *dev_caps = &hw->dev_caps; + u32 valid_func, rxq_first_id, txq_first_id; + u32 msix_vector_first_id, max_mtu; + u32 num_func = 0; + u8 i; + + /* cache some func_caps values that should be restored after memset */ + valid_func = func_caps->common_cap.valid_functions; + txq_first_id = func_caps->common_cap.txq_first_id; + rxq_first_id = func_caps->common_cap.rxq_first_id; + msix_vector_first_id = func_caps->common_cap.msix_vector_first_id; + max_mtu = func_caps->common_cap.max_mtu; + + /* unset func capabilities */ + memset(func_caps, 0, sizeof(*func_caps)); + + /* restore cached values */ + func_caps->common_cap.valid_functions = valid_func; + func_caps->common_cap.txq_first_id = txq_first_id; + func_caps->common_cap.rxq_first_id = rxq_first_id; + func_caps->common_cap.msix_vector_first_id = msix_vector_first_id; + func_caps->common_cap.max_mtu = max_mtu; + + /* one Tx and one Rx queue in safe mode */ + func_caps->common_cap.num_rxq = 1; + func_caps->common_cap.num_txq = 1; + + /* two MSIX vectors, one for traffic and one for misc causes */ + func_caps->common_cap.num_msix_vectors = 2; + func_caps->guar_num_vsi = 1; + + /* cache 
some dev_caps values that should be restored after memset */ + valid_func = dev_caps->common_cap.valid_functions; + txq_first_id = dev_caps->common_cap.txq_first_id; + rxq_first_id = dev_caps->common_cap.rxq_first_id; + msix_vector_first_id = dev_caps->common_cap.msix_vector_first_id; + max_mtu = dev_caps->common_cap.max_mtu; + + /* unset dev capabilities */ + memset(dev_caps, 0, sizeof(*dev_caps)); + + /* restore cached values */ + dev_caps->common_cap.valid_functions = valid_func; + dev_caps->common_cap.txq_first_id = txq_first_id; + dev_caps->common_cap.rxq_first_id = rxq_first_id; + dev_caps->common_cap.msix_vector_first_id = msix_vector_first_id; + dev_caps->common_cap.max_mtu = max_mtu; + + /* valid_func is a bitmap. get number of functions */ +#define ICE_MAX_FUNCS 8 + for (i = 0; i < ICE_MAX_FUNCS; i++) + if (valid_func & BIT(i)) + num_func++; + + /* one Tx and one Rx queue per function in safe mode */ + dev_caps->common_cap.num_rxq = num_func; + dev_caps->common_cap.num_txq = num_func; + + /* two MSIX vectors per function */ + dev_caps->common_cap.num_msix_vectors = 2 * num_func; +} + +/** * ice_get_caps - get info about the HW * @hw: pointer to the hardware structure */ @@ -1993,6 +2197,17 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport, desc.params.set_phy.lport_num = lport; desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n", + (unsigned long long)le64_to_cpu(cfg->phy_type_low)); + ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n", + (unsigned long long)le64_to_cpu(cfg->phy_type_high)); + ice_debug(hw, ICE_DBG_LINK, "caps = 0x%x\n", cfg->caps); + ice_debug(hw, ICE_DBG_LINK, "low_power_ctrl = 0x%x\n", + cfg->low_power_ctrl); + ice_debug(hw, ICE_DBG_LINK, "eee_cap = 0x%x\n", cfg->eee_cap); + ice_debug(hw, ICE_DBG_LINK, "eeer_value = 0x%x\n", cfg->eeer_value); + ice_debug(hw, ICE_DBG_LINK, "link_fec_opt = 0x%x\n", cfg->link_fec_opt); + return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd); } @@ -2024,7 +2239,7 @@ enum ice_status ice_update_link_info(struct ice_port_info *pi) if (!pcaps) return ICE_ERR_NO_MEMORY; - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL); if (!status) memcpy(li->module_type, &pcaps->module_type, @@ -2174,27 +2389,24 @@ ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec) { switch (fec) { case ICE_FEC_BASER: - /* Clear auto FEC and RS bits, and AND BASE-R ability + /* Clear RS bits, and AND BASE-R ability * bits and OR request bits. */ - cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC; cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN; cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | ICE_AQC_PHY_FEC_25G_KR_REQ; break; case ICE_FEC_RS: - /* Clear auto FEC and BASE-R bits, and AND RS ability + /* Clear BASE-R bits, and AND RS ability * bits and OR request bits. */ - cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC; cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN; cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ | ICE_AQC_PHY_FEC_25G_RS_544_REQ; break; case ICE_FEC_NONE: - /* Clear auto FEC and all FEC option bits. */ - cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC; + /* Clear all FEC option bits. 
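+ * (unlike the removed lines, the ICE_AQC_PHY_EN_AUTO_FEC bit in cfg->caps is now left untouched in all three cases)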
*/ cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK; break; case ICE_FEC_AUTO: @@ -3240,40 +3452,44 @@ void ice_replay_post(struct ice_hw *hw) /** * ice_stat_update40 - read 40 bit stat from the chip and update stat values * @hw: ptr to the hardware info - * @hireg: high 32 bit HW register to read from - * @loreg: low 32 bit HW register to read from + * @reg: offset of 64 bit HW register to read from * @prev_stat_loaded: bool to specify if previous stats are loaded * @prev_stat: ptr to previous loaded stat value * @cur_stat: ptr to current stat value */ void -ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg, - bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat) +ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, + u64 *prev_stat, u64 *cur_stat) { - u64 new_data; - - new_data = rd32(hw, loreg); - new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32; + u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1); /* device stats are not reset at PFR, they likely will not be zeroed - * when the driver starts. So save the first values read and use them as - * offsets to be subtracted from the raw values in order to report stats - * that count from zero. + * when the driver starts. Thus, save the value from the first read + * without adding to the statistic value so that we report stats which + * count up from zero. */ - if (!prev_stat_loaded) + if (!prev_stat_loaded) { *prev_stat = new_data; + return; + } + + /* Calculate the difference between the new and old values, and then + * add it to the software stat value. + */ if (new_data >= *prev_stat) - *cur_stat = new_data - *prev_stat; + *cur_stat += new_data - *prev_stat; else /* to manage the potential roll-over */ - *cur_stat = (new_data + BIT_ULL(40)) - *prev_stat; - *cur_stat &= 0xFFFFFFFFFFULL; + *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat; + + /* Update the previously stored value to prepare for next read */ + *prev_stat = new_data; } /** * ice_stat_update32 - read 32 bit stat from the chip and update stat values * @hw: ptr to the hardware info - * @reg: HW register to read from + * @reg: offset of HW register to read from * @prev_stat_loaded: bool to specify if previous stats are loaded * @prev_stat: ptr to previous loaded stat value * @cur_stat: ptr to current stat value @@ -3287,17 +3503,26 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, new_data = rd32(hw, reg); /* device stats are not reset at PFR, they likely will not be zeroed - * when the driver starts. So save the first values read and use them as - * offsets to be subtracted from the raw values in order to report stats - * that count from zero. + * when the driver starts. Thus, save the value from the first read + * without adding to the statistic value so that we report stats which + * count up from zero. */ - if (!prev_stat_loaded) + if (!prev_stat_loaded) { *prev_stat = new_data; + return; + } + + /* Calculate the difference between the new and old values, and then + * add it to the software stat value. 
+ */ if (new_data >= *prev_stat) - *cur_stat = new_data - *prev_stat; + *cur_stat += new_data - *prev_stat; else /* to manage the potential roll-over */ - *cur_stat = (new_data + BIT_ULL(32)) - *prev_stat; + *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat; + + /* Update the previously stored value to prepare for next read */ + *prev_stat = new_data; } /** diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index d1f8353fe6bb..c3df92f57777 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -6,6 +6,7 @@ #include "ice.h" #include "ice_type.h" +#include "ice_flex_pipe.h" #include "ice_switch.h" #include <linux/avf/virtchnl.h> @@ -17,8 +18,10 @@ enum ice_status ice_init_hw(struct ice_hw *hw); void ice_deinit_hw(struct ice_hw *hw); enum ice_status ice_check_reset(struct ice_hw *hw); enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req); +enum ice_status ice_create_all_ctrlq(struct ice_hw *hw); enum ice_status ice_init_all_ctrlq(struct ice_hw *hw); void ice_shutdown_all_ctrlq(struct ice_hw *hw); +void ice_destroy_all_ctrlq(struct ice_hw *hw); enum ice_status ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_rq_event_info *e, u16 *pending); @@ -39,6 +42,8 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, void ice_clear_pxe_mode(struct ice_hw *hw); enum ice_status ice_get_caps(struct ice_hw *hw); +void ice_set_safe_mode_caps(struct ice_hw *hw); + void ice_dev_onetime_setup(struct ice_hw *hw); enum ice_status @@ -64,12 +69,18 @@ void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode); extern const struct ice_ctx_ele ice_tlan_ctx_info[]; enum ice_status ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info); + +extern struct mutex ice_global_cfg_lock_sw; + enum ice_status ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd); enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd); enum ice_status +ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv, + struct ice_sq_cd *cd); +enum ice_status ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, struct ice_aqc_get_phy_caps_data *caps, struct ice_sq_cd *cd); @@ -123,11 +134,14 @@ enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle); void ice_replay_post(struct ice_hw *hw); void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf); void -ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg, - bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat); +ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, + u64 *prev_stat, u64 *cur_stat); void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat); +void +ice_get_nvm_version(struct ice_hw *hw, u8 *oem_ver, u16 *oem_build, + u8 *oem_patch, u8 *ver_hi, u8 *ver_lo); enum ice_status ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, struct ice_aqc_get_elem *buf); diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c index e91ac4df0242..2353166c654e 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.c +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c @@ -310,7 +310,7 @@ ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) * @cq: pointer to the specific Control queue * * This is the main initialization routine for the Control Send 
Queue - * Prior to calling this function, drivers *MUST* set the following fields + * Prior to calling this function, the driver *MUST* set the following fields * in the cq->structure: * - cq->num_sq_entries * - cq->sq_buf_size @@ -369,7 +369,7 @@ init_ctrlq_exit: * @cq: pointer to the specific Control queue * * The main initialization routine for the Admin Receive (Event) Queue. - * Prior to calling this function, drivers *MUST* set the following fields + * Prior to calling this function, the driver *MUST* set the following fields * in the cq->structure: * - cq->num_rq_entries * - cq->rq_buf_size @@ -569,14 +569,8 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw) return 0; init_ctrlq_free_rq: - if (cq->rq.count) { - ice_shutdown_rq(hw, cq); - mutex_destroy(&cq->rq_lock); - } - if (cq->sq.count) { - ice_shutdown_sq(hw, cq); - mutex_destroy(&cq->sq_lock); - } + ice_shutdown_rq(hw, cq); + ice_shutdown_sq(hw, cq); return status; } @@ -585,12 +579,14 @@ init_ctrlq_free_rq: * @hw: pointer to the hardware structure * @q_type: specific Control queue type * - * Prior to calling this function, drivers *MUST* set the following fields + * Prior to calling this function, the driver *MUST* set the following fields * in the cq->structure: * - cq->num_sq_entries * - cq->num_rq_entries * - cq->rq_buf_size * - cq->sq_buf_size + * + * NOTE: this function does not initialize the controlq locks */ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) { @@ -616,8 +612,6 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) !cq->rq_buf_size || !cq->sq_buf_size) { return ICE_ERR_CFG; } - mutex_init(&cq->sq_lock); - mutex_init(&cq->rq_lock); /* setup SQ command write back timeout */ cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT; @@ -625,7 +619,7 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) /* allocate the ATQ */ ret_code = ice_init_sq(hw, cq); if (ret_code) - goto init_ctrlq_destroy_locks; + return ret_code; /* allocate the ARQ */ ret_code = ice_init_rq(hw, cq); if (ret_code) goto init_ctrlq_free_sq; return 0; init_ctrlq_free_sq: ice_shutdown_sq(hw, cq); -init_ctrlq_destroy_locks: - mutex_destroy(&cq->sq_lock); - mutex_destroy(&cq->rq_lock); return ret_code; } @@ -647,12 +638,14 @@ init_ctrlq_destroy_locks: * ice_init_all_ctrlq - main initialization routine for all control queues * @hw: pointer to the hardware structure * - * Prior to calling this function, drivers *MUST* set the following fields + * Prior to calling this function, the driver *MUST* set the following fields * in the cq->structure for all control queues: * - cq->num_sq_entries * - cq->num_rq_entries * - cq->rq_buf_size * - cq->sq_buf_size + * + * NOTE: this function does not initialize the controlq locks. */ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw) { @@ -672,9 +665,47 @@ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw) } /** + * ice_init_ctrlq_locks - Initialize locks for a control queue + * @cq: pointer to the control queue + * + * Initializes the send and receive queue locks for a given control queue.
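
The "*MUST* set" requirement above is what the ICE_ERR_CFG check in ice_init_ctrlq enforces; a hedged sketch of a conforming call site (entry counts and buffer sizes are illustrative, not the driver's real sizing):

	enum ice_status status;

	/* size both control queues before any init routine runs */
	hw->adminq.num_sq_entries = 32;
	hw->adminq.num_rq_entries = 32;
	hw->adminq.sq_buf_size = 4096;
	hw->adminq.rq_buf_size = 4096;
	/* ...and the same four fields on hw->mailboxq... */

	status = ice_init_all_ctrlq(hw); /* ICE_ERR_CFG if any field is 0 */
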
+ */ +static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq) +{ + mutex_init(&cq->sq_lock); + mutex_init(&cq->rq_lock); +} + +/** + * ice_create_all_ctrlq - main initialization routine for all control queues + * @hw: pointer to the hardware structure + * + * Prior to calling this function, the driver *MUST* set the following fields + * in the cq->structure for all control queues: + * - cq->num_sq_entries + * - cq->num_rq_entries + * - cq->rq_buf_size + * - cq->sq_buf_size + * + * This function creates all the control queue locks and then calls + * ice_init_all_ctrlq. It should be called once during driver load. If the + * driver needs to re-initialize control queues at run time it should call + * ice_init_all_ctrlq instead. + */ +enum ice_status ice_create_all_ctrlq(struct ice_hw *hw) +{ + ice_init_ctrlq_locks(&hw->adminq); + ice_init_ctrlq_locks(&hw->mailboxq); + + return ice_init_all_ctrlq(hw); +} + +/** * ice_shutdown_ctrlq - shutdown routine for any control queue * @hw: pointer to the hardware structure * @q_type: specific Control queue type + * + * NOTE: this function does not destroy the control queue locks. */ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) { @@ -693,19 +724,17 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) return; } - if (cq->sq.count) { - ice_shutdown_sq(hw, cq); - mutex_destroy(&cq->sq_lock); - } - if (cq->rq.count) { - ice_shutdown_rq(hw, cq); - mutex_destroy(&cq->rq_lock); - } + ice_shutdown_sq(hw, cq); + ice_shutdown_rq(hw, cq); } /** * ice_shutdown_all_ctrlq - shutdown routine for all control queues * @hw: pointer to the hardware structure + * + * NOTE: this function does not destroy the control queue locks. The driver + * may call this at runtime to shutdown and later restart control queues, such + * as in response to a reset event. */ void ice_shutdown_all_ctrlq(struct ice_hw *hw) { @@ -716,6 +745,37 @@ void ice_shutdown_all_ctrlq(struct ice_hw *hw) } /** + * ice_destroy_ctrlq_locks - Destroy locks for a control queue + * @cq: pointer to the control queue + * + * Destroys the send and receive queue locks for a given control queue. + */ +static void +ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq) +{ + mutex_destroy(&cq->sq_lock); + mutex_destroy(&cq->rq_lock); +} + +/** + * ice_destroy_all_ctrlq - exit routine for all control queues + * @hw: pointer to the hardware structure + * + * This function shuts down all the control queues and then destroys the + * control queue locks. It should be called once during driver unload. The + * driver should call ice_shutdown_all_ctrlq if it needs to shut down and + * reinitialize control queues, such as in response to a reset event. 
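
Read together, the four entry points imply a strict pairing over the driver's lifetime; a sketch of the intended sequencing (error handling elided):

	/* driver load: create locks once, then bring up the queues */
	status = ice_create_all_ctrlq(hw);

	/* reset flow: queues go down and come back up, locks survive */
	ice_shutdown_all_ctrlq(hw);
	/* ...hardware reset... */
	status = ice_init_all_ctrlq(hw);

	/* driver unload: shut down queues and destroy locks exactly once */
	ice_destroy_all_ctrlq(hw);
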
+ */ +void ice_destroy_all_ctrlq(struct ice_hw *hw) +{ + /* shut down all the control queues first */ + ice_shutdown_all_ctrlq(hw); + + ice_destroy_ctrlq_locks(&hw->adminq); + ice_destroy_ctrlq_locks(&hw->mailboxq); +} + +/** * ice_clean_sq - cleans Admin send queue (ATQ) * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c index c2002ded65f6..dd7efff121bd 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb.c @@ -60,7 +60,7 @@ ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf, * Enable or Disable posting of an event on ARQ when LLDP MIB * associated with the interface changes (0x0A01) */ -enum ice_status +static enum ice_status ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update, struct ice_sq_cd *cd) { @@ -444,9 +444,15 @@ ice_parse_cee_pgcfg_tlv(struct ice_cee_feat_tlv *tlv, * |pg0|pg1|pg2|pg3|pg4|pg5|pg6|pg7| * --------------------------------- */ - ice_for_each_traffic_class(i) + ice_for_each_traffic_class(i) { etscfg->tcbwtable[i] = buf[offset++]; + if (etscfg->prio_table[i] == ICE_CEE_PGID_STRICT) + dcbcfg->etscfg.tsatable[i] = ICE_IEEE_TSA_STRICT; + else + dcbcfg->etscfg.tsatable[i] = ICE_IEEE_TSA_ETS; + } + /* Number of TCs supported (1 octet) */ etscfg->maxtcs = buf[offset]; } @@ -937,10 +943,11 @@ enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi) /** * ice_init_dcb * @hw: pointer to the HW struct + * @enable_mib_change: enable MIB change event * * Update DCB configuration from the Firmware */ -enum ice_status ice_init_dcb(struct ice_hw *hw) +enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change) { struct ice_port_info *pi = hw->port_info; enum ice_status ret = 0; @@ -954,7 +961,8 @@ enum ice_status ice_init_dcb(struct ice_hw *hw) pi->dcbx_status = ice_get_dcbx_status(hw); if (pi->dcbx_status == ICE_DCBX_STATUS_DONE || - pi->dcbx_status == ICE_DCBX_STATUS_IN_PROGRESS) { + pi->dcbx_status == ICE_DCBX_STATUS_IN_PROGRESS || + pi->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) { /* Get current DCBX configuration */ ret = ice_get_dcb_cfg(pi); pi->is_sw_lldp = (hw->adminq.sq_last_status == ICE_AQ_RC_EPERM); @@ -965,9 +973,39 @@ enum ice_status ice_init_dcb(struct ice_hw *hw) } /* Configure the LLDP MIB change event */ - ret = ice_aq_cfg_lldp_mib_change(hw, true, NULL); + if (enable_mib_change) { + ret = ice_aq_cfg_lldp_mib_change(hw, true, NULL); + if (!ret) + pi->is_sw_lldp = false; + } + + return ret; +} + +/** + * ice_cfg_lldp_mib_change + * @hw: pointer to the HW struct + * @ena_mib: enable/disable MIB change event + * + * Configure (disable/enable) MIB + */ +enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib) +{ + struct ice_port_info *pi = hw->port_info; + enum ice_status ret; + + if (!hw->func_caps.common_cap.dcb) + return ICE_ERR_NOT_SUPPORTED; + + /* Get DCBX status */ + pi->dcbx_status = ice_get_dcbx_status(hw); + + if (pi->dcbx_status == ICE_DCBX_STATUS_DIS) + return ICE_ERR_NOT_READY; + + ret = ice_aq_cfg_lldp_mib_change(hw, ena_mib, NULL); if (!ret) - pi->is_sw_lldp = false; + pi->is_sw_lldp = !ena_mib; return ret; } diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.h b/drivers/net/ethernet/intel/ice/ice_dcb.h index 522e1452abe2..ee138f9bdc7c 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.h +++ b/drivers/net/ethernet/intel/ice/ice_dcb.h @@ -125,7 +125,7 @@ ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 
bridgetype, struct ice_dcbx_cfg *dcbcfg); enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi); enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi); -enum ice_status ice_init_dcb(struct ice_hw *hw); +enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change); enum ice_status ice_query_port_ets(struct ice_port_info *pi, struct ice_aqc_port_ets_elem *buf, u16 buf_size, @@ -139,9 +139,7 @@ ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd); enum ice_status ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent, bool *dcbx_agent_status, struct ice_sq_cd *cd); -enum ice_status -ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update, - struct ice_sq_cd *cd); +enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib); #else /* CONFIG_DCB */ static inline enum ice_status ice_aq_stop_lldp(struct ice_hw __always_unused *hw, @@ -172,9 +170,8 @@ ice_aq_start_stop_dcbx(struct ice_hw __always_unused *hw, } static inline enum ice_status -ice_aq_cfg_lldp_mib_change(struct ice_hw __always_unused *hw, - bool __always_unused ena_update, - struct ice_sq_cd __always_unused *cd) +ice_cfg_lldp_mib_change(struct ice_hw __always_unused *hw, + bool __always_unused ena_mib) { return 0; } diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index fe88b127ca42..dd47869c4ad4 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -4,6 +4,48 @@ #include "ice_dcb_lib.h" /** + * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration + * @vsi: the VSI being configured + * @ena_tc: TC map to be enabled + */ +void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc) +{ + struct net_device *netdev = vsi->netdev; + struct ice_pf *pf = vsi->back; + struct ice_dcbx_cfg *dcbcfg; + u8 netdev_tc; + int i; + + if (!netdev) + return; + + if (!ena_tc) { + netdev_reset_tc(netdev); + return; + } + + if (netdev_set_num_tc(netdev, vsi->tc_cfg.numtc)) + return; + + dcbcfg = &pf->hw.port_info->local_dcbx_cfg; + + ice_for_each_traffic_class(i) + if (vsi->tc_cfg.ena_tc & BIT(i)) + netdev_set_tc_queue(netdev, + vsi->tc_cfg.tc_info[i].netdev_tc, + vsi->tc_cfg.tc_info[i].qcount_tx, + vsi->tc_cfg.tc_info[i].qoffset); + + for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) { + u8 ets_tc = dcbcfg->etscfg.prio_table[i]; + + /* Get the mapped netdev TC# for the UP */ + netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc; + netdev_set_prio_tc_map(netdev, i, netdev_tc); + } +} + +/** * ice_dcb_get_ena_tc - return bitmap of enabled TCs * @dcbcfg: DCB config to evaluate for enabled TCs */ @@ -204,15 +246,86 @@ out: } /** + * ice_cfg_etsrec_defaults - Set default ETS recommended DCB config + * @pi: port information structure + */ +static void ice_cfg_etsrec_defaults(struct ice_port_info *pi) +{ + struct ice_dcbx_cfg *dcbcfg = &pi->local_dcbx_cfg; + u8 i; + + /* Ensure ETS recommended DCB configuration is not already set */ + if (dcbcfg->etsrec.maxtcs) + return; + + /* In CEE mode, set the default to 1 TC */ + dcbcfg->etsrec.maxtcs = 1; + for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { + dcbcfg->etsrec.tcbwtable[i] = i ? 0 : 100; + dcbcfg->etsrec.tsatable[i] = i ? 
ICE_IEEE_TSA_STRICT : + ICE_IEEE_TSA_ETS; + } +} + +/** + * ice_dcb_need_recfg - Check if DCB needs reconfig + * @pf: board private structure + * @old_cfg: current DCB config + * @new_cfg: new DCB config + */ +static bool +ice_dcb_need_recfg(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg, + struct ice_dcbx_cfg *new_cfg) +{ + bool need_reconfig = false; + + /* Check if ETS configuration has changed */ + if (memcmp(&new_cfg->etscfg, &old_cfg->etscfg, + sizeof(new_cfg->etscfg))) { + /* If Priority Table has changed reconfig is needed */ + if (memcmp(&new_cfg->etscfg.prio_table, + &old_cfg->etscfg.prio_table, + sizeof(new_cfg->etscfg.prio_table))) { + need_reconfig = true; + dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n"); + } + + if (memcmp(&new_cfg->etscfg.tcbwtable, + &old_cfg->etscfg.tcbwtable, + sizeof(new_cfg->etscfg.tcbwtable))) + dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n"); + + if (memcmp(&new_cfg->etscfg.tsatable, + &old_cfg->etscfg.tsatable, + sizeof(new_cfg->etscfg.tsatable))) + dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n"); + } + + /* Check if PFC configuration has changed */ + if (memcmp(&new_cfg->pfc, &old_cfg->pfc, sizeof(new_cfg->pfc))) { + need_reconfig = true; + dev_dbg(&pf->pdev->dev, "PFC config change detected.\n"); + } + + /* Check if APP Table has changed */ + if (memcmp(&new_cfg->app, &old_cfg->app, sizeof(new_cfg->app))) { + need_reconfig = true; + dev_dbg(&pf->pdev->dev, "APP Table change detected.\n"); + } + + dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig); + return need_reconfig; +} + +/** * ice_dcb_rebuild - rebuild DCB post reset * @pf: physical function instance */ void ice_dcb_rebuild(struct ice_pf *pf) { + struct ice_dcbx_cfg *local_dcbx_cfg, *desired_dcbx_cfg, *prev_cfg; struct ice_aqc_port_ets_elem buf = { 0 }; - struct ice_dcbx_cfg *prev_cfg; enum ice_status ret; - u8 willing; ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL); if (ret) { @@ -224,9 +337,15 @@ void ice_dcb_rebuild(struct ice_pf *pf) if (!test_bit(ICE_FLAG_DCB_ENA, pf->flags)) return; + local_dcbx_cfg = &pf->hw.port_info->local_dcbx_cfg; + desired_dcbx_cfg = &pf->hw.port_info->desired_dcbx_cfg; + /* Save current willing state and force FW to unwilling */ - willing = pf->hw.port_info->local_dcbx_cfg.etscfg.willing; - pf->hw.port_info->local_dcbx_cfg.etscfg.willing = 0x0; + local_dcbx_cfg->etscfg.willing = 0x0; + local_dcbx_cfg->pfc.willing = 0x0; + local_dcbx_cfg->app_mode = ICE_DCBX_APPS_NON_WILLING; + + ice_cfg_etsrec_defaults(pf->hw.port_info); ret = ice_set_dcb_cfg(pf->hw.port_info); if (ret) { dev_err(&pf->pdev->dev, "Failed to set DCB to unwilling\n"); @@ -234,31 +353,37 @@ void ice_dcb_rebuild(struct ice_pf *pf) } /* Retrieve DCB config and ensure same as current in SW */ - prev_cfg = devm_kmemdup(&pf->pdev->dev, - &pf->hw.port_info->local_dcbx_cfg, + prev_cfg = devm_kmemdup(&pf->pdev->dev, local_dcbx_cfg, sizeof(*prev_cfg), GFP_KERNEL); if (!prev_cfg) { dev_err(&pf->pdev->dev, "Failed to alloc space for DCB cfg\n"); goto dcb_error; } - ice_init_dcb(&pf->hw); - if (memcmp(prev_cfg, &pf->hw.port_info->local_dcbx_cfg, - sizeof(*prev_cfg))) { + ice_init_dcb(&pf->hw, true); + if (pf->hw.port_info->dcbx_status == ICE_DCBX_STATUS_DIS) + pf->hw.port_info->is_sw_lldp = true; + else + pf->hw.port_info->is_sw_lldp = false; + + if (ice_dcb_need_recfg(pf, prev_cfg, local_dcbx_cfg)) { /* difference in cfg detected - disable DCB till next MIB */ dev_err(&pf->pdev->dev, "Set local MIB not accurate\n"); - devm_kfree(&pf->pdev->dev, prev_cfg); goto 
dcb_error; } /* fetched config congruent to previous configuration */ devm_kfree(&pf->pdev->dev, prev_cfg); - /* Configuration replayed - reset willing state to previous */ - pf->hw.port_info->local_dcbx_cfg.etscfg.willing = willing; + /* Set the local desired config */ + if (local_dcbx_cfg->dcbx_mode == ICE_DCBX_MODE_CEE) + memcpy(local_dcbx_cfg, desired_dcbx_cfg, + sizeof(*local_dcbx_cfg)); + + ice_cfg_etsrec_defaults(pf->hw.port_info); ret = ice_set_dcb_cfg(pf->hw.port_info); if (ret) { - dev_err(&pf->pdev->dev, "Fail restoring prev willing state\n"); + dev_err(&pf->pdev->dev, "Failed to set desired config\n"); goto dcb_error; } dev_info(&pf->pdev->dev, "DCB restored after reset\n"); @@ -330,7 +455,7 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool locked) memset(&pi->local_dcbx_cfg, 0, sizeof(*dcbcfg)); dcbcfg->etscfg.willing = 1; - dcbcfg->etscfg.maxtcs = 8; + dcbcfg->etscfg.maxtcs = hw->func_caps.common_cap.maxtc; dcbcfg->etscfg.tcbwtable[0] = 100; dcbcfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS; @@ -339,7 +464,7 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool locked) dcbcfg->etsrec.willing = 0; dcbcfg->pfc.willing = 1; - dcbcfg->pfc.pfccap = IEEE_8021QAZ_MAX_TCS; + dcbcfg->pfc.pfccap = hw->func_caps.common_cap.maxtc; dcbcfg->numapps = 1; dcbcfg->app[0].selector = ICE_APP_SEL_ETHTYPE; @@ -364,35 +489,24 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked) struct device *dev = &pf->pdev->dev; struct ice_port_info *port_info; struct ice_hw *hw = &pf->hw; - int sw_default = 0; int err; port_info = hw->port_info; - err = ice_init_dcb(hw); - if (err) { - /* FW LLDP is not active, default to SW DCBX/LLDP */ - dev_info(&pf->pdev->dev, "FW LLDP is not active\n"); - hw->port_info->dcbx_status = ICE_DCBX_STATUS_NOT_STARTED; - hw->port_info->is_sw_lldp = true; - } - - if (port_info->dcbx_status == ICE_DCBX_STATUS_DIS) - dev_info(&pf->pdev->dev, "DCBX disabled\n"); - - /* LLDP disabled in FW */ - if (port_info->is_sw_lldp) { - sw_default = 1; - dev_info(&pf->pdev->dev, "DCBx/LLDP in SW mode.\n"); - clear_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags); - } else { - set_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags); + err = ice_init_dcb(hw, false); + if (err && !port_info->is_sw_lldp) { + dev_err(&pf->pdev->dev, "Error initializing DCB %d\n", err); + goto dcb_init_err; } - if (port_info->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) - dev_info(&pf->pdev->dev, "DCBX not started\n"); - - if (sw_default) { + dev_info(&pf->pdev->dev, + "DCB is enabled in the hardware, max number of TCs supported on this port is %d\n", + pf->hw.func_caps.common_cap.maxtc); + if (err) { + /* FW LLDP is disabled, activate SW DCBX/LLDP mode */ + dev_info(&pf->pdev->dev, + "FW LLDP is disabled, DCBx/LLDP in SW mode.\n"); + clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags); err = ice_dcb_sw_dflt_cfg(pf, locked); if (err) { dev_err(&pf->pdev->dev, @@ -402,21 +516,18 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked) } pf->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; - set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); - set_bit(ICE_FLAG_DCB_ENA, pf->flags); return 0; } + set_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags); + /* DCBX in FW and LLDP enabled in FW */ pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_IEEE; - set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); - err = ice_dcb_init_cfg(pf, locked); if (err) goto dcb_init_err; - dev_info(&pf->pdev->dev, "DCBX offload supported\n"); return err; dcb_init_err: @@ -432,30 +543,31 @@ void ice_update_dcb_stats(struct ice_pf *pf) { struct ice_hw_port_stats *prev_ps, *cur_ps;
struct ice_hw *hw = &pf->hw; - u8 pf_id = hw->pf_id; + u8 port; int i; + port = hw->port_info->lport; prev_ps = &pf->stats_prev; cur_ps = &pf->stats; for (i = 0; i < 8; i++) { - ice_stat_update32(hw, GLPRT_PXOFFRXC(pf_id, i), + ice_stat_update32(hw, GLPRT_PXOFFRXC(port, i), pf->stat_prev_loaded, &prev_ps->priority_xoff_rx[i], &cur_ps->priority_xoff_rx[i]); - ice_stat_update32(hw, GLPRT_PXONRXC(pf_id, i), + ice_stat_update32(hw, GLPRT_PXONRXC(port, i), pf->stat_prev_loaded, &prev_ps->priority_xon_rx[i], &cur_ps->priority_xon_rx[i]); - ice_stat_update32(hw, GLPRT_PXONTXC(pf_id, i), + ice_stat_update32(hw, GLPRT_PXONTXC(port, i), pf->stat_prev_loaded, &prev_ps->priority_xon_tx[i], &cur_ps->priority_xon_tx[i]); - ice_stat_update32(hw, GLPRT_PXOFFTXC(pf_id, i), + ice_stat_update32(hw, GLPRT_PXOFFTXC(port, i), pf->stat_prev_loaded, &prev_ps->priority_xoff_tx[i], &cur_ps->priority_xoff_tx[i]); - ice_stat_update32(hw, GLPRT_RXON2OFFCNT(pf_id, i), + ice_stat_update32(hw, GLPRT_RXON2OFFCNT(port, i), pf->stat_prev_loaded, &prev_ps->priority_xon_2_xoff[i], &cur_ps->priority_xon_2_xoff[i]); @@ -502,55 +614,6 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring, } /** - * ice_dcb_need_recfg - Check if DCB needs reconfig - * @pf: board private structure - * @old_cfg: current DCB config - * @new_cfg: new DCB config - */ -static bool ice_dcb_need_recfg(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg, - struct ice_dcbx_cfg *new_cfg) -{ - bool need_reconfig = false; - - /* Check if ETS configuration has changed */ - if (memcmp(&new_cfg->etscfg, &old_cfg->etscfg, - sizeof(new_cfg->etscfg))) { - /* If Priority Table has changed reconfig is needed */ - if (memcmp(&new_cfg->etscfg.prio_table, - &old_cfg->etscfg.prio_table, - sizeof(new_cfg->etscfg.prio_table))) { - need_reconfig = true; - dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n"); - } - - if (memcmp(&new_cfg->etscfg.tcbwtable, - &old_cfg->etscfg.tcbwtable, - sizeof(new_cfg->etscfg.tcbwtable))) - dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n"); - - if (memcmp(&new_cfg->etscfg.tsatable, - &old_cfg->etscfg.tsatable, - sizeof(new_cfg->etscfg.tsatable))) - dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n"); - } - - /* Check if PFC configuration has changed */ - if (memcmp(&new_cfg->pfc, &old_cfg->pfc, sizeof(new_cfg->pfc))) { - need_reconfig = true; - dev_dbg(&pf->pdev->dev, "PFC config change detected.\n"); - } - - /* Check if APP Table has changed */ - if (memcmp(&new_cfg->app, &old_cfg->app, sizeof(new_cfg->app))) { - need_reconfig = true; - dev_dbg(&pf->pdev->dev, "APP Table change detected.\n"); - } - - dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig); - return need_reconfig; -} - -/** * ice_dcb_process_lldp_set_mib_change - Process MIB change * @pf: ptr to ice_pf * @event: pointer to the admin queue receive event diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h index 819081053ff5..661a6f7bca64 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h @@ -22,6 +22,7 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring, void ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, struct ice_rq_event_info *event); +void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc); static inline void ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring) { @@ -58,5 +59,6 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_ring __always_unused *tx_ring, #define ice_vsi_cfg_dcb_rings(vsi) do {} while (0) #define 
ice_dcb_process_lldp_set_mib_change(pf, event) do {} while (0) #define ice_set_cgd_num(tlan_ctx, ring) do {} while (0) +#define ice_vsi_cfg_netdev_tc(vsi, ena_tc) do {} while (0) #endif /* CONFIG_DCB */ #endif /* _ICE_DCB_LIB_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 52083a63dee6..7e23034df955 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -155,36 +155,11 @@ struct ice_priv_flag { static const struct ice_priv_flag ice_gstrings_priv_flags[] = { ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA), - ICE_PRIV_FLAG("enable-fw-lldp", ICE_FLAG_ENABLE_FW_LLDP), + ICE_PRIV_FLAG("fw-lldp-agent", ICE_FLAG_FW_LLDP_AGENT), }; #define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags) -/** - * ice_nvm_version_str - format the NVM version strings - * @hw: ptr to the hardware info - */ -static char *ice_nvm_version_str(struct ice_hw *hw) -{ - static char buf[ICE_ETHTOOL_FWVER_LEN]; - u8 ver, patch; - u32 full_ver; - u16 build; - - full_ver = hw->nvm.oem_ver; - ver = (u8)((full_ver & ICE_OEM_VER_MASK) >> ICE_OEM_VER_SHIFT); - build = (u16)((full_ver & ICE_OEM_VER_BUILD_MASK) >> - ICE_OEM_VER_BUILD_SHIFT); - patch = (u8)(full_ver & ICE_OEM_VER_PATCH_MASK); - - snprintf(buf, sizeof(buf), "%x.%02x 0x%x %d.%d.%d", - (hw->nvm.ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT, - (hw->nvm.ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT, - hw->nvm.eetrack, ver, build, patch); - - return buf; -} - static void ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { @@ -1201,13 +1176,13 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) bitmap_xor(change_flags, pf->flags, orig_flags, ICE_PF_FLAGS_NBITS); - if (test_bit(ICE_FLAG_ENABLE_FW_LLDP, change_flags)) { - if (!test_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags)) { + if (test_bit(ICE_FLAG_FW_LLDP_AGENT, change_flags)) { + if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) { enum ice_status status; /* Disable FW LLDP engine */ - status = ice_aq_cfg_lldp_mib_change(&pf->hw, false, - NULL); + status = ice_cfg_lldp_mib_change(&pf->hw, false); + /* If unregistering for LLDP events fails, this is * not an error state, as there shouldn't be any * events to respond to. @@ -1273,6 +1248,12 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) * The FW LLDP engine will now be consuming them. */ ice_cfg_sw_lldp(vsi, false, false); + + /* Register for MIB change events */ + status = ice_cfg_lldp_mib_change(&pf->hw, true); + if (status) + dev_dbg(&pf->pdev->dev, + "Fail to enable MIB change events\n"); } } clear_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags); @@ -1319,14 +1300,17 @@ ice_get_ethtool_stats(struct net_device *netdev, struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; struct ice_ring *ring; - unsigned int j = 0; + unsigned int j; int i = 0; char *p; + ice_update_pf_stats(pf); + ice_update_vsi_stats(vsi); + for (j = 0; j < ICE_VSI_STATS_LEN; j++) { p = (char *)vsi + ice_gstrings_vsi_stats[j].stat_offset; data[i++] = (ice_gstrings_vsi_stats[j].sizeof_stat == - sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; } /* populate per queue stats */ @@ -1716,6 +1700,7 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks, struct net_device *netdev) { struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_port_info *pi = np->vsi->port_info; struct ethtool_link_ksettings cap_ksettings; struct ice_link_status *link_info; struct ice_vsi *vsi = np->vsi; @@ -2040,6 +2025,33 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks, break; } ks->base.duplex = DUPLEX_FULL; + + if (link_info->an_info & ICE_AQ_AN_COMPLETED) + ethtool_link_ksettings_add_link_mode(ks, lp_advertising, + Autoneg); + + /* Set flow control negotiated Rx/Tx pause */ + switch (pi->fc.current_mode) { + case ICE_FC_FULL: + ethtool_link_ksettings_add_link_mode(ks, lp_advertising, Pause); + break; + case ICE_FC_TX_PAUSE: + ethtool_link_ksettings_add_link_mode(ks, lp_advertising, Pause); + ethtool_link_ksettings_add_link_mode(ks, lp_advertising, + Asym_Pause); + break; + case ICE_FC_RX_PAUSE: + ethtool_link_ksettings_add_link_mode(ks, lp_advertising, + Asym_Pause); + break; + case ICE_FC_PFC: + /* fall through */ + default: + ethtool_link_ksettings_del_link_mode(ks, lp_advertising, Pause); + ethtool_link_ksettings_del_link_mode(ks, lp_advertising, + Asym_Pause); + break; + } } /** @@ -2078,9 +2090,12 @@ ice_get_link_ksettings(struct net_device *netdev, struct ice_aqc_get_phy_caps_data *caps; struct ice_link_status *hw_link_info; struct ice_vsi *vsi = np->vsi; + enum ice_status status; + int err = 0; ethtool_link_ksettings_zero_link_mode(ks, supported); ethtool_link_ksettings_zero_link_mode(ks, advertising); + ethtool_link_ksettings_zero_link_mode(ks, lp_advertising); hw_link_info = &vsi->port_info->phy.link_info; /* set speed and duplex */ @@ -2125,48 +2140,36 @@ ice_get_link_ksettings(struct net_device *netdev, /* flow control is symmetric and always supported */ ethtool_link_ksettings_add_link_mode(ks, supported, Pause); - switch (vsi->port_info->fc.req_mode) { - case ICE_FC_FULL: + caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL); + if (!caps) + return -ENOMEM; + + status = ice_aq_get_phy_caps(vsi->port_info, false, + ICE_AQC_REPORT_SW_CFG, caps, NULL); + if (status) { + err = -EIO; + goto done; + } + + /* Set the advertised flow control based on the PHY capability */ + if ((caps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) && + (caps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)) { ethtool_link_ksettings_add_link_mode(ks, advertising, Pause); - break; - case ICE_FC_TX_PAUSE: ethtool_link_ksettings_add_link_mode(ks, advertising, Asym_Pause); - break; - case ICE_FC_RX_PAUSE: + } else if (caps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + Asym_Pause); + } else if (caps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) { ethtool_link_ksettings_add_link_mode(ks, advertising, Pause); ethtool_link_ksettings_add_link_mode(ks, advertising, Asym_Pause); - break; - case ICE_FC_PFC: - default: + } else { ethtool_link_ksettings_del_link_mode(ks, advertising, Pause); ethtool_link_ksettings_del_link_mode(ks, advertising, Asym_Pause); - break; } - caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL); - if (!caps) - goto done; - - if (ice_aq_get_phy_caps(vsi->port_info, false, ICE_AQC_REPORT_TOPO_CAP, - caps, NULL)) - netdev_info(netdev, "Get phy capability failed.\n"); - - /* Set supported FEC modes based on PHY capability */ - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); - - if (caps->link_fec_options & 
ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN || - caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN) - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); - if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN) - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); - - if (ice_aq_get_phy_caps(vsi->port_info, false, ICE_AQC_REPORT_SW_CFG, - caps, NULL)) - netdev_info(netdev, "Get phy capability failed.\n"); - /* Set advertised FEC modes based on PHY capability */ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE); @@ -2178,9 +2181,25 @@ ice_get_link_ksettings(struct net_device *netdev, caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ) ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); + status = ice_aq_get_phy_caps(vsi->port_info, false, + ICE_AQC_REPORT_TOPO_CAP, caps, NULL); + if (status) { + err = -EIO; + goto done; + } + + /* Set supported FEC modes based on PHY capability */ + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); + + if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN || + caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN) + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); + if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN) + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); + done: devm_kfree(&vsi->back->pdev->dev, caps); - return 0; + return err; } /** @@ -2763,6 +2782,11 @@ static int ice_nway_reset(struct net_device *netdev) * ice_get_pauseparam - Get Flow Control status * @netdev: network interface device structure * @pause: ethernet pause (flow control) parameters + * + * Get requested flow control status from PHY capability. + * If autoneg is true, then ethtool will send the ETHTOOL_GSET ioctl which + * is handled by ice_get_link_ksettings. ice_get_link_ksettings will report + * the negotiated Rx/Tx pause via lp_advertising. */ static void ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) @@ -2816,6 +2840,7 @@ static int ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_aqc_get_phy_caps_data *pcaps; struct ice_link_status *hw_link_info; struct ice_pf *pf = np->vsi->back; struct ice_dcbx_cfg *dcbx_cfg; @@ -2826,6 +2851,7 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) u8 aq_failures; bool link_up; int err = 0; + u32 is_an; pi = vsi->port_info; hw_link_info = &pi->phy.link_info; @@ -2840,7 +2866,30 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) return -EOPNOTSUPP; } - if (pause->autoneg != (hw_link_info->an_info & ICE_AQ_AN_COMPLETED)) { + /* Get pause param reports the configured and negotiated flow control + * pause when ETHTOOL_GLINKSETTINGS is defined. Since ETHTOOL_GLINKSETTINGS + * is defined, get pause param's pause->autoneg reports the SW configured + * setting, so compare pause->autoneg with the SW configured value to + * prevent the user from using set pause param to change autoneg. + */ + pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); + if (!pcaps) + return -ENOMEM; + + /* Get current PHY config */ + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, + NULL); + if (status) { + kfree(pcaps); + return -EIO; + } + + is_an = ((pcaps->caps & ICE_AQC_PHY_AN_MODE) ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE); + + kfree(pcaps); + + if (pause->autoneg != is_an) { netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n"); return -EOPNOTSUPP; } @@ -3146,12 +3195,6 @@ __ice_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, if (ice_get_q_coalesce(vsi, ec, q_num)) return -EINVAL; - if (q_num < vsi->num_txq) - ec->tx_max_coalesced_frames_irq = vsi->work_lmt; - - if (q_num < vsi->num_rxq) - ec->rx_max_coalesced_frames_irq = vsi->work_lmt; - return 0; } @@ -3185,25 +3228,25 @@ static int ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec, struct ice_ring_container *rc, struct ice_vsi *vsi) { + const char *c_type_str = (c_type == ICE_RX_CONTAINER) ? "rx" : "tx"; + u32 use_adaptive_coalesce, coalesce_usecs; struct ice_pf *pf = vsi->back; u16 itr_setting; if (!rc->ring) return -EINVAL; - itr_setting = rc->itr_setting & ~ICE_ITR_DYNAMIC; - switch (c_type) { case ICE_RX_CONTAINER: if (ec->rx_coalesce_usecs_high > ICE_MAX_INTRL || (ec->rx_coalesce_usecs_high && ec->rx_coalesce_usecs_high < pf->hw.intrl_gran)) { netdev_info(vsi->netdev, - "Invalid value, rx-usecs-high valid values are 0 (disabled), %d-%d\n", - pf->hw.intrl_gran, ICE_MAX_INTRL); + "Invalid value, %s-usecs-high valid values are 0 (disabled), %d-%d\n", + c_type_str, pf->hw.intrl_gran, + ICE_MAX_INTRL); return -EINVAL; } - if (ec->rx_coalesce_usecs_high != rc->ring->q_vector->intrl) { rc->ring->q_vector->intrl = ec->rx_coalesce_usecs_high; wr32(&pf->hw, GLINT_RATE(rc->ring->q_vector->reg_idx), @@ -3211,60 +3254,60 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec, pf->hw.intrl_gran)); } - if (ec->rx_coalesce_usecs != itr_setting && - ec->use_adaptive_rx_coalesce) { - netdev_info(vsi->netdev, - "Rx interrupt throttling cannot be changed if adaptive-rx is enabled\n"); - return -EINVAL; - } + use_adaptive_coalesce = ec->use_adaptive_rx_coalesce; + coalesce_usecs = ec->rx_coalesce_usecs; - if (ec->rx_coalesce_usecs > ICE_ITR_MAX) { - netdev_info(vsi->netdev, - "Invalid value, rx-usecs range is 0-%d\n", - ICE_ITR_MAX); - return -EINVAL; - } - - if (ec->use_adaptive_rx_coalesce) { - rc->itr_setting |= ICE_ITR_DYNAMIC; - } else { - rc->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs); - rc->target_itr = ITR_TO_REG(rc->itr_setting); - } break; case ICE_TX_CONTAINER: if (ec->tx_coalesce_usecs_high) { netdev_info(vsi->netdev, - "setting tx-usecs-high is not supported\n"); + "setting %s-usecs-high is not supported\n", + c_type_str); return -EINVAL; } - if (ec->tx_coalesce_usecs != itr_setting && - ec->use_adaptive_tx_coalesce) { - netdev_info(vsi->netdev, - "Tx interrupt throttling cannot be changed if adaptive-tx is enabled\n"); - return -EINVAL; - } + use_adaptive_coalesce = ec->use_adaptive_tx_coalesce; + coalesce_usecs = ec->tx_coalesce_usecs; - if (ec->tx_coalesce_usecs > ICE_ITR_MAX) { - netdev_info(vsi->netdev, - "Invalid value, tx-usecs range is 0-%d\n", - ICE_ITR_MAX); - return -EINVAL; - } - - if (ec->use_adaptive_tx_coalesce) { - rc->itr_setting |= ICE_ITR_DYNAMIC; - } else { - rc->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs); - rc->target_itr = ITR_TO_REG(rc->itr_setting); - } break; default: dev_dbg(&pf->pdev->dev, "Invalid container type %d\n", c_type); return -EINVAL; } + itr_setting = rc->itr_setting & ~ICE_ITR_DYNAMIC; + if (coalesce_usecs != itr_setting && use_adaptive_coalesce) { + netdev_info(vsi->netdev, + "%s interrupt throttling cannot be changed if adaptive-%s is enabled\n", + 
c_type_str, c_type_str); + return -EINVAL; + } + + if (coalesce_usecs > ICE_ITR_MAX) { + netdev_info(vsi->netdev, + "Invalid value, %s-usecs range is 0-%d\n", + c_type_str, ICE_ITR_MAX); + return -EINVAL; + } + + /* hardware only supports an ITR granularity of 2us */ + if (coalesce_usecs % 2 != 0) { + netdev_info(vsi->netdev, + "Invalid value, %s-usecs must be even\n", + c_type_str); + return -EINVAL; + } + + if (use_adaptive_coalesce) { + rc->itr_setting |= ICE_ITR_DYNAMIC; + } else { + /* store user facing value how it was set */ + rc->itr_setting = coalesce_usecs; + /* set to static and convert to value HW understands */ + rc->target_itr = + ITR_TO_REG(ITR_REG_ALIGN(rc->itr_setting)); + } + return 0; } @@ -3331,17 +3374,13 @@ __ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, if (ice_set_q_coalesce(vsi, ec, i)) return -EINVAL; } - goto set_work_lmt; + goto set_complete; } if (ice_set_q_coalesce(vsi, ec, q_num)) return -EINVAL; -set_work_lmt: - - if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq) - vsi->work_lmt = max(ec->tx_max_coalesced_frames_irq, - ec->rx_max_coalesced_frames_irq); +set_complete: return 0; } @@ -3396,6 +3435,33 @@ static const struct ethtool_ops ice_ethtool_ops = { .set_fecparam = ice_set_fecparam, }; +static const struct ethtool_ops ice_ethtool_safe_mode_ops = { + .get_link_ksettings = ice_get_link_ksettings, + .set_link_ksettings = ice_set_link_ksettings, + .get_drvinfo = ice_get_drvinfo, + .get_regs_len = ice_get_regs_len, + .get_regs = ice_get_regs, + .get_msglevel = ice_get_msglevel, + .set_msglevel = ice_set_msglevel, + .get_eeprom_len = ice_get_eeprom_len, + .get_eeprom = ice_get_eeprom, + .get_strings = ice_get_strings, + .get_ethtool_stats = ice_get_ethtool_stats, + .get_sset_count = ice_get_sset_count, + .get_ringparam = ice_get_ringparam, + .set_ringparam = ice_set_ringparam, + .nway_reset = ice_nway_reset, +}; + +/** + * ice_set_ethtool_safe_mode_ops - setup safe mode ethtool ops + * @netdev: network interface device structure + */ +void ice_set_ethtool_safe_mode_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &ice_ethtool_safe_mode_ops; +} + /** * ice_set_ethtool_ops - setup netdev ethtool ops * @netdev: network interface device structure diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c new file mode 100644 index 000000000000..cbd53b586c36 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c @@ -0,0 +1,1549 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019, Intel Corporation. */ + +#include "ice_common.h" +#include "ice_flex_pipe.h" + +/** + * ice_pkg_val_buf + * @buf: pointer to the ice buffer + * + * This helper function validates a buffer's header. + */ +static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf) +{ + struct ice_buf_hdr *hdr; + u16 section_count; + u16 data_end; + + hdr = (struct ice_buf_hdr *)buf->buf; + /* verify data */ + section_count = le16_to_cpu(hdr->section_count); + if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT) + return NULL; + + data_end = le16_to_cpu(hdr->data_end); + if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END) + return NULL; + + return hdr; +} + +/** + * ice_find_buf_table + * @ice_seg: pointer to the ice segment + * + * Returns the address of the buffer table within the ice segment. 
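
The pointer arithmetic in ice_find_buf_table below works by walking two variable-length tables to reach the buffer table; the segment layout it assumes, sketched as a comment (struct and field names from this file):

	/*
	 * struct ice_seg
	 *	...fixed header...
	 *	device_table[device_table_count]	<- variable length
	 * struct ice_nvm_table
	 *	table_count
	 *	vers[table_count]			<- variable length
	 * struct ice_buf_table
	 *	buf_count
	 *	buf_array[buf_count]			<- 4 KB package buffers
	 */
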
+ */ +static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg) +{ + struct ice_nvm_table *nvms; + + nvms = (struct ice_nvm_table *) + (ice_seg->device_table + + le32_to_cpu(ice_seg->device_table_count)); + + return (__force struct ice_buf_table *) + (nvms->vers + le32_to_cpu(nvms->table_count)); +} + +/** + * ice_pkg_enum_buf + * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) + * @state: pointer to the enum state + * + * This function will enumerate all the buffers in the ice segment. The first + * call is made with the ice_seg parameter non-NULL; on subsequent calls, + * ice_seg is set to NULL which continues the enumeration. When the function + * returns a NULL pointer, then the end of the buffers has been reached, or an + * unexpected value has been detected (for example an invalid section count or + * an invalid buffer end value). + */ +static struct ice_buf_hdr * +ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state) +{ + if (ice_seg) { + state->buf_table = ice_find_buf_table(ice_seg); + if (!state->buf_table) + return NULL; + + state->buf_idx = 0; + return ice_pkg_val_buf(state->buf_table->buf_array); + } + + if (++state->buf_idx < le32_to_cpu(state->buf_table->buf_count)) + return ice_pkg_val_buf(state->buf_table->buf_array + + state->buf_idx); + else + return NULL; +} + +/** + * ice_pkg_advance_sect + * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) + * @state: pointer to the enum state + * + * This helper function will advance the section within the ice segment, + * also advancing the buffer if needed. + */ +static bool +ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state) +{ + if (!ice_seg && !state->buf) + return false; + + if (!ice_seg && state->buf) + if (++state->sect_idx < le16_to_cpu(state->buf->section_count)) + return true; + + state->buf = ice_pkg_enum_buf(ice_seg, state); + if (!state->buf) + return false; + + /* start of new buffer, reset section index */ + state->sect_idx = 0; + return true; +} + +/** + * ice_pkg_enum_section + * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) + * @state: pointer to the enum state + * @sect_type: section type to enumerate + * + * This function will enumerate all the sections of a particular type in the + * ice segment. The first call is made with the ice_seg parameter non-NULL; + * on subsequent calls, ice_seg is set to NULL which continues the enumeration. + * When the function returns a NULL pointer, then the end of the matching + * sections has been reached. 
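
The enumerator is stateful, with the segment passed only on the first call; a hedged usage sketch of this internal API (the sect_type value is illustrative):

	struct ice_pkg_enum state = { 0 };
	void *sect;

	/* first call passes the segment; NULL thereafter continues the walk */
	for (sect = ice_pkg_enum_section(ice_seg, &state, sect_type);
	     sect;
	     sect = ice_pkg_enum_section(NULL, &state, sect_type)) {
		/* ...consume one matching, validated section... */
	}
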
+ */ +static void * +ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state, + u32 sect_type) +{ + u16 offset, size; + + if (ice_seg) + state->type = sect_type; + + if (!ice_pkg_advance_sect(ice_seg, state)) + return NULL; + + /* scan for next matching section */ + while (state->buf->section_entry[state->sect_idx].type != + cpu_to_le32(state->type)) + if (!ice_pkg_advance_sect(NULL, state)) + return NULL; + + /* validate section */ + offset = le16_to_cpu(state->buf->section_entry[state->sect_idx].offset); + if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF) + return NULL; + + size = le16_to_cpu(state->buf->section_entry[state->sect_idx].size); + if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ) + return NULL; + + /* make sure the section fits in the buffer */ + if (offset + size > ICE_PKG_BUF_SIZE) + return NULL; + + state->sect_type = + le32_to_cpu(state->buf->section_entry[state->sect_idx].type); + + /* calc pointer to this section */ + state->sect = ((u8 *)state->buf) + + le16_to_cpu(state->buf->section_entry[state->sect_idx].offset); + + return state->sect; +} + +/** + * ice_acquire_global_cfg_lock + * @hw: pointer to the HW structure + * @access: access type (read or write) + * + * This function will request ownership of the global config lock for reading + * or writing of the package. When attempting to obtain write access, the + * caller must check for the following two return values: + * + * ICE_SUCCESS - Means the caller has acquired the global config lock + * and can perform writing of the package. + * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the + * package or has found that no update was necessary; in + * this case, the caller can just skip performing any + * update of the package. + */ +static enum ice_status +ice_acquire_global_cfg_lock(struct ice_hw *hw, + enum ice_aq_res_access_type access) +{ + enum ice_status status; + + status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access, + ICE_GLOBAL_CFG_LOCK_TIMEOUT); + + if (!status) + mutex_lock(&ice_global_cfg_lock_sw); + else if (status == ICE_ERR_AQ_NO_WORK) + ice_debug(hw, ICE_DBG_PKG, + "Global config lock: No work to do\n"); + + return status; +} + +/** + * ice_release_global_cfg_lock + * @hw: pointer to the HW structure + * + * This function will release the global config lock. 
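
A sketch of the intended write-access pattern, in which ICE_ERR_AQ_NO_WORK is a success-like early exit rather than a failure (this is the convention ice_dwnld_cfg_bufs follows below):

	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
	if (status == ICE_ERR_AQ_NO_WORK)
		return 0;	/* another PF already wrote the package */
	if (status)
		return status;	/* genuine failure to take the lock */

	/* ...send the package buffers while holding the lock... */

	ice_release_global_cfg_lock(hw);
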
+ */ +static void ice_release_global_cfg_lock(struct ice_hw *hw) +{ + mutex_unlock(&ice_global_cfg_lock_sw); + ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID); +} + +/** + * ice_aq_download_pkg + * @hw: pointer to the hardware structure + * @pkg_buf: the package buffer to transfer + * @buf_size: the size of the package buffer + * @last_buf: last buffer indicator + * @error_offset: returns error offset + * @error_info: returns error information + * @cd: pointer to command details structure or NULL + * + * Download Package (0x0C40) + */ +static enum ice_status +ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, + u16 buf_size, bool last_buf, u32 *error_offset, + u32 *error_info, struct ice_sq_cd *cd) +{ + struct ice_aqc_download_pkg *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + if (error_offset) + *error_offset = 0; + if (error_info) + *error_info = 0; + + cmd = &desc.params.download_pkg; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg); + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + if (last_buf) + cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; + + status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); + if (status == ICE_ERR_AQ_ERROR) { + /* Read error from buffer only when the FW returned an error */ + struct ice_aqc_download_pkg_resp *resp; + + resp = (struct ice_aqc_download_pkg_resp *)pkg_buf; + if (error_offset) + *error_offset = le32_to_cpu(resp->error_offset); + if (error_info) + *error_info = le32_to_cpu(resp->error_info); + } + + return status; +} + +/** + * ice_find_seg_in_pkg + * @hw: pointer to the hardware structure + * @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK) + * @pkg_hdr: pointer to the package header to be searched + * + * This function searches a package file for a particular segment type. On + * success it returns a pointer to the segment header, otherwise it will + * return NULL. + */ +static struct ice_generic_seg_hdr * +ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, + struct ice_pkg_hdr *pkg_hdr) +{ + u32 i; + + ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n", + pkg_hdr->format_ver.major, pkg_hdr->format_ver.minor, + pkg_hdr->format_ver.update, pkg_hdr->format_ver.draft); + + /* Search all package segments for the requested segment type */ + for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) { + struct ice_generic_seg_hdr *seg; + + seg = (struct ice_generic_seg_hdr *) + ((u8 *)pkg_hdr + le32_to_cpu(pkg_hdr->seg_offset[i])); + + if (le32_to_cpu(seg->seg_type) == seg_type) + return seg; + } + + return NULL; +} + +/** + * ice_dwnld_cfg_bufs + * @hw: pointer to the hardware structure + * @bufs: pointer to an array of buffers + * @count: the number of buffers in the array + * + * Obtains global config lock and downloads the package configuration buffers + * to the firmware. Metadata buffers are skipped, and the first metadata buffer + * found indicates that the rest of the buffers are all metadata buffers. + */ +static enum ice_status +ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) +{ + enum ice_status status; + struct ice_buf_hdr *bh; + u32 offset, info, i; + + if (!bufs || !count) + return ICE_ERR_PARAM; + + /* If the first buffer's first section has its metadata bit set + * then there are no buffers to be downloaded, and the operation is + * considered a success. 
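
The download loop below derives its "last buffer" flag by peeking at the next buffer's first section; that test condenses to a helper like this (hypothetical name, types and constant from this file):

	/* a buffer whose first section carries ICE_METADATA_BUF ends the
	 * download; everything from it onward is metadata only
	 */
	static bool ice_buf_is_metadata(struct ice_buf_hdr *bh)
	{
		return le16_to_cpu(bh->section_count) &&
		       (le32_to_cpu(bh->section_entry[0].type) &
			ICE_METADATA_BUF);
	}
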
+ */ + bh = (struct ice_buf_hdr *)bufs; + if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF) + return 0; + + /* reset pkg_dwnld_status in case this function is called in the + * reset/rebuild flow + */ + hw->pkg_dwnld_status = ICE_AQ_RC_OK; + + status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE); + if (status) { + if (status == ICE_ERR_AQ_NO_WORK) + hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST; + else + hw->pkg_dwnld_status = hw->adminq.sq_last_status; + return status; + } + + for (i = 0; i < count; i++) { + bool last = ((i + 1) == count); + + if (!last) { + /* check next buffer for metadata flag */ + bh = (struct ice_buf_hdr *)(bufs + i + 1); + + /* A set metadata flag in the next buffer will signal + * that the current buffer will be the last buffer + * downloaded + */ + if (le16_to_cpu(bh->section_count)) + if (le32_to_cpu(bh->section_entry[0].type) & + ICE_METADATA_BUF) + last = true; + } + + bh = (struct ice_buf_hdr *)(bufs + i); + + status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last, + &offset, &info, NULL); + + /* Save AQ status from download package */ + hw->pkg_dwnld_status = hw->adminq.sq_last_status; + if (status) { + ice_debug(hw, ICE_DBG_PKG, + "Pkg download failed: err %d off %d inf %d\n", + status, offset, info); + + break; + } + + if (last) + break; + } + + ice_release_global_cfg_lock(hw); + + return status; +} + +/** + * ice_aq_get_pkg_info_list + * @hw: pointer to the hardware structure + * @pkg_info: the buffer which will receive the information list + * @buf_size: the size of the pkg_info information buffer + * @cd: pointer to command details structure or NULL + * + * Get Package Info List (0x0C43) + */ +static enum ice_status +ice_aq_get_pkg_info_list(struct ice_hw *hw, + struct ice_aqc_get_pkg_info_resp *pkg_info, + u16 buf_size, struct ice_sq_cd *cd) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list); + + return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd); +} + +/** + * ice_download_pkg + * @hw: pointer to the hardware structure + * @ice_seg: pointer to the segment of the package to be downloaded + * + * Handles the download of a complete package. + */ +static enum ice_status +ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg) +{ + struct ice_buf_table *ice_buf_tbl; + + ice_debug(hw, ICE_DBG_PKG, "Segment version: %d.%d.%d.%d\n", + ice_seg->hdr.seg_ver.major, ice_seg->hdr.seg_ver.minor, + ice_seg->hdr.seg_ver.update, ice_seg->hdr.seg_ver.draft); + + ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n", + le32_to_cpu(ice_seg->hdr.seg_type), + le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_name); + + ice_buf_tbl = ice_find_buf_table(ice_seg); + + ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n", + le32_to_cpu(ice_buf_tbl->buf_count)); + + return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array, + le32_to_cpu(ice_buf_tbl->buf_count)); +} + +/** + * ice_init_pkg_info + * @hw: pointer to the hardware structure + * @pkg_hdr: pointer to the driver's package hdr + * + * Saves off the package details into the HW structure. 
+ */ +static enum ice_status +ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) +{ + struct ice_global_metadata_seg *meta_seg; + struct ice_generic_seg_hdr *seg_hdr; + + if (!pkg_hdr) + return ICE_ERR_PARAM; + + meta_seg = (struct ice_global_metadata_seg *) + ice_find_seg_in_pkg(hw, SEGMENT_TYPE_METADATA, pkg_hdr); + if (meta_seg) { + hw->pkg_ver = meta_seg->pkg_ver; + memcpy(hw->pkg_name, meta_seg->pkg_name, sizeof(hw->pkg_name)); + + ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n", + meta_seg->pkg_ver.major, meta_seg->pkg_ver.minor, + meta_seg->pkg_ver.update, meta_seg->pkg_ver.draft, + meta_seg->pkg_name); + } else { + ice_debug(hw, ICE_DBG_INIT, + "Did not find metadata segment in driver package\n"); + return ICE_ERR_CFG; + } + + seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr); + if (seg_hdr) { + hw->ice_pkg_ver = seg_hdr->seg_ver; + memcpy(hw->ice_pkg_name, seg_hdr->seg_name, + sizeof(hw->ice_pkg_name)); + + ice_debug(hw, ICE_DBG_PKG, "Ice Pkg: %d.%d.%d.%d, %s\n", + seg_hdr->seg_ver.major, seg_hdr->seg_ver.minor, + seg_hdr->seg_ver.update, seg_hdr->seg_ver.draft, + seg_hdr->seg_name); + } else { + ice_debug(hw, ICE_DBG_INIT, + "Did not find ice segment in driver package\n"); + return ICE_ERR_CFG; + } + + return 0; +} + +/** + * ice_get_pkg_info + * @hw: pointer to the hardware structure + * + * Store details of the package currently loaded in HW into the HW structure. + */ +static enum ice_status ice_get_pkg_info(struct ice_hw *hw) +{ + struct ice_aqc_get_pkg_info_resp *pkg_info; + enum ice_status status; + u16 size; + u32 i; + + size = sizeof(*pkg_info) + (sizeof(pkg_info->pkg_info[0]) * + (ICE_PKG_CNT - 1)); + pkg_info = kzalloc(size, GFP_KERNEL); + if (!pkg_info) + return ICE_ERR_NO_MEMORY; + + status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL); + if (status) + goto init_pkg_free_alloc; + + for (i = 0; i < le32_to_cpu(pkg_info->count); i++) { +#define ICE_PKG_FLAG_COUNT 4 + char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 }; + u8 place = 0; + + if (pkg_info->pkg_info[i].is_active) { + flags[place++] = 'A'; + hw->active_pkg_ver = pkg_info->pkg_info[i].ver; + memcpy(hw->active_pkg_name, + pkg_info->pkg_info[i].name, + sizeof(hw->active_pkg_name)); + hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm; + } + if (pkg_info->pkg_info[i].is_active_at_boot) + flags[place++] = 'B'; + if (pkg_info->pkg_info[i].is_modified) + flags[place++] = 'M'; + if (pkg_info->pkg_info[i].is_in_nvm) + flags[place++] = 'N'; + + ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n", + i, pkg_info->pkg_info[i].ver.major, + pkg_info->pkg_info[i].ver.minor, + pkg_info->pkg_info[i].ver.update, + pkg_info->pkg_info[i].ver.draft, + pkg_info->pkg_info[i].name, flags); + } + +init_pkg_free_alloc: + kfree(pkg_info); + + return status; +} + +/** + * ice_verify_pkg - verify package + * @pkg: pointer to the package buffer + * @len: size of the package buffer + * + * Verifies various attributes of the package file, including length, format + * version, and the requirement of at least one segment. 
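
For reference, the one-letter flags assembled in ice_get_pkg_info above map as follows; for example, a factory package that is running now, was loaded at boot, and sits in flash logs as "ABN":

	/* package status flags, as built in ice_get_pkg_info():
	 *   'A' = active now	'B' = active at boot
	 *   'M' = modified	'N' = present in NVM
	 */
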
+ */ +static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) +{ + u32 seg_count; + u32 i; + + if (len < sizeof(*pkg)) + return ICE_ERR_BUF_TOO_SHORT; + + if (pkg->format_ver.major != ICE_PKG_FMT_VER_MAJ || + pkg->format_ver.minor != ICE_PKG_FMT_VER_MNR || + pkg->format_ver.update != ICE_PKG_FMT_VER_UPD || + pkg->format_ver.draft != ICE_PKG_FMT_VER_DFT) + return ICE_ERR_CFG; + + /* pkg must have at least one segment */ + seg_count = le32_to_cpu(pkg->seg_count); + if (seg_count < 1) + return ICE_ERR_CFG; + + /* make sure segment array fits in package length */ + if (len < sizeof(*pkg) + ((seg_count - 1) * sizeof(pkg->seg_offset))) + return ICE_ERR_BUF_TOO_SHORT; + + /* all segments must fit within length */ + for (i = 0; i < seg_count; i++) { + u32 off = le32_to_cpu(pkg->seg_offset[i]); + struct ice_generic_seg_hdr *seg; + + /* segment header must fit */ + if (len < off + sizeof(*seg)) + return ICE_ERR_BUF_TOO_SHORT; + + seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off); + + /* segment body must fit */ + if (len < off + le32_to_cpu(seg->seg_size)) + return ICE_ERR_BUF_TOO_SHORT; + } + + return 0; +} + +/** + * ice_free_seg - free package segment pointer + * @hw: pointer to the hardware structure + * + * Frees the package segment pointer in the proper manner, depending on if the + * segment was allocated or just the passed in pointer was stored. + */ +void ice_free_seg(struct ice_hw *hw) +{ + if (hw->pkg_copy) { + devm_kfree(ice_hw_to_dev(hw), hw->pkg_copy); + hw->pkg_copy = NULL; + hw->pkg_size = 0; + } + hw->seg = NULL; +} + +/** + * ice_init_pkg_regs - initialize additional package registers + * @hw: pointer to the hardware structure + */ +static void ice_init_pkg_regs(struct ice_hw *hw) +{ +#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF +#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF +#define ICE_SW_BLK_IDX 0 + + /* setup Switch block input mask, which is 48-bits in two parts */ + wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L); + wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H); +} + +/** + * ice_chk_pkg_version - check package version for compatibility with driver + * @pkg_ver: pointer to a version structure to check + * + * Check to make sure that the package about to be downloaded is compatible with + * the driver. To be compatible, the major and minor components of the package + * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR + * definitions. + */ +static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver) +{ + if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ || + pkg_ver->minor != ICE_PKG_SUPP_VER_MNR) + return ICE_ERR_NOT_SUPPORTED; + + return 0; +} + +/** + * ice_init_pkg - initialize/download package + * @hw: pointer to the hardware structure + * @buf: pointer to the package buffer + * @len: size of the package buffer + * + * This function initializes a package. The package contains HW tables + * required to do packet processing. First, the function extracts package + * information such as version. Then it finds the ice configuration segment + * within the package; this function then saves a copy of the segment pointer + * within the supplied package buffer. Next, the function will cache any hints + * from the package, followed by downloading the package itself. Note, that if + * a previous PF driver has already downloaded the package successfully, then + * the current driver will not have to download the package again. 
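ice_verify_pkg() above validates in three stages: the fixed header must fit in the buffer, then the segment-offset array, then every segment the offsets point at. The same pattern reduced to a user-space sketch with simplified types; the memcpy merely sidesteps unaligned reads, and a hardened variant would additionally guard the off + size additions against wrap-around:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct seg_hdr { uint32_t seg_type; uint32_t seg_size; };
struct pkg_hdr { uint32_t seg_count; uint32_t seg_offset[1]; };

static bool pkg_bounds_ok(const uint8_t *buf, uint32_t len)
{
        const struct pkg_hdr *pkg = (const struct pkg_hdr *)buf;
        uint32_t i, n;

        if (len < sizeof(*pkg))
                return false;           /* header must fit */

        n = pkg->seg_count;
        if (n < 1 ||
            len < sizeof(*pkg) + (n - 1) * sizeof(pkg->seg_offset[0]))
                return false;           /* offset array must fit */

        for (i = 0; i < n; i++) {
                uint32_t off = pkg->seg_offset[i];
                struct seg_hdr seg;

                if (len < off + sizeof(seg))
                        return false;   /* segment header must fit */
                memcpy(&seg, buf + off, sizeof(seg));
                if (len < off + seg.seg_size)
                        return false;   /* segment body must fit */
        }
        return true;
}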
+ * + * The local package contents will be used to query default behavior and to + * update specific sections of the HW's version of the package (e.g. to update + * the parse graph to understand new protocols). + * + * This function stores a pointer to the package buffer memory, and it is + * expected that the supplied buffer will not be freed immediately. If the + * package buffer needs to be freed, such as when read from a file, use + * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this + * case. + */ +enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) +{ + struct ice_pkg_hdr *pkg; + enum ice_status status; + struct ice_seg *seg; + + if (!buf || !len) + return ICE_ERR_PARAM; + + pkg = (struct ice_pkg_hdr *)buf; + status = ice_verify_pkg(pkg, len); + if (status) { + ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n", + status); + return status; + } + + /* initialize package info */ + status = ice_init_pkg_info(hw, pkg); + if (status) + return status; + + /* before downloading the package, check package version for + * compatibility with driver + */ + status = ice_chk_pkg_version(&hw->pkg_ver); + if (status) + return status; + + /* find segment in given package */ + seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg); + if (!seg) { + ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n"); + return ICE_ERR_CFG; + } + + /* download package */ + status = ice_download_pkg(hw, seg); + if (status == ICE_ERR_AQ_NO_WORK) { + ice_debug(hw, ICE_DBG_INIT, + "package previously loaded - no work.\n"); + status = 0; + } + + /* Get information on the package currently loaded in HW, then make sure + * the driver is compatible with this version. + */ + if (!status) { + status = ice_get_pkg_info(hw); + if (!status) + status = ice_chk_pkg_version(&hw->active_pkg_ver); + } + + if (!status) { + hw->seg = seg; + /* on successful package download update other required + * registers to support the package and fill HW tables + * with package content. + */ + ice_init_pkg_regs(hw); + ice_fill_blk_tbls(hw); + } else { + ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n", + status); + } + + return status; +} + +/** + * ice_copy_and_init_pkg - initialize/download a copy of the package + * @hw: pointer to the hardware structure + * @buf: pointer to the package buffer + * @len: size of the package buffer + * + * This function copies the package buffer, and then calls ice_init_pkg() to + * initialize the copied package contents. + * + * The copying is necessary if the package buffer supplied is constant, or if + * the memory may disappear shortly after calling this function. + * + * If the package buffer resides in the data segment and can be modified, the + * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg(). + * + * However, if the package buffer needs to be copied first, such as when being + * read from a file, the caller should use ice_copy_and_init_pkg(). + * + * This function will first copy the package buffer, before calling + * ice_init_pkg(). The caller is free to immediately destroy the original + * package buffer, as the new copy will be managed by this function and + * related routines. 
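The read-from-a-file case called out here is the common one: request_firmware() hands back const, short-lived data, which is exactly when the copying variant is required. A hedged sketch of such a caller; the function name and the package path are illustrative only, not taken from this patch:

#include <linux/firmware.h>

static int example_load_ddp(struct ice_hw *hw, struct device *dev)
{
        const struct firmware *fw;
        enum ice_status status;
        int err;

        err = request_firmware(&fw, "intel/ice/ddp/ice.pkg", dev);
        if (err)
                return err;

        /* fw->data is const and released below; ice_init_pkg() would end
         * up holding a dangling pointer, so copy-and-init is mandatory */
        status = ice_copy_and_init_pkg(hw, fw->data, fw->size);

        release_firmware(fw);   /* safe: the HW struct owns its own copy */
        return status ? -EIO : 0;
}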
+ */ +enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len) +{ + enum ice_status status; + u8 *buf_copy; + + if (!buf || !len) + return ICE_ERR_PARAM; + + buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL); + + status = ice_init_pkg(hw, buf_copy, len); + if (status) { + /* Free the copy, since we failed to initialize the package */ + devm_kfree(ice_hw_to_dev(hw), buf_copy); + } else { + /* Track the copied pkg so we can free it later */ + hw->pkg_copy = buf_copy; + hw->pkg_size = len; + } + + return status; +} + +/* PTG Management */ + +/** + * ice_ptg_find_ptype - Search for packet type group using packet type (ptype) + * @hw: pointer to the hardware structure + * @blk: HW block + * @ptype: the ptype to search for + * @ptg: pointer to variable that receives the PTG + * + * This function will search the PTGs for a particular ptype, returning the + * PTG ID that contains it through the PTG parameter, with the value of + * ICE_DEFAULT_PTG (0) meaning it is part of the default PTG. + */ +static enum ice_status +ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg) +{ + if (ptype >= ICE_XLT1_CNT || !ptg) + return ICE_ERR_PARAM; + + *ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg; + return 0; +} + +/** + * ice_ptg_alloc_val - Allocates a new packet type group ID by value + * @hw: pointer to the hardware structure + * @blk: HW block + * @ptg: the PTG to allocate + * + * This function allocates a given packet type group ID specified by the PTG + * parameter. + */ +static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg) +{ + hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true; +} + +/** + * ice_ptg_remove_ptype - Removes ptype from a particular packet type group + * @hw: pointer to the hardware structure + * @blk: HW block + * @ptype: the ptype to remove + * @ptg: the PTG to remove the ptype from + * + * This function will remove the ptype from the specific PTG, and move it to + * the default PTG (ICE_DEFAULT_PTG). + */ +static enum ice_status +ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg) +{ + struct ice_ptg_ptype **ch; + struct ice_ptg_ptype *p; + + if (ptype > ICE_XLT1_CNT - 1) + return ICE_ERR_PARAM; + + if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use) + return ICE_ERR_DOES_NOT_EXIST; + + /* Should not happen if .in_use is set, bad config */ + if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype) + return ICE_ERR_CFG; + + /* find the ptype within this PTG, and bypass the link over it */ + p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype; + ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype; + while (p) { + if (ptype == (p - hw->blk[blk].xlt1.ptypes)) { + *ch = p->next_ptype; + break; + } + + ch = &p->next_ptype; + p = p->next_ptype; + } + + hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG; + hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL; + + return 0; +} + +/** + * ice_ptg_add_mv_ptype - Adds/moves ptype to a particular packet type group + * @hw: pointer to the hardware structure + * @blk: HW block + * @ptype: the ptype to add or move + * @ptg: the PTG to add or move the ptype to + * + * This function will either add or move a ptype to a particular PTG depending + * on whether the ptype is already part of another group. Note that using a + * destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the + * default PTG.
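The list walk in ice_ptg_remove_ptype() above uses the indirect-pointer idiom: ch always addresses the link that currently points at p, so deleting the head and deleting an interior node are the same operation and first_ptype needs no special case. The driver identifies the victim by its array index (p - ptypes) rather than by address, but the unlink step is identical. The idiom in isolation:

#include <stddef.h>

struct node { struct node *next; };

static void unlink_node(struct node **head, struct node *victim)
{
        struct node **ch = head;        /* link pointing at the candidate */
        struct node *p = *head;

        while (p) {
                if (p == victim) {
                        *ch = p->next;  /* bypass the link over it */
                        p->next = NULL;
                        break;
                }
                ch = &p->next;
                p = p->next;
        }
}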
+ */ +static enum ice_status +ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg) +{ + enum ice_status status; + u8 original_ptg; + + if (ptype > ICE_XLT1_CNT - 1) + return ICE_ERR_PARAM; + + if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG) + return ICE_ERR_DOES_NOT_EXIST; + + status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg); + if (status) + return status; + + /* Is ptype already in the correct PTG? */ + if (original_ptg == ptg) + return 0; + + /* Remove from original PTG and move back to the default PTG */ + if (original_ptg != ICE_DEFAULT_PTG) + ice_ptg_remove_ptype(hw, blk, ptype, original_ptg); + + /* Moving to default PTG? Then we're done with this request */ + if (ptg == ICE_DEFAULT_PTG) + return 0; + + /* Add ptype to PTG at beginning of list */ + hw->blk[blk].xlt1.ptypes[ptype].next_ptype = + hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype; + hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype = + &hw->blk[blk].xlt1.ptypes[ptype]; + + hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg; + hw->blk[blk].xlt1.t[ptype] = ptg; + + return 0; +} + +/* Block / table size info */ +struct ice_blk_size_details { + u16 xlt1; /* # XLT1 entries */ + u16 xlt2; /* # XLT2 entries */ + u16 prof_tcam; /* # profile ID TCAM entries */ + u16 prof_id; /* # profile IDs */ + u8 prof_cdid_bits; /* # CDID one-hot bits used in key */ + u16 prof_redir; /* # profile redirection entries */ + u16 es; /* # extraction sequence entries */ + u16 fvw; /* # field vector words */ + u8 overwrite; /* overwrite existing entries allowed */ + u8 reverse; /* reverse FV order */ +}; + +static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = { + /** + * Table Definitions + * XLT1 - Number of entries in XLT1 table + * XLT2 - Number of entries in XLT2 table + * TCAM - Number of entries in the Profile ID TCAM table + * CDID - Control Domain ID of the hardware block + * PRED - Number of entries in the Profile Redirection Table + * FV - Number of entries in the Field Vector + * FVW - Width (in WORDs) of the Field Vector + * OVR - Overwrite existing table entries + * REV - Reverse FV + */ + /* XLT1 , XLT2 ,TCAM, PID,CDID,PRED, FV, FVW */ + /* Overwrite , Reverse FV */ + /* SW */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256, 0, 256, 256, 48, + false, false }, + /* ACL */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 32, + false, false }, + /* FD */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24, + false, true }, + /* RSS */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24, + true, true }, + /* PE */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 64, 32, 0, 32, 32, 24, + false, false }, +}; + +enum ice_sid_all { + ICE_SID_XLT1_OFF = 0, + ICE_SID_XLT2_OFF, + ICE_SID_PR_OFF, + ICE_SID_PR_REDIR_OFF, + ICE_SID_ES_OFF, + ICE_SID_OFF_COUNT, +}; + +/* VSIG Management */ + +/** + * ice_vsig_find_vsi - find a VSIG that contains a specified VSI + * @hw: pointer to the hardware structure + * @blk: HW block + * @vsi: VSI of interest + * @vsig: pointer to receive the VSI group + * + * This function will look up the VSI entry in the XLT2 list and return + * the VSI group it is associated with. + */ +static enum ice_status +ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig) +{ + if (!vsig || vsi >= ICE_MAX_VSI) + return ICE_ERR_PARAM; + + /* As long as there's a default or valid VSIG associated with the input + * VSI, the function returns success. Any handling of VSIG will be + * done by the following add, update or remove functions.
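The blk_sizes[] table above is pure configuration: ice_init_hw_tbls() later in this file turns each row into allocations by plain multiplication, and notably the extraction-sequence table holds count * fvw field-vector words, not just count. Standalone arithmetic sketch using the RSS row (ES = 128, FVW = 24) and the 4-byte packed ice_fv_word declared in ice_flex_type.h later in this patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t es_count = 128;   /* RSS row: # extraction sequences */
        uint32_t fvw = 24;         /* RSS row: field-vector words each */
        size_t fv_word_sz = 4;     /* packed ice_fv_word: u8 + u16 + u8 */
        size_t entries = (size_t)es_count * fvw;

        /* prints: RSS ES table: 3072 entries, 12288 bytes */
        printf("RSS ES table: %zu entries, %zu bytes\n",
               entries, entries * fv_word_sz);
        return 0;
}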
+ */ + *vsig = hw->blk[blk].xlt2.vsis[vsi].vsig; + + return 0; +} + +/** + * ice_vsig_alloc_val - allocate a new VSIG by value + * @hw: pointer to the hardware structure + * @blk: HW block + * @vsig: the VSIG to allocate + * + * This function will allocate a given VSIG specified by the VSIG parameter. + */ +static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig) +{ + u16 idx = vsig & ICE_VSIG_IDX_M; + + if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) { + INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst); + hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true; + } + + return ICE_VSIG_VALUE(idx, hw->pf_id); +} + +/** + * ice_vsig_remove_vsi - remove VSI from VSIG + * @hw: pointer to the hardware structure + * @blk: HW block + * @vsi: VSI to remove + * @vsig: VSI group to remove from + * + * The function will remove the input VSI from its VSI group and move it + * to the DEFAULT_VSIG. + */ +static enum ice_status +ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) +{ + struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt; + u16 idx; + + idx = vsig & ICE_VSIG_IDX_M; + + if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS) + return ICE_ERR_PARAM; + + if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) + return ICE_ERR_DOES_NOT_EXIST; + + /* entry already in default VSIG, don't have to remove */ + if (idx == ICE_DEFAULT_VSIG) + return 0; + + vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; + if (!(*vsi_head)) + return ICE_ERR_CFG; + + vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi]; + vsi_cur = (*vsi_head); + + /* iterate the VSI list, skip over the entry to be removed */ + while (vsi_cur) { + if (vsi_tgt == vsi_cur) { + (*vsi_head) = vsi_cur->next_vsi; + break; + } + vsi_head = &vsi_cur->next_vsi; + vsi_cur = vsi_cur->next_vsi; + } + + /* verify if VSI was removed from group list */ + if (!vsi_cur) + return ICE_ERR_DOES_NOT_EXIST; + + vsi_cur->vsig = ICE_DEFAULT_VSIG; + vsi_cur->changed = 1; + vsi_cur->next_vsi = NULL; + + return 0; +} + +/** + * ice_vsig_add_mv_vsi - add or move a VSI to a VSI group + * @hw: pointer to the hardware structure + * @blk: HW block + * @vsi: VSI to move + * @vsig: destination VSI group + * + * This function will move or add the input VSI to the target VSIG. + * The function will find the original VSIG the VSI belongs to and + * move the entry to the DEFAULT_VSIG, update the original VSIG and + * then move entry to the new VSIG. + */ +static enum ice_status +ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) +{ + struct ice_vsig_vsi *tmp; + enum ice_status status; + u16 orig_vsig, idx; + + idx = vsig & ICE_VSIG_IDX_M; + + if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS) + return ICE_ERR_PARAM; + + /* if VSIG not in use and VSIG is not default type this VSIG + * doesn't exist. 
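A note on the value ice_vsig_alloc_val() above returns: a VSIG handle packs a 13-bit table index with the 3-bit PF number (the ICE_VSIG_VALUE macro defined in ice_flex_type.h later in this patch), which is why every lookup first strips the index back out with ICE_VSIG_IDX_M. Pack/unpack sketch with the same masks:

#include <stdint.h>

#define VSIG_IDX_M  0x1FFF                 /* bits [12:0]: table index */
#define PF_NUM_S    13
#define PF_NUM_M    (0x07 << PF_NUM_S)     /* bits [15:13]: PF number */

static uint16_t vsig_value(uint16_t idx, uint8_t pf_id)
{
        return (idx & VSIG_IDX_M) |
               (((uint16_t)pf_id << PF_NUM_S) & PF_NUM_M);
}

static uint16_t vsig_idx(uint16_t vsig)    /* recover the table index */
{
        return vsig & VSIG_IDX_M;
}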
+ */ + if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use && + vsig != ICE_DEFAULT_VSIG) + return ICE_ERR_DOES_NOT_EXIST; + + status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig); + if (status) + return status; + + /* no update required if vsigs match */ + if (orig_vsig == vsig) + return 0; + + if (orig_vsig != ICE_DEFAULT_VSIG) { + /* remove entry from orig_vsig and add to default VSIG */ + status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig); + if (status) + return status; + } + + if (idx == ICE_DEFAULT_VSIG) + return 0; + + /* Create VSI entry and add VSIG and prop_mask values */ + hw->blk[blk].xlt2.vsis[vsi].vsig = vsig; + hw->blk[blk].xlt2.vsis[vsi].changed = 1; + + /* Add new entry to the head of the VSIG list */ + tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; + hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = + &hw->blk[blk].xlt2.vsis[vsi]; + hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp; + hw->blk[blk].xlt2.t[vsi] = vsig; + + return 0; +} + +/* Block / table section IDs */ +static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = { + /* SWITCH */ + { ICE_SID_XLT1_SW, + ICE_SID_XLT2_SW, + ICE_SID_PROFID_TCAM_SW, + ICE_SID_PROFID_REDIR_SW, + ICE_SID_FLD_VEC_SW + }, + + /* ACL */ + { ICE_SID_XLT1_ACL, + ICE_SID_XLT2_ACL, + ICE_SID_PROFID_TCAM_ACL, + ICE_SID_PROFID_REDIR_ACL, + ICE_SID_FLD_VEC_ACL + }, + + /* FD */ + { ICE_SID_XLT1_FD, + ICE_SID_XLT2_FD, + ICE_SID_PROFID_TCAM_FD, + ICE_SID_PROFID_REDIR_FD, + ICE_SID_FLD_VEC_FD + }, + + /* RSS */ + { ICE_SID_XLT1_RSS, + ICE_SID_XLT2_RSS, + ICE_SID_PROFID_TCAM_RSS, + ICE_SID_PROFID_REDIR_RSS, + ICE_SID_FLD_VEC_RSS + }, + + /* PE */ + { ICE_SID_XLT1_PE, + ICE_SID_XLT2_PE, + ICE_SID_PROFID_TCAM_PE, + ICE_SID_PROFID_REDIR_PE, + ICE_SID_FLD_VEC_PE + } +}; + +/** + * ice_init_sw_xlt1_db - init software XLT1 database from HW tables + * @hw: pointer to the hardware structure + * @blk: the HW block to initialize + */ +static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk) +{ + u16 pt; + + for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) { + u8 ptg; + + ptg = hw->blk[blk].xlt1.t[pt]; + if (ptg != ICE_DEFAULT_PTG) { + ice_ptg_alloc_val(hw, blk, ptg); + ice_ptg_add_mv_ptype(hw, blk, pt, ptg); + } + } +} + +/** + * ice_init_sw_xlt2_db - init software XLT2 database from HW tables + * @hw: pointer to the hardware structure + * @blk: the HW block to initialize + */ +static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk) +{ + u16 vsi; + + for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) { + u16 vsig; + + vsig = hw->blk[blk].xlt2.t[vsi]; + if (vsig) { + ice_vsig_alloc_val(hw, blk, vsig); + ice_vsig_add_mv_vsi(hw, blk, vsi, vsig); + /* no changes at this time, since this has been + * initialized from the original package + */ + hw->blk[blk].xlt2.vsis[vsi].changed = 0; + } + } +} + +/** + * ice_init_sw_db - init software database from HW tables + * @hw: pointer to the hardware structure + */ +static void ice_init_sw_db(struct ice_hw *hw) +{ + u16 i; + + for (i = 0; i < ICE_BLK_COUNT; i++) { + ice_init_sw_xlt1_db(hw, (enum ice_block)i); + ice_init_sw_xlt2_db(hw, (enum ice_block)i); + } +} + +/** + * ice_fill_tbl - Reads content of a single table type into database + * @hw: pointer to the hardware structure + * @block_id: Block ID of the table to copy + * @sid: Section ID of the table to copy + * + * Will attempt to read the entire content of a given table of a single block + * into the driver database. We assume that the buffer will always + * be as large or larger than the data contained in the package. 
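ice_init_sw_xlt1_db() above shows the recovery step shared by both XLT tables: the package only supplies the flat entry-to-group array, and the driver re-derives the per-group membership lists from it. Generic sketch of that inversion (mark the group in use, then push the entry onto its list, exactly the alloc-by-value plus push-front sequence used above):

#include <stdbool.h>
#include <stdint.h>

#define DEFAULT_GROUP 0

struct member { struct member *next; };
struct group { struct member *first; bool in_use; };

/* t[i] maps entry i to its group; rebuild each group's member list */
static void rebuild_groups(const uint8_t *t, uint16_t count,
                           struct member *members, struct group *groups)
{
        uint16_t i;

        for (i = 0; i < count; i++) {
                uint8_t g = t[i];

                if (g == DEFAULT_GROUP)
                        continue;       /* default group keeps no list */
                groups[g].in_use = true;                /* alloc-by-value */
                members[i].next = groups[g].first;      /* push-front */
                groups[g].first = &members[i];
        }
}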
If + * this condition is not met, there is most likely an error in the package + * contents. + */ +static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid) +{ + u32 dst_len, sect_len, offset = 0; + struct ice_prof_redir_section *pr; + struct ice_prof_id_section *pid; + struct ice_xlt1_section *xlt1; + struct ice_xlt2_section *xlt2; + struct ice_sw_fv_section *es; + struct ice_pkg_enum state; + u8 *src, *dst; + void *sect; + + /* if the HW segment pointer is null then the first iteration of + * ice_pkg_enum_section() will fail. In this case the HW tables will + * not be filled and return success. + */ + if (!hw->seg) { + ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n"); + return; + } + + memset(&state, 0, sizeof(state)); + + sect = ice_pkg_enum_section(hw->seg, &state, sid); + + while (sect) { + switch (sid) { + case ICE_SID_XLT1_SW: + case ICE_SID_XLT1_FD: + case ICE_SID_XLT1_RSS: + case ICE_SID_XLT1_ACL: + case ICE_SID_XLT1_PE: + xlt1 = (struct ice_xlt1_section *)sect; + src = xlt1->value; + sect_len = le16_to_cpu(xlt1->count) * + sizeof(*hw->blk[block_id].xlt1.t); + dst = hw->blk[block_id].xlt1.t; + dst_len = hw->blk[block_id].xlt1.count * + sizeof(*hw->blk[block_id].xlt1.t); + break; + case ICE_SID_XLT2_SW: + case ICE_SID_XLT2_FD: + case ICE_SID_XLT2_RSS: + case ICE_SID_XLT2_ACL: + case ICE_SID_XLT2_PE: + xlt2 = (struct ice_xlt2_section *)sect; + src = (__force u8 *)xlt2->value; + sect_len = le16_to_cpu(xlt2->count) * + sizeof(*hw->blk[block_id].xlt2.t); + dst = (u8 *)hw->blk[block_id].xlt2.t; + dst_len = hw->blk[block_id].xlt2.count * + sizeof(*hw->blk[block_id].xlt2.t); + break; + case ICE_SID_PROFID_TCAM_SW: + case ICE_SID_PROFID_TCAM_FD: + case ICE_SID_PROFID_TCAM_RSS: + case ICE_SID_PROFID_TCAM_ACL: + case ICE_SID_PROFID_TCAM_PE: + pid = (struct ice_prof_id_section *)sect; + src = (u8 *)pid->entry; + sect_len = le16_to_cpu(pid->count) * + sizeof(*hw->blk[block_id].prof.t); + dst = (u8 *)hw->blk[block_id].prof.t; + dst_len = hw->blk[block_id].prof.count * + sizeof(*hw->blk[block_id].prof.t); + break; + case ICE_SID_PROFID_REDIR_SW: + case ICE_SID_PROFID_REDIR_FD: + case ICE_SID_PROFID_REDIR_RSS: + case ICE_SID_PROFID_REDIR_ACL: + case ICE_SID_PROFID_REDIR_PE: + pr = (struct ice_prof_redir_section *)sect; + src = pr->redir_value; + sect_len = le16_to_cpu(pr->count) * + sizeof(*hw->blk[block_id].prof_redir.t); + dst = hw->blk[block_id].prof_redir.t; + dst_len = hw->blk[block_id].prof_redir.count * + sizeof(*hw->blk[block_id].prof_redir.t); + break; + case ICE_SID_FLD_VEC_SW: + case ICE_SID_FLD_VEC_FD: + case ICE_SID_FLD_VEC_RSS: + case ICE_SID_FLD_VEC_ACL: + case ICE_SID_FLD_VEC_PE: + es = (struct ice_sw_fv_section *)sect; + src = (u8 *)es->fv; + sect_len = (u32)(le16_to_cpu(es->count) * + hw->blk[block_id].es.fvw) * + sizeof(*hw->blk[block_id].es.t); + dst = (u8 *)hw->blk[block_id].es.t; + dst_len = (u32)(hw->blk[block_id].es.count * + hw->blk[block_id].es.fvw) * + sizeof(*hw->blk[block_id].es.t); + break; + default: + return; + } + + /* if the section offset exceeds destination length, terminate + * table fill. + */ + if (offset > dst_len) + return; + + /* if the sum of section size and offset exceed destination size + * then we are out of bounds of the HW table size for that PF. + * Changing section length to fill the remaining table space + * of that PF. 
+ */ + if ((offset + sect_len) > dst_len) + sect_len = dst_len - offset; + + memcpy(dst + offset, src, sect_len); + offset += sect_len; + sect = ice_pkg_enum_section(NULL, &state, sid); + } +} + +/** + * ice_fill_blk_tbls - Read package context for tables + * @hw: pointer to the hardware structure + * + * Reads the current package contents and populates the driver + * database with the data iteratively for all advanced feature + * blocks. Assume that the HW tables have been allocated. + */ +void ice_fill_blk_tbls(struct ice_hw *hw) +{ + u8 i; + + for (i = 0; i < ICE_BLK_COUNT; i++) { + enum ice_block blk_id = (enum ice_block)i; + + ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid); + ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid); + ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid); + ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid); + ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid); + } + + ice_init_sw_db(hw); +} + +/** + * ice_free_hw_tbls - free hardware table memory + * @hw: pointer to the hardware structure + */ +void ice_free_hw_tbls(struct ice_hw *hw) +{ + u8 i; + + for (i = 0; i < ICE_BLK_COUNT; i++) { + hw->blk[i].is_list_init = false; + + devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptypes); + devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptg_tbl); + devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.t); + devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.t); + devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsig_tbl); + devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsis); + devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof.t); + devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_redir.t); + devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.t); + devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.ref_count); + devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written); + } + + memset(hw->blk, 0, sizeof(hw->blk)); +} + +/** + * ice_clear_hw_tbls - clear HW tables and flow profiles + * @hw: pointer to the hardware structure + */ +void ice_clear_hw_tbls(struct ice_hw *hw) +{ + u8 i; + + for (i = 0; i < ICE_BLK_COUNT; i++) { + struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir; + struct ice_prof_tcam *prof = &hw->blk[i].prof; + struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1; + struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2; + struct ice_es *es = &hw->blk[i].es; + + memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes)); + memset(xlt1->ptg_tbl, 0, + ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl)); + memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t)); + + memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis)); + memset(xlt2->vsig_tbl, 0, + xlt2->count * sizeof(*xlt2->vsig_tbl)); + memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t)); + + memset(prof->t, 0, prof->count * sizeof(*prof->t)); + memset(prof_redir->t, 0, + prof_redir->count * sizeof(*prof_redir->t)); + + memset(es->t, 0, es->count * sizeof(*es->t)); + memset(es->ref_count, 0, es->count * sizeof(*es->ref_count)); + memset(es->written, 0, es->count * sizeof(*es->written)); + } +} + +/** + * ice_init_hw_tbls - init hardware table memory + * @hw: pointer to the hardware structure + */ +enum ice_status ice_init_hw_tbls(struct ice_hw *hw) +{ + u8 i; + + for (i = 0; i < ICE_BLK_COUNT; i++) { + struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir; + struct ice_prof_tcam *prof = &hw->blk[i].prof; + struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1; + struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2; + struct ice_es *es = &hw->blk[i].es; + u16 j; + + if (hw->blk[i].is_list_init) + continue; + + hw->blk[i].is_list_init = true; + + hw->blk[i].overwrite = 
blk_sizes[i].overwrite; + es->reverse = blk_sizes[i].reverse; + + xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF]; + xlt1->count = blk_sizes[i].xlt1; + + xlt1->ptypes = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count, + sizeof(*xlt1->ptypes), GFP_KERNEL); + + if (!xlt1->ptypes) + goto err; + + xlt1->ptg_tbl = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_PTGS, + sizeof(*xlt1->ptg_tbl), + GFP_KERNEL); + + if (!xlt1->ptg_tbl) + goto err; + + xlt1->t = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count, + sizeof(*xlt1->t), GFP_KERNEL); + if (!xlt1->t) + goto err; + + xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF]; + xlt2->count = blk_sizes[i].xlt2; + + xlt2->vsis = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count, + sizeof(*xlt2->vsis), GFP_KERNEL); + + if (!xlt2->vsis) + goto err; + + xlt2->vsig_tbl = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count, + sizeof(*xlt2->vsig_tbl), + GFP_KERNEL); + if (!xlt2->vsig_tbl) + goto err; + + for (j = 0; j < xlt2->count; j++) + INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst); + + xlt2->t = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count, + sizeof(*xlt2->t), GFP_KERNEL); + if (!xlt2->t) + goto err; + + prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF]; + prof->count = blk_sizes[i].prof_tcam; + prof->max_prof_id = blk_sizes[i].prof_id; + prof->cdid_bits = blk_sizes[i].prof_cdid_bits; + prof->t = devm_kcalloc(ice_hw_to_dev(hw), prof->count, + sizeof(*prof->t), GFP_KERNEL); + + if (!prof->t) + goto err; + + prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF]; + prof_redir->count = blk_sizes[i].prof_redir; + prof_redir->t = devm_kcalloc(ice_hw_to_dev(hw), + prof_redir->count, + sizeof(*prof_redir->t), + GFP_KERNEL); + + if (!prof_redir->t) + goto err; + + es->sid = ice_blk_sids[i][ICE_SID_ES_OFF]; + es->count = blk_sizes[i].es; + es->fvw = blk_sizes[i].fvw; + es->t = devm_kcalloc(ice_hw_to_dev(hw), + (u32)(es->count * es->fvw), + sizeof(*es->t), GFP_KERNEL); + if (!es->t) + goto err; + + es->ref_count = devm_kcalloc(ice_hw_to_dev(hw), es->count, + sizeof(*es->ref_count), + GFP_KERNEL); + + es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count, + sizeof(*es->written), GFP_KERNEL); + if (!es->ref_count) + goto err; + } + return 0; + +err: + ice_free_hw_tbls(hw); + return ICE_ERR_NO_MEMORY; +} diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h new file mode 100644 index 000000000000..37eb282742d1 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2019, Intel Corporation. 
*/ + +#ifndef _ICE_FLEX_PIPE_H_ +#define _ICE_FLEX_PIPE_H_ + +#include "ice_type.h" + +/* Package minimal version supported */ +#define ICE_PKG_SUPP_VER_MAJ 1 +#define ICE_PKG_SUPP_VER_MNR 3 + +/* Package format version */ +#define ICE_PKG_FMT_VER_MAJ 1 +#define ICE_PKG_FMT_VER_MNR 0 +#define ICE_PKG_FMT_VER_UPD 0 +#define ICE_PKG_FMT_VER_DFT 0 + +#define ICE_PKG_CNT 4 + +enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len); +enum ice_status +ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len); +enum ice_status ice_init_hw_tbls(struct ice_hw *hw); +void ice_free_seg(struct ice_hw *hw); +void ice_fill_blk_tbls(struct ice_hw *hw); +void ice_clear_hw_tbls(struct ice_hw *hw); +void ice_free_hw_tbls(struct ice_hw *hw); +#endif /* _ICE_FLEX_PIPE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h new file mode 100644 index 000000000000..5d5a7eaffa30 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h @@ -0,0 +1,374 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2019, Intel Corporation. */ + +#ifndef _ICE_FLEX_TYPE_H_ +#define _ICE_FLEX_TYPE_H_ +/* Extraction Sequence (Field Vector) Table */ +struct ice_fv_word { + u8 prot_id; + u16 off; /* Offset within the protocol header */ + u8 resvrd; +} __packed; + +#define ICE_MAX_FV_WORDS 48 +struct ice_fv { + struct ice_fv_word ew[ICE_MAX_FV_WORDS]; +}; + +/* Package and segment headers and tables */ +struct ice_pkg_hdr { + struct ice_pkg_ver format_ver; + __le32 seg_count; + __le32 seg_offset[1]; +}; + +/* generic segment */ +struct ice_generic_seg_hdr { +#define SEGMENT_TYPE_METADATA 0x00000001 +#define SEGMENT_TYPE_ICE 0x00000010 + __le32 seg_type; + struct ice_pkg_ver seg_ver; + __le32 seg_size; + char seg_name[ICE_PKG_NAME_SIZE]; +}; + +/* ice specific segment */ + +union ice_device_id { + struct { + __le16 device_id; + __le16 vendor_id; + } dev_vend_id; + __le32 id; +}; + +struct ice_device_id_entry { + union ice_device_id device; + union ice_device_id sub_device; +}; + +struct ice_seg { + struct ice_generic_seg_hdr hdr; + __le32 device_table_count; + struct ice_device_id_entry device_table[1]; +}; + +struct ice_nvm_table { + __le32 table_count; + __le32 vers[1]; +}; + +struct ice_buf { +#define ICE_PKG_BUF_SIZE 4096 + u8 buf[ICE_PKG_BUF_SIZE]; +}; + +struct ice_buf_table { + __le32 buf_count; + struct ice_buf buf_array[1]; +}; + +/* global metadata specific segment */ +struct ice_global_metadata_seg { + struct ice_generic_seg_hdr hdr; + struct ice_pkg_ver pkg_ver; + __le32 track_id; + char pkg_name[ICE_PKG_NAME_SIZE]; +}; + +#define ICE_MIN_S_OFF 12 +#define ICE_MAX_S_OFF 4095 +#define ICE_MIN_S_SZ 1 +#define ICE_MAX_S_SZ 4084 + +/* section information */ +struct ice_section_entry { + __le32 type; + __le16 offset; + __le16 size; +}; + +#define ICE_MIN_S_COUNT 1 +#define ICE_MAX_S_COUNT 511 +#define ICE_MIN_S_DATA_END 12 +#define ICE_MAX_S_DATA_END 4096 + +#define ICE_METADATA_BUF 0x80000000 + +struct ice_buf_hdr { + __le16 section_count; + __le16 data_end; + struct ice_section_entry section_entry[1]; +}; + +#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \ + sizeof(struct ice_buf_hdr) - (hd_sz)) / (ent_sz)) + +/* ice package section IDs */ +#define ICE_SID_XLT1_SW 12 +#define ICE_SID_XLT2_SW 13 +#define ICE_SID_PROFID_TCAM_SW 14 +#define ICE_SID_PROFID_REDIR_SW 15 +#define ICE_SID_FLD_VEC_SW 16 + +#define ICE_SID_XLT1_ACL 22 +#define ICE_SID_XLT2_ACL 23 +#define ICE_SID_PROFID_TCAM_ACL 24 +#define 
ICE_SID_PROFID_REDIR_ACL 25 +#define ICE_SID_FLD_VEC_ACL 26 + +#define ICE_SID_XLT1_FD 32 +#define ICE_SID_XLT2_FD 33 +#define ICE_SID_PROFID_TCAM_FD 34 +#define ICE_SID_PROFID_REDIR_FD 35 +#define ICE_SID_FLD_VEC_FD 36 + +#define ICE_SID_XLT1_RSS 42 +#define ICE_SID_XLT2_RSS 43 +#define ICE_SID_PROFID_TCAM_RSS 44 +#define ICE_SID_PROFID_REDIR_RSS 45 +#define ICE_SID_FLD_VEC_RSS 46 + +#define ICE_SID_RXPARSER_BOOST_TCAM 56 + +#define ICE_SID_XLT1_PE 82 +#define ICE_SID_XLT2_PE 83 +#define ICE_SID_PROFID_TCAM_PE 84 +#define ICE_SID_PROFID_REDIR_PE 85 +#define ICE_SID_FLD_VEC_PE 86 + +/* Label Metadata section IDs */ +#define ICE_SID_LBL_FIRST 0x80000010 +#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018 +/* The following define MUST be updated to reflect the last label section ID */ +#define ICE_SID_LBL_LAST 0x80000038 + +enum ice_block { + ICE_BLK_SW = 0, + ICE_BLK_ACL, + ICE_BLK_FD, + ICE_BLK_RSS, + ICE_BLK_PE, + ICE_BLK_COUNT +}; + +/* package labels */ +struct ice_label { + __le16 value; +#define ICE_PKG_LABEL_SIZE 64 + char name[ICE_PKG_LABEL_SIZE]; +}; + +struct ice_label_section { + __le16 count; + struct ice_label label[1]; +}; + +#define ICE_MAX_LABELS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \ + sizeof(struct ice_label_section) - sizeof(struct ice_label), \ + sizeof(struct ice_label)) + +struct ice_sw_fv_section { + __le16 count; + __le16 base_offset; + struct ice_fv fv[1]; +}; + +/* The BOOST TCAM stores the match packet header in reverse order, meaning + * the fields are reversed; in addition, this means that the normally big endian + * fields of the packet are now little endian. + */ +struct ice_boost_key_value { +#define ICE_BOOST_REMAINING_HV_KEY 15 + u8 remaining_hv_key[ICE_BOOST_REMAINING_HV_KEY]; + __le16 hv_dst_port_key; + __le16 hv_src_port_key; + u8 tcam_search_key; +} __packed; + +struct ice_boost_key { + struct ice_boost_key_value key; + struct ice_boost_key_value key2; +}; + +/* package Boost TCAM entry */ +struct ice_boost_tcam_entry { + __le16 addr; + __le16 reserved; + /* break up the 40 bytes of key into different fields */ + struct ice_boost_key key; + u8 boost_hit_index_group; + /* The following contains bitfields which are not on byte boundaries. + * These fields are currently unused by driver software. 
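The ICE_MAX_ENTRIES_IN_BUF() arithmetic behind ICE_MAX_LABELS_IN_BUF above is worth spelling out once with concrete numbers: the 4096-byte buffer loses the 12-byte ice_buf_hdr (which already embeds one 8-byte section entry) plus the section's own fixed header, and the remainder is divided by the per-entry size. For the label section, assuming no padding beyond what the 2-byte fields force:

#include <stdio.h>

int main(void)
{
        unsigned buf_size = 4096;  /* ICE_PKG_BUF_SIZE */
        unsigned buf_hdr = 12;     /* 2 + 2 + one 8-byte section entry */
        unsigned label = 2 + 64;   /* __le16 value + 64-byte name */
        unsigned sect_hdr = 2;     /* sizeof(label_section) - one label */
        unsigned max_labels = (buf_size - buf_hdr - sect_hdr) / label;

        printf("max labels per buffer: %u\n", max_labels);  /* 61 */
        return 0;
}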
+ */ +#define ICE_BOOST_BIT_FIELDS 43 + u8 bit_fields[ICE_BOOST_BIT_FIELDS]; +}; + +struct ice_boost_tcam_section { + __le16 count; + __le16 reserved; + struct ice_boost_tcam_entry tcam[1]; +}; + +#define ICE_MAX_BST_TCAMS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \ + sizeof(struct ice_boost_tcam_section) - \ + sizeof(struct ice_boost_tcam_entry), \ + sizeof(struct ice_boost_tcam_entry)) + +struct ice_xlt1_section { + __le16 count; + __le16 offset; + u8 value[1]; +} __packed; + +struct ice_xlt2_section { + __le16 count; + __le16 offset; + __le16 value[1]; +}; + +struct ice_prof_redir_section { + __le16 count; + __le16 offset; + u8 redir_value[1]; +}; + +struct ice_pkg_enum { + struct ice_buf_table *buf_table; + u32 buf_idx; + + u32 type; + struct ice_buf_hdr *buf; + u32 sect_idx; + void *sect; + u32 sect_type; + + u32 entry_idx; + void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset); +}; + +struct ice_es { + u32 sid; + u16 count; + u16 fvw; + u16 *ref_count; + struct list_head prof_map; + struct ice_fv_word *t; + struct mutex prof_map_lock; /* protect access to profiles list */ + u8 *written; + u8 reverse; /* set to true to reverse FV order */ +}; + +/* PTYPE Group management */ + +/* Note: XLT1 table takes 13-bit as input, and results in an 8-bit packet type + * group (PTG) ID as output. + * + * Note: PTG 0 is the default packet type group and it is assumed that all PTYPE + * are a part of this group until moved to a new PTG. + */ +#define ICE_DEFAULT_PTG 0 + +struct ice_ptg_entry { + struct ice_ptg_ptype *first_ptype; + u8 in_use; +}; + +struct ice_ptg_ptype { + struct ice_ptg_ptype *next_ptype; + u8 ptg; +}; + +struct ice_vsig_entry { + struct list_head prop_lst; + struct ice_vsig_vsi *first_vsi; + u8 in_use; +}; + +struct ice_vsig_vsi { + struct ice_vsig_vsi *next_vsi; + u32 prop_mask; + u16 changed; + u16 vsig; +}; + +#define ICE_XLT1_CNT 1024 +#define ICE_MAX_PTGS 256 + +/* XLT1 Table */ +struct ice_xlt1 { + struct ice_ptg_entry *ptg_tbl; + struct ice_ptg_ptype *ptypes; + u8 *t; + u32 sid; + u16 count; +}; + +#define ICE_XLT2_CNT 768 +#define ICE_MAX_VSIGS 768 + +/* VSIG bit layout: + * [0:12]: incremental VSIG index 1 to ICE_MAX_VSIGS + * [13:15]: PF number of device + */ +#define ICE_VSIG_IDX_M (0x1FFF) +#define ICE_PF_NUM_S 13 +#define ICE_PF_NUM_M (0x07 << ICE_PF_NUM_S) +#define ICE_VSIG_VALUE(vsig, pf_id) \ + (u16)((((u16)(vsig)) & ICE_VSIG_IDX_M) | \ + (((u16)(pf_id) << ICE_PF_NUM_S) & ICE_PF_NUM_M)) +#define ICE_DEFAULT_VSIG 0 + +/* XLT2 Table */ +struct ice_xlt2 { + struct ice_vsig_entry *vsig_tbl; + struct ice_vsig_vsi *vsis; + u16 *t; + u32 sid; + u16 count; +}; + +/* Keys are made up of two values, each one-half the size of the key. 
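A brief aside on why a TCAM key is stored as two half-size values: the common scheme pairs each data bit with a second bit (often the inverse) so every position can encode match-0, match-1, or don't-care. The exact per-bit convention is not spelled out in this patch, so the sketch below only illustrates the usual value/inverse encoding as an assumption:

#include <stdint.h>

/* One byte of a value/inverse TCAM pair: where care_mask is 0, both
 * halves are 0 and the bit matches anything (don't-care). */
static void tcam_encode_byte(uint8_t value, uint8_t care_mask,
                             uint8_t *key, uint8_t *key_inv)
{
        *key = value & care_mask;
        *key_inv = (uint8_t)~value & care_mask;
}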
+ * For TCAM, the entire key is 80 bits wide (or 2, 40-bit wide values) + */ +#define ICE_TCAM_KEY_VAL_SZ 5 +#define ICE_TCAM_KEY_SZ (2 * ICE_TCAM_KEY_VAL_SZ) + +struct ice_prof_tcam_entry { + __le16 addr; + u8 key[ICE_TCAM_KEY_SZ]; + u8 prof_id; +} __packed; + +struct ice_prof_id_section { + __le16 count; + struct ice_prof_tcam_entry entry[1]; +} __packed; + +struct ice_prof_tcam { + u32 sid; + u16 count; + u16 max_prof_id; + struct ice_prof_tcam_entry *t; + u8 cdid_bits; /* # CDID bits to use in key, 0, 2, 4, or 8 */ +}; + +struct ice_prof_redir { + u8 *t; + u32 sid; + u16 count; +}; + +/* Tables per block */ +struct ice_blk_info { + struct ice_xlt1 xlt1; + struct ice_xlt2 xlt2; + struct ice_prof_tcam prof; + struct ice_prof_redir prof_redir; + struct ice_es es; + u8 overwrite; /* set to true to allow overwrite of table entries */ + u8 is_list_init; +}; + +#endif /* _ICE_FLEX_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 6c5ce05742b1..152fbd556e9b 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -55,6 +55,8 @@ #define PRTDCB_GENS 0x00083020 #define PRTDCB_GENS_DCBX_STATUS_S 0 #define PRTDCB_GENS_DCBX_STATUS_M ICE_M(0x7, 0) +#define GL_PREEXT_L2_PMASK0(_i) (0x0020F0FC + ((_i) * 4)) +#define GL_PREEXT_L2_PMASK1(_i) (0x0020F108 + ((_i) * 4)) #define GLFLXP_RXDID_FLAGS(_i, _j) (0x0045D000 + ((_i) * 4 + (_j) * 256)) #define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S 0 #define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M ICE_M(0x3F, 0) @@ -127,8 +129,11 @@ #define GLINT_DYN_CTL_CLEARPBA_M BIT(1) #define GLINT_DYN_CTL_SWINT_TRIG_M BIT(2) #define GLINT_DYN_CTL_ITR_INDX_S 3 +#define GLINT_DYN_CTL_ITR_INDX_M ICE_M(0x3, 3) #define GLINT_DYN_CTL_INTERVAL_S 5 +#define GLINT_DYN_CTL_INTERVAL_M ICE_M(0xFFF, 5) #define GLINT_DYN_CTL_SW_ITR_INDX_M ICE_M(0x3, 25) +#define GLINT_DYN_CTL_WB_ON_ITR_M BIT(30) #define GLINT_DYN_CTL_INTENA_MSK_M BIT(31) #define GLINT_ITR(_i, _INT) (0x00154000 + ((_i) * 8192 + (_INT) * 4)) #define GLINT_RATE(_INT) (0x0015A000 + ((_INT) * 4)) @@ -281,14 +286,10 @@ #define GL_PWR_MODE_CTL 0x000B820C #define GL_PWR_MODE_CTL_CAR_MAX_BW_S 30 #define GL_PWR_MODE_CTL_CAR_MAX_BW_M ICE_M(0x3, 30) -#define GLPRT_BPRCH(_i) (0x00381384 + ((_i) * 8)) #define GLPRT_BPRCL(_i) (0x00381380 + ((_i) * 8)) -#define GLPRT_BPTCH(_i) (0x00381244 + ((_i) * 8)) #define GLPRT_BPTCL(_i) (0x00381240 + ((_i) * 8)) #define GLPRT_CRCERRS(_i) (0x00380100 + ((_i) * 8)) -#define GLPRT_GORCH(_i) (0x00380004 + ((_i) * 8)) #define GLPRT_GORCL(_i) (0x00380000 + ((_i) * 8)) -#define GLPRT_GOTCH(_i) (0x00380B44 + ((_i) * 8)) #define GLPRT_GOTCL(_i) (0x00380B40 + ((_i) * 8)) #define GLPRT_ILLERRC(_i) (0x003801C0 + ((_i) * 8)) #define GLPRT_LXOFFRXC(_i) (0x003802C0 + ((_i) * 8)) @@ -296,38 +297,22 @@ #define GLPRT_LXONRXC(_i) (0x00380280 + ((_i) * 8)) #define GLPRT_LXONTXC(_i) (0x00381140 + ((_i) * 8)) #define GLPRT_MLFC(_i) (0x00380040 + ((_i) * 8)) -#define GLPRT_MPRCH(_i) (0x00381344 + ((_i) * 8)) #define GLPRT_MPRCL(_i) (0x00381340 + ((_i) * 8)) -#define GLPRT_MPTCH(_i) (0x00381204 + ((_i) * 8)) #define GLPRT_MPTCL(_i) (0x00381200 + ((_i) * 8)) #define GLPRT_MRFC(_i) (0x00380080 + ((_i) * 8)) -#define GLPRT_PRC1023H(_i) (0x00380A04 + ((_i) * 8)) #define GLPRT_PRC1023L(_i) (0x00380A00 + ((_i) * 8)) -#define GLPRT_PRC127H(_i) (0x00380944 + ((_i) * 8)) #define GLPRT_PRC127L(_i) (0x00380940 + ((_i) * 8)) -#define GLPRT_PRC1522H(_i) (0x00380A44 + ((_i) * 8)) #define GLPRT_PRC1522L(_i) 
(0x00380A40 + ((_i) * 8)) -#define GLPRT_PRC255H(_i) (0x00380984 + ((_i) * 8)) #define GLPRT_PRC255L(_i) (0x00380980 + ((_i) * 8)) -#define GLPRT_PRC511H(_i) (0x003809C4 + ((_i) * 8)) #define GLPRT_PRC511L(_i) (0x003809C0 + ((_i) * 8)) -#define GLPRT_PRC64H(_i) (0x00380904 + ((_i) * 8)) #define GLPRT_PRC64L(_i) (0x00380900 + ((_i) * 8)) -#define GLPRT_PRC9522H(_i) (0x00380A84 + ((_i) * 8)) #define GLPRT_PRC9522L(_i) (0x00380A80 + ((_i) * 8)) -#define GLPRT_PTC1023H(_i) (0x00380C84 + ((_i) * 8)) #define GLPRT_PTC1023L(_i) (0x00380C80 + ((_i) * 8)) -#define GLPRT_PTC127H(_i) (0x00380BC4 + ((_i) * 8)) #define GLPRT_PTC127L(_i) (0x00380BC0 + ((_i) * 8)) -#define GLPRT_PTC1522H(_i) (0x00380CC4 + ((_i) * 8)) #define GLPRT_PTC1522L(_i) (0x00380CC0 + ((_i) * 8)) -#define GLPRT_PTC255H(_i) (0x00380C04 + ((_i) * 8)) #define GLPRT_PTC255L(_i) (0x00380C00 + ((_i) * 8)) -#define GLPRT_PTC511H(_i) (0x00380C44 + ((_i) * 8)) #define GLPRT_PTC511L(_i) (0x00380C40 + ((_i) * 8)) -#define GLPRT_PTC64H(_i) (0x00380B84 + ((_i) * 8)) #define GLPRT_PTC64L(_i) (0x00380B80 + ((_i) * 8)) -#define GLPRT_PTC9522H(_i) (0x00380D04 + ((_i) * 8)) #define GLPRT_PTC9522L(_i) (0x00380D00 + ((_i) * 8)) #define GLPRT_PXOFFRXC(_i, _j) (0x00380500 + ((_i) * 8 + (_j) * 64)) #define GLPRT_PXOFFTXC(_i, _j) (0x00380F40 + ((_i) * 8 + (_j) * 64)) @@ -340,32 +325,23 @@ #define GLPRT_RUC(_i) (0x00380200 + ((_i) * 8)) #define GLPRT_RXON2OFFCNT(_i, _j) (0x00380700 + ((_i) * 8 + (_j) * 64)) #define GLPRT_TDOLD(_i) (0x00381280 + ((_i) * 8)) -#define GLPRT_UPRCH(_i) (0x00381304 + ((_i) * 8)) #define GLPRT_UPRCL(_i) (0x00381300 + ((_i) * 8)) -#define GLPRT_UPTCH(_i) (0x003811C4 + ((_i) * 8)) #define GLPRT_UPTCL(_i) (0x003811C0 + ((_i) * 8)) -#define GLV_BPRCH(_i) (0x003B6004 + ((_i) * 8)) #define GLV_BPRCL(_i) (0x003B6000 + ((_i) * 8)) -#define GLV_BPTCH(_i) (0x0030E004 + ((_i) * 8)) #define GLV_BPTCL(_i) (0x0030E000 + ((_i) * 8)) -#define GLV_GORCH(_i) (0x003B0004 + ((_i) * 8)) #define GLV_GORCL(_i) (0x003B0000 + ((_i) * 8)) -#define GLV_GOTCH(_i) (0x00300004 + ((_i) * 8)) #define GLV_GOTCL(_i) (0x00300000 + ((_i) * 8)) -#define GLV_MPRCH(_i) (0x003B4004 + ((_i) * 8)) #define GLV_MPRCL(_i) (0x003B4000 + ((_i) * 8)) -#define GLV_MPTCH(_i) (0x0030C004 + ((_i) * 8)) #define GLV_MPTCL(_i) (0x0030C000 + ((_i) * 8)) #define GLV_RDPC(_i) (0x00294C04 + ((_i) * 4)) #define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4)) -#define GLV_UPRCH(_i) (0x003B2004 + ((_i) * 8)) #define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8)) -#define GLV_UPTCH(_i) (0x0030A004 + ((_i) * 8)) #define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8)) #define PF_VT_PFALLOC_HIF 0x0009DD80 #define VSIQF_HKEY_MAX_INDEX 12 #define VSIQF_HLUT_MAX_INDEX 15 #define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4)) #define VFINT_DYN_CTLN_CLEARPBA_M BIT(1) +#define PRTRPB_RDPC 0x000AC260 #endif /* _ICE_HW_AUTOGEN_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index 510a8c900e61..2aac8f13daeb 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -290,6 +290,7 @@ struct ice_rlan_ctx { u8 tphdata_ena; u8 tphhead_ena; u16 lrxqthresh; /* bigger than needed, see above for reason */ + u8 prefena; /* NOTE: normally must be set to 1 at init */ }; struct ice_ctx_ele { @@ -427,6 +428,7 @@ struct ice_tlan_ctx { #define ICE_TLAN_CTX_VMVF_TYPE_PF 2 u16 src_vsi; u8 tsyn_ena; + u8 internal_usage_flag; u8 alt_vlan; u16 cpuid; /* bigger than needed, see above for reason */ u8 wb_mode; diff --git 
a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index a19f5920733b..cc755382df25 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -191,41 +191,58 @@ static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena) } /** - * ice_vsi_ctrl_rx_rings - Start or stop a VSI's Rx rings + * ice_vsi_ctrl_rx_ring - Start or stop a VSI's Rx ring * @vsi: the VSI being configured * @ena: start or stop the Rx rings + * @rxq_idx: Rx queue index */ -static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena) +#ifndef CONFIG_PCI_IOV +static +#endif /* !CONFIG_PCI_IOV */ +int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx) { + int pf_q = vsi->rxq_map[rxq_idx]; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; - int i, ret = 0; + int ret = 0; + u32 rx_reg; - for (i = 0; i < vsi->num_rxq; i++) { - int pf_q = vsi->rxq_map[i]; - u32 rx_reg; + rx_reg = rd32(hw, QRX_CTRL(pf_q)); - rx_reg = rd32(hw, QRX_CTRL(pf_q)); + /* Skip if the queue is already in the requested state */ + if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M)) + return 0; - /* Skip if the queue is already in the requested state */ - if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M)) - continue; + /* turn on/off the queue */ + if (ena) + rx_reg |= QRX_CTRL_QENA_REQ_M; + else + rx_reg &= ~QRX_CTRL_QENA_REQ_M; + wr32(hw, QRX_CTRL(pf_q), rx_reg); - /* turn on/off the queue */ - if (ena) - rx_reg |= QRX_CTRL_QENA_REQ_M; - else - rx_reg &= ~QRX_CTRL_QENA_REQ_M; - wr32(hw, QRX_CTRL(pf_q), rx_reg); - - /* wait for the change to finish */ - ret = ice_pf_rxq_wait(pf, pf_q, ena); - if (ret) { - dev_err(&pf->pdev->dev, - "VSI idx %d Rx ring %d %sable timeout\n", - vsi->idx, pf_q, (ena ? "en" : "dis")); + /* wait for the change to finish */ + ret = ice_pf_rxq_wait(pf, pf_q, ena); + if (ret) + dev_err(&pf->pdev->dev, + "VSI idx %d Rx ring %d %sable timeout\n", + vsi->idx, pf_q, (ena ? "en" : "dis")); + + return ret; +} + +/** + * ice_vsi_ctrl_rx_rings - Start or stop a VSI's Rx rings + * @vsi: the VSI being configured + * @ena: start or stop the Rx rings + */ +static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena) +{ + int i, ret = 0; + + for (i = 0; i < vsi->num_rxq; i++) { + ret = ice_vsi_ctrl_rx_ring(vsi, ena, i); + if (ret) break; - } } return ret; @@ -246,12 +263,24 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi) vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq, sizeof(*vsi->tx_rings), GFP_KERNEL); if (!vsi->tx_rings) - goto err_txrings; + return -ENOMEM; vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq, sizeof(*vsi->rx_rings), GFP_KERNEL); if (!vsi->rx_rings) - goto err_rxrings; + goto err_rings; + + vsi->txq_map = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq, + sizeof(*vsi->txq_map), GFP_KERNEL); + + if (!vsi->txq_map) + goto err_txq_map; + + vsi->rxq_map = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq, + sizeof(*vsi->rxq_map), GFP_KERNEL); + if (!vsi->rxq_map) + goto err_rxq_map; + /* There is no need to allocate q_vectors for a loopback VSI. 
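The single-ring helper carved out above (so the SR-IOV paths can toggle one queue instead of the whole VSI) boils down to a request/status handshake on QRX_CTRL: skip the write if the status bit already matches, flip the request bit, then poll for the status to follow. Register-free sketch of that state logic, with illustrative bit positions:

#include <stdbool.h>
#include <stdint.h>

#define QENA_REQ_M  (1u << 0)   /* software request bit */
#define QENA_STAT_M (1u << 2)   /* hardware status bit */

/* Compute the new register value; returns false if no write is needed.
 * The driver then writes the value back and polls QENA_STAT_M with a
 * timeout (ice_pf_rxq_wait). */
static bool qrx_ctrl_update(uint32_t *reg, bool ena)
{
        if (ena == !!(*reg & QENA_STAT_M))
                return false;           /* already in the requested state */
        if (ena)
                *reg |= QENA_REQ_M;
        else
                *reg &= ~QENA_REQ_M;
        return true;
}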
*/ if (vsi->type == ICE_VSI_LB) @@ -266,10 +295,13 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi) return 0; err_vectors: + devm_kfree(&pf->pdev->dev, vsi->rxq_map); +err_rxq_map: + devm_kfree(&pf->pdev->dev, vsi->txq_map); +err_txq_map: devm_kfree(&pf->pdev->dev, vsi->rx_rings); -err_rxrings: +err_rings: devm_kfree(&pf->pdev->dev, vsi->tx_rings); -err_txrings: return -ENOMEM; } @@ -311,9 +343,21 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id) switch (vsi->type) { case ICE_VSI_PF: - vsi->alloc_txq = pf->num_lan_tx; - vsi->alloc_rxq = pf->num_lan_rx; - vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx); + vsi->alloc_txq = min_t(int, ice_get_avail_txq_count(pf), + num_online_cpus()); + + pf->num_lan_tx = vsi->alloc_txq; + + /* only 1 Rx queue unless RSS is enabled */ + if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) + vsi->alloc_rxq = 1; + else + vsi->alloc_rxq = min_t(int, ice_get_avail_rxq_count(pf), + num_online_cpus()); + + pf->num_lan_rx = vsi->alloc_rxq; + + vsi->num_q_vectors = max_t(int, vsi->alloc_rxq, vsi->alloc_txq); break; case ICE_VSI_VF: vf = &pf->vf[vsi->vf_id]; @@ -416,6 +460,14 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi) devm_kfree(&pf->pdev->dev, vsi->rx_rings); vsi->rx_rings = NULL; } + if (vsi->txq_map) { + devm_kfree(&pf->pdev->dev, vsi->txq_map); + vsi->txq_map = NULL; + } + if (vsi->rxq_map) { + devm_kfree(&pf->pdev->dev, vsi->rxq_map); + vsi->rxq_map = NULL; + } } /** @@ -508,8 +560,8 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id) vsi->type = type; vsi->back = pf; set_bit(__ICE_DOWN, vsi->state); + vsi->idx = pf->next_vsi; - vsi->work_lmt = ICE_DFLT_IRQ_WORK; if (type == ICE_VSI_VF) ice_vsi_set_num_qs(vsi, vf_id); @@ -647,7 +699,7 @@ static int ice_vsi_get_qs(struct ice_vsi *vsi) struct ice_qs_cfg tx_qs_cfg = { .qs_mutex = &pf->avail_q_mutex, .pf_map = pf->avail_txqs, - .pf_map_size = ICE_MAX_TXQS, + .pf_map_size = pf->max_pf_txqs, .q_count = vsi->alloc_txq, .scatter_count = ICE_MAX_SCATTER_TXQS, .vsi_map = vsi->txq_map, @@ -657,7 +709,7 @@ static int ice_vsi_get_qs(struct ice_vsi *vsi) struct ice_qs_cfg rx_qs_cfg = { .qs_mutex = &pf->avail_q_mutex, .pf_map = pf->avail_rxqs, - .pf_map_size = ICE_MAX_RXQS, + .pf_map_size = pf->max_pf_rxqs, .q_count = vsi->alloc_rxq, .scatter_count = ICE_MAX_SCATTER_RXQS, .vsi_map = vsi->rxq_map, @@ -701,6 +753,17 @@ void ice_vsi_put_qs(struct ice_vsi *vsi) } /** + * ice_is_safe_mode + * @pf: pointer to the PF struct + * + * returns true if driver is in safe mode, false otherwise + */ +bool ice_is_safe_mode(struct ice_pf *pf) +{ + return !test_bit(ICE_FLAG_ADV_FEATURES, pf->flags); +} + +/** * ice_rss_clean - Delete RSS related VSI structures that hold user inputs * @vsi: the VSI being removed */ @@ -1010,6 +1073,13 @@ static int ice_vsi_init(struct ice_vsi *vsi) ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF; } + /* Allow control frames out of main VSI */ + if (vsi->type == ICE_VSI_PF) { + ctxt->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD; + ctxt->info.valid_sections |= + cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID); + } + ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL); if (ret) { dev_err(&pf->pdev->dev, @@ -1129,12 +1199,7 @@ static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi) return -EEXIST; } - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { - num_q_vectors = vsi->num_q_vectors; - } else { - err = -EINVAL; - goto err_out; - } + num_q_vectors = vsi->num_q_vectors; for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { err = ice_vsi_alloc_q_vector(vsi, v_idx); @@ 
-1180,9 +1245,6 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi) return -EEXIST; } - if (!test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) - return -ENOENT; - num_q_vectors = vsi->num_q_vectors; /* reserve slots from OS requested IRQs */ vsi->base_vector = ice_get_res(pf, pf->irq_tracker, num_q_vectors, @@ -1477,40 +1539,32 @@ void ice_update_eth_stats(struct ice_vsi *vsi) prev_es = &vsi->eth_stats_prev; cur_es = &vsi->eth_stats; - ice_stat_update40(hw, GLV_GORCH(vsi_num), GLV_GORCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->rx_bytes, - &cur_es->rx_bytes); + ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded, + &prev_es->rx_bytes, &cur_es->rx_bytes); - ice_stat_update40(hw, GLV_UPRCH(vsi_num), GLV_UPRCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->rx_unicast, - &cur_es->rx_unicast); + ice_stat_update40(hw, GLV_UPRCL(vsi_num), vsi->stat_offsets_loaded, + &prev_es->rx_unicast, &cur_es->rx_unicast); - ice_stat_update40(hw, GLV_MPRCH(vsi_num), GLV_MPRCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->rx_multicast, - &cur_es->rx_multicast); + ice_stat_update40(hw, GLV_MPRCL(vsi_num), vsi->stat_offsets_loaded, + &prev_es->rx_multicast, &cur_es->rx_multicast); - ice_stat_update40(hw, GLV_BPRCH(vsi_num), GLV_BPRCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->rx_broadcast, - &cur_es->rx_broadcast); + ice_stat_update40(hw, GLV_BPRCL(vsi_num), vsi->stat_offsets_loaded, + &prev_es->rx_broadcast, &cur_es->rx_broadcast); ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded, &prev_es->rx_discards, &cur_es->rx_discards); - ice_stat_update40(hw, GLV_GOTCH(vsi_num), GLV_GOTCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->tx_bytes, - &cur_es->tx_bytes); + ice_stat_update40(hw, GLV_GOTCL(vsi_num), vsi->stat_offsets_loaded, + &prev_es->tx_bytes, &cur_es->tx_bytes); - ice_stat_update40(hw, GLV_UPTCH(vsi_num), GLV_UPTCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->tx_unicast, - &cur_es->tx_unicast); + ice_stat_update40(hw, GLV_UPTCL(vsi_num), vsi->stat_offsets_loaded, + &prev_es->tx_unicast, &cur_es->tx_unicast); - ice_stat_update40(hw, GLV_MPTCH(vsi_num), GLV_MPTCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->tx_multicast, - &cur_es->tx_multicast); + ice_stat_update40(hw, GLV_MPTCL(vsi_num), vsi->stat_offsets_loaded, + &prev_es->tx_multicast, &cur_es->tx_multicast); - ice_stat_update40(hw, GLV_BPTCH(vsi_num), GLV_BPTCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->tx_broadcast, - &cur_es->tx_broadcast); + ice_stat_update40(hw, GLV_BPTCL(vsi_num), vsi->stat_offsets_loaded, + &prev_es->tx_broadcast, &cur_es->tx_broadcast); ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded, &prev_es->tx_errors, &cur_es->tx_errors); @@ -1658,6 +1712,62 @@ setup_rings: } /** + * ice_vsi_cfg_txq - Configure single Tx queue + * @vsi: the VSI that queue belongs to + * @ring: Tx ring to be configured + * @tc_q_idx: queue index within given TC + * @qg_buf: queue group buffer + * @tc: TC that Tx ring belongs to + */ +static int +ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, u16 tc_q_idx, + struct ice_aqc_add_tx_qgrp *qg_buf, u8 tc) +{ + struct ice_tlan_ctx tlan_ctx = { 0 }; + struct ice_aqc_add_txqs_perq *txq; + struct ice_pf *pf = vsi->back; + u8 buf_len = sizeof(*qg_buf); + enum ice_status status; + u16 pf_q; + + pf_q = ring->reg_idx; + ice_setup_tx_ctx(ring, &tlan_ctx, pf_q); + /* copy context contents into the qg_buf */ + qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q); + ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx, + ice_tlan_ctx_info); + + /* init 
queue specific tail reg. It is referred as + * transmit comm scheduler queue doorbell. + */ + ring->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q); + + /* Add unique software queue handle of the Tx queue per + * TC into the VSI Tx ring + */ + ring->q_handle = tc_q_idx; + + status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle, + 1, qg_buf, buf_len, NULL); + if (status) { + dev_err(&pf->pdev->dev, + "Failed to set LAN Tx queue context, error: %d\n", + status); + return -ENODEV; + } + + /* Add Tx Queue TEID into the VSI Tx ring from the + * response. This will complete configuring and + * enabling the queue. + */ + txq = &qg_buf->txqs[0]; + if (pf_q == le16_to_cpu(txq->txq_id)) + ring->txq_teid = le32_to_cpu(txq->q_teid); + + return 0; +} + +/** * ice_vsi_cfg_txqs - Configure the VSI for Tx * @vsi: the VSI being configured * @rings: Tx ring array to be configured @@ -1670,20 +1780,16 @@ static int ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset) { struct ice_aqc_add_tx_qgrp *qg_buf; - struct ice_aqc_add_txqs_perq *txq; struct ice_pf *pf = vsi->back; - u8 num_q_grps, q_idx = 0; - enum ice_status status; - u16 buf_len, i, pf_q; - int err = 0, tc; + u16 q_idx = 0, i; + int err = 0; + u8 tc; - buf_len = sizeof(*qg_buf); - qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL); + qg_buf = devm_kzalloc(&pf->pdev->dev, sizeof(*qg_buf), GFP_KERNEL); if (!qg_buf) return -ENOMEM; qg_buf->num_txqs = 1; - num_q_grps = 1; /* set up and configure the Tx queues for each enabled TC */ ice_for_each_traffic_class(tc) { @@ -1691,39 +1797,10 @@ ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset) break; for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) { - struct ice_tlan_ctx tlan_ctx = { 0 }; - - pf_q = vsi->txq_map[q_idx + offset]; - ice_setup_tx_ctx(rings[q_idx], &tlan_ctx, pf_q); - /* copy context contents into the qg_buf */ - qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q); - ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx, - ice_tlan_ctx_info); - - /* init queue specific tail reg. It is referred as - * transmit comm scheduler queue doorbell. - */ - rings[q_idx]->tail = - pf->hw.hw_addr + QTX_COMM_DBELL(pf_q); - status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, - i, num_q_grps, qg_buf, - buf_len, NULL); - if (status) { - dev_err(&pf->pdev->dev, - "Failed to set LAN Tx queue context, error: %d\n", - status); - err = -ENODEV; + err = ice_vsi_cfg_txq(vsi, rings[q_idx], i + offset, + qg_buf, tc); + if (err) goto err_cfg_txqs; - } - - /* Add Tx Queue TEID into the VSI Tx ring from the - * response. This will complete configuring and - * enabling the queue. 
- */ - txq = &qg_buf->txqs[0]; - if (pf_q == le16_to_cpu(txq->txq_id)) - rings[q_idx]->txq_teid = - le32_to_cpu(txq->q_teid); q_idx++; } @@ -2070,45 +2147,112 @@ void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector) } /** - * ice_vsi_stop_tx_rings - Disable Tx rings + * ice_vsi_stop_tx_ring - Disable single Tx ring * @vsi: the VSI being configured * @rst_src: reset source * @rel_vmvf_num: Relative ID of VF/VM - * @rings: Tx ring array to be stopped - * @offset: offset within vsi->txq_map + * @ring: Tx ring to be stopped + * @txq_meta: Meta data of Tx ring to be stopped */ -static int -ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, - u16 rel_vmvf_num, struct ice_ring **rings, int offset) +#ifndef CONFIG_PCI_IOV +static +#endif /* !CONFIG_PCI_IOV */ +int +ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, + u16 rel_vmvf_num, struct ice_ring *ring, + struct ice_txq_meta *txq_meta) { struct ice_pf *pf = vsi->back; + struct ice_q_vector *q_vector; struct ice_hw *hw = &pf->hw; - int tc, q_idx = 0, err = 0; - u16 *q_ids, *q_handles, i; enum ice_status status; - u32 *q_teids, val; + u32 val; - if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS) - return -EINVAL; + /* clear cause_ena bit for disabled queues */ + val = rd32(hw, QINT_TQCTL(ring->reg_idx)); + val &= ~QINT_TQCTL_CAUSE_ENA_M; + wr32(hw, QINT_TQCTL(ring->reg_idx), val); - q_teids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_teids), - GFP_KERNEL); - if (!q_teids) - return -ENOMEM; + /* software is expected to wait for 100 ns */ + ndelay(100); - q_ids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_ids), - GFP_KERNEL); - if (!q_ids) { - err = -ENOMEM; - goto err_alloc_q_ids; + /* trigger a software interrupt for the vector + * associated to the queue to schedule NAPI handler + */ + q_vector = ring->q_vector; + if (q_vector) + ice_trigger_sw_intr(hw, q_vector); + + status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx, + txq_meta->tc, 1, &txq_meta->q_handle, + &txq_meta->q_id, &txq_meta->q_teid, rst_src, + rel_vmvf_num, NULL); + + /* if the disable queue command was exercised during an + * active reset flow, ICE_ERR_RESET_ONGOING is returned. + * This is not an error as the reset operation disables + * queues at the hardware level anyway. + */ + if (status == ICE_ERR_RESET_ONGOING) { + dev_dbg(&vsi->back->pdev->dev, + "Reset in progress. 
LAN Tx queues already disabled\n"); + } else if (status == ICE_ERR_DOES_NOT_EXIST) { + dev_dbg(&vsi->back->pdev->dev, + "LAN Tx queues do not exist, nothing to disable\n"); + } else if (status) { + dev_err(&vsi->back->pdev->dev, + "Failed to disable LAN Tx queues, error: %d\n", status); + return -ENODEV; } - q_handles = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, - sizeof(*q_handles), GFP_KERNEL); - if (!q_handles) { - err = -ENOMEM; - goto err_alloc_q_handles; - } + return 0; +} + +/** + * ice_fill_txq_meta - Prepare the Tx queue's meta data + * @vsi: VSI that ring belongs to + * @ring: ring that txq_meta will be based on + * @txq_meta: a helper struct that wraps Tx queue's information + * + * Set up a helper struct that will contain all the necessary fields that + * are needed for stopping Tx queue + */ +#ifndef CONFIG_PCI_IOV +static +#endif /* !CONFIG_PCI_IOV */ +void +ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring, + struct ice_txq_meta *txq_meta) +{ + u8 tc = 0; + +#ifdef CONFIG_DCB + tc = ring->dcb_tc; +#endif /* CONFIG_DCB */ + txq_meta->q_id = ring->reg_idx; + txq_meta->q_teid = ring->txq_teid; + txq_meta->q_handle = ring->q_handle; + txq_meta->vsi_idx = vsi->idx; + txq_meta->tc = tc; +} + +/** + * ice_vsi_stop_tx_rings - Disable Tx rings + * @vsi: the VSI being configured + * @rst_src: reset source + * @rel_vmvf_num: Relative ID of VF/VM + * @rings: Tx ring array to be stopped + */ +static int +ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, + u16 rel_vmvf_num, struct ice_ring **rings) +{ + u16 i, q_idx = 0; + int status; + u8 tc; + + if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS) + return -EINVAL; /* set up the Tx queue list to be disabled for each enabled TC */ ice_for_each_traffic_class(tc) { @@ -2116,64 +2260,24 @@ ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, break; for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) { - struct ice_q_vector *q_vector; + struct ice_txq_meta txq_meta = { }; - if (!rings || !rings[q_idx]) { - err = -EINVAL; - goto err_out; - } - - q_ids[i] = vsi->txq_map[q_idx + offset]; - q_teids[i] = rings[q_idx]->txq_teid; - q_handles[i] = i; + if (!rings || !rings[q_idx]) + return -EINVAL; - /* clear cause_ena bit for disabled queues */ - val = rd32(hw, QINT_TQCTL(rings[i]->reg_idx)); - val &= ~QINT_TQCTL_CAUSE_ENA_M; - wr32(hw, QINT_TQCTL(rings[i]->reg_idx), val); + ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta); + status = ice_vsi_stop_tx_ring(vsi, rst_src, + rel_vmvf_num, + rings[q_idx], &txq_meta); - /* software is expected to wait for 100 ns */ - ndelay(100); - - /* trigger a software interrupt for the vector - * associated to the queue to schedule NAPI handler - */ - q_vector = rings[i]->q_vector; - if (q_vector) - ice_trigger_sw_intr(hw, q_vector); + if (status) + return status; q_idx++; } - status = ice_dis_vsi_txq(vsi->port_info, vsi->idx, tc, - vsi->num_txq, q_handles, q_ids, - q_teids, rst_src, rel_vmvf_num, NULL); - - /* if the disable queue command was exercised during an active - * reset flow, ICE_ERR_RESET_ONGOING is returned. This is not - * an error as the reset operation disables queues at the - * hardware level anyway. - */ - if (status == ICE_ERR_RESET_ONGOING) { - dev_dbg(&pf->pdev->dev, - "Reset in progress. 
LAN Tx queues already disabled\n"); - } else if (status) { - dev_err(&pf->pdev->dev, - "Failed to disable LAN Tx queues, error: %d\n", - status); - err = -ENODEV; - } } -err_out: - devm_kfree(&pf->pdev->dev, q_handles); - -err_alloc_q_handles: - devm_kfree(&pf->pdev->dev, q_ids); - -err_alloc_q_ids: - devm_kfree(&pf->pdev->dev, q_teids); - - return err; + return 0; } /** @@ -2186,8 +2290,7 @@ int ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, u16 rel_vmvf_num) { - return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, - 0); + return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings); } /** @@ -2497,9 +2600,6 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, if (ret) goto unroll_vector_base; - pf->q_left_tx -= vsi->alloc_txq; - pf->q_left_rx -= vsi->alloc_rxq; - /* Do not exit if configuring RSS had an issue, at least * receive traffic on first queue. Hence no need to capture * return value @@ -2519,7 +2619,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, /* configure VSI nodes based on number of queues and TC's */ for (i = 0; i < vsi->tc_cfg.numtc; i++) - max_txqs[i] = pf->num_lan_tx; + max_txqs[i] = vsi->alloc_txq; status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, max_txqs); @@ -2540,15 +2640,17 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, * DCB settings in the HW. Also, if the FW DCBX engine is not running * then Rx LLDP packets need to be redirected up the stack. */ - if (vsi->type == ICE_VSI_PF) { - ice_vsi_add_rem_eth_mac(vsi, true); + if (!ice_is_safe_mode(pf)) { + if (vsi->type == ICE_VSI_PF) { + ice_vsi_add_rem_eth_mac(vsi, true); - /* Tx LLDP packets */ - ice_cfg_sw_lldp(vsi, true, true); + /* Tx LLDP packets */ + ice_cfg_sw_lldp(vsi, true, true); - /* Rx LLDP packets */ - if (!test_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags)) - ice_cfg_sw_lldp(vsi, false, true); + /* Rx LLDP packets */ + if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) + ice_cfg_sw_lldp(vsi, false, true); + } } return vsi; @@ -2563,8 +2665,6 @@ unroll_vsi_init: ice_vsi_delete(vsi); unroll_get_qs: ice_vsi_put_qs(vsi); - pf->q_left_tx += vsi->alloc_txq; - pf->q_left_rx += vsi->alloc_rxq; ice_vsi_clear(vsi); return NULL; @@ -2610,39 +2710,36 @@ void ice_vsi_free_irq(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; int base = vsi->base_vector; + int i; - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { - int i; - - if (!vsi->q_vectors || !vsi->irqs_ready) - return; + if (!vsi->q_vectors || !vsi->irqs_ready) + return; - ice_vsi_release_msix(vsi); - if (vsi->type == ICE_VSI_VF) - return; + ice_vsi_release_msix(vsi); + if (vsi->type == ICE_VSI_VF) + return; - vsi->irqs_ready = false; - ice_for_each_q_vector(vsi, i) { - u16 vector = i + base; - int irq_num; + vsi->irqs_ready = false; + ice_for_each_q_vector(vsi, i) { + u16 vector = i + base; + int irq_num; - irq_num = pf->msix_entries[vector].vector; + irq_num = pf->msix_entries[vector].vector; - /* free only the irqs that were actually requested */ - if (!vsi->q_vectors[i] || - !(vsi->q_vectors[i]->num_ring_tx || - vsi->q_vectors[i]->num_ring_rx)) - continue; + /* free only the irqs that were actually requested */ + if (!vsi->q_vectors[i] || + !(vsi->q_vectors[i]->num_ring_tx || + vsi->q_vectors[i]->num_ring_rx)) + continue; - /* clear the affinity notifier in the IRQ descriptor */ - irq_set_affinity_notifier(irq_num, NULL); + /* clear the affinity notifier in the IRQ descriptor */ + irq_set_affinity_notifier(irq_num, NULL); - /* clear the 
affinity_mask in the IRQ descriptor */ - irq_set_affinity_hint(irq_num, NULL); - synchronize_irq(irq_num); - devm_free_irq(&pf->pdev->dev, irq_num, - vsi->q_vectors[i]); - } + /* clear the affinity_mask in the IRQ descriptor */ + irq_set_affinity_hint(irq_num, NULL); + synchronize_irq(irq_num); + devm_free_irq(&pf->pdev->dev, irq_num, + vsi->q_vectors[i]); } } @@ -2821,15 +2918,20 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi) } /* disable each interrupt */ - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { - ice_for_each_q_vector(vsi, i) - wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0); + ice_for_each_q_vector(vsi, i) { + if (!vsi->q_vectors[i]) + continue; + wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0); + } - ice_flush(hw); + ice_flush(hw); - ice_for_each_q_vector(vsi, i) - synchronize_irq(pf->msix_entries[i + base].vector); - } + /* don't call synchronize_irq() for VF's from the host */ + if (vsi->type == ICE_VSI_VF) + return; + + ice_for_each_q_vector(vsi, i) + synchronize_irq(pf->msix_entries[i + base].vector); } /** @@ -2889,14 +2991,16 @@ int ice_vsi_release(struct ice_vsi *vsi) pf->num_avail_sw_msix += vsi->num_q_vectors; } - if (vsi->type == ICE_VSI_PF) { - ice_vsi_add_rem_eth_mac(vsi, false); - ice_cfg_sw_lldp(vsi, true, false); - /* The Rx rule will only exist to remove if the LLDP FW - * engine is currently stopped - */ - if (!test_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags)) - ice_cfg_sw_lldp(vsi, false, false); + if (!ice_is_safe_mode(pf)) { + if (vsi->type == ICE_VSI_PF) { + ice_vsi_add_rem_eth_mac(vsi, false); + ice_cfg_sw_lldp(vsi, true, false); + /* The Rx rule will only exist to remove if the LLDP FW + * engine is currently stopped + */ + if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) + ice_cfg_sw_lldp(vsi, false, false); + } } ice_remove_vsi_fltr(&pf->hw, vsi->idx); @@ -2913,8 +3017,6 @@ int ice_vsi_release(struct ice_vsi *vsi) ice_vsi_clear_rings(vsi); ice_vsi_put_qs(vsi); - pf->q_left_tx += vsi->alloc_txq; - pf->q_left_rx += vsi->alloc_rxq; /* retain SW VSI data structure since it is needed to unregister and * free VSI netdev when PF is not in reset recovery pending state,\ @@ -2962,6 +3064,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) vsi->base_vector = 0; } + ice_vsi_put_qs(vsi); ice_vsi_clear_rings(vsi); ice_vsi_free_arrays(vsi); ice_dev_onetime_setup(&pf->hw); @@ -2969,6 +3072,12 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) ice_vsi_set_num_qs(vsi, vf->vf_id); else ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID); + + ret = ice_vsi_alloc_arrays(vsi); + if (ret < 0) + goto err_vsi; + + ice_vsi_get_qs(vsi); ice_vsi_set_tc_cfg(vsi); /* Initialize VSI struct elements and create VSI in FW */ @@ -2976,9 +3085,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) if (ret < 0) goto err_vsi; - ret = ice_vsi_alloc_arrays(vsi); - if (ret < 0) - goto err_vsi; switch (vsi->type) { case ICE_VSI_PF: @@ -2986,6 +3092,10 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) if (ret) goto err_rings; + ret = ice_vsi_setup_vector_base(vsi); + if (ret) + goto err_vectors; + ret = ice_vsi_set_q_vectors_reg_idx(vsi); if (ret) goto err_vectors; @@ -3007,10 +3117,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) if (ret) goto err_rings; - ret = ice_vsi_setup_vector_base(vsi); - if (ret) - goto err_vectors; - ret = ice_vsi_set_q_vectors_reg_idx(vsi); if (ret) goto err_vectors; @@ -3019,8 +3125,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) if (ret) goto err_vectors; - pf->q_left_tx -= vsi->alloc_txq; - pf->q_left_rx -= vsi->alloc_rxq; break; default: break; @@ -3028,7 +3132,7 @@ int ice_vsi_rebuild(struct 
ice_vsi *vsi) /* configure VSI nodes based on number of queues and TC's */ for (i = 0; i < vsi->tc_cfg.numtc; i++) - max_txqs[i] = pf->num_lan_tx; + max_txqs[i] = vsi->alloc_txq; status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, max_txqs); @@ -3083,48 +3187,6 @@ static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx) } /** - * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration - * @vsi: the VSI being configured - * @ena_tc: TC map to be enabled - */ -static void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc) -{ - struct net_device *netdev = vsi->netdev; - struct ice_pf *pf = vsi->back; - struct ice_dcbx_cfg *dcbcfg; - u8 netdev_tc; - int i; - - if (!netdev) - return; - - if (!ena_tc) { - netdev_reset_tc(netdev); - return; - } - - if (netdev_set_num_tc(netdev, vsi->tc_cfg.numtc)) - return; - - dcbcfg = &pf->hw.port_info->local_dcbx_cfg; - - ice_for_each_traffic_class(i) - if (vsi->tc_cfg.ena_tc & BIT(i)) - netdev_set_tc_queue(netdev, - vsi->tc_cfg.tc_info[i].netdev_tc, - vsi->tc_cfg.tc_info[i].qcount_tx, - vsi->tc_cfg.tc_info[i].qoffset); - - for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) { - u8 ets_tc = dcbcfg->etscfg.prio_table[i]; - - /* Get the mapped netdev TC# for the UP */ - netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc; - netdev_set_prio_tc_map(netdev, i, netdev_tc); - } -} - -/** * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map * @vsi: VSI to be configured * @ena_tc: TC bitmap @@ -3145,7 +3207,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) if (ena_tc & BIT(i)) num_tc++; /* populate max_txqs per TC */ - max_txqs[i] = pf->num_lan_tx; + max_txqs[i] = vsi->alloc_txq; } vsi->tc_cfg.ena_tc = ena_tc; @@ -3188,3 +3250,52 @@ out: return ret; } #endif /* CONFIG_DCB */ + +/** + * ice_nvm_version_str - format the NVM version strings + * @hw: ptr to the hardware info + */ +char *ice_nvm_version_str(struct ice_hw *hw) +{ + u8 oem_ver, oem_patch, ver_hi, ver_lo; + static char buf[ICE_NVM_VER_LEN]; + u16 oem_build; + + ice_get_nvm_version(hw, &oem_ver, &oem_build, &oem_patch, &ver_hi, + &ver_lo); + + snprintf(buf, sizeof(buf), "%x.%02x 0x%x %d.%d.%d", ver_hi, ver_lo, + hw->nvm.eetrack, oem_ver, oem_build, oem_patch); + + return buf; +} + +/** + * ice_vsi_cfg_mac_fltr - Add or remove a MAC address filter for a VSI + * @vsi: the VSI being configured MAC filter + * @macaddr: the MAC address to be added. 
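/* Usage sketch for the helper documented here (old_mac and new_mac are
 * hypothetical caller-side variables; error handling elided). With this
 * helper, replacing a VSI's MAC no longer needs a staged temporary
 * filter list -- it is one call per address:
 *
 *      status = ice_vsi_cfg_mac_fltr(vsi, old_mac, false);
 *      if (!status)
 *              status = ice_vsi_cfg_mac_fltr(vsi, new_mac, true);
 */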
+ * @set: Add or delete a MAC filter + * + * Adds or removes MAC address filter entry for VF VSI + */ +enum ice_status +ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set) +{ + LIST_HEAD(tmp_add_list); + enum ice_status status; + + /* Update MAC filter list to be added or removed for a VSI */ + if (ice_add_mac_to_list(vsi, &tmp_add_list, macaddr)) { + status = ICE_ERR_NO_MEMORY; + goto cfg_mac_fltr_exit; + } + + if (set) + status = ice_add_mac(&vsi->back->hw, &tmp_add_list); + else + status = ice_remove_mac(&vsi->back->hw, &tmp_add_list); + +cfg_mac_fltr_exit: + ice_free_fltr_list(&vsi->back->pdev->dev, &tmp_add_list); + return status; +} diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index 6e43ef03bfc3..47bc033fff20 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -6,8 +6,22 @@ #include "ice.h" -int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list, - const u8 *macaddr); +struct ice_txq_meta { + /* Tx-scheduler element identifier */ + u32 q_teid; + /* Entry in VSI's txq_map bitmap */ + u16 q_id; + /* Relative index of Tx queue within TC */ + u16 q_handle; + /* VSI index that Tx queue belongs to */ + u16 vsi_idx; + /* TC number that Tx queue belongs to */ + u8 tc; +}; + +int +ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list, + const u8 *macaddr); void ice_free_fltr_list(struct device *dev, struct list_head *h); @@ -25,6 +39,16 @@ ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx); void ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx); + +int +ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, + u16 rel_vmvf_num, struct ice_ring *ring, + struct ice_txq_meta *txq_meta); + +void ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring, + struct ice_txq_meta *txq_meta); + +int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx); #endif /* CONFIG_PCI_IOV */ int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid); @@ -95,4 +119,11 @@ void ice_vsi_free_tx_rings(struct ice_vsi *vsi); int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena); u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran); + +char *ice_nvm_version_str(struct ice_hw *hw); + +enum ice_status +ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set); + +bool ice_is_safe_mode(struct ice_pf *pf); #endif /* !_ICE_LIB_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 63db08d9bafa..214cd6eca405 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -9,16 +9,27 @@ #include "ice_lib.h" #include "ice_dcb_lib.h" -#define DRV_VERSION "0.7.4-k" +#define DRV_VERSION_MAJOR 0 +#define DRV_VERSION_MINOR 8 +#define DRV_VERSION_BUILD 1 + +#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ + __stringify(DRV_VERSION_MINOR) "." \ + __stringify(DRV_VERSION_BUILD) "-k" #define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver" const char ice_drv_ver[] = DRV_VERSION; static const char ice_driver_string[] = DRV_SUMMARY; static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation."; +/* DDP Package file located in firmware search paths (e.g. 
/lib/firmware/) */ +#define ICE_DDP_PKG_PATH "intel/ice/ddp/" +#define ICE_DDP_PKG_FILE ICE_DDP_PKG_PATH "ice.pkg" + MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_DESCRIPTION(DRV_SUMMARY); MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION); +MODULE_FIRMWARE(ICE_DDP_PKG_FILE); static int debug = -1; module_param(debug, int, 0644); @@ -29,24 +40,23 @@ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)"); #endif /* !CONFIG_DYNAMIC_DEBUG */ static struct workqueue_struct *ice_wq; +static const struct net_device_ops ice_netdev_safe_mode_ops; static const struct net_device_ops ice_netdev_ops; -static void ice_rebuild(struct ice_pf *pf); +static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type); static void ice_vsi_release_all(struct ice_pf *pf); -static void ice_update_vsi_stats(struct ice_vsi *vsi); -static void ice_update_pf_stats(struct ice_pf *pf); /** * ice_get_tx_pending - returns number of Tx descriptors not processed * @ring: the ring of descriptors */ -static u32 ice_get_tx_pending(struct ice_ring *ring) +static u16 ice_get_tx_pending(struct ice_ring *ring) { - u32 head, tail; + u16 head, tail; head = ring->next_to_clean; - tail = readl(ring->tail); + tail = ring->next_to_use; if (head != tail) return (head < tail) ? @@ -118,12 +128,11 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf) */ static int ice_init_mac_fltr(struct ice_pf *pf) { - LIST_HEAD(tmp_add_list); + enum ice_status status; u8 broadcast[ETH_ALEN]; struct ice_vsi *vsi; - int status; - vsi = ice_find_vsi_by_type(pf, ICE_VSI_PF); + vsi = ice_get_main_vsi(pf); if (!vsi) return -EINVAL; @@ -132,8 +141,7 @@ static int ice_init_mac_fltr(struct ice_pf *pf) */ /* Add a unicast MAC filter so the VSI can get its packets */ - status = ice_add_mac_to_list(vsi, &tmp_add_list, - vsi->port_info->mac.perm_addr); + status = ice_vsi_cfg_mac_fltr(vsi, vsi->port_info->mac.perm_addr, true); if (status) goto unregister; @@ -141,18 +149,11 @@ static int ice_init_mac_fltr(struct ice_pf *pf) * MAC address to the list as well. 
*/ eth_broadcast_addr(broadcast); - status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast); - if (status) - goto free_mac_list; - - /* Program MAC filters for entries in tmp_add_list */ - status = ice_add_mac(&pf->hw, &tmp_add_list); + status = ice_vsi_cfg_mac_fltr(vsi, broadcast, true); if (status) - status = -ENOMEM; - -free_mac_list: - ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); + goto unregister; + return 0; unregister: /* We aren't useful with no MAC filters, so unregister if we * had an error @@ -166,7 +167,7 @@ unregister: vsi->netdev = NULL; } - return status; + return -EIO; } /** @@ -447,13 +448,13 @@ static void ice_dis_vsi(struct ice_vsi *vsi, bool locked) if (vsi->type == ICE_VSI_PF && vsi->netdev) { if (netif_running(vsi->netdev)) { - if (!locked) { + if (!locked) rtnl_lock(); - vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); + + ice_stop(vsi->netdev); + + if (!locked) rtnl_unlock(); - } else { - vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); - } } else { ice_vsi_close(vsi); } @@ -488,6 +489,7 @@ static void ice_prepare_for_reset(struct ice_pf *pf) { struct ice_hw *hw = &pf->hw; + int i; /* already prepared for reset */ if (test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) @@ -497,6 +499,12 @@ ice_prepare_for_reset(struct ice_pf *pf) if (ice_check_sq_alive(hw, &hw->mailboxq)) ice_vc_notify_reset(pf); + /* Disable VFs until reset is completed */ + for (i = 0; i < pf->num_alloc_vfs; i++) + ice_set_vf_state_qs_dis(&pf->vf[i]); + + /* clear SW filtering DB */ + ice_clear_hw_tbls(hw); /* disable the VSIs and their queues that are not already DOWN */ ice_pf_dis_all_vsi(pf, false); @@ -542,7 +550,7 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type) */ if (reset_type == ICE_RESET_PFR) { pf->pfr_count++; - ice_rebuild(pf); + ice_rebuild(pf, reset_type); clear_bit(__ICE_PREPARED_FOR_RESET, pf->state); clear_bit(__ICE_PFR_REQ, pf->state); ice_reset_all_vfs(pf, true); @@ -573,6 +581,8 @@ static void ice_reset_subtask(struct ice_pf *pf) reset_type = ICE_RESET_CORER; if (test_and_clear_bit(__ICE_GLOBR_RECV, pf->state)) reset_type = ICE_RESET_GLOBR; + if (test_and_clear_bit(__ICE_EMPR_RECV, pf->state)) + reset_type = ICE_RESET_EMPR; /* return if no valid reset type requested */ if (reset_type == ICE_RESET_INVAL) return; @@ -584,7 +594,7 @@ static void ice_reset_subtask(struct ice_pf *pf) } else { /* done with reset. 
start rebuild */ pf->hw.reset_ongoing = false; - ice_rebuild(pf); + ice_rebuild(pf, reset_type); /* clear bit to resume normal operations, but * ICE_NEEDS_RESTART bit is set in case rebuild failed */ @@ -618,6 +628,22 @@ static void ice_reset_subtask(struct ice_pf *pf) } /** + * ice_print_topo_conflict - print topology conflict message + * @vsi: the VSI whose topology status is being checked + */ +static void ice_print_topo_conflict(struct ice_vsi *vsi) +{ + switch (vsi->port_info->phy.link_info.topo_media_conflict) { + case ICE_AQ_LINK_TOPO_CONFLICT: + case ICE_AQ_LINK_MEDIA_CONFLICT: + netdev_info(vsi->netdev, "Possible mis-configuration of the Ethernet port detected, please use the Intel(R) Ethernet Port Configuration Tool application to address the issue.\n"); + break; + default: + break; + } +} + +/** * ice_print_link_msg - print link up or down message * @vsi: the VSI whose link status is being queried * @isup: boolean for if the link is now up or down @@ -630,6 +656,7 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup) const char *speed; const char *fec; const char *fc; + const char *an; if (!vsi) return; @@ -713,6 +740,12 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup) break; } + /* check if autoneg completed, might be false due to not supported */ + if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) + an = "True"; + else + an = "False"; + /* Get FEC mode requested based on PHY caps last SW configuration */ caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL); if (!caps) { @@ -737,8 +770,9 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup) devm_kfree(&vsi->back->pdev->dev, caps); done: - netdev_info(vsi->netdev, "NIC Link is up %sbps, Requested FEC: %s, FEC: %s, Flow Control: %s\n", - speed, fec_req, fec, fc); + netdev_info(vsi->netdev, "NIC Link is up %sbps, Requested FEC: %s, FEC: %s, Autoneg: %s, Flow Control: %s\n", + speed, fec_req, fec, an, fc); + ice_print_topo_conflict(vsi); } /** @@ -806,10 +840,24 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, if (link_up == old_link && link_speed == old_link_speed) return result; - vsi = ice_find_vsi_by_type(pf, ICE_VSI_PF); + vsi = ice_get_main_vsi(pf); if (!vsi || !vsi->port_info) return -EINVAL; + /* turn off PHY if media was removed */ + if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) && + !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) { + set_bit(ICE_FLAG_NO_MEDIA, pf->flags); + + result = ice_aq_set_link_restart_an(pi, false, NULL); + if (result) { + dev_dbg(&pf->pdev->dev, + "Failed to set link down, VSI %d error %d\n", + vsi->vsi_num, result); + return result; + } + } + ice_vsi_link_event(vsi, link_up); ice_print_link_msg(vsi, link_up); @@ -1307,14 +1355,134 @@ static void ice_handle_mdd_event(struct ice_pf *pf) if (vf_mdd_detected) { vf->num_mdd_events++; - if (vf->num_mdd_events > 1) - dev_info(&pf->pdev->dev, "VF %d has had %llu MDD events since last boot\n", + if (vf->num_mdd_events && + vf->num_mdd_events <= ICE_MDD_EVENTS_THRESHOLD) + dev_info(&pf->pdev->dev, + "VF %d has had %llu MDD events since last boot, Admin might need to reload AVF driver with this number of events\n", i, vf->num_mdd_events); } } } /** + * ice_force_phys_link_state - Force the physical link state + * @vsi: VSI to force the physical link state to up/down + * @link_up: true/false indicates to set the physical link to up/down + * + * Force the physical link state by getting the current PHY capabilities from + * hardware and setting the PHY config based on the 
determined capabilities. If + * link changes a link event will be triggered because both the Enable Automatic + * Link Update and LESM Enable bits are set when setting the PHY capabilities. + * + * Returns 0 on success, negative on failure + */ +static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up) +{ + struct ice_aqc_get_phy_caps_data *pcaps; + struct ice_aqc_set_phy_cfg_data *cfg; + struct ice_port_info *pi; + struct device *dev; + int retcode; + + if (!vsi || !vsi->port_info || !vsi->back) + return -EINVAL; + if (vsi->type != ICE_VSI_PF) + return 0; + + dev = &vsi->back->pdev->dev; + + pi = vsi->port_info; + + pcaps = devm_kzalloc(dev, sizeof(*pcaps), GFP_KERNEL); + if (!pcaps) + return -ENOMEM; + + retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, + NULL); + if (retcode) { + dev_err(dev, + "Failed to get phy capabilities, VSI %d error %d\n", + vsi->vsi_num, retcode); + retcode = -EIO; + goto out; + } + + /* No change in link */ + if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) && + link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP)) + goto out; + + cfg = devm_kzalloc(dev, sizeof(*cfg), GFP_KERNEL); + if (!cfg) { + retcode = -ENOMEM; + goto out; + } + + cfg->phy_type_low = pcaps->phy_type_low; + cfg->phy_type_high = pcaps->phy_type_high; + cfg->caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; + cfg->low_power_ctrl = pcaps->low_power_ctrl; + cfg->eee_cap = pcaps->eee_cap; + cfg->eeer_value = pcaps->eeer_value; + cfg->link_fec_opt = pcaps->link_fec_options; + if (link_up) + cfg->caps |= ICE_AQ_PHY_ENA_LINK; + else + cfg->caps &= ~ICE_AQ_PHY_ENA_LINK; + + retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi->lport, cfg, NULL); + if (retcode) { + dev_err(dev, "Failed to set phy config, VSI %d error %d\n", + vsi->vsi_num, retcode); + retcode = -EIO; + } + + devm_kfree(dev, cfg); +out: + devm_kfree(dev, pcaps); + return retcode; +} + +/** + * ice_check_media_subtask - Check for media; bring link up if detected. 
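/* How the ICE_FLAG_NO_MEDIA flag ties the two paths in this patch
 * together (condensed from the hunks above and below, not new code):
 *
 *      on media loss, in ice_link_event():
 *              set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
 *              ice_aq_set_link_restart_an(pi, false, NULL);
 *
 *      on media return, in ice_check_media_subtask():
 *              if (!ice_force_phys_link_state(vsi, true))
 *                      clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
 *
 * so the PHY is parked while no module is present and brought back up,
 * via a normal Link Status Event, once one is plugged in.
 */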
+ * @pf: pointer to PF struct + */ +static void ice_check_media_subtask(struct ice_pf *pf) +{ + struct ice_port_info *pi; + struct ice_vsi *vsi; + int err; + + vsi = ice_get_main_vsi(pf); + if (!vsi) + return; + + /* No need to check for media if it's already present or the interface + * is down + */ + if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) || + test_bit(__ICE_DOWN, vsi->state)) + return; + + /* Refresh link info and check if media is present */ + pi = vsi->port_info; + err = ice_update_link_info(pi); + if (err) + return; + + if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { + err = ice_force_phys_link_state(vsi, true); + if (err) + return; + clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); + + /* A Link Status Event will be generated; the event handler + * will complete bringing the interface up + */ + } +} + +/** * ice_service_task - manage and run subtasks * @work: pointer to work_struct contained by the PF struct */ @@ -1336,12 +1504,19 @@ static void ice_service_task(struct work_struct *work) return; } + ice_clean_adminq_subtask(pf); + ice_check_media_subtask(pf); ice_check_for_hang_subtask(pf); ice_sync_fltr_subtask(pf); ice_handle_mdd_event(pf); - ice_process_vflr_event(pf); ice_watchdog_subtask(pf); - ice_clean_adminq_subtask(pf); + + if (ice_is_safe_mode(pf)) { + ice_service_task_complete(pf); + return; + } + + ice_process_vflr_event(pf); ice_clean_mailboxq_subtask(pf); /* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */ @@ -1369,8 +1544,8 @@ static void ice_set_ctrlq_len(struct ice_hw *hw) hw->adminq.num_sq_entries = ICE_AQ_LEN; hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN; hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN; - hw->mailboxq.num_rq_entries = ICE_MBXQ_LEN; - hw->mailboxq.num_sq_entries = ICE_MBXQ_LEN; + hw->mailboxq.num_rq_entries = ICE_MBXRQ_LEN; + hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN; hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN; hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN; } @@ -1409,15 +1584,11 @@ static void ice_irq_affinity_release(struct kref __always_unused *ref) {} */ static int ice_vsi_ena_irq(struct ice_vsi *vsi) { - struct ice_pf *pf = vsi->back; - struct ice_hw *hw = &pf->hw; + struct ice_hw *hw = &vsi->back->hw; + int i; - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { - int i; - - ice_for_each_q_vector(vsi, i) - ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]); - } + ice_for_each_q_vector(vsi, i) + ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]); ice_flush(hw); return 0; @@ -1665,7 +1836,7 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf) wr32(hw, PFINT_OICR_ENA, 0); ice_flush(hw); - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) { + if (pf->msix_entries) { synchronize_irq(pf->msix_entries[pf->oicr_idx].vector); devm_free_irq(&pf->pdev->dev, pf->msix_entries[pf->oicr_idx].vector, pf); @@ -1780,30 +1951,41 @@ static void ice_napi_add(struct ice_vsi *vsi) } /** - * ice_cfg_netdev - Allocate, configure and register a netdev - * @vsi: the VSI associated with the new netdev - * - * Returns 0 on success, negative value on failure + * ice_set_ops - set netdev and ethtools ops for the given netdev + * @netdev: netdev instance */ -static int ice_cfg_netdev(struct ice_vsi *vsi) +static void ice_set_ops(struct net_device *netdev) { + struct ice_pf *pf = ice_netdev_to_pf(netdev); + + if (ice_is_safe_mode(pf)) { + netdev->netdev_ops = &ice_netdev_safe_mode_ops; + ice_set_ethtool_safe_mode_ops(netdev); + return; + } + + netdev->netdev_ops = &ice_netdev_ops; + ice_set_ethtool_ops(netdev); +} + +/** + * 
ice_set_netdev_features - set features for the given netdev + * @netdev: netdev instance + */ +static void ice_set_netdev_features(struct net_device *netdev) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); netdev_features_t csumo_features; netdev_features_t vlano_features; netdev_features_t dflt_features; netdev_features_t tso_features; - struct ice_netdev_priv *np; - struct net_device *netdev; - u8 mac_addr[ETH_ALEN]; - int err; - - netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq, - vsi->alloc_rxq); - if (!netdev) - return -ENOMEM; - vsi->netdev = netdev; - np = netdev_priv(netdev); - np->vsi = vsi; + if (ice_is_safe_mode(pf)) { + /* safe mode */ + netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA; + netdev->hw_features = netdev->features; + return; + } dflt_features = NETIF_F_SG | NETIF_F_HIGHDMA | @@ -1831,25 +2013,50 @@ static int ice_cfg_netdev(struct ice_vsi *vsi) tso_features; netdev->vlan_features |= dflt_features | csumo_features | tso_features; +} + +/** + * ice_cfg_netdev - Allocate, configure and register a netdev + * @vsi: the VSI associated with the new netdev + * + * Returns 0 on success, negative value on failure + */ +static int ice_cfg_netdev(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + struct ice_netdev_priv *np; + struct net_device *netdev; + u8 mac_addr[ETH_ALEN]; + int err; + + netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq, + vsi->alloc_rxq); + if (!netdev) + return -ENOMEM; + + vsi->netdev = netdev; + np = netdev_priv(netdev); + np->vsi = vsi; + + ice_set_netdev_features(netdev); + + ice_set_ops(netdev); if (vsi->type == ICE_VSI_PF) { - SET_NETDEV_DEV(netdev, &vsi->back->pdev->dev); + SET_NETDEV_DEV(netdev, &pf->pdev->dev); ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); - ether_addr_copy(netdev->dev_addr, mac_addr); ether_addr_copy(netdev->perm_addr, mac_addr); } netdev->priv_flags |= IFF_UNICAST_FLT; - /* assign netdev_ops */ - netdev->netdev_ops = &ice_netdev_ops; + /* Setup netdev TC information */ + ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); /* setup watchdog timeout value to be 5 second */ netdev->watchdog_timeo = 5 * HZ; - ice_set_ethtool_ops(netdev); - netdev->min_mtu = ETH_MIN_MTU; netdev->max_mtu = ICE_MAX_MTU; @@ -2041,36 +2248,48 @@ unroll_vsi_setup: ice_vsi_free_q_vectors(vsi); ice_vsi_delete(vsi); ice_vsi_put_qs(vsi); - pf->q_left_tx += vsi->alloc_txq; - pf->q_left_rx += vsi->alloc_rxq; ice_vsi_clear(vsi); } return status; } /** - * ice_determine_q_usage - Calculate queue distribution - * @pf: board private structure - * - * Return -ENOMEM if we don't get enough queues for all ports + * ice_get_avail_q_count - Get count of queues in use + * @pf_qmap: bitmap to get queue use count from + * @lock: pointer to a mutex that protects access to pf_qmap + * @size: size of the bitmap */ -static void ice_determine_q_usage(struct ice_pf *pf) +static u16 +ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size) { - u16 q_left_tx, q_left_rx; + u16 count = 0, bit; - q_left_tx = pf->hw.func_caps.common_cap.num_txq; - q_left_rx = pf->hw.func_caps.common_cap.num_rxq; + mutex_lock(lock); + for_each_clear_bit(bit, pf_qmap, size) + count++; + mutex_unlock(lock); - pf->num_lan_tx = min_t(int, q_left_tx, num_online_cpus()); + return count; +} - /* only 1 Rx queue unless RSS is enabled */ - if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) - pf->num_lan_rx = 1; - else - pf->num_lan_rx = min_t(int, q_left_rx, num_online_cpus()); +/** + * ice_get_avail_txq_count - Get count of Tx queues in use + * @pf: pointer to an 
ice_pf instance + */ +u16 ice_get_avail_txq_count(struct ice_pf *pf) +{ + return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex, + pf->max_pf_txqs); +} - pf->q_left_tx = q_left_tx - pf->num_lan_tx; - pf->q_left_rx = q_left_rx - pf->num_lan_rx; +/** + * ice_get_avail_rxq_count - Get count of Rx queues in use + * @pf: pointer to an ice_pf instance + */ +u16 ice_get_avail_rxq_count(struct ice_pf *pf) +{ + return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex, + pf->max_pf_rxqs); } /** @@ -2082,43 +2301,74 @@ static void ice_deinit_pf(struct ice_pf *pf) ice_service_task_stop(pf); mutex_destroy(&pf->sw_mutex); mutex_destroy(&pf->avail_q_mutex); + + if (pf->avail_txqs) { + bitmap_free(pf->avail_txqs); + pf->avail_txqs = NULL; + } + + if (pf->avail_rxqs) { + bitmap_free(pf->avail_rxqs); + pf->avail_rxqs = NULL; + } } /** - * ice_init_pf - Initialize general software structures (struct ice_pf) - * @pf: board private structure to initialize + * ice_set_pf_caps - set PFs capability flags + * @pf: pointer to the PF instance */ -static void ice_init_pf(struct ice_pf *pf) +static void ice_set_pf_caps(struct ice_pf *pf) { - bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS); - set_bit(ICE_FLAG_MSIX_ENA, pf->flags); -#ifdef CONFIG_PCI_IOV - if (pf->hw.func_caps.common_cap.sr_iov_1_1) { - struct ice_hw *hw = &pf->hw; + struct ice_hw_func_caps *func_caps = &pf->hw.func_caps; + clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); + if (func_caps->common_cap.dcb) + set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); +#ifdef CONFIG_PCI_IOV + clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); + if (func_caps->common_cap.sr_iov_1_1) { set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); - pf->num_vfs_supported = min_t(int, hw->func_caps.num_allocd_vfs, + pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs, ICE_MAX_VF_COUNT); } #endif /* CONFIG_PCI_IOV */ + clear_bit(ICE_FLAG_RSS_ENA, pf->flags); + if (func_caps->common_cap.rss_table_size) + set_bit(ICE_FLAG_RSS_ENA, pf->flags); - mutex_init(&pf->sw_mutex); - mutex_init(&pf->avail_q_mutex); + pf->max_pf_txqs = func_caps->common_cap.num_txq; + pf->max_pf_rxqs = func_caps->common_cap.num_rxq; +} - /* Clear avail_[t|r]x_qs bitmaps (set all to avail) */ - mutex_lock(&pf->avail_q_mutex); - bitmap_zero(pf->avail_txqs, ICE_MAX_TXQS); - bitmap_zero(pf->avail_rxqs, ICE_MAX_RXQS); - mutex_unlock(&pf->avail_q_mutex); +/** + * ice_init_pf - Initialize general software structures (struct ice_pf) + * @pf: board private structure to initialize + */ +static int ice_init_pf(struct ice_pf *pf) +{ + ice_set_pf_caps(pf); - if (pf->hw.func_caps.common_cap.rss_table_size) - set_bit(ICE_FLAG_RSS_ENA, pf->flags); + mutex_init(&pf->sw_mutex); /* setup service timer and periodic service task */ timer_setup(&pf->serv_tmr, ice_service_timer, 0); pf->serv_tmr_period = HZ; INIT_WORK(&pf->serv_task, ice_service_task); clear_bit(__ICE_SERVICE_SCHED, pf->state); + + mutex_init(&pf->avail_q_mutex); + pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL); + if (!pf->avail_txqs) + return -ENOMEM; + + pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL); + if (!pf->avail_rxqs) { + devm_kfree(&pf->pdev->dev, pf->avail_txqs); + pf->avail_txqs = NULL; + return -ENOMEM; + } + + return 0; } /** @@ -2137,13 +2387,18 @@ static int ice_ena_msix_range(struct ice_pf *pf) /* reserve one vector for miscellaneous handler */ needed = 1; + if (v_left < needed) + goto no_hw_vecs_left_err; v_budget += needed; v_left -= needed; /* reserve vectors for LAN traffic */ - pf->num_lan_msix = min_t(int, num_online_cpus(), 
v_left); - v_budget += pf->num_lan_msix; - v_left -= pf->num_lan_msix; + needed = min_t(int, num_online_cpus(), v_left); + if (v_left < needed) + goto no_hw_vecs_left_err; + pf->num_lan_msix = needed; + v_budget += needed; + v_left -= needed; pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget, sizeof(*pf->msix_entries), GFP_KERNEL); @@ -2168,18 +2423,18 @@ static int ice_ena_msix_range(struct ice_pf *pf) if (v_actual < v_budget) { dev_warn(&pf->pdev->dev, - "not enough vectors. requested = %d, obtained = %d\n", + "not enough OS MSI-X vectors. requested = %d, obtained = %d\n", v_budget, v_actual); - if (v_actual >= (pf->num_lan_msix + 1)) { - pf->num_avail_sw_msix = v_actual - - (pf->num_lan_msix + 1); - } else if (v_actual >= 2) { - pf->num_lan_msix = 1; - pf->num_avail_sw_msix = v_actual - 2; - } else { +/* 2 vectors for LAN (traffic + OICR) */ +#define ICE_MIN_LAN_VECS 2 + + if (v_actual < ICE_MIN_LAN_VECS) { + /* error if we can't get minimum vectors */ pci_disable_msix(pf->pdev); err = -ERANGE; goto msix_err; + } else { + pf->num_lan_msix = ICE_MIN_LAN_VECS; } } @@ -2189,9 +2444,13 @@ msix_err: devm_kfree(&pf->pdev->dev, pf->msix_entries); goto exit_err; +no_hw_vecs_left_err: + dev_err(&pf->pdev->dev, + "not enough device MSI-X vectors. requested = %d, available = %d\n", + needed, v_left); + err = -ERANGE; exit_err: pf->num_lan_msix = 0; - clear_bit(ICE_FLAG_MSIX_ENA, pf->flags); return err; } @@ -2204,7 +2463,6 @@ static void ice_dis_msix(struct ice_pf *pf) pci_disable_msix(pf->pdev); devm_kfree(&pf->pdev->dev, pf->msix_entries); pf->msix_entries = NULL; - clear_bit(ICE_FLAG_MSIX_ENA, pf->flags); } /** @@ -2213,8 +2471,7 @@ static void ice_dis_msix(struct ice_pf *pf) */ static void ice_clear_interrupt_scheme(struct ice_pf *pf) { - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) - ice_dis_msix(pf); + ice_dis_msix(pf); if (pf->irq_tracker) { devm_kfree(&pf->pdev->dev, pf->irq_tracker); @@ -2230,10 +2487,7 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf) { int vectors; - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) - vectors = ice_ena_msix_range(pf); - else - return -ENODEV; + vectors = ice_ena_msix_range(pf); if (vectors < 0) return vectors; @@ -2256,6 +2510,163 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf) } /** + * ice_log_pkg_init - log result of DDP package load + * @hw: pointer to hardware info + * @status: status of package load + */ +static void +ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status) +{ + struct ice_pf *pf = (struct ice_pf *)hw->back; + struct device *dev = &pf->pdev->dev; + + switch (*status) { + case ICE_SUCCESS: + /* The package download AdminQ command returned success because + * this download succeeded or ICE_ERR_AQ_NO_WORK since there is + * already a package loaded on the device. 
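/* The "same package already on the device" test spelled out below
 * reduces to a single predicate; sketched here as a hypothetical
 * helper (not part of the driver):
 */
static bool pkg_on_device_matches_file(struct ice_hw *hw)
{
        return hw->pkg_ver.major == hw->active_pkg_ver.major &&
               hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
               hw->pkg_ver.update == hw->active_pkg_ver.update &&
               hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
               !memcmp(hw->pkg_name, hw->active_pkg_name,
                       sizeof(hw->pkg_name));
}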
+ */ + if (hw->pkg_ver.major == hw->active_pkg_ver.major && + hw->pkg_ver.minor == hw->active_pkg_ver.minor && + hw->pkg_ver.update == hw->active_pkg_ver.update && + hw->pkg_ver.draft == hw->active_pkg_ver.draft && + !memcmp(hw->pkg_name, hw->active_pkg_name, + sizeof(hw->pkg_name))) { + if (hw->pkg_dwnld_status == ICE_AQ_RC_EEXIST) + dev_info(dev, + "DDP package already present on device: %s version %d.%d.%d.%d\n", + hw->active_pkg_name, + hw->active_pkg_ver.major, + hw->active_pkg_ver.minor, + hw->active_pkg_ver.update, + hw->active_pkg_ver.draft); + else + dev_info(dev, + "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n", + hw->active_pkg_name, + hw->active_pkg_ver.major, + hw->active_pkg_ver.minor, + hw->active_pkg_ver.update, + hw->active_pkg_ver.draft); + } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ || + hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) { + dev_err(dev, + "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", + hw->active_pkg_name, + hw->active_pkg_ver.major, + hw->active_pkg_ver.minor, + ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); + *status = ICE_ERR_NOT_SUPPORTED; + } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && + hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) { + dev_info(dev, + "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n", + hw->active_pkg_name, + hw->active_pkg_ver.major, + hw->active_pkg_ver.minor, + hw->active_pkg_ver.update, + hw->active_pkg_ver.draft, + hw->pkg_name, + hw->pkg_ver.major, + hw->pkg_ver.minor, + hw->pkg_ver.update, + hw->pkg_ver.draft); + } else { + dev_err(dev, + "An unknown error occurred when loading the DDP package, please reboot the system. If the problem persists, update the NVM. Entering Safe Mode.\n"); + *status = ICE_ERR_NOT_SUPPORTED; + } + break; + case ICE_ERR_BUF_TOO_SHORT: + /* fall-through */ + case ICE_ERR_CFG: + dev_err(dev, + "The DDP package file is invalid. Entering Safe Mode.\n"); + break; + case ICE_ERR_NOT_SUPPORTED: + /* Package File version not supported */ + if (hw->pkg_ver.major > ICE_PKG_SUPP_VER_MAJ || + (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && + hw->pkg_ver.minor > ICE_PKG_SUPP_VER_MNR)) + dev_err(dev, + "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n"); + else if (hw->pkg_ver.major < ICE_PKG_SUPP_VER_MAJ || + (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && + hw->pkg_ver.minor < ICE_PKG_SUPP_VER_MNR)) + dev_err(dev, + "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n", + ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); + break; + case ICE_ERR_AQ_ERROR: + switch (hw->adminq.sq_last_status) { + case ICE_AQ_RC_ENOSEC: + case ICE_AQ_RC_EBADSIG: + dev_err(dev, + "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n"); + return; + case ICE_AQ_RC_ESVN: + dev_err(dev, + "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. 
Entering Safe Mode.\n"); + return; + case ICE_AQ_RC_EBADMAN: + case ICE_AQ_RC_EBADBUF: + dev_err(dev, + "An error occurred on the device while loading the DDP package. The device will be reset.\n"); + return; + default: + break; + } + /* fall-through */ + default: + dev_err(dev, + "An unknown error (%d) occurred when loading the DDP package. Entering Safe Mode.\n", + *status); + break; + } +} + +/** + * ice_load_pkg - load/reload the DDP Package file + * @firmware: firmware structure when firmware requested or NULL for reload + * @pf: pointer to the PF instance + * + * Called on probe and post CORER/GLOBR rebuild to load DDP Package and + * initialize HW tables. + */ +static void +ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf) +{ + enum ice_status status = ICE_ERR_PARAM; + struct device *dev = &pf->pdev->dev; + struct ice_hw *hw = &pf->hw; + + /* Load DDP Package */ + if (firmware && !hw->pkg_copy) { + status = ice_copy_and_init_pkg(hw, firmware->data, + firmware->size); + ice_log_pkg_init(hw, &status); + } else if (!firmware && hw->pkg_copy) { + /* Reload package during rebuild after CORER/GLOBR reset */ + status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size); + ice_log_pkg_init(hw, &status); + } else { + dev_err(dev, + "The DDP package file failed to load. Entering Safe Mode.\n"); + } + + if (status) { + /* Safe Mode */ + clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags); + return; + } + + /* Successful download package is the precondition for advanced + * features, hence setting the ICE_FLAG_ADV_FEATURES flag + */ + set_bit(ICE_FLAG_ADV_FEATURES, pf->flags); +} + +/** * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines * @pf: pointer to the PF structure * @@ -2272,6 +2683,105 @@ static void ice_verify_cacheline_size(struct ice_pf *pf) } /** + * ice_send_version - update firmware with driver version + * @pf: PF struct + * + * Returns ICE_SUCCESS on success, else error code + */ +static enum ice_status ice_send_version(struct ice_pf *pf) +{ + struct ice_driver_ver dv; + + dv.major_ver = DRV_VERSION_MAJOR; + dv.minor_ver = DRV_VERSION_MINOR; + dv.build_ver = DRV_VERSION_BUILD; + dv.subbuild_ver = 0; + strscpy((char *)dv.driver_string, DRV_VERSION, + sizeof(dv.driver_string)); + return ice_aq_send_driver_ver(&pf->hw, &dv, NULL); +} + +/** + * ice_get_opt_fw_name - return optional firmware file name or NULL + * @pf: pointer to the PF instance + */ +static char *ice_get_opt_fw_name(struct ice_pf *pf) +{ + /* Optional firmware name same as default with additional dash + * followed by a EUI-64 identifier (PCIe Device Serial Number) + */ + struct pci_dev *pdev = pf->pdev; + char *opt_fw_filename = NULL; + u32 dword; + u8 dsn[8]; + int pos; + + /* Determine the name of the optional file using the DSN (two + * dwords following the start of the DSN Capability). 
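/* Worked example of the name built below, as a standalone demo with a
 * made-up DSN. The two config dwords are little-endian, so read-backs
 * of 0x44332211 then 0x88776655 give dsn[0..7] = 11 22 33 44 55 66 77
 * 88, and the bytes are printed most-significant first:
 */
#include <stdio.h>

int main(void)
{
        unsigned char dsn[8] = { 0x11, 0x22, 0x33, 0x44,
                                 0x55, 0x66, 0x77, 0x88 };
        char name[64];

        snprintf(name, sizeof(name),
                 "intel/ice/ddp/ice-%02x%02x%02x%02x%02x%02x%02x%02x.pkg",
                 dsn[7], dsn[6], dsn[5], dsn[4],
                 dsn[3], dsn[2], dsn[1], dsn[0]);
        printf("%s\n", name);   /* intel/ice/ddp/ice-8877665544332211.pkg */
        return 0;
}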
+ */ + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN); + if (pos) { + opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL); + if (!opt_fw_filename) + return NULL; + + pci_read_config_dword(pdev, pos + 4, &dword); + put_unaligned_le32(dword, &dsn[0]); + pci_read_config_dword(pdev, pos + 8, &dword); + put_unaligned_le32(dword, &dsn[4]); + snprintf(opt_fw_filename, NAME_MAX, + "%sice-%02x%02x%02x%02x%02x%02x%02x%02x.pkg", + ICE_DDP_PKG_PATH, + dsn[7], dsn[6], dsn[5], dsn[4], + dsn[3], dsn[2], dsn[1], dsn[0]); + } + + return opt_fw_filename; +} + +/** + * ice_request_fw - Device initialization routine + * @pf: pointer to the PF instance + */ +static void ice_request_fw(struct ice_pf *pf) +{ + char *opt_fw_filename = ice_get_opt_fw_name(pf); + const struct firmware *firmware = NULL; + struct device *dev = &pf->pdev->dev; + int err = 0; + + /* optional device-specific DDP (if present) overrides the default DDP + * package file. kernel logs a debug message if the file doesn't exist, + * and warning messages for other errors. + */ + if (opt_fw_filename) { + err = firmware_request_nowarn(&firmware, opt_fw_filename, dev); + if (err) { + kfree(opt_fw_filename); + goto dflt_pkg_load; + } + + /* request for firmware was successful. Download to device */ + ice_load_pkg(firmware, pf); + kfree(opt_fw_filename); + release_firmware(firmware); + return; + } + +dflt_pkg_load: + err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev); + if (err) { + dev_err(dev, + "The DDP package file was not found or could not be read. Entering Safe Mode\n"); + return; + } + + /* request for firmware was successful. Download to device */ + ice_load_pkg(firmware, pf); + release_firmware(firmware); +} + +/** * ice_probe - Device initialization routine * @pdev: PCI device information struct * @ent: entry in ice_pci_tbl @@ -2345,22 +2855,33 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) goto err_exit_unroll; } - dev_info(dev, "firmware %d.%d.%05d api %d.%d\n", - hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build, - hw->api_maj_ver, hw->api_min_ver); + dev_info(dev, "firmware %d.%d.%d api %d.%d.%d nvm %s build 0x%08x\n", + hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch, + hw->api_maj_ver, hw->api_min_ver, hw->api_patch, + ice_nvm_version_str(hw), hw->fw_build); - ice_init_pf(pf); + ice_request_fw(pf); - err = ice_init_pf_dcb(pf, false); - if (err) { - clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); - clear_bit(ICE_FLAG_DCB_ENA, pf->flags); - - /* do not fail overall init if DCB init fails */ - err = 0; + /* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be + * set in pf->state, which will cause ice_is_safe_mode to return + * true + */ + if (ice_is_safe_mode(pf)) { + dev_err(dev, + "Package download failed. Advanced features disabled - Device now in Safe Mode\n"); + /* we already got function/device capabilities but these don't + * reflect what the driver needs to do in safe mode. Instead of + * adding conditional logic everywhere to ignore these + * device/function capabilities, override them. + */ + ice_set_safe_mode_caps(hw); } - ice_determine_q_usage(pf); + err = ice_init_pf(pf); + if (err) { + dev_err(dev, "ice_init_pf failed: %d\n", err); + goto err_init_pf_unroll; + } pf->num_alloc_vsi = hw->func_caps.guar_num_vsi; if (!pf->num_alloc_vsi) { @@ -2390,12 +2911,10 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) * the misc functionality and queue processing is combined in * the same vector and that gets setup at open. 
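/* On the ice_is_safe_mode() checks used throughout this patch: given
 * the comment above that a failed ice_request_fw() leaves
 * ICE_FLAG_ADV_FEATURES unset and "will cause ice_is_safe_mode to
 * return true", the helper presumably reduces to testing that one
 * flag -- sketched here, not quoted from the driver:
 */
static inline bool is_safe_mode_sketch(struct ice_pf *pf)
{
        return !test_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
}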
*/ - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { - err = ice_req_irq_msix_misc(pf); - if (err) { - dev_err(dev, "setup of misc vector failed: %d\n", err); - goto err_init_interrupt_unroll; - } + err = ice_req_irq_msix_misc(pf); + if (err) { + dev_err(dev, "setup of misc vector failed: %d\n", err); + goto err_init_interrupt_unroll; } /* create switch struct for the switch element created by FW on boot */ @@ -2423,6 +2942,15 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) clear_bit(__ICE_SERVICE_DIS, pf->state); + /* tell the firmware we are up */ + err = ice_send_version(pf); + if (err) { + dev_err(dev, + "probe failed sending driver version %s. error: %d\n", + ice_drv_ver, err); + goto err_alloc_sw_unroll; + } + /* since everything is good, start the service timer */ mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); @@ -2434,6 +2962,20 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) ice_verify_cacheline_size(pf); + /* If no DDP driven features have to be setup, return here */ + if (ice_is_safe_mode(pf)) + return 0; + + /* initialize DDP driven features */ + + /* Note: DCB init failure is non-fatal to load */ + if (ice_init_pf_dcb(pf, false)) { + clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); + clear_bit(ICE_FLAG_DCB_ENA, pf->flags); + } else { + ice_cfg_lldp_mib_change(&pf->hw, true); + } + return 0; err_alloc_sw_unroll: @@ -2483,9 +3025,14 @@ static void ice_remove(struct pci_dev *pdev) continue; ice_vsi_free_q_vectors(pf->vsi[i]); } - ice_clear_interrupt_scheme(pf); ice_deinit_pf(pf); ice_deinit_hw(&pf->hw); + ice_clear_interrupt_scheme(pf); + /* Issue a PFR as part of the prescribed driver unload flow. Do not + * do it via ice_schedule_reset() since there is no need to rebuild + * and the service task is already stopped. + */ + ice_reset(&pf->hw, ICE_RESET_PFR); pci_disable_pcie_error_reporting(pdev); } @@ -2711,10 +3258,8 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi) struct ice_hw *hw = &pf->hw; struct sockaddr *addr = pi; enum ice_status status; - LIST_HEAD(a_mac_list); - LIST_HEAD(r_mac_list); u8 flags = 0; - int err; + int err = 0; u8 *mac; mac = (u8 *)addr->sa_data; @@ -2737,42 +3282,23 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi) /* When we change the MAC address we also have to change the MAC address * based filter rules that were created previously for the old MAC * address. So first, we remove the old filter rule using ice_remove_mac - * and then create a new filter rule using ice_add_mac. Note that for - * both these operations, we first need to form a "list" of MAC - * addresses (even though in this case, we have only 1 MAC address to be - * added/removed) and this done using ice_add_mac_to_list. Depending on - * the ensuing operation this "list" of MAC addresses is either to be - * added or removed from the filter. + * and then create a new filter rule using ice_add_mac via + * ice_vsi_cfg_mac_fltr function call for both add and/or remove + * filters. 
*/ - err = ice_add_mac_to_list(vsi, &r_mac_list, netdev->dev_addr); - if (err) { - err = -EADDRNOTAVAIL; - goto free_lists; - } - - status = ice_remove_mac(hw, &r_mac_list); + status = ice_vsi_cfg_mac_fltr(vsi, netdev->dev_addr, false); if (status) { err = -EADDRNOTAVAIL; - goto free_lists; + goto err_update_filters; } - err = ice_add_mac_to_list(vsi, &a_mac_list, mac); - if (err) { - err = -EADDRNOTAVAIL; - goto free_lists; - } - - status = ice_add_mac(hw, &a_mac_list); + status = ice_vsi_cfg_mac_fltr(vsi, mac, true); if (status) { err = -EADDRNOTAVAIL; - goto free_lists; + goto err_update_filters; } -free_lists: - /* free list entries */ - ice_free_fltr_list(&pf->pdev->dev, &r_mac_list); - ice_free_fltr_list(&pf->pdev->dev, &a_mac_list); - +err_update_filters: if (err) { netdev_err(netdev, "can't set MAC %pM. filter update failed\n", mac); @@ -2788,8 +3314,8 @@ free_lists: flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL; status = ice_aq_manage_mac_write(hw, mac, flags, NULL); if (status) { - netdev_err(netdev, "can't set MAC %pM. write to firmware failed.\n", - mac); + netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n", + mac, status); } return 0; } @@ -2902,6 +3428,13 @@ ice_set_features(struct net_device *netdev, netdev_features_t features) struct ice_vsi *vsi = np->vsi; int ret = 0; + /* Don't set any netdev advanced features with device in Safe Mode */ + if (ice_is_safe_mode(vsi->back)) { + dev_err(&vsi->back->pdev->dev, + "Device is in Safe Mode - not enabling advanced netdev features\n"); + return ret; + } + /* Multiple features can be changed in one call so keep features in * separate if/else statements to guarantee each feature is checked */ @@ -3008,10 +3541,7 @@ static int ice_up_complete(struct ice_vsi *vsi) struct ice_pf *pf = vsi->back; int err; - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) - ice_vsi_cfg_msix(vsi); - else - return -ENOTSUPP; + ice_vsi_cfg_msix(vsi); /* Enable only Rx rings, Tx rings were enabled by the FW when the * Tx queue group list was configured and the context bits were @@ -3132,7 +3662,7 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) * ice_update_vsi_stats - Update VSI stats counters * @vsi: the VSI to be updated */ -static void ice_update_vsi_stats(struct ice_vsi *vsi) +void ice_update_vsi_stats(struct ice_vsi *vsi) { struct rtnl_link_stats64 *cur_ns = &vsi->net_stats; struct ice_eth_stats *cur_es = &vsi->eth_stats; @@ -3159,6 +3689,8 @@ static void ice_update_vsi_stats(struct ice_vsi *vsi) cur_ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes; cur_ns->rx_length_errors = pf->stats.rx_len_errors; + /* record drops from the port level */ + cur_ns->rx_missed_errors = pf->stats.eth.rx_discards; } } @@ -3166,149 +3698,139 @@ static void ice_update_vsi_stats(struct ice_vsi *vsi) * ice_update_pf_stats - Update PF port stats counters * @pf: PF whose stats needs to be updated */ -static void ice_update_pf_stats(struct ice_pf *pf) +void ice_update_pf_stats(struct ice_pf *pf) { struct ice_hw_port_stats *prev_ps, *cur_ps; struct ice_hw *hw = &pf->hw; - u8 pf_id; + u8 port; + port = hw->port_info->lport; prev_ps = &pf->stats_prev; cur_ps = &pf->stats; - pf_id = hw->pf_id; - ice_stat_update40(hw, GLPRT_GORCH(pf_id), GLPRT_GORCL(pf_id), - pf->stat_prev_loaded, &prev_ps->eth.rx_bytes, + ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded, + &prev_ps->eth.rx_bytes, &cur_ps->eth.rx_bytes); - ice_stat_update40(hw, GLPRT_UPRCH(pf_id), GLPRT_UPRCL(pf_id), - pf->stat_prev_loaded, &prev_ps->eth.rx_unicast, + 
ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded, + &prev_ps->eth.rx_unicast, &cur_ps->eth.rx_unicast); - ice_stat_update40(hw, GLPRT_MPRCH(pf_id), GLPRT_MPRCL(pf_id), - pf->stat_prev_loaded, &prev_ps->eth.rx_multicast, + ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded, + &prev_ps->eth.rx_multicast, &cur_ps->eth.rx_multicast); - ice_stat_update40(hw, GLPRT_BPRCH(pf_id), GLPRT_BPRCL(pf_id), - pf->stat_prev_loaded, &prev_ps->eth.rx_broadcast, + ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded, + &prev_ps->eth.rx_broadcast, &cur_ps->eth.rx_broadcast); - ice_stat_update40(hw, GLPRT_GOTCH(pf_id), GLPRT_GOTCL(pf_id), - pf->stat_prev_loaded, &prev_ps->eth.tx_bytes, + ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded, + &prev_ps->eth.rx_discards, + &cur_ps->eth.rx_discards); + + ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded, + &prev_ps->eth.tx_bytes, &cur_ps->eth.tx_bytes); - ice_stat_update40(hw, GLPRT_UPTCH(pf_id), GLPRT_UPTCL(pf_id), - pf->stat_prev_loaded, &prev_ps->eth.tx_unicast, + ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded, + &prev_ps->eth.tx_unicast, &cur_ps->eth.tx_unicast); - ice_stat_update40(hw, GLPRT_MPTCH(pf_id), GLPRT_MPTCL(pf_id), - pf->stat_prev_loaded, &prev_ps->eth.tx_multicast, + ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded, + &prev_ps->eth.tx_multicast, &cur_ps->eth.tx_multicast); - ice_stat_update40(hw, GLPRT_BPTCH(pf_id), GLPRT_BPTCL(pf_id), - pf->stat_prev_loaded, &prev_ps->eth.tx_broadcast, + ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded, + &prev_ps->eth.tx_broadcast, &cur_ps->eth.tx_broadcast); - ice_stat_update32(hw, GLPRT_TDOLD(pf_id), pf->stat_prev_loaded, + ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded, &prev_ps->tx_dropped_link_down, &cur_ps->tx_dropped_link_down); - ice_stat_update40(hw, GLPRT_PRC64H(pf_id), GLPRT_PRC64L(pf_id), - pf->stat_prev_loaded, &prev_ps->rx_size_64, - &cur_ps->rx_size_64); + ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded, + &prev_ps->rx_size_64, &cur_ps->rx_size_64); - ice_stat_update40(hw, GLPRT_PRC127H(pf_id), GLPRT_PRC127L(pf_id), - pf->stat_prev_loaded, &prev_ps->rx_size_127, - &cur_ps->rx_size_127); + ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded, + &prev_ps->rx_size_127, &cur_ps->rx_size_127); - ice_stat_update40(hw, GLPRT_PRC255H(pf_id), GLPRT_PRC255L(pf_id), - pf->stat_prev_loaded, &prev_ps->rx_size_255, - &cur_ps->rx_size_255); + ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded, + &prev_ps->rx_size_255, &cur_ps->rx_size_255); - ice_stat_update40(hw, GLPRT_PRC511H(pf_id), GLPRT_PRC511L(pf_id), - pf->stat_prev_loaded, &prev_ps->rx_size_511, - &cur_ps->rx_size_511); + ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded, + &prev_ps->rx_size_511, &cur_ps->rx_size_511); - ice_stat_update40(hw, GLPRT_PRC1023H(pf_id), - GLPRT_PRC1023L(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded, &prev_ps->rx_size_1023, &cur_ps->rx_size_1023); - ice_stat_update40(hw, GLPRT_PRC1522H(pf_id), - GLPRT_PRC1522L(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded, &prev_ps->rx_size_1522, &cur_ps->rx_size_1522); - ice_stat_update40(hw, GLPRT_PRC9522H(pf_id), - GLPRT_PRC9522L(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded, &prev_ps->rx_size_big, &cur_ps->rx_size_big); - ice_stat_update40(hw, GLPRT_PTC64H(pf_id), GLPRT_PTC64L(pf_id), 
- pf->stat_prev_loaded, &prev_ps->tx_size_64, - &cur_ps->tx_size_64); + ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded, + &prev_ps->tx_size_64, &cur_ps->tx_size_64); - ice_stat_update40(hw, GLPRT_PTC127H(pf_id), GLPRT_PTC127L(pf_id), - pf->stat_prev_loaded, &prev_ps->tx_size_127, - &cur_ps->tx_size_127); + ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded, + &prev_ps->tx_size_127, &cur_ps->tx_size_127); - ice_stat_update40(hw, GLPRT_PTC255H(pf_id), GLPRT_PTC255L(pf_id), - pf->stat_prev_loaded, &prev_ps->tx_size_255, - &cur_ps->tx_size_255); + ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded, + &prev_ps->tx_size_255, &cur_ps->tx_size_255); - ice_stat_update40(hw, GLPRT_PTC511H(pf_id), GLPRT_PTC511L(pf_id), - pf->stat_prev_loaded, &prev_ps->tx_size_511, - &cur_ps->tx_size_511); + ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded, + &prev_ps->tx_size_511, &cur_ps->tx_size_511); - ice_stat_update40(hw, GLPRT_PTC1023H(pf_id), - GLPRT_PTC1023L(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded, &prev_ps->tx_size_1023, &cur_ps->tx_size_1023); - ice_stat_update40(hw, GLPRT_PTC1522H(pf_id), - GLPRT_PTC1522L(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded, &prev_ps->tx_size_1522, &cur_ps->tx_size_1522); - ice_stat_update40(hw, GLPRT_PTC9522H(pf_id), - GLPRT_PTC9522L(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded, &prev_ps->tx_size_big, &cur_ps->tx_size_big); - ice_stat_update32(hw, GLPRT_LXONRXC(pf_id), pf->stat_prev_loaded, + ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded, &prev_ps->link_xon_rx, &cur_ps->link_xon_rx); - ice_stat_update32(hw, GLPRT_LXOFFRXC(pf_id), pf->stat_prev_loaded, + ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded, &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx); - ice_stat_update32(hw, GLPRT_LXONTXC(pf_id), pf->stat_prev_loaded, + ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded, &prev_ps->link_xon_tx, &cur_ps->link_xon_tx); - ice_stat_update32(hw, GLPRT_LXOFFTXC(pf_id), pf->stat_prev_loaded, + ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded, &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx); ice_update_dcb_stats(pf); - ice_stat_update32(hw, GLPRT_CRCERRS(pf_id), pf->stat_prev_loaded, + ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded, &prev_ps->crc_errors, &cur_ps->crc_errors); - ice_stat_update32(hw, GLPRT_ILLERRC(pf_id), pf->stat_prev_loaded, + ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded, &prev_ps->illegal_bytes, &cur_ps->illegal_bytes); - ice_stat_update32(hw, GLPRT_MLFC(pf_id), pf->stat_prev_loaded, + ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded, &prev_ps->mac_local_faults, &cur_ps->mac_local_faults); - ice_stat_update32(hw, GLPRT_MRFC(pf_id), pf->stat_prev_loaded, + ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded, &prev_ps->mac_remote_faults, &cur_ps->mac_remote_faults); - ice_stat_update32(hw, GLPRT_RLEC(pf_id), pf->stat_prev_loaded, + ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded, &prev_ps->rx_len_errors, &cur_ps->rx_len_errors); - ice_stat_update32(hw, GLPRT_RUC(pf_id), pf->stat_prev_loaded, + ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded, &prev_ps->rx_undersize, &cur_ps->rx_undersize); - ice_stat_update32(hw, GLPRT_RFC(pf_id), pf->stat_prev_loaded, + ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded, 
&prev_ps->rx_fragments, &cur_ps->rx_fragments); - ice_stat_update32(hw, GLPRT_ROC(pf_id), pf->stat_prev_loaded, + ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded, &prev_ps->rx_oversize, &cur_ps->rx_oversize); - ice_stat_update32(hw, GLPRT_RJC(pf_id), pf->stat_prev_loaded, + ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded, &prev_ps->rx_jabber, &cur_ps->rx_jabber); pf->stat_prev_loaded = true; @@ -3328,12 +3850,16 @@ void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) vsi_stats = &vsi->net_stats; - if (test_bit(__ICE_DOWN, vsi->state) || !vsi->num_txq || !vsi->num_rxq) + if (!vsi->num_txq || !vsi->num_rxq) return; + /* netdev packet/byte stats come from ring counter. These are obtained * by summing up ring counters (done by ice_update_vsi_ring_stats). + * But, only call the update routine and read the registers if VSI is + * not down. */ - ice_update_vsi_ring_stats(vsi); + if (!test_bit(__ICE_DOWN, vsi->state)) + ice_update_vsi_ring_stats(vsi); stats->tx_packets = vsi_stats->tx_packets; stats->tx_bytes = vsi_stats->tx_bytes; stats->rx_packets = vsi_stats->rx_packets; @@ -3372,85 +3898,6 @@ static void ice_napi_disable_all(struct ice_vsi *vsi) } /** - * ice_force_phys_link_state - Force the physical link state - * @vsi: VSI to force the physical link state to up/down - * @link_up: true/false indicates to set the physical link to up/down - * - * Force the physical link state by getting the current PHY capabilities from - * hardware and setting the PHY config based on the determined capabilities. If - * link changes a link event will be triggered because both the Enable Automatic - * Link Update and LESM Enable bits are set when setting the PHY capabilities. - * - * Returns 0 on success, negative on failure - */ -static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up) -{ - struct ice_aqc_get_phy_caps_data *pcaps; - struct ice_aqc_set_phy_cfg_data *cfg; - struct ice_port_info *pi; - struct device *dev; - int retcode; - - if (!vsi || !vsi->port_info || !vsi->back) - return -EINVAL; - if (vsi->type != ICE_VSI_PF) - return 0; - - dev = &vsi->back->pdev->dev; - - pi = vsi->port_info; - - pcaps = devm_kzalloc(dev, sizeof(*pcaps), GFP_KERNEL); - if (!pcaps) - return -ENOMEM; - - retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, - NULL); - if (retcode) { - dev_err(dev, - "Failed to get phy capabilities, VSI %d error %d\n", - vsi->vsi_num, retcode); - retcode = -EIO; - goto out; - } - - /* No change in link */ - if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) && - link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP)) - goto out; - - cfg = devm_kzalloc(dev, sizeof(*cfg), GFP_KERNEL); - if (!cfg) { - retcode = -ENOMEM; - goto out; - } - - cfg->phy_type_low = pcaps->phy_type_low; - cfg->phy_type_high = pcaps->phy_type_high; - cfg->caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; - cfg->low_power_ctrl = pcaps->low_power_ctrl; - cfg->eee_cap = pcaps->eee_cap; - cfg->eeer_value = pcaps->eeer_value; - cfg->link_fec_opt = pcaps->link_fec_options; - if (link_up) - cfg->caps |= ICE_AQ_PHY_ENA_LINK; - else - cfg->caps &= ~ICE_AQ_PHY_ENA_LINK; - - retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi->lport, cfg, NULL); - if (retcode) { - dev_err(dev, "Failed to set phy config, VSI %d error %d\n", - vsi->vsi_num, retcode); - retcode = -EIO; - } - - devm_kfree(dev, cfg); -out: - devm_kfree(dev, pcaps); - return retcode; -} - -/** * ice_down - Shutdown the connection * @vsi: The VSI being stopped */ @@ -3559,24 +4006,6 
@@ int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) } /** - * ice_vsi_req_irq - Request IRQ from the OS - * @vsi: The VSI IRQ is being requested for - * @basename: name for the vector - * - * Return 0 on success and a negative value on error - */ -static int ice_vsi_req_irq(struct ice_vsi *vsi, char *basename) -{ - struct ice_pf *pf = vsi->back; - int err = -EINVAL; - - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) - err = ice_vsi_req_irq_msix(vsi, basename); - - return err; -} - -/** * ice_vsi_open - Called when a network interface is made active * @vsi: the VSI to open * @@ -3605,7 +4034,7 @@ static int ice_vsi_open(struct ice_vsi *vsi) snprintf(int_name, sizeof(int_name) - 1, "%s-%s", dev_driver_string(&pf->pdev->dev), vsi->netdev->name); - err = ice_vsi_req_irq(vsi, int_name); + err = ice_vsi_req_irq_msix(vsi, int_name); if (err) goto err_setup_rx; @@ -3669,23 +4098,19 @@ static int ice_ena_vsi(struct ice_vsi *vsi, bool locked) int err = 0; if (!test_bit(__ICE_NEEDS_RESTART, vsi->state)) - return err; + return 0; clear_bit(__ICE_NEEDS_RESTART, vsi->state); if (vsi->netdev && vsi->type == ICE_VSI_PF) { - struct net_device *netd = vsi->netdev; - if (netif_running(vsi->netdev)) { - if (locked) { - err = netd->netdev_ops->ndo_open(netd); - } else { + if (!locked) rtnl_lock(); - err = netd->netdev_ops->ndo_open(netd); + + err = ice_open(vsi->netdev); + + if (!locked) rtnl_unlock(); - } - } else { - err = ice_vsi_open(vsi); } } @@ -3699,9 +4124,6 @@ static int ice_ena_vsi(struct ice_vsi *vsi, bool locked) */ #ifdef CONFIG_DCB int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked) -#else -static int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked) -#endif /* CONFIG_DCB */ { int v; @@ -3712,91 +4134,107 @@ static int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked) return 0; } +#endif /* CONFIG_DCB */ /** - * ice_vsi_rebuild_all - rebuild all VSIs in PF - * @pf: the PF + * ice_vsi_rebuild_by_type - Rebuild VSI of a given type + * @pf: pointer to the PF instance + * @type: VSI type to rebuild + * + * Iterates through the pf->vsi array and rebuilds VSIs of the requested type */ -static int ice_vsi_rebuild_all(struct ice_pf *pf) +static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type) { - int i; + enum ice_status status; + int i, err; - /* loop through pf->vsi array and reinit the VSI if found */ ice_for_each_vsi(pf, i) { - int err; + struct ice_vsi *vsi = pf->vsi[i]; - if (!pf->vsi[i]) + if (!vsi || vsi->type != type) continue; - err = ice_vsi_rebuild(pf->vsi[i]); + /* rebuild the VSI */ + err = ice_vsi_rebuild(vsi); if (err) { dev_err(&pf->pdev->dev, - "VSI at index %d rebuild failed\n", - pf->vsi[i]->idx); + "rebuild VSI failed, err %d, VSI index %d, type %d\n", + err, vsi->idx, type); return err; } - dev_info(&pf->pdev->dev, - "VSI at index %d rebuilt. vsi_num = 0x%x\n", - pf->vsi[i]->idx, pf->vsi[i]->vsi_num); + /* replay filters for the VSI */ + status = ice_replay_vsi(&pf->hw, vsi->idx); + if (status) { + dev_err(&pf->pdev->dev, + "replay VSI failed, status %d, VSI index %d, type %d\n", + status, vsi->idx, type); + return -EIO; + } + + /* Re-map HW VSI number, using VSI handle that has been + * previously validated in ice_replay_vsi() call above + */ + vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); + + /* enable the VSI */ + err = ice_ena_vsi(vsi, false); + if (err) { + dev_err(&pf->pdev->dev, + "enable VSI failed, err %d, VSI index %d, type %d\n", + err, vsi->idx, type); + return err; + } + + dev_info(&pf->pdev->dev, "VSI rebuilt. 
VSI index %d, type %d\n", + vsi->idx, type); } return 0; } /** - * ice_vsi_replay_all - replay all VSIs configuration in the PF - * @pf: the PF + * ice_update_pf_netdev_link - Update PF netdev link status + * @pf: pointer to the PF instance */ -static int ice_vsi_replay_all(struct ice_pf *pf) +static void ice_update_pf_netdev_link(struct ice_pf *pf) { - struct ice_hw *hw = &pf->hw; - enum ice_status ret; + bool link_up; int i; - /* loop through pf->vsi array and replay the VSI if found */ ice_for_each_vsi(pf, i) { - if (!pf->vsi[i]) - continue; + struct ice_vsi *vsi = pf->vsi[i]; - ret = ice_replay_vsi(hw, pf->vsi[i]->idx); - if (ret) { - dev_err(&pf->pdev->dev, - "VSI at index %d replay failed %d\n", - pf->vsi[i]->idx, ret); - return -EIO; - } - - /* Re-map HW VSI number, using VSI handle that has been - * previously validated in ice_replay_vsi() call above - */ - pf->vsi[i]->vsi_num = ice_get_hw_vsi_num(hw, pf->vsi[i]->idx); + if (!vsi || vsi->type != ICE_VSI_PF) + return; - dev_info(&pf->pdev->dev, - "VSI at index %d filter replayed successfully - vsi_num %i\n", - pf->vsi[i]->idx, pf->vsi[i]->vsi_num); + ice_get_link_status(pf->vsi[i]->port_info, &link_up); + if (link_up) { + netif_carrier_on(pf->vsi[i]->netdev); + netif_tx_wake_all_queues(pf->vsi[i]->netdev); + } else { + netif_carrier_off(pf->vsi[i]->netdev); + netif_tx_stop_all_queues(pf->vsi[i]->netdev); + } } - - /* Clean up replay filter after successful re-configuration */ - ice_replay_post(hw); - return 0; } /** * ice_rebuild - rebuild after reset * @pf: PF to rebuild + * @reset_type: type of reset */ -static void ice_rebuild(struct ice_pf *pf) +static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) { struct device *dev = &pf->pdev->dev; struct ice_hw *hw = &pf->hw; enum ice_status ret; - int err, i; + int err; if (test_bit(__ICE_DOWN, pf->state)) goto clear_recovery; - dev_dbg(dev, "rebuilding PF\n"); + dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type); ret = ice_init_all_ctrlq(hw); if (ret) { @@ -3804,6 +4242,16 @@ static void ice_rebuild(struct ice_pf *pf) goto err_init_ctrlq; } + /* if DDP was previously loaded successfully */ + if (!ice_is_safe_mode(pf)) { + /* reload the SW DB of filter tables */ + if (reset_type == ICE_RESET_PFR) + ice_fill_blk_tbls(hw); + else + /* Reload DDP Package after CORER/GLOBR reset */ + ice_load_pkg(NULL, pf); + } + ret = ice_clear_pf_cfg(hw); if (ret) { dev_err(dev, "clear PF configuration failed %d\n", ret); @@ -3822,65 +4270,53 @@ static void ice_rebuild(struct ice_pf *pf) if (err) goto err_sched_init_port; - ice_dcb_rebuild(pf); + err = ice_update_link_info(hw->port_info); + if (err) + dev_err(&pf->pdev->dev, "Get link status error %d\n", err); - err = ice_vsi_rebuild_all(pf); + /* start misc vector */ + err = ice_req_irq_msix_misc(pf); if (err) { - dev_err(dev, "ice_vsi_rebuild_all failed\n"); - goto err_vsi_rebuild; + dev_err(dev, "misc vector setup failed: %d\n", err); + goto err_sched_init_port; } - err = ice_update_link_info(hw->port_info); - if (err) - dev_err(&pf->pdev->dev, "Get link status error %d\n", err); + if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) + ice_dcb_rebuild(pf); - /* Replay all VSIs Configuration, including filters after reset */ - if (ice_vsi_replay_all(pf)) { - dev_err(&pf->pdev->dev, - "error replaying VSI configurations with switch filter rules\n"); + /* rebuild PF VSI */ + err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF); + if (err) { + dev_err(dev, "PF VSI rebuild failed: %d\n", err); goto err_vsi_rebuild; } - /* start misc vector */ 
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { - err = ice_req_irq_msix_misc(pf); + if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) { + err = ice_vsi_rebuild_by_type(pf, ICE_VSI_VF); if (err) { - dev_err(dev, "misc vector setup failed: %d\n", err); + dev_err(dev, "VF VSI rebuild failed: %d\n", err); goto err_vsi_rebuild; } } - /* restart the VSIs that were rebuilt and running before the reset */ - err = ice_pf_ena_all_vsi(pf, false); - if (err) { - dev_err(&pf->pdev->dev, "error enabling VSIs\n"); - /* no need to disable VSIs in tear down path in ice_rebuild() - * since its already taken care in ice_vsi_open() - */ + ice_update_pf_netdev_link(pf); + + /* tell the firmware we are up */ + ret = ice_send_version(pf); + if (ret) { + dev_err(dev, + "Rebuild failed due to error sending driver version: %d\n", + ret); goto err_vsi_rebuild; } - ice_for_each_vsi(pf, i) { - bool link_up; - - if (!pf->vsi[i] || pf->vsi[i]->type != ICE_VSI_PF) - continue; - ice_get_link_status(pf->vsi[i]->port_info, &link_up); - if (link_up) { - netif_carrier_on(pf->vsi[i]->netdev); - netif_tx_wake_all_queues(pf->vsi[i]->netdev); - } else { - netif_carrier_off(pf->vsi[i]->netdev); - netif_tx_stop_all_queues(pf->vsi[i]->netdev); - } - } + ice_replay_post(hw); /* if we get here, reset flow is successful */ clear_bit(__ICE_RESET_FAILED, pf->state); return; err_vsi_rebuild: - ice_vsi_release_all(pf); err_sched_init_port: ice_sched_cleanup_all(hw); err_init_ctrlq: @@ -4244,9 +4680,7 @@ static void ice_tx_timeout(struct net_device *netdev) head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[hung_queue])) & QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S; /* Read interrupt register */ - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) - val = rd32(hw, - GLINT_DYN_CTL(tx_ring->q_vector->reg_idx)); + val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx)); netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n", vsi->vsi_num, hung_queue, tx_ring->next_to_clean, @@ -4295,6 +4729,7 @@ int ice_open(struct net_device *netdev) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; + struct ice_port_info *pi; int err; if (test_bit(__ICE_NEEDS_RESTART, vsi->back->state)) { @@ -4304,13 +4739,33 @@ int ice_open(struct net_device *netdev) netif_carrier_off(netdev); - err = ice_force_phys_link_state(vsi, true); + pi = vsi->port_info; + err = ice_update_link_info(pi); if (err) { - netdev_err(netdev, - "Failed to set physical link up, error %d\n", err); + netdev_err(netdev, "Failed to get link info, error %d\n", + err); return err; } + /* Set PHY if there is media, otherwise, turn off PHY */ + if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { + err = ice_force_phys_link_state(vsi, true); + if (err) { + netdev_err(netdev, + "Failed to set physical link up, error %d\n", + err); + return err; + } + } else { + err = ice_aq_set_link_restart_an(pi, false, NULL); + if (err) { + netdev_err(netdev, "Failed to set PHY state, VSI %d error %d\n", + vsi->vsi_num, err); + return err; + } + set_bit(ICE_FLAG_NO_MEDIA, vsi->back->flags); + } + err = ice_vsi_open(vsi); if (err) netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n", @@ -4388,6 +4843,17 @@ out_rm_features: return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); } +static const struct net_device_ops ice_netdev_safe_mode_ops = { + .ndo_open = ice_open, + .ndo_stop = ice_stop, + .ndo_start_xmit = ice_start_xmit, + .ndo_set_mac_address = ice_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + 
.ndo_change_mtu = ice_change_mtu, + .ndo_get_stats64 = ice_get_stats64, + .ndo_tx_timeout = ice_tx_timeout, +}; + static const struct net_device_ops ice_netdev_ops = { .ndo_open = ice_open, .ndo_stop = ice_stop, diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index 2a232504379d..fc624b73d05d 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -260,33 +260,17 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent, /** * ice_sched_get_first_node - get the first node of the given layer - * @hw: pointer to the HW struct + * @pi: port information structure * @parent: pointer the base node of the subtree * @layer: layer number * * This function retrieves the first node of the given layer from the subtree */ static struct ice_sched_node * -ice_sched_get_first_node(struct ice_hw *hw, struct ice_sched_node *parent, - u8 layer) +ice_sched_get_first_node(struct ice_port_info *pi, + struct ice_sched_node *parent, u8 layer) { - u8 i; - - if (layer < hw->sw_entry_point_layer) - return NULL; - for (i = 0; i < parent->num_children; i++) { - struct ice_sched_node *node = parent->children[i]; - - if (node) { - if (node->tx_sched_layer == layer) - return node; - /* this recursion is intentional, and wouldn't - * go more than 9 calls - */ - return ice_sched_get_first_node(hw, node, layer); - } - } - return NULL; + return pi->sib_head[parent->tc_num][layer]; } /** @@ -300,7 +284,7 @@ struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc) { u8 i; - if (!pi) + if (!pi || !pi->root) return NULL; for (i = 0; i < pi->root->num_children; i++) if (pi->root->children[i]->tc_num == tc) @@ -342,7 +326,7 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node) parent = node->parent; /* root has no parent */ if (parent) { - struct ice_sched_node *p, *tc_node; + struct ice_sched_node *p; /* update the parent */ for (i = 0; i < parent->num_children; i++) @@ -354,16 +338,7 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node) break; } - /* search for previous sibling that points to this node and - * remove the reference - */ - tc_node = ice_sched_get_tc_node(pi, node->tc_num); - if (!tc_node) { - ice_debug(hw, ICE_DBG_SCHED, - "Invalid TC number %d\n", node->tc_num); - goto err_exit; - } - p = ice_sched_get_first_node(hw, tc_node, node->tx_sched_layer); + p = ice_sched_get_first_node(pi, node, node->tx_sched_layer); while (p) { if (p->sibling == node) { p->sibling = node->sibling; @@ -371,8 +346,13 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node) } p = p->sibling; } + + /* update the sibling head if head is getting removed */ + if (pi->sib_head[node->tc_num][node->tx_sched_layer] == node) + pi->sib_head[node->tc_num][node->tx_sched_layer] = + node->sibling; } -err_exit: + /* leaf nodes have no children */ if (node->children) devm_kfree(ice_hw_to_dev(hw), node->children); @@ -743,13 +723,17 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, /* add it to previous node sibling pointer */ /* Note: siblings are not linked across branches */ - prev = ice_sched_get_first_node(hw, tc_node, layer); + prev = ice_sched_get_first_node(pi, tc_node, layer); if (prev && prev != new_node) { while (prev->sibling) prev = prev->sibling; prev->sibling = new_node; } + /* initialize the sibling head */ + if (!pi->sib_head[tc_node->tc_num][layer]) + 
pi->sib_head[tc_node->tc_num][layer] = new_node; + if (i == 0) *first_node_teid = teid; } @@ -1160,7 +1144,7 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc, goto lan_q_exit; /* get the first queue group node from VSI sub-tree */ - qgrp_node = ice_sched_get_first_node(pi->hw, vsi_node, qgrp_layer); + qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer); while (qgrp_node) { /* make sure the qgroup node is part of the VSI subtree */ if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node)) @@ -1191,7 +1175,7 @@ ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node, u8 vsi_layer; vsi_layer = ice_sched_get_vsi_layer(hw); - node = ice_sched_get_first_node(hw, tc_node, vsi_layer); + node = ice_sched_get_first_node(hw->port_info, tc_node, vsi_layer); /* Check whether it already exists */ while (node) { @@ -1316,7 +1300,8 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw, /* If intermediate nodes are reached max children * then add a new one. */ - node = ice_sched_get_first_node(hw, tc_node, (u8)i); + node = ice_sched_get_first_node(hw->port_info, tc_node, + (u8)i); /* scan all the siblings */ while (node) { if (node->num_children < hw->max_children[i]) diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 8271fd651725..1acdd43a2edd 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -1623,12 +1623,13 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id, status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1, ice_aqc_opc_remove_sw_rules, NULL); - if (status) - goto exit; /* Remove a book keeping from the list */ devm_kfree(ice_hw_to_dev(hw), s_rule); + if (status) + goto exit; + list_del(&list_elem->list_entry); devm_kfree(ice_hw_to_dev(hw), list_elem); } @@ -2137,6 +2138,38 @@ out: } /** + * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry + * @hw: pointer to the hardware structure + * @recp_id: lookup type for which the specified rule needs to be searched + * @f_info: rule information + * + * Helper function to search for a unicast rule entry - this is to be used + * to remove a unicast MAC filter that is not shared with other VSIs on the + * PF switch. 
+ * + * Returns pointer to entry storing the rule if found + */ +static struct ice_fltr_mgmt_list_entry * +ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id, + struct ice_fltr_info *f_info) +{ + struct ice_switch_info *sw = hw->switch_info; + struct ice_fltr_mgmt_list_entry *list_itr; + struct list_head *list_head; + + list_head = &sw->recp_list[recp_id].filt_rules; + list_for_each_entry(list_itr, list_head, list_entry) { + if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data, + sizeof(f_info->l_data)) && + f_info->fwd_id.hw_vsi_id == + list_itr->fltr_info.fwd_id.hw_vsi_id && + f_info->flag == list_itr->fltr_info.flag) + return list_itr; + } + return NULL; +} + +/** * ice_remove_mac - remove a MAC address based filter rule * @hw: pointer to the hardware structure * @m_list: list of MAC addresses and forwarding information @@ -2153,15 +2186,39 @@ enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list) { struct ice_fltr_list_entry *list_itr, *tmp; + struct mutex *rule_lock; /* Lock to protect filter rule list */ if (!m_list) return ICE_ERR_PARAM; + rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock; list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) { enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type; + u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0]; + u16 vsi_handle; if (l_type != ICE_SW_LKUP_MAC) return ICE_ERR_PARAM; + + vsi_handle = list_itr->fltr_info.vsi_handle; + if (!ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + + list_itr->fltr_info.fwd_id.hw_vsi_id = + ice_get_hw_vsi_num(hw, vsi_handle); + if (is_unicast_ether_addr(add) && !hw->ucast_shared) { + /* Don't remove the unicast address that belongs to + * another VSI on the switch, since it is not being + * shared... + */ + mutex_lock(rule_lock); + if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC, + &list_itr->fltr_info)) { + mutex_unlock(rule_lock); + return ICE_ERR_DOES_NOT_EXIST; + } + mutex_unlock(rule_lock); + } list_itr->status = ice_remove_rule_internal(hw, ICE_SW_LKUP_MAC, list_itr); diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 3c83230434b6..33dd103035dc 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -95,17 +95,16 @@ void ice_free_tx_ring(struct ice_ring *tx_ring) /** * ice_clean_tx_irq - Reclaim resources after transmit completes - * @vsi: the VSI we care about * @tx_ring: Tx ring to clean * @napi_budget: Used to determine if we are in netpoll * * Returns true if there's any budget left (i.e. 
the clean is finished) */ -static bool -ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring, int napi_budget) +static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget) { unsigned int total_bytes = 0, total_pkts = 0; - unsigned int budget = vsi->work_lmt; + unsigned int budget = ICE_DFLT_IRQ_WORK; + struct ice_vsi *vsi = tx_ring->vsi; s16 i = tx_ring->next_to_clean; struct ice_tx_desc *tx_desc; struct ice_tx_buf *tx_buf; @@ -114,6 +113,8 @@ ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring, int napi_budget) tx_desc = ICE_TX_DESC(tx_ring, i); i -= tx_ring->count; + prefetch(&vsi->state); + do { struct ice_tx_desc *eop_desc = tx_buf->next_to_watch; @@ -206,7 +207,7 @@ ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring, int napi_budget) smp_mb(); if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->q_index) && - !test_bit(__ICE_DOWN, vsi->state)) { + !test_bit(__ICE_DOWN, vsi->state)) { netif_wake_subqueue(tx_ring->netdev, tx_ring->q_index); ++tx_ring->tx_stats.restart_q; @@ -377,18 +378,28 @@ err: */ static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val) { + u16 prev_ntu = rx_ring->next_to_use; + rx_ring->next_to_use = val; /* update next to alloc since we have filled the ring */ rx_ring->next_to_alloc = val; - /* Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). + /* QRX_TAIL will be updated with any tail value, but hardware ignores + * the lower 3 bits. This makes it so we only bump tail on meaningful + * boundaries. Also, this allows us to bump tail on intervals of 8 up to + * the budget depending on the current traffic load. */ - wmb(); - writel(val, rx_ring->tail); + val &= ~0x7; + if (prev_ntu != val) { + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(val, rx_ring->tail); + } } /** @@ -445,7 +456,13 @@ ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi) * @rx_ring: ring to place buffers on * @cleaned_count: number of buffers to replace * - * Returns false if all allocations were successful, true if any fail + * Returns false if all allocations were successful, true if any fail. Returning + * true signals to the caller that we didn't replace cleaned_count buffers and + * there is more work to do. + * + * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx + * buffers. Then bump tail at most one time. Grouping like this lets us avoid + * multiple tail writes per call. 
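 *
 * (Editor's illustration, not part of the original patch; the values are
 * hypothetical.) For the tail masking in ice_release_rx_desc() above: with
 * prev_ntu = 13 and an incoming val = 22, "val &= ~0x7" rounds val down to
 * 16; since 13 != 16, wmb() is issued and 16 is written to the tail
 * register. Had prev_ntu already been 16, the masked value would again be
 * 16 and the MMIO write would be skipped entirely.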
*/ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count) { @@ -462,8 +479,9 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count) bi = &rx_ring->rx_buf[ntu]; do { + /* if we fail here, we have work remaining */ if (!ice_alloc_mapped_page(rx_ring, bi)) - goto no_bufs; + break; /* sync the buffer for use by the device */ dma_sync_single_range_for_device(rx_ring->dev, bi->dma, @@ -494,16 +512,7 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count) if (rx_ring->next_to_use != ntu) ice_release_rx_desc(rx_ring, ntu); - return false; - -no_bufs: - if (rx_ring->next_to_use != ntu) - ice_release_rx_desc(rx_ring, ntu); - - /* make sure to come back via polling to try again after - * allocation failure - */ - return true; + return !!cleaned_count; } /** @@ -599,6 +608,8 @@ ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb, unsigned int truesize = ICE_RXBUF_2048; #endif + if (!size) + return; skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page, rx_buf->page_offset, size, truesize); @@ -654,6 +665,8 @@ ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb, prefetchw(rx_buf->page); *skb = rx_buf->skb; + if (!size) + return rx_buf; /* we are reusing so sync this buffer for CPU use */ dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma, rx_buf->page_offset, size, @@ -737,8 +750,11 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, */ static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf) { - /* hand second half of page back to the ring */ + if (!rx_buf) + return; + if (ice_can_reuse_rx_page(rx_buf)) { + /* hand second half of page back to the ring */ ice_reuse_rx_page(rx_ring, rx_buf); rx_ring->rx_stats.page_reuse_count++; } else { @@ -864,7 +880,7 @@ ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, /** * ice_rx_csum - Indicate in skb if checksum is good - * @vsi: the VSI we care about + * @ring: the ring we care about * @skb: skb currently being received and modified * @rx_desc: the receive descriptor * @ptype: the packet type decoded by hardware @@ -872,7 +888,7 @@ ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, * skb->protocol must be set before this function is called */ static void -ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb, +ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb, union ice_32b_rx_flex_desc *rx_desc, u8 ptype) { struct ice_rx_ptype_decoded decoded; @@ -889,7 +905,7 @@ ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb, skb_checksum_none_assert(skb); /* check if Rx checksum is enabled */ - if (!(vsi->netdev->features & NETIF_F_RXCSUM)) + if (!(ring->netdev->features & NETIF_F_RXCSUM)) return; /* check if HW has decoded the packet and checksum */ @@ -929,7 +945,7 @@ ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb, return; checksum_fail: - vsi->back->hw_csum_rx_error++; + ring->vsi->back->hw_csum_rx_error++; } /** @@ -953,7 +969,7 @@ ice_process_skb_fields(struct ice_ring *rx_ring, /* modifies the skb - consumes the enet header */ skb->protocol = eth_type_trans(skb, rx_ring->netdev); - ice_rx_csum(rx_ring->vsi, skb, rx_desc, ptype); + ice_rx_csum(rx_ring, skb, rx_desc, ptype); } /** @@ -990,7 +1006,7 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_pkts = 0; u16 cleaned_count = ICE_DESC_UNUSED(rx_ring); - bool failure = false; + bool failure; /* start the loop to process Rx packets bounded by 'budget' */ while 
(likely(total_rx_pkts < (unsigned int)budget)) { @@ -1002,13 +1018,6 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) u16 vlan_tag = 0; u8 rx_ptype; - /* return some buffers to hardware, one at a time is too slow */ - if (cleaned_count >= ICE_RX_BUF_WRITE) { - failure = failure || - ice_alloc_rx_bufs(rx_ring, cleaned_count); - cleaned_count = 0; - } - /* get the Rx desc from Rx ring based on 'next_to_clean' */ rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean); @@ -1030,8 +1039,9 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) size = le16_to_cpu(rx_desc->wb.pkt_len) & ICE_RX_FLX_DESC_PKT_LEN_M; + /* retrieve a buffer from the ring */ rx_buf = ice_get_rx_buf(rx_ring, &skb, size); - /* allocate (if needed) and populate skb */ + if (skb) ice_add_rx_frag(rx_buf, skb, size); else @@ -1040,7 +1050,8 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) /* exit if we failed to retrieve a buffer */ if (!skb) { rx_ring->rx_stats.alloc_buf_failed++; - rx_buf->pagecnt_bias++; + if (rx_buf) + rx_buf->pagecnt_bias++; break; } @@ -1057,9 +1068,6 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) continue; } - rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) & - ICE_RX_FLEX_DESC_PTYPE_M; - stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S); if (ice_test_staterr(rx_desc, stat_err_bits)) vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1); @@ -1076,6 +1084,9 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) total_rx_bytes += skb->len; /* populate checksum, VLAN, and protocol */ + rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) & + ICE_RX_FLEX_DESC_PTYPE_M; + ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); /* send completed skb up the stack */ @@ -1085,6 +1096,9 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) total_rx_pkts++; } + /* return up to cleaned_count buffers to hardware */ + failure = ice_alloc_rx_bufs(rx_ring, cleaned_count); + /* update queue and vector specific stats */ u64_stats_update_begin(&rx_ring->syncp); rx_ring->stats.pkts += total_rx_pkts; @@ -1212,6 +1226,8 @@ ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc) if (time_after(next_update, rc->next_update)) goto clear_counts; + prefetch(q_vector->vsi->port_info); + packets = rc->total_pkts; bytes = rc->total_bytes; @@ -1341,16 +1357,32 @@ static u32 ice_buildreg_itr(u16 itr_idx, u16 itr) /** * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt - * @vsi: the VSI associated with the q_vector * @q_vector: q_vector for which ITR is being updated and interrupt enabled */ -static void -ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector) +static void ice_update_ena_itr(struct ice_q_vector *q_vector) { struct ice_ring_container *tx = &q_vector->tx; struct ice_ring_container *rx = &q_vector->rx; + struct ice_vsi *vsi = q_vector->vsi; u32 itr_val; + /* when exiting WB_ON_ITR lets set a low ITR value and trigger + * interrupts to expire right away in case we have more work ready to go + * already + */ + if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) { + itr_val = ice_buildreg_itr(rx->itr_idx, ICE_WB_ON_ITR_USECS); + wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val); + /* set target back to last user set value */ + rx->target_itr = rx->itr_setting; + /* set current to what we just wrote and dynamic if needed */ + rx->current_itr = ICE_WB_ON_ITR_USECS | + (rx->itr_setting & ICE_ITR_DYNAMIC); + /* allow normal interrupt flow to start */ + 
q_vector->itr_countdown = 0; + return; + } + /* This will do nothing if dynamic updates are not enabled */ ice_update_itr(q_vector, tx); ice_update_itr(q_vector, rx); @@ -1389,13 +1421,48 @@ ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector) q_vector->itr_countdown--; } - if (!test_bit(__ICE_DOWN, vsi->state)) - wr32(&vsi->back->hw, + if (!test_bit(__ICE_DOWN, q_vector->vsi->state)) + wr32(&q_vector->vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val); } /** + * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector + * @q_vector: q_vector to set WB_ON_ITR on + * + * We need to tell hardware to write-back completed descriptors even when + * interrupts are disabled. Descriptors will be written back on cache line + * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR + * descriptors may not be written back if they don't fill a cache line until the + * next interrupt. + * + * This sets the write-back frequency to 2 microseconds as that is the minimum + * value that's not 0 due to ITR granularity. Also, set the INTENA_MSK bit to + * make sure hardware knows we aren't meddling with the INTENA_M bit. + */ +static void ice_set_wb_on_itr(struct ice_q_vector *q_vector) +{ + struct ice_vsi *vsi = q_vector->vsi; + + /* already in WB_ON_ITR mode no need to change it */ + if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) + return; + + if (q_vector->num_ring_rx) + wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), + ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS, + ICE_RX_ITR)); + + if (q_vector->num_ring_tx) + wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), + ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS, + ICE_TX_ITR)); + + q_vector->itr_countdown = ICE_IN_WB_ON_ITR_MODE; +} + +/** * ice_napi_poll - NAPI polling Rx/Tx cleanup routine * @napi: napi struct with our devices info in it * @budget: amount of work driver is allowed to do this pass, in packets @@ -1408,29 +1475,32 @@ int ice_napi_poll(struct napi_struct *napi, int budget) { struct ice_q_vector *q_vector = container_of(napi, struct ice_q_vector, napi); - struct ice_vsi *vsi = q_vector->vsi; - struct ice_pf *pf = vsi->back; bool clean_complete = true; - int budget_per_ring = 0; struct ice_ring *ring; + int budget_per_ring; int work_done = 0; /* Since the actual Tx work is minimal, we can give the Tx a larger * budget and be more aggressive about cleaning up the Tx descriptors. */ ice_for_each_ring(ring, q_vector->tx) - if (!ice_clean_tx_irq(vsi, ring, budget)) + if (!ice_clean_tx_irq(ring, budget)) clean_complete = false; /* Handle case where we are called by netpoll with a budget of 0 */ - if (budget <= 0) + if (unlikely(budget <= 0)) return budget; - /* We attempt to distribute budget to each Rx queue fairly, but don't - * allow the budget to go below 1 because that would exit polling early. - */ - if (q_vector->num_ring_rx) + /* normally we have 1 Rx ring per q_vector */ + if (unlikely(q_vector->num_ring_rx > 1)) + /* We attempt to distribute budget to each Rx queue fairly, but + * don't allow the budget to go below 1 because that would exit + * polling early. 
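 *
 * (Editor's illustration, not part of the original patch.) With budget = 64
 * and q_vector->num_ring_rx = 3, this yields max(64 / 3, 1) = 21 descriptors
 * of budget per Rx ring; with budget = 2 and 8 Rx rings, integer division
 * alone would give 0, and the max() clamp keeps budget_per_ring at 1 so
 * each ring can still make progress.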
+ */ budget_per_ring = max(budget / q_vector->num_ring_rx, 1); + else + /* Max of 1 Rx ring in this q_vector so give it the budget */ + budget_per_ring = budget; ice_for_each_ring(ring, q_vector->rx) { int cleaned; @@ -1450,8 +1520,9 @@ int ice_napi_poll(struct napi_struct *napi, int budget) * poll us due to busy-polling */ if (likely(napi_complete_done(napi, work_done))) - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) - ice_update_ena_itr(vsi, q_vector); + ice_update_ena_itr(q_vector); + else + ice_set_wb_on_itr(q_vector); return min_t(int, work_done, budget - 1); } @@ -1521,7 +1592,7 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first, { u64 td_offset, td_tag, td_cmd; u16 i = tx_ring->next_to_use; - struct skb_frag_struct *frag; + skb_frag_t *frag; unsigned int data_len, size; struct ice_tx_desc *tx_desc; struct ice_tx_buf *tx_buf; @@ -1923,7 +1994,7 @@ static unsigned int ice_txd_use_count(unsigned int size) */ static unsigned int ice_xmit_desc_count(struct sk_buff *skb) { - const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; unsigned int nr_frags = skb_shinfo(skb)->nr_frags; unsigned int count = 0, size = skb_headlen(skb); @@ -1954,7 +2025,7 @@ static unsigned int ice_xmit_desc_count(struct sk_buff *skb) */ static bool __ice_chk_linearize(struct sk_buff *skb) { - const struct skb_frag_struct *frag, *stale; + const skb_frag_t *frag, *stale; int nr_frags, sum; /* no need to check if number of frags is less than 7 */ @@ -2036,6 +2107,7 @@ static netdev_tx_t ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring) { struct ice_tx_offload_params offload = { 0 }; + struct ice_vsi *vsi = tx_ring->vsi; struct ice_tx_buf *first; unsigned int count; int tso, csum; @@ -2083,7 +2155,15 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring) if (csum < 0) goto out_drop; - if (tso || offload.cd_tunnel_params) { + /* allow CONTROL frames egress from main VSI if FW LLDP disabled */ + if (unlikely(skb->priority == TC_PRIO_CONTROL && + vsi->type == ICE_VSI_PF && + vsi->port_info->is_sw_lldp)) + offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | + ICE_TX_CTX_DESC_SWTCH_UPLINK << + ICE_TXD_CTX_QW1_CMD_S); + + if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) { struct ice_tx_ctx_desc *cdesc; int i = tx_ring->next_to_use; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index ec76aba347b9..94a9280193e2 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -144,6 +144,19 @@ enum ice_rx_dtype { #define ICE_DFLT_INTRL 0 #define ICE_MAX_INTRL 236 +#define ICE_WB_ON_ITR_USECS 2 +#define ICE_IN_WB_ON_ITR_MODE 255 +/* Sets WB_ON_ITR and assumes INTENA bit is already cleared, which allows + * setting the MSK_M bit to tell hardware to ignore the INTENA_M bit. Also, + * set the write-back latency to the usecs passed in. 
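 *
 * (Editor's worked example, not part of the original patch; it assumes
 * GLINT_DYN_CTL_INTERVAL_S = 5, GLINT_DYN_CTL_ITR_INDX_S = 3,
 * ICE_ITR_GRAN_S = 1, ICE_RX_ITR = 0, and the INTENA_MSK/WB_ON_ITR bits at
 * positions 31/30, per the usual GLINT_DYN_CTL layout.) Under those
 * assumptions, ICE_GLINT_DYN_CTL_WB_ON_ITR(2, ICE_RX_ITR) evaluates to
 * (2 << (5 - 1)) | (0 << 3) | BIT(31) | BIT(30) = 0xC0000020.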
+ */ +#define ICE_GLINT_DYN_CTL_WB_ON_ITR(usecs, itr_idx) \ + ((((usecs) << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)) & \ + GLINT_DYN_CTL_INTERVAL_M) | \ + (((itr_idx) << GLINT_DYN_CTL_ITR_INDX_S) & \ + GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M | \ + GLINT_DYN_CTL_WB_ON_ITR_M) + /* Legacy or Advanced Mode Queue */ #define ICE_TX_ADVANCED 0 #define ICE_TX_LEGACY 1 diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 24bbef8bbe69..6667d17a4206 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -4,18 +4,19 @@ #ifndef _ICE_TYPE_H_ #define _ICE_TYPE_H_ +#define ICE_BYTES_PER_WORD 2 +#define ICE_BYTES_PER_DWORD 4 + #include "ice_status.h" #include "ice_hw_autogen.h" #include "ice_osdep.h" #include "ice_controlq.h" #include "ice_lan_tx_rx.h" +#include "ice_flex_type.h" -#define ICE_BYTES_PER_WORD 2 -#define ICE_BYTES_PER_DWORD 4 - -static inline bool ice_is_tc_ena(u8 bitmap, u8 tc) +static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc) { - return test_bit(tc, (unsigned long *)&bitmap); + return test_bit(tc, &bitmap); } /* Driver always calls main vsi_handle first */ @@ -31,6 +32,7 @@ static inline bool ice_is_tc_ena(u8 bitmap, u8 tc) #define ICE_DBG_LAN BIT_ULL(8) #define ICE_DBG_SW BIT_ULL(13) #define ICE_DBG_SCHED BIT_ULL(14) +#define ICE_DBG_PKG BIT_ULL(16) #define ICE_DBG_RES BIT_ULL(17) #define ICE_DBG_AQ_MSG BIT_ULL(24) #define ICE_DBG_AQ_CMD BIT_ULL(27) @@ -53,6 +55,14 @@ enum ice_aq_res_access_type { ICE_RES_WRITE }; +struct ice_driver_ver { + u8 major_ver; + u8 minor_ver; + u8 build_ver; + u8 subbuild_ver; + u8 driver_string[32]; +}; + enum ice_fc_mode { ICE_FC_NONE = 0, ICE_FC_RX_PAUSE, @@ -139,6 +149,9 @@ struct ice_phy_info { /* Common HW capabilities for SW use */ struct ice_hw_common_caps { u32 valid_functions; + /* DCB capabilities */ + u32 active_tc_bitmap; + u32 maxtc; /* Tx/Rx queues */ u16 num_rxq; /* Number/Total Rx queues */ @@ -219,6 +232,8 @@ struct ice_nvm_info { u8 blank_nvm_mode; /* is NVM empty (no FW present) */ }; +#define ICE_NVM_VER_LEN 32 + /* Max number of port to queue branches w.r.t topology */ #define ICE_MAX_TRAFFIC_CLASS 8 #define ICE_TXSCHED_MAX_BRANCHES ICE_MAX_TRAFFIC_CLASS @@ -347,6 +362,8 @@ struct ice_port_info { struct ice_mac_info mac; struct ice_phy_info phy; struct mutex sched_lock; /* protect access to TXSched tree */ + struct ice_sched_node * + sib_head[ICE_MAX_TRAFFIC_CLASS][ICE_AQC_TOPO_MAX_LEVEL_NUM]; struct ice_dcbx_cfg local_dcbx_cfg; /* Oper/Local Cfg */ /* DCBX info */ struct ice_dcbx_cfg remote_dcbx_cfg; /* Peer Cfg */ @@ -454,6 +471,30 @@ struct ice_hw { u8 ucast_shared; /* true if VSIs can share unicast addr */ + /* Active package version (currently active) */ + struct ice_pkg_ver active_pkg_ver; + u8 active_pkg_name[ICE_PKG_NAME_SIZE]; + u8 active_pkg_in_nvm; + + enum ice_aq_err pkg_dwnld_status; + + /* Driver's package ver - (from the Metadata seg) */ + struct ice_pkg_ver pkg_ver; + u8 pkg_name[ICE_PKG_NAME_SIZE]; + + /* Driver's Ice package version (from the Ice seg) */ + struct ice_pkg_ver ice_pkg_ver; + u8 ice_pkg_name[ICE_PKG_NAME_SIZE]; + + /* Pointer to the ice segment */ + struct ice_seg *seg; + + /* Pointer to allocated copy of pkg memory */ + u8 *pkg_copy; + u32 pkg_size; + + /* HW block tables */ + struct ice_blk_info blk[ICE_BLK_COUNT]; }; /* Statistics collected by each port, VSI, VEB, and S-channel */ diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c 
b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 5d24b539648f..b45797f39b2f 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -129,7 +129,10 @@ static void ice_vc_notify_vf_link_state(struct ice_vf *vf) pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; pfe.severity = PF_EVENT_SEVERITY_INFO; - if (vf->link_forced) + /* Always report link is down if the VF queues aren't enabled */ + if (!vf->num_qs_ena) + ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false); + else if (vf->link_forced) ice_set_pfe_link_forced(vf, &pfe, vf->link_up); else ice_set_pfe_link(vf, &pfe, ls->link_speed, ls->link_info & @@ -252,6 +255,35 @@ static int ice_sriov_free_msix_res(struct ice_pf *pf) } /** + * ice_set_vf_state_qs_dis - Set VF queues state to disabled + * @vf: pointer to the VF structure + */ +void ice_set_vf_state_qs_dis(struct ice_vf *vf) +{ + /* Clear Rx/Tx enabled queues flag */ + bitmap_zero(vf->txq_ena, ICE_MAX_BASE_QS_PER_VF); + bitmap_zero(vf->rxq_ena, ICE_MAX_BASE_QS_PER_VF); + vf->num_qs_ena = 0; + clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states); +} + +/** + * ice_dis_vf_qs - Disable the VF queues + * @vf: pointer to the VF structure + */ +static void ice_dis_vf_qs(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + + vsi = pf->vsi[vf->lan_vsi_idx]; + + ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id); + ice_vsi_stop_rx_rings(vsi); + ice_set_vf_state_qs_dis(vf); +} + +/** * ice_free_vfs - Free all VFs * @pf: pointer to the PF structure */ @@ -267,19 +299,9 @@ void ice_free_vfs(struct ice_pf *pf) usleep_range(1000, 2000); /* Avoid wait time by stopping all VFs at the same time */ - for (i = 0; i < pf->num_alloc_vfs; i++) { - struct ice_vsi *vsi; - - if (!test_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states)) - continue; - - vsi = pf->vsi[pf->vf[i].lan_vsi_idx]; - /* stop rings without wait time */ - ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, i); - ice_vsi_stop_rx_rings(vsi); - - clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states); - } + for (i = 0; i < pf->num_alloc_vfs; i++) + if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states)) + ice_dis_vf_qs(&pf->vf[i]); /* Disable IOV before freeing resources. This lets any VF drivers * running in the host get themselves cleaned up before we yank @@ -297,13 +319,6 @@ void ice_free_vfs(struct ice_pf *pf) if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) { /* disable VF qp mappings */ ice_dis_vf_mappings(&pf->vf[i]); - - /* Set this state so that assigned VF vectors can be - * reclaimed by PF for reuse in ice_vsi_release(). No - * need to clear this bit since pf->vf array is being - * freed anyways after this for loop - */ - set_bit(ICE_VF_STATE_CFG_INTR, pf->vf[i].vf_states); ice_free_vf_res(&pf->vf[i]); } } @@ -341,12 +356,13 @@ void ice_free_vfs(struct ice_pf *pf) * ice_trigger_vf_reset - Reset a VF on HW * @vf: pointer to the VF structure * @is_vflr: true if VFLR was issued, false if not + * @is_pfr: true if the reset was triggered due to a previous PFR * * Trigger hardware to start a reset for a particular VF. Expects the caller * to wait the proper amount of time to allow hardware to reset the VF before * it cleans up and restores VF functionality. 
*/ -static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr) +static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr) { struct ice_pf *pf = vf->pf; u32 reg, reg_idx, bit_idx; @@ -367,10 +383,13 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr) */ clear_bit(ICE_VF_STATE_INIT, vf->vf_states); - /* Clear the VF's ARQLEN register. This is how the VF detects reset, - * since the VFGEN_RSTAT register doesn't stick at 0 after reset. + /* VF_MBX_ARQLEN is cleared by PFR, so the driver needs to clear it + * in the case of VFR. If this is done for PFR, it can mess up VF + * resets because the VF driver may already have started cleanup + * by the time we get here. */ - wr32(hw, VF_MBX_ARQLEN(vf_abs_id), 0); + if (!is_pfr) + wr32(hw, VF_MBX_ARQLEN(vf_abs_id), 0); /* In the case of a VFLR, the HW has already reset the VF and we * just need to clean up, so don't hit the VFRTRIG register. @@ -389,12 +408,15 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr) wr32(hw, PF_PCI_CIAA, VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S)); - for (i = 0; i < 100; i++) { + for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) { reg = rd32(hw, PF_PCI_CIAD); - if ((reg & VF_TRANS_PENDING_M) != 0) - dev_err(&pf->pdev->dev, - "VF %d PCI transactions stuck\n", vf->vf_id); - udelay(1); + /* no transactions pending so stop polling */ + if ((reg & VF_TRANS_PENDING_M) == 0) + break; + + dev_err(&pf->pdev->dev, + "VF %d PCI transactions stuck\n", vf->vf_id); + udelay(ICE_PCI_CIAD_WAIT_DELAY_US); } } @@ -481,19 +503,20 @@ ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id) } /** - * ice_calc_vf_first_vector_idx - Calculate absolute MSIX vector index in HW + * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space * @pf: pointer to PF structure * @vf: pointer to VF that the first MSIX vector index is being calculated for * - * This returns the first MSIX vector index in HW that is used by this VF and - * this will always be the OICR index in the AVF driver so any functionality + * This returns the first MSIX vector index in PF space that is used by this VF. + * This index is used when accessing PF relative registers such as + * GLINT_VECT2FUNC and GLINT_DYN_CTL. + * This will always be the OICR index in the AVF driver so any functionality * using vf->first_vector_idx for queue configuration will have to increment by * 1 to avoid meddling with the OICR index. */ static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf) { - return pf->hw.func_caps.common_cap.msix_vector_first_id + - pf->sriov_base_vector + vf->vf_id * pf->num_vf_msix; + return pf->sriov_base_vector + vf->vf_id * pf->num_vf_msix; } /** @@ -543,7 +566,10 @@ static int ice_alloc_vsi_res(struct ice_vf *vf) status = ice_add_mac(&pf->hw, &tmp_add_list); if (status) - dev_err(&pf->pdev->dev, "could not add mac filters\n"); + dev_err(&pf->pdev->dev, + "could not add mac filters error %d\n", status); + else + vf->num_mac = 1; /* Clear this bit after VF initialization since we shouldn't reclaim * and reassign interrupts for synchronous or asynchronous VFR events. @@ -551,7 +577,6 @@ static int ice_alloc_vsi_res(struct ice_vf *vf) * expect vector assignment to be changed unless there is a request for * more vectors. 
*/ - clear_bit(ICE_VF_STATE_CFG_INTR, vf->vf_states); ice_alloc_vsi_res_exit: ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); return status; @@ -567,20 +592,21 @@ static int ice_alloc_vf_res(struct ice_vf *vf) int tx_rx_queue_left; int status; - /* setup VF VSI and necessary resources */ - status = ice_alloc_vsi_res(vf); - if (status) - goto ice_alloc_vf_res_exit; - /* Update number of VF queues, in case VF had requested for queue * changes */ - tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx); + tx_rx_queue_left = min_t(int, ice_get_avail_txq_count(pf), + ice_get_avail_rxq_count(pf)); tx_rx_queue_left += ICE_DFLT_QS_PER_VF; if (vf->num_req_qs && vf->num_req_qs <= tx_rx_queue_left && vf->num_req_qs != vf->num_vf_qs) vf->num_vf_qs = vf->num_req_qs; + /* setup VF VSI and necessary resources */ + status = ice_alloc_vsi_res(vf); + if (status) + goto ice_alloc_vf_res_exit; + if (vf->trusted) set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); else @@ -605,27 +631,30 @@ ice_alloc_vf_res_exit: */ static void ice_ena_vf_mappings(struct ice_vf *vf) { + int abs_vf_id, abs_first, abs_last; struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; int first, last, v; struct ice_hw *hw; - int abs_vf_id; u32 reg; hw = &pf->hw; vsi = pf->vsi[vf->lan_vsi_idx]; first = vf->first_vector_idx; last = (first + pf->num_vf_msix) - 1; + abs_first = first + pf->hw.func_caps.common_cap.msix_vector_first_id; + abs_last = (abs_first + pf->num_vf_msix) - 1; abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; /* VF Vector allocation */ - reg = (((first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) | - ((last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) | + reg = (((abs_first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) | + ((abs_last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M); wr32(hw, VPINT_ALLOC(vf->vf_id), reg); - reg = (((first << VPINT_ALLOC_PCI_FIRST_S) & VPINT_ALLOC_PCI_FIRST_M) | - ((last << VPINT_ALLOC_PCI_LAST_S) & VPINT_ALLOC_PCI_LAST_M) | + reg = (((abs_first << VPINT_ALLOC_PCI_FIRST_S) + & VPINT_ALLOC_PCI_FIRST_M) | + ((abs_last << VPINT_ALLOC_PCI_LAST_S) & VPINT_ALLOC_PCI_LAST_M) | + VPINT_ALLOC_PCI_VALID_M); wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg); /* map the interrupts to its functions */ @@ -870,11 +899,11 @@ static int ice_check_avail_res(struct ice_pf *pf) * at runtime through Virtchnl, that is the reason we start by reserving * few queues. */ - num_txq = ice_determine_res(pf, pf->q_left_tx, ICE_DFLT_QS_PER_VF, - ICE_MIN_QS_PER_VF); + num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf), + ICE_DFLT_QS_PER_VF, ICE_MIN_QS_PER_VF); - num_rxq = ice_determine_res(pf, pf->q_left_rx, ICE_DFLT_QS_PER_VF, - ICE_MIN_QS_PER_VF); + num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf), + ICE_DFLT_QS_PER_VF, ICE_MIN_QS_PER_VF); if (!num_txq || !num_rxq) return -EIO; @@ -983,6 +1012,47 @@ ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m, } /** + * ice_config_res_vfs - Finalize allocation of VFs resources in one go + * @pf: pointer to the PF structure + * + * This function is being called as the last part of resetting all VFs, or when + * configuring VFs for the first time, where there is no resource to be freed. + * Returns true if resources were properly allocated for all VFs, and false + * otherwise. 
+ */ +static bool ice_config_res_vfs(struct ice_pf *pf) +{ + struct ice_hw *hw = &pf->hw; + int v; + + if (ice_check_avail_res(pf)) { + dev_err(&pf->pdev->dev, + "Cannot allocate VF resources, try with fewer number of VFs\n"); + return false; + } + + /* rearm global interrupts */ + if (test_and_clear_bit(__ICE_OICR_INTR_DIS, pf->state)) + ice_irq_dynamic_ena(hw, NULL, NULL); + + /* Finish resetting each VF and allocate resources */ + for (v = 0; v < pf->num_alloc_vfs; v++) { + struct ice_vf *vf = &pf->vf[v]; + + vf->num_vf_qs = pf->num_vf_qps; + dev_dbg(&pf->pdev->dev, + "VF-id %d has %d queues configured\n", + vf->vf_id, vf->num_vf_qs); + ice_cleanup_and_realloc_vf(vf); + } + + ice_flush(hw); + clear_bit(__ICE_VF_DIS, pf->state); + + return true; +} + +/** * ice_reset_all_vfs - reset all allocated VFs in one go * @pf: pointer to the PF structure * @is_vflr: true if VFLR was issued, false if not @@ -1010,18 +1080,17 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) /* Begin reset on all VFs at once */ for (v = 0; v < pf->num_alloc_vfs; v++) - ice_trigger_vf_reset(&pf->vf[v], is_vflr); + ice_trigger_vf_reset(&pf->vf[v], is_vflr, true); for (v = 0; v < pf->num_alloc_vfs; v++) { struct ice_vsi *vsi; vf = &pf->vf[v]; vsi = pf->vsi[vf->lan_vsi_idx]; - if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) { - ice_vsi_stop_lan_tx_rings(vsi, ICE_VF_RESET, vf->vf_id); - ice_vsi_stop_rx_rings(vsi); - clear_bit(ICE_VF_STATE_ENA, vf->vf_states); - } + if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) + ice_dis_vf_qs(vf); + ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL, + NULL, ICE_VF_RESET, vf->vf_id, NULL); } /* HW requires some time to make sure it can flush the FIFO for a VF @@ -1031,7 +1100,6 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) * finished resetting. */ for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) { - usleep_range(10000, 20000); /* Check each VF in sequence */ while (v < pf->num_alloc_vfs) { @@ -1039,8 +1107,11 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) vf = &pf->vf[v]; reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id)); - if (!(reg & VPGEN_VFRSTAT_VFRD_M)) + if (!(reg & VPGEN_VFRSTAT_VFRD_M)) { + /* only delay if the check failed */ + usleep_range(10, 20); break; + } /* If the current VF has finished resetting, move on * to the next VF in sequence. @@ -1054,7 +1125,6 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) */ if (v < pf->num_alloc_vfs) dev_warn(&pf->pdev->dev, "VF reset check timeout\n"); - usleep_range(10000, 20000); /* free VF resources to begin resetting the VSI state */ for (v = 0; v < pf->num_alloc_vfs; v++) { @@ -1074,25 +1144,8 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) dev_err(&pf->pdev->dev, "Failed to free MSIX resources used by SR-IOV\n"); - if (ice_check_avail_res(pf)) { - dev_err(&pf->pdev->dev, - "Cannot allocate VF resources, try with fewer number of VFs\n"); + if (!ice_config_res_vfs(pf)) return false; - } - - /* Finish the reset on each VF */ - for (v = 0; v < pf->num_alloc_vfs; v++) { - vf = &pf->vf[v]; - - vf->num_vf_qs = pf->num_vf_qps; - dev_dbg(&pf->pdev->dev, - "VF-id %d has %d queues configured\n", - vf->vf_id, vf->num_vf_qs); - ice_cleanup_and_realloc_vf(vf); - } - - ice_flush(hw); - clear_bit(__ICE_VF_DIS, pf->state); return true; } @@ -1114,27 +1167,31 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) u32 reg; int i; - /* If the VFs have been disabled, this means something else is - * resetting the VF, so we shouldn't continue. 
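The entry checks rewritten below split what used to be one bit into two levels: a PF-wide disable flag short-circuits the reset entirely, while a per-VF disable bit serializes concurrent resets of the same VF. Extracted into a predicate for illustration (the helper name is hypothetical; the bitops are the kernel's):

static bool ice_reset_vf_allowed(struct ice_pf *pf, struct ice_vf *vf)
{
	/* PF itself is down: defer any VF reset until it is active */
	if (test_bit(__ICE_VF_DIS, pf->state))
		return false;

	/* test_and_set_bit() returns the old value, so a nonzero
	 * result means another reset of this VF is already running
	 */
	if (test_and_set_bit(ICE_VF_STATE_DIS, vf->vf_states))
		return false;

	return true;
}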
+ /* If the PF has been disabled, there is no need to reset the VF until + * the PF is active again. */ - if (test_and_set_bit(__ICE_VF_DIS, pf->state)) + if (test_bit(__ICE_VF_DIS, pf->state)) return false; - ice_trigger_vf_reset(vf, is_vflr); + /* If the VF has been disabled, this means something else is + * resetting the VF, so we shouldn't continue. Otherwise, set the + * VF disable state bit for the actual reset, and continue. + */ + if (test_and_set_bit(ICE_VF_STATE_DIS, vf->vf_states)) + return false; + + ice_trigger_vf_reset(vf, is_vflr, false); vsi = pf->vsi[vf->lan_vsi_idx]; - if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) { - ice_vsi_stop_lan_tx_rings(vsi, ICE_VF_RESET, vf->vf_id); - ice_vsi_stop_rx_rings(vsi); - clear_bit(ICE_VF_STATE_ENA, vf->vf_states); - } else { - /* Call Disable LAN Tx queue AQ call even when queues are not - * enabled. This is needed for successful completiom of VFR - */ - ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL, - NULL, ICE_VF_RESET, vf->vf_id, NULL); - } + if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) + ice_dis_vf_qs(vf); + + /* Call Disable LAN Tx queue AQ whether or not queues are + * enabled. This is needed for successful completion of VFR. + */ + ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL, + NULL, ICE_VF_RESET, vf->vf_id, NULL); hw = &pf->hw; /* poll VPGEN_VFRSTAT reg to make sure @@ -1145,12 +1202,14 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) * poll the status register to make sure that the reset * completed successfully. */ - usleep_range(10000, 20000); reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id)); if (reg & VPGEN_VFRSTAT_VFRD_M) { rsd = true; break; } + + /* only sleep if the reset is not done */ + usleep_range(10, 20); } /* Display a warning if VF didn't manage to reset in time, but need to @@ -1160,8 +1219,6 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) dev_warn(&pf->pdev->dev, "VF reset check timeout on VF %d\n", vf->vf_id); - usleep_range(10000, 20000); - /* disable promiscuous modes in case they were enabled * ignore any error if disabling process failed */ @@ -1183,7 +1240,6 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) ice_cleanup_and_realloc_vf(vf); ice_flush(hw); - clear_bit(__ICE_VF_DIS, pf->state); return true; } @@ -1257,7 +1313,7 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs) /* Disable global interrupt 0 so we don't try to handle the VFLR.
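The set_bit() added just after this comment pairs with the test_and_clear_bit() in ice_config_res_vfs() above: the allocation path records that the OICR interrupt was masked, and the rearm fires only if that record is still in place, so the interrupt is neither re-enabled twice nor left disabled on the error path. The handshake in isolation:

	/* disable side, before pci_enable_sriov() */
	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
	     ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
	set_bit(__ICE_OICR_INTR_DIS, pf->state);

	/* rearm side, in ice_config_res_vfs(): runs only if the bit is
	 * still set (the err_unroll_intr unwind clears it separately)
	 */
	if (test_and_clear_bit(__ICE_OICR_INTR_DIS, pf->state))
		ice_irq_dynamic_ena(hw, NULL, NULL);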
*/ wr32(hw, GLINT_DYN_CTL(pf->oicr_idx), ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S); - + set_bit(__ICE_OICR_INTR_DIS, pf->state); ice_flush(hw); ret = pci_enable_sriov(pf->pdev, num_alloc_vfs); @@ -1283,19 +1339,16 @@ /* assign default capabilities */ set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps); vfs[i].spoofchk = true; - - /* Set this state so that PF driver does VF vector assignment */ - set_bit(ICE_VF_STATE_CFG_INTR, vfs[i].vf_states); } pf->num_alloc_vfs = num_alloc_vfs; - /* VF resources get allocated during reset */ - if (!ice_reset_all_vfs(pf, true)) { + /* VF resources get allocated during initialization */ + if (!ice_config_res_vfs(pf)) { ret = -EIO; goto err_unroll_sriov; } - goto err_unroll_intr; + return ret; err_unroll_sriov: pf->vf = NULL; @@ -1307,6 +1360,7 @@ err_pci_disable_sriov: err_unroll_intr: /* rearm interrupts here */ ice_irq_dynamic_ena(hw, NULL, NULL); + clear_bit(__ICE_OICR_INTR_DIS, pf->state); return ret; } @@ -1389,6 +1443,12 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs) { struct ice_pf *pf = pci_get_drvdata(pdev); + if (ice_is_safe_mode(pf)) { + dev_err(&pf->pdev->dev, + "SR-IOV cannot be configured - Device is in Safe Mode\n"); + return -EOPNOTSUPP; + } + if (num_vfs) return ice_pci_sriov_ena(pf, num_vfs); @@ -1490,10 +1550,10 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval, msg, msglen, NULL); - if (aq_ret) { + if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) { dev_info(&pf->pdev->dev, - "Unable to send the message to VF %d aq_err %d\n", - vf->vf_id, pf->hw.mailboxq.sq_last_status); + "Unable to send the message to VF %d ret %d aq_err %d\n", + vf->vf_id, aq_ret, pf->hw.mailboxq.sq_last_status); return -EIO; } @@ -1688,6 +1748,21 @@ static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid) } /** + * ice_vc_isvalid_ring_len - check if a ring length is valid + * @ring_len: length of ring + * + * Check that the ring length is zero, or lies within the supported range + * and is a multiple of ICE_REQ_DESC_MULTIPLE + */ +static bool ice_vc_isvalid_ring_len(u16 ring_len) +{ + return ring_len == 0 || + (ring_len >= ICE_MIN_NUM_DESC && + ring_len <= ICE_MAX_NUM_DESC && + !(ring_len % ICE_REQ_DESC_MULTIPLE)); +} + +/** * ice_vc_config_rss_key * @vf: pointer to the VF info * @msg: pointer to the msg buffer * * Configure the VF's RSS key @@ -1712,18 +1787,18 @@ static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg) goto error_param; } - vsi = pf->vsi[vf->lan_vsi_idx]; - if (!vsi) { + if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) { + if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { + vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vsi) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -1759,18 +1834,18 @@ static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg) goto error_param; } - vsi = pf->vsi[vf->lan_vsi_idx]; - if (!vsi) { + if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) { + if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { + vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vsi) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@
-1839,6 +1914,8 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg) (struct virtchnl_queue_select *)msg; struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; + unsigned long q_map; + u16 vf_q_id; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; @@ -1855,6 +1932,12 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg) goto error_param; } + if (vqs->rx_queues > ICE_MAX_BASE_QS_PER_VF || + vqs->tx_queues > ICE_MAX_BASE_QS_PER_VF) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; @@ -1865,12 +1948,48 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg) * Tx queue group list was configured and the context bits were * programmed using ice_vsi_cfg_txqs */ - if (ice_vsi_start_rx_rings(vsi)) - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + q_map = vqs->rx_queues; + for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) { + if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + /* Skip queue if enabled */ + if (test_bit(vf_q_id, vf->rxq_ena)) + continue; + + if (ice_vsi_ctrl_rx_ring(vsi, true, vf_q_id)) { + dev_err(&vsi->back->pdev->dev, + "Failed to enable Rx ring %d on VSI %d\n", + vf_q_id, vsi->vsi_num); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + set_bit(vf_q_id, vf->rxq_ena); + vf->num_qs_ena++; + } + + vsi = pf->vsi[vf->lan_vsi_idx]; + q_map = vqs->tx_queues; + for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) { + if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + /* Skip queue if enabled */ + if (test_bit(vf_q_id, vf->txq_ena)) + continue; + + set_bit(vf_q_id, vf->txq_ena); + vf->num_qs_ena++; + } /* Set flag to indicate that queues are enabled */ if (v_ret == VIRTCHNL_STATUS_SUCCESS) - set_bit(ICE_VF_STATE_ENA, vf->vf_states); + set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states); error_param: /* send the response to the VF */ @@ -1893,9 +2012,11 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg) (struct virtchnl_queue_select *)msg; struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; + unsigned long q_map; + u16 vf_q_id; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) && - !test_bit(ICE_VF_STATE_ENA, vf->vf_states)) { + !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -1910,29 +2031,81 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg) goto error_param; } + if (vqs->rx_queues > ICE_MAX_BASE_QS_PER_VF || + vqs->tx_queues > ICE_MAX_BASE_QS_PER_VF) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - if (ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id)) { - dev_err(&vsi->back->pdev->dev, - "Failed to stop tx rings on VSI %d\n", - vsi->vsi_num); - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + if (vqs->tx_queues) { + q_map = vqs->tx_queues; + + for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) { + struct ice_ring *ring = vsi->tx_rings[vf_q_id]; + struct ice_txq_meta txq_meta = { 0 }; + + if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + /* Skip queue if not enabled */ + if (!test_bit(vf_q_id, vf->txq_ena)) + continue; + + ice_fill_txq_meta(vsi, ring, &txq_meta); + + if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id, + ring, &txq_meta)) { + 
dev_err(&vsi->back->pdev->dev, + "Failed to stop Tx ring %d on VSI %d\n", + vf_q_id, vsi->vsi_num); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + /* Clear enabled queues flag */ + clear_bit(vf_q_id, vf->txq_ena); + vf->num_qs_ena--; + } } - if (ice_vsi_stop_rx_rings(vsi)) { - dev_err(&vsi->back->pdev->dev, - "Failed to stop rx rings on VSI %d\n", - vsi->vsi_num); - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + if (vqs->rx_queues) { + q_map = vqs->rx_queues; + + for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) { + if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + /* Skip queue if not enabled */ + if (!test_bit(vf_q_id, vf->rxq_ena)) + continue; + + if (ice_vsi_ctrl_rx_ring(vsi, false, vf_q_id)) { + dev_err(&vsi->back->pdev->dev, + "Failed to stop Rx ring %d on VSI %d\n", + vf_q_id, vsi->vsi_num); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + /* Clear enabled queues flag */ + clear_bit(vf_q_id, vf->rxq_ena); + vf->num_qs_ena--; + } } /* Clear enabled queues flag */ - if (v_ret == VIRTCHNL_STATUS_SUCCESS) - clear_bit(ICE_VF_STATE_ENA, vf->vf_states); + if (v_ret == VIRTCHNL_STATUS_SUCCESS && !vf->num_qs_ena) + clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states); error_param: /* send the response to the VF */ @@ -1962,12 +2135,6 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) irqmap_info = (struct virtchnl_irq_map_info *)msg; num_q_vectors_mapped = irqmap_info->num_vectors; - vsi = pf->vsi[vf->lan_vsi_idx]; - if (!vsi) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; - goto error_param; - } - /* Check to make sure number of VF vectors mapped is not greater than * number of VF vectors originally allocated, and check that * there is actually at least a single VF queue vector mapped @@ -1979,6 +2146,12 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) goto error_param; } + vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vsi) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + for (i = 0; i < num_q_vectors_mapped; i++) { struct ice_q_vector *q_vector; @@ -2056,6 +2229,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) struct virtchnl_vsi_queue_config_info *qci = (struct virtchnl_vsi_queue_config_info *)msg; struct virtchnl_queue_pair_info *qpi; + u16 num_rxq = 0, num_txq = 0; struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; int i; @@ -2071,13 +2245,16 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) } vsi = pf->vsi[vf->lan_vsi_idx]; - if (!vsi) + if (!vsi) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; + } - if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF) { + if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF || + qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) { dev_err(&pf->pdev->dev, "VF-%d requesting more than supported number of queues: %d\n", - vf->vf_id, qci->num_queue_pairs); + vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)); v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -2087,37 +2264,52 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) if (qpi->txq.vsi_id != qci->vsi_id || qpi->rxq.vsi_id != qci->vsi_id || qpi->rxq.queue_id != qpi->txq.queue_id || + qpi->txq.headwb_enabled || + !ice_vc_isvalid_ring_len(qpi->txq.ring_len) || + !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) || !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } /* copy Tx queue info from VF into VSI */ - vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr; - 
vsi->tx_rings[i]->count = qpi->txq.ring_len; - /* copy Rx queue info from VF into VSI */ - vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr; - vsi->rx_rings[i]->count = qpi->rxq.ring_len; - if (qpi->rxq.databuffer_size > ((16 * 1024) - 128)) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; - goto error_param; + if (qpi->txq.ring_len > 0) { + num_txq++; + vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr; + vsi->tx_rings[i]->count = qpi->txq.ring_len; } - vsi->rx_buf_len = qpi->rxq.databuffer_size; - if (qpi->rxq.max_pkt_size >= (16 * 1024) || - qpi->rxq.max_pkt_size < 64) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; - goto error_param; + + /* copy Rx queue info from VF into VSI */ + if (qpi->rxq.ring_len > 0) { + num_rxq++; + vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr; + vsi->rx_rings[i]->count = qpi->rxq.ring_len; + + if (qpi->rxq.databuffer_size != 0 && + (qpi->rxq.databuffer_size > ((16 * 1024) - 128) || + qpi->rxq.databuffer_size < 1024)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + vsi->rx_buf_len = qpi->rxq.databuffer_size; + vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len; + if (qpi->rxq.max_pkt_size >= (16 * 1024) || + qpi->rxq.max_pkt_size < 64) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } } + vsi->max_frame = qpi->rxq.max_pkt_size; } /* VF can request to configure less than allocated queues * or default allocated queues. So update the VSI with new number */ - vsi->num_txq = qci->num_queue_pairs; - vsi->num_rxq = qci->num_queue_pairs; + vsi->num_txq = num_txq; + vsi->num_rxq = num_rxq; /* All queues of VF VSI are in TC 0 */ - vsi->tc_cfg.tc_info[0].qcount_tx = qci->num_queue_pairs; - vsi->tc_cfg.tc_info[0].qcount_rx = qci->num_queue_pairs; + vsi->tc_cfg.tc_info[0].qcount_tx = num_txq; + vsi->tc_cfg.tc_info[0].qcount_rx = num_rxq; if (ice_vsi_cfg_lan_txqs(vsi) || ice_vsi_cfg_rxqs(vsi)) v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; @@ -2171,7 +2363,7 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set) (struct virtchnl_ether_addr_list *)msg; struct ice_pf *pf = vf->pf; enum virtchnl_ops vc_op; - LIST_HEAD(mac_list); + enum ice_status status; struct ice_vsi *vsi; int mac_count = 0; int i; @@ -2245,33 +2437,32 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set) goto handle_mac_exit; } - /* get here if maddr is multicast or if VF can change MAC */ - if (ice_add_mac_to_list(vsi, &mac_list, al->list[i].addr)) { - v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; + /* program the updated filter list */ + status = ice_vsi_cfg_mac_fltr(vsi, maddr, set); + if (status == ICE_ERR_DOES_NOT_EXIST || + status == ICE_ERR_ALREADY_EXISTS) { + dev_info(&pf->pdev->dev, + "can't %s MAC filters %pM for VF %d, error %d\n", + set ? "add" : "remove", maddr, vf->vf_id, + status); + } else if (status) { + dev_err(&pf->pdev->dev, + "can't %s MAC filters for VF %d, error %d\n", + set ? 
"add" : "remove", vf->vf_id, status); + v_ret = ice_err_to_virt_err(status); goto handle_mac_exit; } + mac_count++; } - /* program the updated filter list */ + /* Track number of MAC filters programmed for the VF VSI */ if (set) - v_ret = ice_err_to_virt_err(ice_add_mac(&pf->hw, &mac_list)); + vf->num_mac += mac_count; else - v_ret = ice_err_to_virt_err(ice_remove_mac(&pf->hw, &mac_list)); - - if (v_ret) { - dev_err(&pf->pdev->dev, - "can't update MAC filters for VF %d, error %d\n", - vf->vf_id, v_ret); - } else { - if (set) - vf->num_mac += mac_count; - else - vf->num_mac -= mac_count; - } + vf->num_mac -= mac_count; handle_mac_exit: - ice_free_fltr_list(&pf->pdev->dev, &mac_list); /* send the response to the VF */ return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0); } @@ -2315,11 +2506,11 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg) enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct virtchnl_vf_res_request *vfres = (struct virtchnl_vf_res_request *)msg; - int req_queues = vfres->num_queue_pairs; + u16 req_queues = vfres->num_queue_pairs; struct ice_pf *pf = vf->pf; - int max_allowed_vf_queues; - int tx_rx_queue_left; - int cur_queues; + u16 max_allowed_vf_queues; + u16 tx_rx_queue_left; + u16 cur_queues; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; @@ -2327,29 +2518,31 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg) } cur_queues = vf->num_vf_qs; - tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx); + tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf), + ice_get_avail_rxq_count(pf)); max_allowed_vf_queues = tx_rx_queue_left + cur_queues; - if (req_queues <= 0) { + if (!req_queues) { dev_err(&pf->pdev->dev, - "VF %d tried to request %d queues. Ignoring.\n", - vf->vf_id, req_queues); + "VF %d tried to request 0 queues. 
Ignoring.\n", + vf->vf_id); } else if (req_queues > ICE_MAX_BASE_QS_PER_VF) { dev_err(&pf->pdev->dev, "VF %d tried to request more than %d queues.\n", vf->vf_id, ICE_MAX_BASE_QS_PER_VF); vfres->num_queue_pairs = ICE_MAX_BASE_QS_PER_VF; - } else if (req_queues - cur_queues > tx_rx_queue_left) { + } else if (req_queues > cur_queues && + req_queues - cur_queues > tx_rx_queue_left) { dev_warn(&pf->pdev->dev, - "VF %d requested %d more queues, but only %d left.\n", + "VF %d requested %u more queues, but only %u left.\n", vf->vf_id, req_queues - cur_queues, tx_rx_queue_left); - vfres->num_queue_pairs = min_t(int, max_allowed_vf_queues, + vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues, ICE_MAX_BASE_QS_PER_VF); } else { /* request is successful, then reset VF */ vf->num_req_qs = req_queues; ice_vc_dis_vf(vf); dev_info(&pf->pdev->dev, - "VF %d granted request of %d queues.\n", + "VF %d granted request of %u queues.\n", vf->vf_id, req_queues); return 0; } @@ -2589,8 +2782,9 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) } vf->num_vlan--; - /* Disable VLAN pruning when removing VLAN */ - ice_cfg_vlan_pruning(vsi, false, false); + /* Disable VLAN pruning when the last VLAN is removed */ + if (!vf->num_vlan) + ice_cfg_vlan_pruning(vsi, false, false); /* Disable Unicast/Multicast VLAN promiscuous mode */ if (vlan_promisc) { @@ -2731,20 +2925,6 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) err = -EPERM; else err = -EINVAL; - goto error_handler; - } - - /* Perform additional checks specific to RSS and Virtchnl */ - if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) { - struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg; - - if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) - err = -EINVAL; - } else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) { - struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg; - - if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) - err = -EINVAL; } error_handler: @@ -2762,6 +2942,7 @@ error_handler: break; case VIRTCHNL_OP_GET_VF_RESOURCES: err = ice_vc_get_vf_res_msg(vf, msg); + ice_vc_notify_vf_link_state(vf); break; case VIRTCHNL_OP_RESET_VF: ice_vc_reset_vf_msg(vf); diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h index c3ca522c245a..0d9880c8bba3 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h @@ -15,26 +15,38 @@ #define ICE_MAX_MACADDR_PER_VF 12 /* Malicious Driver Detection */ -#define ICE_DFLT_NUM_MDD_EVENTS_ALLOWED 3 #define ICE_DFLT_NUM_INVAL_MSGS_ALLOWED 10 +#define ICE_MDD_EVENTS_THRESHOLD 30 /* Static VF transaction/status register def */ #define VF_DEVICE_STATUS 0xAA #define VF_TRANS_PENDING_M 0x20 +/* wait defines for polling PF_PCI_CIAD register status */ +#define ICE_PCI_CIAD_WAIT_COUNT 100 +#define ICE_PCI_CIAD_WAIT_DELAY_US 1 + +/* VF resources default values and limitation */ +#define ICE_MAX_VF_COUNT 256 +#define ICE_MAX_QS_PER_VF 256 +#define ICE_MIN_QS_PER_VF 1 +#define ICE_DFLT_QS_PER_VF 4 +#define ICE_NONQ_VECS_VF 1 +#define ICE_MAX_SCATTER_QS_PER_VF 16 +#define ICE_MAX_BASE_QS_PER_VF 16 +#define ICE_MAX_INTR_PER_VF 65 +#define ICE_MAX_POLICY_INTR_PER_VF 33 +#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1) +#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1) + /* Specific VF states */ enum ice_vf_states { - ICE_VF_STATE_INIT = 0, - ICE_VF_STATE_ACTIVE, - ICE_VF_STATE_ENA, + ICE_VF_STATE_INIT = 0, /* PF is initializing VF */ + 
ICE_VF_STATE_ACTIVE, /* VF resources are allocated for use */ + ICE_VF_STATE_QS_ENA, /* VF queue(s) enabled */ ICE_VF_STATE_DIS, ICE_VF_STATE_MC_PROMISC, ICE_VF_STATE_UC_PROMISC, - /* state to indicate if PF needs to do vector assignment for VF. - * This needs to be set during first time VF initialization or later - * when VF asks for more Vectors through virtchnl OP. - */ - ICE_VF_STATE_CFG_INTR, ICE_VF_STATES_NBITS }; @@ -50,11 +62,14 @@ struct ice_vf { s16 vf_id; /* VF ID in the PF space */ u16 lan_vsi_idx; /* index into PF struct */ - int first_vector_idx; /* first vector index of this VF */ + /* first vector index of this VF in the PF space */ + int first_vector_idx; struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */ struct virtchnl_version_info vf_ver; u32 driver_caps; /* reported by VF driver */ struct virtchnl_ether_addr dflt_lan_addr; + DECLARE_BITMAP(txq_ena, ICE_MAX_BASE_QS_PER_VF); + DECLARE_BITMAP(rxq_ena, ICE_MAX_BASE_QS_PER_VF); u16 port_vlan_id; u8 pf_set_mac:1; /* VF MAC address set by VMM admin */ u8 trusted:1; @@ -77,6 +92,7 @@ struct ice_vf { u16 num_mac; u16 num_vlan; u16 num_vf_qs; /* num of queue configured per VF */ + u16 num_qs_ena; /* total num of Tx/Rx queue enabled */ }; #ifdef CONFIG_PCI_IOV @@ -103,12 +119,15 @@ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state); int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena); int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector); + +void ice_set_vf_state_qs_dis(struct ice_vf *vf); #else /* CONFIG_PCI_IOV */ #define ice_process_vflr_event(pf) do {} while (0) #define ice_free_vfs(pf) do {} while (0) #define ice_vc_process_vf_msg(pf, event) do {} while (0) #define ice_vc_notify_link_state(pf) do {} while (0) #define ice_vc_notify_reset(pf) do {} while (0) +#define ice_set_vf_state_qs_dis(vf) do {} while (0) static inline bool ice_reset_all_vfs(struct ice_pf __always_unused *pf, diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index b4df3e319467..105b0624081a 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -4731,8 +4731,7 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring) { u16 i = rx_ring->next_to_clean; - if (rx_ring->skb) - dev_kfree_skb(rx_ring->skb); + dev_kfree_skb(rx_ring->skb); rx_ring->skb = NULL; /* Free all the Rx ring sk_buffs */ @@ -5918,7 +5917,7 @@ static int igb_tx_map(struct igb_ring *tx_ring, struct sk_buff *skb = first->skb; struct igb_tx_buffer *tx_buffer; union e1000_adv_tx_desc *tx_desc; - struct skb_frag_struct *frag; + skb_frag_t *frag; dma_addr_t dma; unsigned int data_len, size; u32 tx_flags = first->tx_flags; @@ -6074,7 +6073,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, * otherwise try next time */ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) - count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); + count += TXD_USE_COUNT(skb_frag_size( + &skb_shinfo(skb)->frags[f])); if (igb_maybe_stop_tx(tx_ring, count + 3)) { /* this is a hard error */ @@ -8879,8 +8879,7 @@ static int __maybe_unused igb_resume(struct device *dev) static int __maybe_unused igb_runtime_idle(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct net_device *netdev = pci_get_drvdata(pdev); + struct net_device *netdev = dev_get_drvdata(dev); struct igb_adapter *adapter = netdev_priv(netdev); if (!igb_has_link(adapter)) diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c 
b/drivers/net/ethernet/intel/igbvf/netdev.c index 34cd30d7162f..0f2b68f4bb0f 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -2174,7 +2174,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, goto dma_error; for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { - const struct skb_frag_struct *frag; + const skb_frag_t *frag; count++; i++; diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 0f5534ce27b0..7e16345d836e 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -135,6 +135,9 @@ extern char igc_driver_version[]; /* How many Rx Buffers do we bundle into one write to the hardware ? */ #define IGC_RX_BUFFER_WRITE 16 /* Must be power of 2 */ +/* VLAN info */ +#define IGC_TX_FLAGS_VLAN_MASK 0xffff0000 + /* igc_test_staterr - tests bits within Rx descriptor status and error fields */ static inline __le32 igc_test_staterr(union igc_adv_rx_desc *rx_desc, const u32 stat_err_bits) @@ -254,6 +257,7 @@ struct igc_ring { u16 count; /* number of desc. in the ring */ u8 queue_index; /* logical index of the ring*/ u8 reg_idx; /* physical index of the ring */ + bool launchtime_enable; /* true if LaunchTime is enabled */ /* everything past this point are written often */ u16 next_to_clean; diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c index 59258d791106..db289bcce21d 100644 --- a/drivers/net/ethernet/intel/igc/igc_base.c +++ b/drivers/net/ethernet/intel/igc/igc_base.c @@ -40,7 +40,7 @@ static s32 igc_reset_hw_base(struct igc_hw *hw) ctrl = rd32(IGC_CTRL); hw_dbg("Issuing a global reset to MAC\n"); - wr32(IGC_CTRL, ctrl | IGC_CTRL_RST); + wr32(IGC_CTRL, ctrl | IGC_CTRL_DEV_RST); ret_val = igc_get_auto_rd_done(hw); if (ret_val) { @@ -209,6 +209,9 @@ static s32 igc_get_invariants_base(struct igc_hw *hw) switch (hw->device_id) { case IGC_DEV_ID_I225_LM: case IGC_DEV_ID_I225_V: + case IGC_DEV_ID_I225_I: + case IGC_DEV_ID_I220_V: + case IGC_DEV_ID_I225_K: mac->type = igc_i225; break; default: diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h index 58d1109d7f3f..ea627ce52525 100644 --- a/drivers/net/ethernet/intel/igc/igc_base.h +++ b/drivers/net/ethernet/intel/igc/igc_base.h @@ -22,6 +22,14 @@ union igc_adv_tx_desc { } wb; }; +/* Context descriptors */ +struct igc_adv_tx_context_desc { + __le32 vlan_macip_lens; + __le32 launch_time; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + /* Adv Transmit Descriptor Config Masks */ #define IGC_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */ #define IGC_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index fc0ccfe38a20..f3f2325fe567 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -10,10 +10,6 @@ #define IGC_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */ -/* PCI Bus Info */ -#define PCIE_DEVICE_CONTROL2 0x28 -#define PCIE_DEVICE_CONTROL2_16ms 0x0005 - /* Physical Func Reset Done Indication */ #define IGC_CTRL_EXT_LINK_MODE_MASK 0x00C00000 @@ -54,7 +50,7 @@ #define IGC_ERR_SWFW_SYNC 13 /* Device Control */ -#define IGC_CTRL_RST 0x04000000 /* Global reset */ +#define IGC_CTRL_DEV_RST 0x20000000 /* Device reset */ #define IGC_CTRL_PHY_RST 0x80000000 /* PHY Reset */ #define IGC_CTRL_SLU 0x00000040 /* Set link 
up (Force Link) */ @@ -401,4 +397,9 @@ #define IGC_VLAPQF_P_VALID(_n) (0x1 << (3 + (_n) * 4)) #define IGC_VLAPQF_QUEUE_MASK 0x03 +#define IGC_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define IGC_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type:1=IPv4 */ +#define IGC_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet Type of TCP */ +#define IGC_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */ + #endif /* _IGC_DEFINES_H_ */ diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h index 1039a224ac80..abb2d72911ff 100644 --- a/drivers/net/ethernet/intel/igc/igc_hw.h +++ b/drivers/net/ethernet/intel/igc/igc_hw.h @@ -18,6 +18,9 @@ #define IGC_DEV_ID_I225_LM 0x15F2 #define IGC_DEV_ID_I225_V 0x15F3 +#define IGC_DEV_ID_I225_I 0x15F8 +#define IGC_DEV_ID_I220_V 0x15F7 +#define IGC_DEV_ID_I225_K 0x3100 #define IGC_FUNC_0 0 @@ -151,16 +154,10 @@ struct igc_phy_info { u16 autoneg_advertised; u16 autoneg_mask; - u16 cable_length; - u16 max_cable_length; - u16 min_cable_length; - u16 pair_length[4]; u8 mdix; - bool disable_polarity_correction; bool is_mdix; - bool polarity_correction; bool reset_disable; bool speed_downgraded; bool autoneg_wait_to_complete; @@ -190,12 +187,7 @@ struct igc_fc_info { }; struct igc_dev_spec_base { - bool global_device_reset; - bool eee_disable; bool clear_semaphore_once; - bool module_plugged; - u8 media_port; - bool mas_capable; }; struct igc_hw { diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c index ba4646737288..5eeb4c8caf4a 100644 --- a/drivers/net/ethernet/intel/igc/igc_mac.c +++ b/drivers/net/ethernet/intel/igc/igc_mac.c @@ -7,9 +7,6 @@ #include "igc_mac.h" #include "igc_hw.h" -/* forward declaration */ -static s32 igc_set_fc_watermarks(struct igc_hw *hw); - /** * igc_disable_pcie_master - Disables PCI-express master access * @hw: pointer to the HW structure @@ -75,6 +72,41 @@ void igc_init_rx_addrs(struct igc_hw *hw, u16 rar_count) } /** + * igc_set_fc_watermarks - Set flow control high/low watermarks + * @hw: pointer to the HW structure + * + * Sets the flow control high/low threshold (watermark) registers. If + * flow control XON frame transmission is enabled, then set XON frame + * transmission as well. + */ +static s32 igc_set_fc_watermarks(struct igc_hw *hw) +{ + u32 fcrtl = 0, fcrth = 0; + + /* Set the flow control receive threshold registers. Normally, + * these registers will be set to a default threshold that may be + * adjusted later by the driver's runtime code. However, if the + * ability to transmit pause frames is not enabled, then these + * registers will be set to 0. + */ + if (hw->fc.current_mode & igc_fc_tx_pause) { + /* We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames. + */ + fcrtl = hw->fc.low_water; + if (hw->fc.send_xon) + fcrtl |= IGC_FCRTL_XONE; + + fcrth = hw->fc.high_water; + } + wr32(IGC_FCRTL, fcrtl); + wr32(IGC_FCRTH, fcrth); + + return 0; +} + +/** * igc_setup_link - Setup flow control and link settings * @hw: pointer to the HW structure * @@ -195,41 +227,6 @@ out: } /** - * igc_set_fc_watermarks - Set flow control high/low watermarks - * @hw: pointer to the HW structure - * - * Sets the flow control high/low threshold (watermark) registers. If - * flow control XON frame transmission is enabled, then set XON frame - * transmission as well. 
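For context on these watermark registers (the function is only being moved; its body is unchanged): FCRTH holds the receive-FIFO high-water mark at which the MAC transmits XOFF pause frames, and FCRTL the low-water mark at which it sends XON to resume the partner, provided send_xon is set. If the negotiated mode does not permit transmitting pause frames, both stay zero and the mechanism is off. The core logic, trimmed to its essentials:

	u32 fcrtl = 0, fcrth = 0;

	if (hw->fc.current_mode & igc_fc_tx_pause) {
		fcrtl = hw->fc.low_water;	/* resume (XON) threshold */
		if (hw->fc.send_xon)
			fcrtl |= IGC_FCRTL_XONE;	/* emit XON frames */
		fcrth = hw->fc.high_water;	/* pause (XOFF) threshold */
	}
	wr32(IGC_FCRTL, fcrtl);
	wr32(IGC_FCRTH, fcrth);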
- */ -static s32 igc_set_fc_watermarks(struct igc_hw *hw) -{ - u32 fcrtl = 0, fcrth = 0; - - /* Set the flow control receive threshold registers. Normally, - * these registers will be set to a default threshold that may be - * adjusted later by the driver's runtime code. However, if the - * ability to transmit pause frames is not enabled, then these - * registers will be set to 0. - */ - if (hw->fc.current_mode & igc_fc_tx_pause) { - /* We need to set up the Receive Threshold high and low water - * marks as well as (optionally) enabling the transmission of - * XON frames. - */ - fcrtl = hw->fc.low_water; - if (hw->fc.send_xon) - fcrtl |= IGC_FCRTL_XONE; - - fcrth = hw->fc.high_water; - } - wr32(IGC_FCRTL, fcrtl); - wr32(IGC_FCRTH, fcrth); - - return 0; -} - -/** * igc_clear_hw_cntrs_base - Clear base hardware counters * @hw: pointer to the HW structure * diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index aa9323e55406..63b62d74f961 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -5,6 +5,11 @@ #include <linux/types.h> #include <linux/if_vlan.h> #include <linux/aer.h> +#include <linux/tcp.h> +#include <linux/udp.h> +#include <linux/ip.h> + +#include <net/ipv6.h> #include "igc.h" #include "igc_hw.h" @@ -36,6 +41,9 @@ static const struct igc_info *igc_info_tbl[] = { static const struct pci_device_id igc_pci_tbl[] = { { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base }, { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base }, /* required last entry */ {0, } }; @@ -349,8 +357,7 @@ static void igc_clean_rx_ring(struct igc_ring *rx_ring) { u16 i = rx_ring->next_to_clean; - if (rx_ring->skb) - dev_kfree_skb(rx_ring->skb); + dev_kfree_skb(rx_ring->skb); rx_ring->skb = NULL; /* Free all the Rx ring sk_buffs */ @@ -788,8 +795,96 @@ static int igc_set_mac(struct net_device *netdev, void *p) return 0; } +static void igc_tx_ctxtdesc(struct igc_ring *tx_ring, + struct igc_tx_buffer *first, + u32 vlan_macip_lens, u32 type_tucmd, + u32 mss_l4len_idx) +{ + struct igc_adv_tx_context_desc *context_desc; + u16 i = tx_ring->next_to_use; + struct timespec64 ts; + + context_desc = IGC_TX_CTXTDESC(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; + + /* set bits to identify this as an advanced context descriptor */ + type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT; + + /* For 82575, context index must be unique per ring. */ + if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) + mss_l4len_idx |= tx_ring->reg_idx << 4; + + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); + + /* We assume there is always a valid Tx time available. Invalid times + * should have been handled by the upper layers. 
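The conversion guarded below packs the skb's absolute transmit timestamp into the context descriptor's 32-bit launch_time field at 32 ns granularity, and it keeps only the tv_nsec part, i.e. the offset within the current second. Isolated as a helper for clarity (hypothetical name, same arithmetic as the code that follows):

static __le32 igc_tx_launch_time(ktime_t txtime)
{
	/* sub-second part only, expressed in 32 ns units */
	struct timespec64 ts = ns_to_timespec64(txtime);

	return cpu_to_le32(ts.tv_nsec / 32);
}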
+ */ + if (tx_ring->launchtime_enable) { + ts = ns_to_timespec64(first->skb->tstamp); + first->skb->tstamp = 0; + context_desc->launch_time = cpu_to_le32(ts.tv_nsec / 32); + } else { + context_desc->launch_time = 0; + } +} + +static inline bool igc_ipv6_csum_is_sctp(struct sk_buff *skb) +{ + unsigned int offset = 0; + + ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL); + + return offset == skb_checksum_start_offset(skb); +} + static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first) { + struct sk_buff *skb = first->skb; + u32 vlan_macip_lens = 0; + u32 type_tucmd = 0; + + if (skb->ip_summed != CHECKSUM_PARTIAL) { +csum_failed: + if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) && + !tx_ring->launchtime_enable) + return; + goto no_csum; + } + + switch (skb->csum_offset) { + case offsetof(struct tcphdr, check): + type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP; + /* fall through */ + case offsetof(struct udphdr, check): + break; + case offsetof(struct sctphdr, checksum): + /* validate that this is actually an SCTP request */ + if ((first->protocol == htons(ETH_P_IP) && + (ip_hdr(skb)->protocol == IPPROTO_SCTP)) || + (first->protocol == htons(ETH_P_IPV6) && + igc_ipv6_csum_is_sctp(skb))) { + type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP; + break; + } + /* fall through */ + default: + skb_checksum_help(skb); + goto csum_failed; + } + + /* update TX checksum flag */ + first->tx_flags |= IGC_TX_FLAGS_CSUM; + vlan_macip_lens = skb_checksum_start_offset(skb) - + skb_network_offset(skb); +no_csum: + vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; + + igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0); } static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size) @@ -861,7 +956,7 @@ static int igc_tx_map(struct igc_ring *tx_ring, struct igc_tx_buffer *tx_buffer; union igc_adv_tx_desc *tx_desc; u32 tx_flags = first->tx_flags; - struct skb_frag_struct *frag; + skb_frag_t *frag; u16 i = tx_ring->next_to_use; unsigned int data_len, size; dma_addr_t dma; @@ -1015,7 +1110,8 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, * otherwise try next time */ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) - count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); + count += TXD_USE_COUNT(skb_frag_size( + &skb_shinfo(skb)->frags[f])); if (igc_maybe_stop_tx(tx_ring, count + 3)) { /* this is a hard error */ @@ -4113,6 +4209,9 @@ static int igc_probe(struct pci_dev *pdev, if (err) goto err_sw_init; + /* Add supported features to the features list*/ + netdev->features |= NETIF_F_HW_CSUM; + /* setup the private structure */ err = igc_sw_init(adapter); if (err) @@ -4120,6 +4219,7 @@ static int igc_probe(struct pci_dev *pdev, /* copy netdev features into list of user selectable features */ netdev->hw_features |= NETIF_F_NTUPLE; + netdev->hw_features |= netdev->features; /* MTU range: 68 - 9216 */ netdev->min_mtu = ETH_MIN_MTU; @@ -4130,6 +4230,15 @@ static int igc_probe(struct pci_dev *pdev, */ hw->mac.ops.reset_hw(hw); + if (igc_get_flash_presence_i225(hw)) { + if (hw->nvm.ops.validate(hw) < 0) { + dev_err(&pdev->dev, + "The NVM Checksum Is Not Valid\n"); + err = -EIO; + goto err_eeprom; + } + } + if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) { /* copy the MAC address out of the NVM */ if (hw->mac.ops.read_mac_addr(hw)) diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c index 4c8f96a9a148..f4b05af0dd2f 100644 --- 
a/drivers/net/ethernet/intel/igc/igc_phy.c +++ b/drivers/net/ethernet/intel/igc/igc_phy.c @@ -3,10 +3,6 @@ #include "igc_phy.h" -/* forward declaration */ -static s32 igc_phy_setup_autoneg(struct igc_hw *hw); -static s32 igc_wait_autoneg(struct igc_hw *hw); - /** * igc_check_reset_block - Check if PHY reset is blocked * @hw: pointer to the HW structure @@ -208,100 +204,6 @@ out: } /** - * igc_copper_link_autoneg - Setup/Enable autoneg for copper link - * @hw: pointer to the HW structure - * - * Performs initial bounds checking on autoneg advertisement parameter, then - * configure to advertise the full capability. Setup the PHY to autoneg - * and restart the negotiation process between the link partner. If - * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. - */ -static s32 igc_copper_link_autoneg(struct igc_hw *hw) -{ - struct igc_phy_info *phy = &hw->phy; - u16 phy_ctrl; - s32 ret_val; - - /* Perform some bounds checking on the autoneg advertisement - * parameter. - */ - phy->autoneg_advertised &= phy->autoneg_mask; - - /* If autoneg_advertised is zero, we assume it was not defaulted - * by the calling code so we set to advertise full capability. - */ - if (phy->autoneg_advertised == 0) - phy->autoneg_advertised = phy->autoneg_mask; - - hw_dbg("Reconfiguring auto-neg advertisement params\n"); - ret_val = igc_phy_setup_autoneg(hw); - if (ret_val) { - hw_dbg("Error Setting up Auto-Negotiation\n"); - goto out; - } - hw_dbg("Restarting Auto-Neg\n"); - - /* Restart auto-negotiation by setting the Auto Neg Enable bit and - * the Auto Neg Restart bit in the PHY control register. - */ - ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); - if (ret_val) - goto out; - - phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); - ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl); - if (ret_val) - goto out; - - /* Does the user want to wait for Auto-Neg to complete here, or - * check at a later time (for example, callback routine). - */ - if (phy->autoneg_wait_to_complete) { - ret_val = igc_wait_autoneg(hw); - if (ret_val) { - hw_dbg("Error while waiting for autoneg to complete\n"); - goto out; - } - } - - hw->mac.get_link_status = true; - -out: - return ret_val; -} - -/** - * igc_wait_autoneg - Wait for auto-neg completion - * @hw: pointer to the HW structure - * - * Waits for auto-negotiation to complete or for the auto-negotiation time - * limit to expire, which ever happens first. - */ -static s32 igc_wait_autoneg(struct igc_hw *hw) -{ - u16 i, phy_status; - s32 ret_val = 0; - - /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */ - for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { - ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); - if (ret_val) - break; - ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); - if (ret_val) - break; - if (phy_status & MII_SR_AUTONEG_COMPLETE) - break; - msleep(100); - } - - /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation - * has completed. - */ - return ret_val; -} - -/** * igc_phy_setup_autoneg - Configure PHY for auto-negotiation * @hw: pointer to the HW structure * @@ -486,6 +388,100 @@ static s32 igc_phy_setup_autoneg(struct igc_hw *hw) } /** + * igc_wait_autoneg - Wait for auto-neg completion + * @hw: pointer to the HW structure + * + * Waits for auto-negotiation to complete or for the auto-negotiation time + * limit to expire, which ever happens first. 
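The igc_phy.c hunks here, like the igc_mac.c hunks above, are pure code motion: each static helper moves above its first caller so the file-scope forward declarations can be dropped. The pattern in miniature (toy functions, standard C):

/* before: a declaration is required because the definition follows the use */
static int helper(void);
int caller(void) { return helper(); }
static int helper(void) { return 42; }

/* after: the definition precedes the use; the separate declaration goes away */
static int helper(void) { return 42; }
int caller(void) { return helper(); }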
+ */ +static s32 igc_wait_autoneg(struct igc_hw *hw) +{ + u16 i, phy_status; + s32 ret_val = 0; + + /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */ + for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_AUTONEG_COMPLETE) + break; + msleep(100); + } + + /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation + * has completed. + */ + return ret_val; +} + +/** + * igc_copper_link_autoneg - Setup/Enable autoneg for copper link + * @hw: pointer to the HW structure + * + * Performs initial bounds checking on autoneg advertisement parameter, then + * configure to advertise the full capability. Setup the PHY to autoneg + * and restart the negotiation process between the link partner. If + * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. + */ +static s32 igc_copper_link_autoneg(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + u16 phy_ctrl; + s32 ret_val; + + /* Perform some bounds checking on the autoneg advertisement + * parameter. + */ + phy->autoneg_advertised &= phy->autoneg_mask; + + /* If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. + */ + if (phy->autoneg_advertised == 0) + phy->autoneg_advertised = phy->autoneg_mask; + + hw_dbg("Reconfiguring auto-neg advertisement params\n"); + ret_val = igc_phy_setup_autoneg(hw); + if (ret_val) { + hw_dbg("Error Setting up Auto-Negotiation\n"); + goto out; + } + hw_dbg("Restarting Auto-Neg\n"); + + /* Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + goto out; + + phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + goto out; + + /* Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). 
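One non-obvious detail in igc_wait_autoneg() above: PHY_STATUS is read twice per polling pass. The idiom, inherited from the older Intel PHY code, exists because some MII status bits are latched, so the first read returns (and clears) a stale latched value and only the second read reflects the current state. Schematically:

	/* first read flushes latched (sticky) status bits */
	ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
	if (!ret_val)	/* second read reports the live status */
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);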
+ */ + if (phy->autoneg_wait_to_complete) { + ret_val = igc_wait_autoneg(hw); + if (ret_val) { + hw_dbg("Error while waiting for autoneg to complete\n"); + goto out; + } + } + + hw->mac.get_link_status = true; + +out: + return ret_val; +} + +/** * igc_setup_copper_link - Configure copper link settings * @hw: pointer to the HW structure * diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index e5ac2d3fd816..0940a0da16f2 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -1331,9 +1331,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb, } for (f = 0; f < nr_frags; f++) { - const struct skb_frag_struct *frag; - - frag = &skb_shinfo(skb)->frags[f]; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; len = skb_frag_size(frag); offset = 0; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c index 50dfb02fa34c..171cdc552961 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c @@ -190,22 +190,12 @@ static const struct file_operations ixgbe_dbg_netdev_ops_fops = { void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) { const char *name = pci_name(adapter->pdev); - struct dentry *pfile; + adapter->ixgbe_dbg_adapter = debugfs_create_dir(name, ixgbe_dbg_root); - if (adapter->ixgbe_dbg_adapter) { - pfile = debugfs_create_file("reg_ops", 0600, - adapter->ixgbe_dbg_adapter, adapter, - &ixgbe_dbg_reg_ops_fops); - if (!pfile) - e_dev_err("debugfs reg_ops for %s failed\n", name); - pfile = debugfs_create_file("netdev_ops", 0600, - adapter->ixgbe_dbg_adapter, adapter, - &ixgbe_dbg_netdev_ops_fops); - if (!pfile) - e_dev_err("debugfs netdev_ops for %s failed\n", name); - } else { - e_dev_err("debugfs entry for %s failed\n", name); - } + debugfs_create_file("reg_ops", 0600, adapter->ixgbe_dbg_adapter, + adapter, &ixgbe_dbg_reg_ops_fops); + debugfs_create_file("netdev_ops", 0600, adapter->ixgbe_dbg_adapter, + adapter, &ixgbe_dbg_netdev_ops_fops); } /** @@ -224,8 +214,6 @@ void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) void ixgbe_dbg_init(void) { ixgbe_dbg_root = debugfs_create_dir(ixgbe_driver_name, NULL); - if (ixgbe_dbg_root == NULL) - pr_err("init of debugfs failed\n"); } /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c index 31629fc7e820..113f6087c7c9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c @@ -960,11 +960,9 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) return 0; err_aead: - memset(xs->aead, 0, sizeof(*xs->aead)); - kfree(xs->aead); + kzfree(xs->aead); err_xs: - memset(xs, 0, sizeof(*xs)); - kfree(xs); + kzfree(xs); err_out: msgbuf[1] = err; return err; @@ -1049,8 +1047,7 @@ int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) ixgbe_ipsec_del_sa(xs); /* remove the xs that was made-up in the add request */ - memset(xs, 0, sizeof(*xs)); - kfree(xs); + kzfree(xs); return 0; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 51c696b6bf64..1ce2397306b9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1786,7 +1786,7 @@ static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring, static void ixgbe_pull_tail(struct ixgbe_ring 
*rx_ring, struct sk_buff *skb) { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; unsigned char *va; unsigned int pull_len; @@ -1808,7 +1808,7 @@ static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring, /* update all of the pointers */ skb_frag_size_sub(frag, pull_len); - frag->page_offset += pull_len; + skb_frag_off_add(frag, pull_len); skb->data_len -= pull_len; skb->tail += pull_len; } @@ -1826,13 +1826,7 @@ static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring, static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, struct sk_buff *skb) { - /* if the page was released unmap it, else just sync our portion */ - if (unlikely(IXGBE_CB(skb)->page_released)) { - dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma, - ixgbe_rx_pg_size(rx_ring), - DMA_FROM_DEVICE, - IXGBE_RX_DMA_ATTR); - } else if (ring_uses_build_skb(rx_ring)) { + if (ring_uses_build_skb(rx_ring)) { unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK; dma_sync_single_range_for_cpu(rx_ring->dev, @@ -1841,14 +1835,22 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, skb_headlen(skb), DMA_FROM_DEVICE); } else { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; dma_sync_single_range_for_cpu(rx_ring->dev, IXGBE_CB(skb)->dma, - frag->page_offset, + skb_frag_off(frag), skb_frag_size(frag), DMA_FROM_DEVICE); } + + /* If the page was released, just unmap it. */ + if (unlikely(IXGBE_CB(skb)->page_released)) { + dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma, + ixgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IXGBE_RX_DMA_ATTR); + } } /** @@ -8186,7 +8188,7 @@ static int ixgbe_tx_map(struct ixgbe_ring *tx_ring, struct sk_buff *skb = first->skb; struct ixgbe_tx_buffer *tx_buffer; union ixgbe_adv_tx_desc *tx_desc; - struct skb_frag_struct *frag; + skb_frag_t *frag; dma_addr_t dma; unsigned int data_len, size; u32 tx_flags = first->tx_flags; @@ -8605,7 +8607,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, * otherwise try next time */ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) - count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); + count += TXD_USE_COUNT(skb_frag_size( + &skb_shinfo(skb)->frags[f])); if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) { tx_ring->tx_stats.tx_busy++; @@ -8748,7 +8751,7 @@ static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb, if (skb_put_padto(skb, 17)) return NETDEV_TX_OK; - tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping]; + tx_ring = ring ? 
ring : adapter->tx_ring[skb_get_queue_mapping(skb)]; if (unlikely(test_bit(__IXGBE_TX_DISABLED, &tx_ring->state))) return NETDEV_TX_BUSY; @@ -9490,6 +9493,10 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, jump->mat = nexthdr[i].jump; adapter->jump_tables[link_uhtid] = jump; break; + } else { + kfree(mask); + kfree(input); + kfree(jump); } } return 0; @@ -10262,7 +10269,8 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog) if (need_reset && prog) for (i = 0; i < adapter->num_rx_queues; i++) if (adapter->xdp_ring[i]->xsk_umem) - (void)ixgbe_xsk_async_xmit(adapter->netdev, i); + (void)ixgbe_xsk_wakeup(adapter->netdev, i, + XDP_WAKEUP_RX); return 0; } @@ -10381,7 +10389,7 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_features_check = ixgbe_features_check, .ndo_bpf = ixgbe_xdp, .ndo_xdp_xmit = ixgbe_xdp_xmit, - .ndo_xsk_async_xmit = ixgbe_xsk_async_xmit, + .ndo_xsk_wakeup = ixgbe_xsk_wakeup, }; static void ixgbe_disable_txr_hw(struct ixgbe_adapter *adapter, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h index d93a690aff74..6d01700b46bc 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h @@ -42,7 +42,7 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector, void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring); bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector, struct ixgbe_ring *tx_ring, int napi_budget); -int ixgbe_xsk_async_xmit(struct net_device *dev, u32 queue_id); +int ixgbe_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags); void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring); #endif /* #define _IXGBE_TXRX_COMMON_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index a3b6d8c89127..100ac89b345d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -100,7 +100,7 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter, ixgbe_txrx_ring_enable(adapter, qid); /* Kick start the NAPI context so that receiving will start */ - err = ixgbe_xsk_async_xmit(adapter->netdev, qid); + err = ixgbe_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX); if (err) return err; } @@ -143,15 +143,20 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter, struct ixgbe_ring *rx_ring, struct xdp_buff *xdp) { + struct xdp_umem *umem = rx_ring->xsk_umem; int err, result = IXGBE_XDP_PASS; struct bpf_prog *xdp_prog; struct xdp_frame *xdpf; + u64 offset; u32 act; rcu_read_lock(); xdp_prog = READ_ONCE(rx_ring->xdp_prog); act = bpf_prog_run_xdp(xdp_prog, xdp); - xdp->handle += xdp->data - xdp->data_hard_start; + offset = xdp->data - xdp->data_hard_start; + + xdp->handle = xsk_umem_adjust_offset(umem, xdp->handle, offset); + switch (act) { case XDP_PASS: break; @@ -201,8 +206,6 @@ ixgbe_rx_buffer *ixgbe_get_rx_buffer_zc(struct ixgbe_ring *rx_ring, static void ixgbe_reuse_rx_buffer_zc(struct ixgbe_ring *rx_ring, struct ixgbe_rx_buffer *obi) { - unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask; - u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM; u16 nta = rx_ring->next_to_alloc; struct ixgbe_rx_buffer *nbi; @@ -212,14 +215,9 @@ static void ixgbe_reuse_rx_buffer_zc(struct ixgbe_ring *rx_ring, rx_ring->next_to_alloc = (nta < rx_ring->count) ? 
nta : 0; /* transfer page from old buffer to new buffer */ - nbi->dma = obi->dma & mask; - nbi->dma += hr; - - nbi->addr = (void *)((unsigned long)obi->addr & mask); - nbi->addr += hr; - - nbi->handle = obi->handle & mask; - nbi->handle += rx_ring->xsk_umem->headroom; + nbi->dma = obi->dma; + nbi->addr = obi->addr; + nbi->handle = obi->handle; obi->addr = NULL; obi->skb = NULL; @@ -250,7 +248,8 @@ void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle) bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle); bi->addr += hr; - bi->handle = (u64)handle + rx_ring->xsk_umem->headroom; + bi->handle = xsk_umem_adjust_offset(rx_ring->xsk_umem, (u64)handle, + rx_ring->xsk_umem->headroom); } static bool ixgbe_alloc_buffer_zc(struct ixgbe_ring *rx_ring, @@ -276,7 +275,7 @@ static bool ixgbe_alloc_buffer_zc(struct ixgbe_ring *rx_ring, bi->addr = xdp_umem_get_data(umem, handle); bi->addr += hr; - bi->handle = handle + umem->headroom; + bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom); xsk_umem_discard_addr(umem); return true; @@ -303,7 +302,7 @@ static bool ixgbe_alloc_buffer_slow_zc(struct ixgbe_ring *rx_ring, bi->addr = xdp_umem_get_data(umem, handle); bi->addr += hr; - bi->handle = handle + umem->headroom; + bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom); xsk_umem_discard_addr_rq(umem); return true; @@ -547,6 +546,14 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector, q_vector->rx.total_packets += total_rx_packets; q_vector->rx.total_bytes += total_rx_bytes; + if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) { + if (failure || rx_ring->next_to_clean == rx_ring->next_to_use) + xsk_set_rx_need_wakeup(rx_ring->xsk_umem); + else + xsk_clear_rx_need_wakeup(rx_ring->xsk_umem); + + return (int)total_rx_packets; + } return failure ? 
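
xsk_umem_adjust_offset(), used throughout the hunks above, folds a headroom or data offset into a umem handle. With aligned chunks it is a plain addition; in the unaligned-chunk mode introduced alongside this change, the offset travels in the upper bits of the 64-bit address (XSK_UNALIGNED_BUF_OFFSET_SHIFT). A rough model of the helper:

        static u64 model_adjust_offset(bool unaligned_chunks, u64 handle,
                                       u64 offset)
        {
                /* unaligned_chunks models XDP_UMEM_UNALIGNED_CHUNK_FLAG */
                if (unaligned_chunks)
                        return handle +
                               (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
                return handle + offset;
        }
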
budget : (int)total_rx_packets; } @@ -615,6 +622,8 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget) if (tx_desc) { ixgbe_xdp_ring_update_tail(xdp_ring); xsk_umem_consume_tx_done(xdp_ring->xsk_umem); + if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem)) + xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem); } return !!budget && work_done; @@ -682,10 +691,17 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector, if (xsk_frames) xsk_umem_complete_tx(umem, xsk_frames); + if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem)) { + if (tx_ring->next_to_clean == tx_ring->next_to_use) + xsk_set_tx_need_wakeup(tx_ring->xsk_umem); + else + xsk_clear_tx_need_wakeup(tx_ring->xsk_umem); + } + return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit); } -int ixgbe_xsk_async_xmit(struct net_device *dev, u32 qid) +int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_ring *ring; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 72872d6ca80c..076f2da36f27 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -2262,12 +2262,14 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter) static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - int api[] = { ixgbe_mbox_api_14, - ixgbe_mbox_api_13, - ixgbe_mbox_api_12, - ixgbe_mbox_api_11, - ixgbe_mbox_api_10, - ixgbe_mbox_api_unknown }; + static const int api[] = { + ixgbe_mbox_api_14, + ixgbe_mbox_api_13, + ixgbe_mbox_api_12, + ixgbe_mbox_api_11, + ixgbe_mbox_api_10, + ixgbe_mbox_api_unknown + }; int err, idx = 0; spin_lock_bh(&adapter->mbx_lock); @@ -2518,6 +2520,7 @@ void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter) msleep(1); ixgbevf_down(adapter); + pci_set_master(adapter->pdev); ixgbevf_up(adapter); clear_bit(__IXGBEVF_RESETTING, &adapter->state); @@ -3950,7 +3953,7 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, struct sk_buff *skb = first->skb; struct ixgbevf_tx_buffer *tx_buffer; union ixgbe_adv_tx_desc *tx_desc; - struct skb_frag_struct *frag; + skb_frag_t *frag; dma_addr_t dma; unsigned int data_len, size; u32 tx_flags = first->tx_flags; @@ -4135,8 +4138,11 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb, * otherwise try next time */ #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD - for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) - count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; + + count += TXD_USE_COUNT(skb_frag_size(frag)); + } #else count += skb_shinfo(skb)->nr_frags; #endif diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index 0b668357db4d..6d52cf5ce20e 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -2030,23 +2030,22 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx) bool hidma = jme->dev->features & NETIF_F_HIGHDMA; int i, nr_frags = skb_shinfo(skb)->nr_frags; int mask = jme->tx_ring_mask; - const struct skb_frag_struct *frag; u32 len; int ret = 0; for (i = 0 ; i < nr_frags ; ++i) { - frag = &skb_shinfo(skb)->frags[i]; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + ctxdesc = txdesc + ((idx + i + 2) & (mask)); ctxbi = txbi + ((idx + i + 2) & (mask)); ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, - skb_frag_page(frag), - 
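
The xsk_set/clear_*_need_wakeup() calls above are the driver half of the AF_XDP need_wakeup protocol: raise the flag when a ring stalls or runs dry, clear it while the driver keeps up on its own. Userspace then only pays for a syscall when asked. The matching libbpf-side pattern, as a sketch (the xsk struct with its tx producer ring follows the usual xdpsock sample layout and is not an API given here):

        if (xsk_ring_prod__needs_wakeup(&xsk->tx))
                sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT,
                       NULL, 0);
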
frag->page_offset, skb_frag_size(frag), hidma); + skb_frag_page(frag), skb_frag_off(frag), + skb_frag_size(frag), hidma); if (ret) { jme_drop_tx_map(jme, idx, i); goto out; } - } len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len; @@ -3193,8 +3192,7 @@ jme_shutdown(struct pci_dev *pdev) static int jme_suspend(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct net_device *netdev = pci_get_drvdata(pdev); + struct net_device *netdev = dev_get_drvdata(dev); struct jme_adapter *jme = netdev_priv(netdev); if (!netif_running(netdev)) @@ -3236,8 +3234,7 @@ jme_suspend(struct device *dev) static int jme_resume(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct net_device *netdev = pci_get_drvdata(pdev); + struct net_device *netdev = dev_get_drvdata(dev); struct jme_adapter *jme = netdev_priv(netdev); if (!netif_running(netdev)) diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c index cda641ef89af..900affbdcc0e 100644 --- a/drivers/net/ethernet/lantiq_xrx200.c +++ b/drivers/net/ethernet/lantiq_xrx200.c @@ -458,17 +458,11 @@ static int xrx200_probe(struct platform_device *pdev) } priv->chan_rx.dma.irq = platform_get_irq_byname(pdev, "rx"); - if (priv->chan_rx.dma.irq < 0) { - dev_err(dev, "failed to get RX IRQ, %i\n", - priv->chan_rx.dma.irq); + if (priv->chan_rx.dma.irq < 0) return -ENOENT; - } priv->chan_tx.dma.irq = platform_get_irq_byname(pdev, "tx"); - if (priv->chan_tx.dma.irq < 0) { - dev_err(dev, "failed to get TX IRQ, %i\n", - priv->chan_tx.dma.irq); + if (priv->chan_tx.dma.irq < 0) return -ENOENT; - } /* get the clock */ priv->clk = devm_clk_get(dev, NULL); diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 88ea5ac83c93..82ea55ae5053 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -659,7 +659,7 @@ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb) for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag]; - if (skb_frag_size(fragp) <= 8 && fragp->page_offset & 7) + if (skb_frag_size(fragp) <= 8 && skb_frag_off(fragp) & 7) return 1; } diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 895bfed26a8a..e49820675c8c 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -2350,10 +2350,10 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb, for (i = 0; i < nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - void *addr = page_address(frag->page.p) + frag->page_offset; + void *addr = skb_frag_address(frag); tx_desc = mvneta_txq_next_desc_get(txq); - tx_desc->data_size = frag->size; + tx_desc->data_size = skb_frag_size(frag); tx_desc->buf_phys_addr = dma_map_single(pp->dev->dev.parent, addr, @@ -4469,7 +4469,6 @@ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode) /* Device initialization routine */ static int mvneta_probe(struct platform_device *pdev) { - struct resource *res; struct device_node *dn = pdev->dev.of_node; struct device_node *bm_node; struct mvneta_port *pp; @@ -4553,8 +4552,7 @@ static int mvneta_probe(struct platform_device *pdev) if (!IS_ERR(pp->clk_bus)) clk_prepare_enable(pp->clk_bus); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - pp->base = devm_ioremap_resource(&pdev->dev, res); + pp->base = devm_platform_ioremap_resource(pdev, 0); if 
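
The jme suspend/resume hunks above lean on drvdata living in the generic struct device: pci_get_drvdata(pdev) is simply dev_get_drvdata(&pdev->dev), so dev_pm_ops callbacks, which already receive a struct device, can skip the to_pci_dev() round-trip. (The lantiq hunk is the same species of cleanup: platform_get_irq_byname() now logs failures itself, making the per-driver dev_err() redundant.) Sketch:

        static int demo_suspend(struct device *dev)
        {
                struct net_device *netdev = dev_get_drvdata(dev);

                if (!netif_running(netdev))
                        return 0;

                /* ... quiesce the hardware ... */
                return 0;
        }
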
(IS_ERR(pp->base)) { err = PTR_ERR(pp->base); goto err_clk; diff --git a/drivers/net/ethernet/marvell/mvneta_bm.c b/drivers/net/ethernet/marvell/mvneta_bm.c index 82ee2bcca6fd..46c942ef2287 100644 --- a/drivers/net/ethernet/marvell/mvneta_bm.c +++ b/drivers/net/ethernet/marvell/mvneta_bm.c @@ -411,15 +411,13 @@ static int mvneta_bm_probe(struct platform_device *pdev) { struct device_node *dn = pdev->dev.of_node; struct mvneta_bm *priv; - struct resource *res; int err; priv = devm_kzalloc(&pdev->dev, sizeof(struct mvneta_bm), GFP_KERNEL); if (!priv) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->reg_base = devm_ioremap_resource(&pdev->dev, res); + priv->reg_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->reg_base)) return PTR_ERR(priv->reg_base); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index 4d9564ba68f6..543a310ec102 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -683,6 +683,7 @@ enum mvpp2_prs_l3_cast { #define MVPP2_BM_SHORT_BUF_NUM 2048 #define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4) #define MVPP2_BM_POOL_PTR_ALIGN 128 +#define MVPP2_BM_MAX_POOLS 8 /* BM cookie (32 bits) definition */ #define MVPP2_BM_COOKIE_POOL_OFFS 8 @@ -787,6 +788,9 @@ struct mvpp2 { /* Aggregated TXQs */ struct mvpp2_tx_queue *aggr_txqs; + /* Are we using page_pool with per-cpu pools? */ + int percpu_pools; + /* BM pools */ struct mvpp2_bm_pool *bm_pools; @@ -829,9 +833,8 @@ struct mvpp2_pcpu_stats { /* Per-CPU port control */ struct mvpp2_port_pcpu { struct hrtimer tx_done_timer; + struct net_device *dev; bool timer_scheduled; - /* Tasklet for egress finalization */ - struct tasklet_struct tx_done_tasklet; }; struct mvpp2_queue_vector { diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c index 274fb07362cb..4a3baa7e0142 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c @@ -452,8 +452,6 @@ static int mvpp2_dbgfs_flow_port_init(struct dentry *parent, struct dentry *port_dir; port_dir = debugfs_create_dir(port->dev->name, parent); - if (IS_ERR(port_dir)) - return PTR_ERR(port_dir); port_entry = &port->priv->dbgfs_entries->port_flow_entries[port->id]; @@ -480,8 +478,6 @@ static int mvpp2_dbgfs_flow_entry_init(struct dentry *parent, sprintf(flow_entry_name, "%02d", flow); flow_entry_dir = debugfs_create_dir(flow_entry_name, parent); - if (!flow_entry_dir) - return -ENOMEM; entry = &priv->dbgfs_entries->flow_entries[flow]; @@ -514,8 +510,6 @@ static int mvpp2_dbgfs_flow_init(struct dentry *parent, struct mvpp2 *priv) int i, ret; flow_dir = debugfs_create_dir("flows", parent); - if (!flow_dir) - return -ENOMEM; for (i = 0; i < MVPP2_N_PRS_FLOWS; i++) { ret = mvpp2_dbgfs_flow_entry_init(flow_dir, priv, i); @@ -539,8 +533,6 @@ static int mvpp2_dbgfs_prs_entry_init(struct dentry *parent, sprintf(prs_entry_name, "%03d", tid); prs_entry_dir = debugfs_create_dir(prs_entry_name, parent); - if (!prs_entry_dir) - return -ENOMEM; entry = &priv->dbgfs_entries->prs_entries[tid]; @@ -578,8 +570,6 @@ static int mvpp2_dbgfs_prs_init(struct dentry *parent, struct mvpp2 *priv) int i, ret; prs_dir = debugfs_create_dir("parser", parent); - if (!prs_dir) - return -ENOMEM; for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++) { ret = mvpp2_dbgfs_prs_entry_init(prs_dir, priv, i); @@ -688,8 +678,6 @@ static int 
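
devm_platform_ioremap_resource(), adopted in the mvneta hunks above, is shorthand for the platform_get_resource() + devm_ioremap_resource() pair and drops the local struct resource entirely. A minimal probe sketch; demo_probe() is illustrative only:

        static int demo_probe(struct platform_device *pdev)
        {
                void __iomem *base;

                base = devm_platform_ioremap_resource(pdev, 0); /* MEM 0 */
                if (IS_ERR(base))
                        return PTR_ERR(base);

                /* mapping is device-managed: nothing to undo on remove */
                return 0;
        }
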
mvpp2_dbgfs_port_init(struct dentry *parent, struct dentry *port_dir; port_dir = debugfs_create_dir(port->dev->name, parent); - if (IS_ERR(port_dir)) - return PTR_ERR(port_dir); debugfs_create_file("parser_entries", 0444, port_dir, port, &mvpp2_dbgfs_port_parser_fops); @@ -716,15 +704,10 @@ void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name) int ret, i; mvpp2_root = debugfs_lookup(MVPP2_DRIVER_NAME, NULL); - if (!mvpp2_root) { + if (!mvpp2_root) mvpp2_root = debugfs_create_dir(MVPP2_DRIVER_NAME, NULL); - if (IS_ERR(mvpp2_root)) - return; - } mvpp2_dir = debugfs_create_dir(name, mvpp2_root); - if (IS_ERR(mvpp2_dir)) - return; priv->dbgfs_dir = mvpp2_dir; priv->dbgfs_entries = kzalloc(sizeof(*priv->dbgfs_entries), GFP_KERNEL); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index ccdd47f3b8fb..111b3b8239e1 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -292,6 +292,26 @@ static void mvpp2_txq_inc_put(struct mvpp2_port *port, txq_pcpu->txq_put_index = 0; } +/* Get number of maximum RXQ */ +static int mvpp2_get_nrxqs(struct mvpp2 *priv) +{ + unsigned int nrxqs; + + if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_SINGLE_MODE) + return 1; + + /* According to the PPv2.2 datasheet and our experiments on + * PPv2.1, RX queues have an allocation granularity of 4 (when + * more than a single one on PPv2.2). + * Round up to nearest multiple of 4. + */ + nrxqs = (num_possible_cpus() + 3) & ~0x3; + if (nrxqs > MVPP2_PORT_MAX_RXQ) + nrxqs = MVPP2_PORT_MAX_RXQ; + + return nrxqs; +} + /* Get number of physical egress port */ static inline int mvpp2_egress_port(struct mvpp2_port *port) { @@ -323,8 +343,7 @@ static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data) /* Buffer Manager configuration routines */ /* Create pool */ -static int mvpp2_bm_pool_create(struct platform_device *pdev, - struct mvpp2 *priv, +static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool, int size) { u32 val; @@ -343,7 +362,7 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev, else bm_pool->size_bytes = 2 * sizeof(u64) * size; - bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes, + bm_pool->virt_addr = dma_alloc_coherent(dev, bm_pool->size_bytes, &bm_pool->dma_addr, GFP_KERNEL); if (!bm_pool->virt_addr) @@ -351,9 +370,9 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev, if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr, MVPP2_BM_POOL_PTR_ALIGN)) { - dma_free_coherent(&pdev->dev, bm_pool->size_bytes, + dma_free_coherent(dev, bm_pool->size_bytes, bm_pool->virt_addr, bm_pool->dma_addr); - dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n", + dev_err(dev, "BM pool %d is not %d bytes aligned\n", bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN); return -ENOMEM; } @@ -468,15 +487,14 @@ static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_p } /* Cleanup pool */ -static int mvpp2_bm_pool_destroy(struct platform_device *pdev, - struct mvpp2 *priv, +static int mvpp2_bm_pool_destroy(struct device *dev, struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool) { int buf_num; u32 val; buf_num = mvpp2_check_hw_buf_num(priv, bm_pool); - mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool, buf_num); + mvpp2_bm_bufs_free(dev, priv, bm_pool, buf_num); /* Check buffer counters after free */ buf_num = mvpp2_check_hw_buf_num(priv, bm_pool); @@ -490,24 +508,26 @@ static int 
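
The mvpp2 debugfs hunks above delete every return-value check deliberately: the debugfs API is meant to be fire-and-forget. debugfs_create_dir() returns an ERR_PTR on failure, and handing that ERR_PTR back in as a parent makes the child calls fail cleanly, so a caller that cannot usefully recover should not check at all. The resulting pattern (demo_fops and priv are placeholders):

        struct dentry *dir = debugfs_create_dir("demo", NULL);

        /* deliberately unchecked: safe even if dir is an ERR_PTR */
        debugfs_create_file("state", 0444, dir, priv, &demo_fops);
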
mvpp2_bm_pool_destroy(struct platform_device *pdev, val |= MVPP2_BM_STOP_MASK; mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val); - dma_free_coherent(&pdev->dev, bm_pool->size_bytes, + dma_free_coherent(dev, bm_pool->size_bytes, bm_pool->virt_addr, bm_pool->dma_addr); return 0; } -static int mvpp2_bm_pools_init(struct platform_device *pdev, - struct mvpp2 *priv) +static int mvpp2_bm_pools_init(struct device *dev, struct mvpp2 *priv) { - int i, err, size; + int i, err, size, poolnum = MVPP2_BM_POOLS_NUM; struct mvpp2_bm_pool *bm_pool; + if (priv->percpu_pools) + poolnum = mvpp2_get_nrxqs(priv) * 2; + /* Create all pools with maximum size */ size = MVPP2_BM_POOL_SIZE_MAX; - for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { + for (i = 0; i < poolnum; i++) { bm_pool = &priv->bm_pools[i]; bm_pool->id = i; - err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size); + err = mvpp2_bm_pool_create(dev, priv, bm_pool, size); if (err) goto err_unroll_pools; mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0); @@ -515,17 +535,23 @@ static int mvpp2_bm_pools_init(struct platform_device *pdev, return 0; err_unroll_pools: - dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size); + dev_err(dev, "failed to create BM pool %d, size %d\n", i, size); for (i = i - 1; i >= 0; i--) - mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]); + mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]); return err; } -static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv) +static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv) { - int i, err; + int i, err, poolnum = MVPP2_BM_POOLS_NUM; - for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { + if (priv->percpu_pools) + poolnum = mvpp2_get_nrxqs(priv) * 2; + + dev_info(dev, "using %d %s buffers\n", poolnum, + priv->percpu_pools ? 
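
Sizing for the new per-cpu pools, worked through: nrxqs is num_possible_cpus() rounded up to a multiple of 4, each RXQ gets one short and one long pool, and the probe hunk further down only enables per-cpu mode when nrxqs * 2 fits in MVPP2_BM_MAX_POOLS (8). So 4 CPUs gives 4 RXQs and 8 pools (per-cpu engages), while 6 CPUs gives 8 RXQs and 16 pools (the driver stays with shared pools). A model of the rounding:

        static unsigned int model_nrxqs(unsigned int ncpus)
        {
                unsigned int nrxqs = (ncpus + 3) & ~0x3; /* round up to 4 */

                return min_t(unsigned int, nrxqs, MVPP2_PORT_MAX_RXQ);
        }
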
"per-cpu" : "shared"); + + for (i = 0; i < poolnum; i++) { /* Mask BM all interrupts */ mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0); /* Clear BM cause register */ @@ -533,12 +559,12 @@ static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv) } /* Allocate and initialize BM pools */ - priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM, + priv->bm_pools = devm_kcalloc(dev, poolnum, sizeof(*priv->bm_pools), GFP_KERNEL); if (!priv->bm_pools) return -ENOMEM; - err = mvpp2_bm_pools_init(pdev, priv); + err = mvpp2_bm_pools_init(dev, priv); if (err < 0) return err; return 0; @@ -679,6 +705,13 @@ static int mvpp2_bm_bufs_add(struct mvpp2_port *port, phys_addr_t phys_addr; void *buf; + if (port->priv->percpu_pools && + bm_pool->pkt_size > MVPP2_BM_LONG_PKT_SIZE) { + netdev_err(port->dev, + "attempted to use jumbo frames with per-cpu pools"); + return 0; + } + buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size); total_size = MVPP2_RX_TOTAL_SIZE(buf_size); @@ -722,7 +755,64 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size) struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool]; int num; - if (pool >= MVPP2_BM_POOLS_NUM) { + if ((port->priv->percpu_pools && pool > mvpp2_get_nrxqs(port->priv) * 2) || + (!port->priv->percpu_pools && pool >= MVPP2_BM_POOLS_NUM)) { + netdev_err(port->dev, "Invalid pool %d\n", pool); + return NULL; + } + + /* Allocate buffers in case BM pool is used as long pool, but packet + * size doesn't match MTU or BM pool hasn't being used yet + */ + if (new_pool->pkt_size == 0) { + int pkts_num; + + /* Set default buffer number or free all the buffers in case + * the pool is not empty + */ + pkts_num = new_pool->buf_num; + if (pkts_num == 0) { + if (port->priv->percpu_pools) { + if (pool < port->nrxqs) + pkts_num = mvpp2_pools[MVPP2_BM_SHORT].buf_num; + else + pkts_num = mvpp2_pools[MVPP2_BM_LONG].buf_num; + } else { + pkts_num = mvpp2_pools[pool].buf_num; + } + } else { + mvpp2_bm_bufs_free(port->dev->dev.parent, + port->priv, new_pool, pkts_num); + } + + new_pool->pkt_size = pkt_size; + new_pool->frag_size = + SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) + + MVPP2_SKB_SHINFO_SIZE; + + /* Allocate buffers for this pool */ + num = mvpp2_bm_bufs_add(port, new_pool, pkts_num); + if (num != pkts_num) { + WARN(1, "pool %d: %d of %d allocated\n", + new_pool->id, num, pkts_num); + return NULL; + } + } + + mvpp2_bm_pool_bufsize_set(port->priv, new_pool, + MVPP2_RX_BUF_SIZE(new_pool->pkt_size)); + + return new_pool; +} + +static struct mvpp2_bm_pool * +mvpp2_bm_pool_use_percpu(struct mvpp2_port *port, int type, + unsigned int pool, int pkt_size) +{ + struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool]; + int num; + + if (pool > port->nrxqs * 2) { netdev_err(port->dev, "Invalid pool %d\n", pool); return NULL; } @@ -738,7 +828,7 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size) */ pkts_num = new_pool->buf_num; if (pkts_num == 0) - pkts_num = mvpp2_pools[pool].buf_num; + pkts_num = mvpp2_pools[type].buf_num; else mvpp2_bm_bufs_free(port->dev->dev.parent, port->priv, new_pool, pkts_num); @@ -763,11 +853,11 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size) return new_pool; } -/* Initialize pools for swf */ -static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) +/* Initialize pools for swf, shared buffers variant */ +static int mvpp2_swf_bm_pool_init_shared(struct mvpp2_port *port) { - int rxq; enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool; + int rxq; /* If port 
pkt_size is higher than 1518B: * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool @@ -811,6 +901,47 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) return 0; } +/* Initialize pools for swf, percpu buffers variant */ +static int mvpp2_swf_bm_pool_init_percpu(struct mvpp2_port *port) +{ + struct mvpp2_bm_pool *p; + int i; + + for (i = 0; i < port->nrxqs; i++) { + p = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_SHORT, i, + mvpp2_pools[MVPP2_BM_SHORT].pkt_size); + if (!p) + return -ENOMEM; + + port->priv->bm_pools[i].port_map |= BIT(port->id); + mvpp2_rxq_short_pool_set(port, i, port->priv->bm_pools[i].id); + } + + for (i = 0; i < port->nrxqs; i++) { + p = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_LONG, i + port->nrxqs, + mvpp2_pools[MVPP2_BM_LONG].pkt_size); + if (!p) + return -ENOMEM; + + port->priv->bm_pools[i + port->nrxqs].port_map |= BIT(port->id); + mvpp2_rxq_long_pool_set(port, i, + port->priv->bm_pools[i + port->nrxqs].id); + } + + port->pool_long = NULL; + port->pool_short = NULL; + + return 0; +} + +static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) +{ + if (port->priv->percpu_pools) + return mvpp2_swf_bm_pool_init_percpu(port); + else + return mvpp2_swf_bm_pool_init_shared(port); +} + static void mvpp2_set_hw_csum(struct mvpp2_port *port, enum mvpp2_bm_pool_log_num new_long_pool) { @@ -837,6 +968,9 @@ static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu) enum mvpp2_bm_pool_log_num new_long_pool; int pkt_size = MVPP2_RX_PKT_SIZE(mtu); + if (port->priv->percpu_pools) + goto out_set; + /* If port MTU is higher than 1518B: * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool @@ -866,6 +1000,7 @@ static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu) mvpp2_set_hw_csum(port, new_long_pool); } +out_set: dev->mtu = mtu; dev->wanted_features = dev->features; @@ -2651,31 +2786,21 @@ handled: return IRQ_HANDLED; } -static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu) -{ - ktime_t interval; - - if (!port_pcpu->timer_scheduled) { - port_pcpu->timer_scheduled = true; - interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS; - hrtimer_start(&port_pcpu->tx_done_timer, interval, - HRTIMER_MODE_REL_PINNED); - } -} - -static void mvpp2_tx_proc_cb(unsigned long data) +static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer) { - struct net_device *dev = (struct net_device *)data; - struct mvpp2_port *port = netdev_priv(dev); + struct net_device *dev; + struct mvpp2_port *port; struct mvpp2_port_pcpu *port_pcpu; unsigned int tx_todo, cause; - port_pcpu = per_cpu_ptr(port->pcpu, - mvpp2_cpu_to_thread(port->priv, smp_processor_id())); + port_pcpu = container_of(timer, struct mvpp2_port_pcpu, tx_done_timer); + dev = port_pcpu->dev; if (!netif_running(dev)) - return; + return HRTIMER_NORESTART; + port_pcpu->timer_scheduled = false; + port = netdev_priv(dev); /* Process all the Tx queues */ cause = (1 << port->ntxqs) - 1; @@ -2683,18 +2808,13 @@ static void mvpp2_tx_proc_cb(unsigned long data) mvpp2_cpu_to_thread(port->priv, smp_processor_id())); /* Set the timer in case not all the packets were processed */ - if (tx_todo) - mvpp2_timer_set(port_pcpu); -} - -static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer) -{ - struct mvpp2_port_pcpu *port_pcpu = container_of(timer, - struct mvpp2_port_pcpu, - tx_done_timer); - - tasklet_schedule(&port_pcpu->tx_done_tasklet); + if (tx_todo && !port_pcpu->timer_scheduled) { + port_pcpu->timer_scheduled = true; + 
hrtimer_forward_now(&port_pcpu->tx_done_timer, + MVPP2_TXDONE_HRTIMER_PERIOD_NS); + return HRTIMER_RESTART; + } return HRTIMER_NORESTART; } @@ -2923,14 +3043,15 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb, for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - void *addr = page_address(frag->page.p) + frag->page_offset; + void *addr = skb_frag_address(frag); tx_desc = mvpp2_txq_next_desc_get(aggr_txq); mvpp2_txdesc_txq_set(port, tx_desc, txq->id); - mvpp2_txdesc_size_set(port, tx_desc, frag->size); + mvpp2_txdesc_size_set(port, tx_desc, skb_frag_size(frag)); buf_dma_addr = dma_map_single(port->dev->dev.parent, addr, - frag->size, DMA_TO_DEVICE); + skb_frag_size(frag), + DMA_TO_DEVICE); if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) { mvpp2_txq_desc_put(txq); goto cleanup; @@ -3181,7 +3302,12 @@ out: txq_pcpu->count > 0) { struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread); - mvpp2_timer_set(port_pcpu); + if (!port_pcpu->timer_scheduled) { + port_pcpu->timer_scheduled = true; + hrtimer_start(&port_pcpu->tx_done_timer, + MVPP2_TXDONE_HRTIMER_PERIOD_NS, + HRTIMER_MODE_REL_PINNED_SOFT); + } } if (test_bit(thread, &port->priv->lock_map)) @@ -3618,7 +3744,6 @@ static int mvpp2_stop(struct net_device *dev) hrtimer_cancel(&port_pcpu->tx_done_timer); port_pcpu->timer_scheduled = false; - tasklet_kill(&port_pcpu->tx_done_tasklet); } } mvpp2_cleanup_rxqs(port); @@ -3709,10 +3834,48 @@ static int mvpp2_set_mac_address(struct net_device *dev, void *p) return err; } +/* Shut down all the ports, reconfigure the pools as percpu or shared, + * then bring up again all ports. + */ +static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu) +{ + int numbufs = MVPP2_BM_POOLS_NUM, i; + struct mvpp2_port *port = NULL; + bool status[MVPP2_MAX_PORTS]; + + for (i = 0; i < priv->port_count; i++) { + port = priv->port_list[i]; + status[i] = netif_running(port->dev); + if (status[i]) + mvpp2_stop(port->dev); + } + + /* nrxqs is the same for all ports */ + if (priv->percpu_pools) + numbufs = port->nrxqs * 2; + + for (i = 0; i < numbufs; i++) + mvpp2_bm_pool_destroy(port->dev->dev.parent, priv, &priv->bm_pools[i]); + + devm_kfree(port->dev->dev.parent, priv->bm_pools); + priv->percpu_pools = percpu; + mvpp2_bm_init(port->dev->dev.parent, priv); + + for (i = 0; i < priv->port_count; i++) { + port = priv->port_list[i]; + mvpp2_swf_bm_pool_init(port); + if (status[i]) + mvpp2_open(port->dev); + } + + return 0; +} + static int mvpp2_change_mtu(struct net_device *dev, int mtu) { struct mvpp2_port *port = netdev_priv(dev); bool running = netif_running(dev); + struct mvpp2 *priv = port->priv; int err; if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) { @@ -3721,6 +3884,31 @@ static int mvpp2_change_mtu(struct net_device *dev, int mtu) mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8); } + if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE) { + if (priv->percpu_pools) { + netdev_warn(dev, "mtu %d too high, switching to shared buffers", mtu); + mvpp2_bm_switch_buffers(priv, false); + } + } else { + bool jumbo = false; + int i; + + for (i = 0; i < priv->port_count; i++) + if (priv->port_list[i] != port && + MVPP2_RX_PKT_SIZE(priv->port_list[i]->dev->mtu) > + MVPP2_BM_LONG_PKT_SIZE) { + jumbo = true; + break; + } + + /* No port is using jumbo frames */ + if (!jumbo) { + dev_info(port->dev->dev.parent, + "all ports have a low MTU, switching to per-cpu buffers"); + mvpp2_bm_switch_buffers(priv, true); + } + } + if (running) 
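
The tasklet removal above is enabled by the hrtimer softirq expiry mode: with an HRTIMER_MODE_*_SOFT timer the callback already runs in softirq context, so bouncing through a tasklet added latency for nothing, and re-arming can happen in place via hrtimer_forward_now(). Skeleton of the pattern, with demo_work() standing in for the tx-done processing and struct demo_pcpu hypothetical:

        static enum hrtimer_restart demo_timer_cb(struct hrtimer *t)
        {
                struct demo_pcpu *pc = container_of(t, struct demo_pcpu,
                                                    timer);

                if (!demo_work(pc))     /* everything reclaimed? */
                        return HRTIMER_NORESTART;

                /* more to do: re-arm in place instead of hrtimer_start() */
                hrtimer_forward_now(t, ns_to_ktime(NSEC_PER_MSEC));
                return HRTIMER_RESTART;
        }

        /* setup side, once per timer: */
        hrtimer_init(&pc->timer, CLOCK_MONOTONIC,
                     HRTIMER_MODE_REL_PINNED_SOFT);
        pc->timer.function = demo_timer_cb;
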
mvpp2_stop_dev(port); @@ -5010,7 +5198,6 @@ static int mvpp2_port_probe(struct platform_device *pdev, struct device_node *port_node = to_of_node(port_fwnode); netdev_features_t features; struct net_device *dev; - struct resource *res; struct phylink *phylink; char *mac_from = ""; unsigned int ntxqs, nrxqs, thread; @@ -5028,18 +5215,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, } ntxqs = MVPP2_MAX_TXQ; - if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_SINGLE_MODE) { - nrxqs = 1; - } else { - /* According to the PPv2.2 datasheet and our experiments on - * PPv2.1, RX queues have an allocation granularity of 4 (when - * more than a single one on PPv2.2). - * Round up to nearest multiple of 4. - */ - nrxqs = (num_possible_cpus() + 3) & ~0x3; - if (nrxqs > MVPP2_PORT_MAX_RXQ) - nrxqs = MVPP2_PORT_MAX_RXQ; - } + nrxqs = mvpp2_get_nrxqs(priv); dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs); if (!dev) @@ -5114,8 +5290,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, port->comphy = comphy; if (priv->hw_version == MVPP21) { - res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id); - port->base = devm_ioremap_resource(&pdev->dev, res); + port->base = devm_platform_ioremap_resource(pdev, 2 + id); if (IS_ERR(port->base)) { err = PTR_ERR(port->base); goto err_free_irq; @@ -5184,13 +5359,10 @@ static int mvpp2_port_probe(struct platform_device *pdev, port_pcpu = per_cpu_ptr(port->pcpu, thread); hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC, - HRTIMER_MODE_REL_PINNED); + HRTIMER_MODE_REL_PINNED_SOFT); port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb; port_pcpu->timer_scheduled = false; - - tasklet_init(&port_pcpu->tx_done_tasklet, - mvpp2_tx_proc_cb, - (unsigned long)dev); + port_pcpu->dev = dev; } } @@ -5205,7 +5377,8 @@ static int mvpp2_port_probe(struct platform_device *pdev, dev->features |= NETIF_F_NTUPLE; } - mvpp2_set_hw_csum(port, port->pool_long->id); + if (!port->priv->percpu_pools) + mvpp2_set_hw_csum(port, port->pool_long->id); dev->vlan_features |= features; dev->gso_max_segs = MVPP2_MAX_TSO_SEGS; @@ -5497,7 +5670,7 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv) mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1); /* Buffer Manager initialization */ - err = mvpp2_bm_init(pdev, priv); + err = mvpp2_bm_init(&pdev->dev, priv); if (err < 0) return err; @@ -5544,14 +5717,12 @@ static int mvpp2_probe(struct platform_device *pdev) if (priv->hw_version == MVPP21) queue_mode = MVPP2_QDIST_SINGLE_MODE; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - base = devm_ioremap_resource(&pdev->dev, res); + base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) return PTR_ERR(base); if (priv->hw_version == MVPP21) { - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - priv->lms_base = devm_ioremap_resource(&pdev->dev, res); + priv->lms_base = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(priv->lms_base)) return PTR_ERR(priv->lms_base); } else { @@ -5585,6 +5756,10 @@ static int mvpp2_probe(struct platform_device *pdev) priv->sysctrl_base = NULL; } + if (priv->hw_version == MVPP22 && + mvpp2_get_nrxqs(priv) * 2 <= MVPP2_BM_MAX_POOLS) + priv->percpu_pools = 1; + mvpp2_setup_bm_pool(); @@ -5766,7 +5941,7 @@ static int mvpp2_remove(struct platform_device *pdev) for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i]; - mvpp2_bm_pool_destroy(pdev, priv, bm_pool); + mvpp2_bm_pool_destroy(&pdev->dev, priv, bm_pool); } for (i = 0; i < MVPP2_MAX_THREADS; i++) { diff 
--git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 3aa998797bc1..51b77c2de400 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1425,8 +1425,7 @@ static int pxa168_eth_probe(struct platform_device *pdev) pep->dev = dev; pep->clk = clk; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - pep->base = devm_ioremap_resource(&pdev->dev, res); + pep->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(pep->base)) { err = -ENOMEM; goto err_netdev; diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 9ac854c2b371..0a2ec387a482 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -3731,7 +3731,6 @@ static int skge_device_event(struct notifier_block *unused, { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct skge_port *skge; - struct dentry *d; if (dev->netdev_ops->ndo_open != &skge_up || !skge_debug) goto done; @@ -3739,33 +3738,20 @@ static int skge_device_event(struct notifier_block *unused, skge = netdev_priv(dev); switch (event) { case NETDEV_CHANGENAME: - if (skge->debugfs) { - d = debugfs_rename(skge_debug, skge->debugfs, - skge_debug, dev->name); - if (d) - skge->debugfs = d; - else { - netdev_info(dev, "rename failed\n"); - debugfs_remove(skge->debugfs); - } - } + if (skge->debugfs) + skge->debugfs = debugfs_rename(skge_debug, + skge->debugfs, + skge_debug, dev->name); break; case NETDEV_GOING_DOWN: - if (skge->debugfs) { - debugfs_remove(skge->debugfs); - skge->debugfs = NULL; - } + debugfs_remove(skge->debugfs); + skge->debugfs = NULL; break; case NETDEV_UP: - d = debugfs_create_file(dev->name, 0444, - skge_debug, dev, - &skge_debug_fops); - if (!d || IS_ERR(d)) - netdev_info(dev, "debugfs create failed\n"); - else - skge->debugfs = d; + skge->debugfs = debugfs_create_file(dev->name, 0444, skge_debug, + dev, &skge_debug_fops); break; } @@ -3780,15 +3766,8 @@ static struct notifier_block skge_notifier = { static __init void skge_debug_init(void) { - struct dentry *ent; + skge_debug = debugfs_create_dir("skge", NULL); - ent = debugfs_create_dir("skge", NULL); - if (!ent || IS_ERR(ent)) { - pr_info("debugfs create directory failed\n"); - return; - } - - skge_debug = ent; register_netdevice_notifier(&skge_notifier); } @@ -4078,8 +4057,7 @@ static void skge_remove(struct pci_dev *pdev) #ifdef CONFIG_PM_SLEEP static int skge_suspend(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct skge_hw *hw = pci_get_drvdata(pdev); + struct skge_hw *hw = dev_get_drvdata(dev); int i; if (!hw) @@ -4103,8 +4081,7 @@ static int skge_suspend(struct device *dev) static int skge_resume(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct skge_hw *hw = pci_get_drvdata(pdev); + struct skge_hw *hw = dev_get_drvdata(dev); int i, err; if (!hw) diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index e0363870f3a5..5f56ee83e3b1 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -5174,8 +5174,7 @@ static void sky2_remove(struct pci_dev *pdev) static int sky2_suspend(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct sky2_hw *hw = pci_get_drvdata(pdev); + struct sky2_hw *hw = dev_get_drvdata(dev); int i; if (!hw) diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig index 1f7fff81f24d..4968352ba188 100644 --- 
a/drivers/net/ethernet/mediatek/Kconfig +++ b/drivers/net/ethernet/mediatek/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only config NET_VENDOR_MEDIATEK bool "MediaTek ethernet driver" - depends on ARCH_MEDIATEK || SOC_MT7621 + depends on ARCH_MEDIATEK || SOC_MT7621 || SOC_MT7620 ---help--- If you have a Mediatek SoC with ethernet, say Y. @@ -9,7 +9,7 @@ if NET_VENDOR_MEDIATEK config NET_MEDIATEK_SOC tristate "MediaTek SoC Gigabit Ethernet support" - select PHYLIB + select PHYLINK ---help--- This driver supports the gigabit ethernet MACs in the MediaTek SoC family. diff --git a/drivers/net/ethernet/mediatek/mtk_eth_path.c b/drivers/net/ethernet/mediatek/mtk_eth_path.c index 7f05880cf9ef..ef11cf3d1ccc 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_path.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_path.c @@ -239,10 +239,9 @@ out: return err; } -static int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id) +int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id) { - unsigned int val = 0; - int sid, err, path; + int err, path; path = (mac_id == 0) ? MTK_ETH_PATH_GMAC1_SGMII : MTK_ETH_PATH_GMAC2_SGMII; @@ -252,33 +251,10 @@ static int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id) if (err) return err; - /* The path GMAC to SGMII will be enabled once the SGMIISYS is being - * setup done. - */ - regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); - - regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0, - SYSCFG0_SGMII_MASK, ~(u32)SYSCFG0_SGMII_MASK); - - /* Decide how GMAC and SGMIISYS be mapped */ - sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ? 0 : mac_id; - - /* Setup SGMIISYS with the determined property */ - if (MTK_HAS_FLAGS(eth->sgmii->flags[sid], MTK_SGMII_PHYSPEED_AN)) - err = mtk_sgmii_setup_mode_an(eth->sgmii, sid); - else - err = mtk_sgmii_setup_mode_force(eth->sgmii, sid); - - if (err) - return err; - - regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0, - SYSCFG0_SGMII_MASK, val); - return 0; } -static int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id) +int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id) { int err, path = 0; @@ -296,7 +272,7 @@ static int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id) return 0; } -static int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id) +int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id) { int err, path; @@ -311,42 +287,3 @@ static int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id) return 0; } -int mtk_setup_hw_path(struct mtk_eth *eth, int mac_id, int phymode) -{ - int err; - - switch (phymode) { - case PHY_INTERFACE_MODE_TRGMII: - case PHY_INTERFACE_MODE_RGMII_TXID: - case PHY_INTERFACE_MODE_RGMII_RXID: - case PHY_INTERFACE_MODE_RGMII_ID: - case PHY_INTERFACE_MODE_RGMII: - case PHY_INTERFACE_MODE_MII: - case PHY_INTERFACE_MODE_REVMII: - case PHY_INTERFACE_MODE_RMII: - if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) { - err = mtk_gmac_rgmii_path_setup(eth, mac_id); - if (err) - return err; - } - break; - case PHY_INTERFACE_MODE_SGMII: - if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) { - err = mtk_gmac_sgmii_path_setup(eth, mac_id); - if (err) - return err; - } - break; - case PHY_INTERFACE_MODE_GMII: - if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) { - err = mtk_gmac_gephy_path_setup(eth, mac_id); - if (err) - return err; - } - break; - default: - break; - } - - return 0; -} diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index c39d7f4ab1d4..c61069340f4f 100644 --- 
a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -18,6 +18,7 @@ #include <linux/tcp.h> #include <linux/interrupt.h> #include <linux/pinctrl/devinfo.h> +#include <linux/phylink.h> #include "mtk_eth_soc.h" @@ -186,165 +187,339 @@ static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed) mtk_w32(eth, val, TRGMII_TCK_CTRL); } -static void mtk_phy_link_adjust(struct net_device *dev) +static void mtk_mac_config(struct phylink_config *config, unsigned int mode, + const struct phylink_link_state *state) { - struct mtk_mac *mac = netdev_priv(dev); - u16 lcl_adv = 0, rmt_adv = 0; - u8 flowctrl; - u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | - MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN | - MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN | - MAC_MCR_BACKPR_EN; + struct mtk_mac *mac = container_of(config, struct mtk_mac, + phylink_config); + struct mtk_eth *eth = mac->hw; + u32 mcr_cur, mcr_new, sid; + int val, ge_mode, err; + + /* MT76x8 has no hardware settings between for the MAC */ + if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) && + mac->interface != state->interface) { + /* Setup soc pin functions */ + switch (state->interface) { + case PHY_INTERFACE_MODE_TRGMII: + if (mac->id) + goto err_phy; + if (!MTK_HAS_CAPS(mac->hw->soc->caps, + MTK_GMAC1_TRGMII)) + goto err_phy; + /* fall through */ + case PHY_INTERFACE_MODE_RGMII_TXID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_MII: + case PHY_INTERFACE_MODE_REVMII: + case PHY_INTERFACE_MODE_RMII: + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) { + err = mtk_gmac_rgmii_path_setup(eth, mac->id); + if (err) + goto init_err; + } + break; + case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_2500BASEX: + case PHY_INTERFACE_MODE_SGMII: + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) { + err = mtk_gmac_sgmii_path_setup(eth, mac->id); + if (err) + goto init_err; + } + break; + case PHY_INTERFACE_MODE_GMII: + if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) { + err = mtk_gmac_gephy_path_setup(eth, mac->id); + if (err) + goto init_err; + } + break; + default: + goto err_phy; + } - if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) + /* Setup clock for 1st gmac */ + if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII && + !phy_interface_mode_is_8023z(state->interface) && + MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) { + if (MTK_HAS_CAPS(mac->hw->soc->caps, + MTK_TRGMII_MT7621_CLK)) { + if (mt7621_gmac0_rgmii_adjust(mac->hw, + state->interface)) + goto err_phy; + } else { + if (state->interface != + PHY_INTERFACE_MODE_TRGMII) + mtk_gmac0_rgmii_adjust(mac->hw, + state->speed); + } + } + + ge_mode = 0; + switch (state->interface) { + case PHY_INTERFACE_MODE_MII: + ge_mode = 1; + break; + case PHY_INTERFACE_MODE_REVMII: + ge_mode = 2; + break; + case PHY_INTERFACE_MODE_RMII: + if (mac->id) + goto err_phy; + ge_mode = 3; + break; + default: + break; + } + + /* put the gmac into the right mode */ + regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); + val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id); + val |= SYSCFG0_GE_MODE(ge_mode, mac->id); + regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val); + + mac->interface = state->interface; + } + + /* SGMII */ + if (state->interface == PHY_INTERFACE_MODE_SGMII || + phy_interface_mode_is_8023z(state->interface)) { + /* The path GMAC to SGMII will be enabled once the SGMIISYS is + * being setup done. 
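
Every phylink callback in the conversion above recovers the driver context with container_of() on the embedded phylink_config, since phylink hands back the config it was registered with rather than a net_device. The shape, reduced to a sketch:

        struct demo_mac {
                struct phylink_config   phylink_config;
                /* driver state ... */
        };

        static void demo_mac_config(struct phylink_config *config,
                                    unsigned int mode,
                                    const struct phylink_link_state *state)
        {
                struct demo_mac *mac = container_of(config, struct demo_mac,
                                                    phylink_config);

                /* program the MAC from state->interface, speed, duplex
                 * and pause; nothing is read from a phydev here
                 */
        }
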
+ */ + regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); + + regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0, + SYSCFG0_SGMII_MASK, + ~(u32)SYSCFG0_SGMII_MASK); + + /* Decide how GMAC and SGMIISYS be mapped */ + sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ? + 0 : mac->id; + + /* Setup SGMIISYS with the determined property */ + if (state->interface != PHY_INTERFACE_MODE_SGMII) + err = mtk_sgmii_setup_mode_force(eth->sgmii, sid, + state); + else if (phylink_autoneg_inband(mode)) + err = mtk_sgmii_setup_mode_an(eth->sgmii, sid); + + if (err) + goto init_err; + + regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0, + SYSCFG0_SGMII_MASK, val); + } else if (phylink_autoneg_inband(mode)) { + dev_err(eth->dev, + "In-band mode not supported in non SGMII mode!\n"); return; + } - switch (dev->phydev->speed) { + /* Setup gmac */ + mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); + mcr_new = mcr_cur; + mcr_new &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 | + MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC | + MAC_MCR_FORCE_RX_FC); + mcr_new |= MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE | + MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK; + + switch (state->speed) { + case SPEED_2500: case SPEED_1000: - mcr |= MAC_MCR_SPEED_1000; + mcr_new |= MAC_MCR_SPEED_1000; break; case SPEED_100: - mcr |= MAC_MCR_SPEED_100; + mcr_new |= MAC_MCR_SPEED_100; break; } - - if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII) && !mac->id) { - if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII_MT7621_CLK)) { - if (mt7621_gmac0_rgmii_adjust(mac->hw, - dev->phydev->interface)) - return; - } else { - if (!mac->trgmii) - mtk_gmac0_rgmii_adjust(mac->hw, - dev->phydev->speed); - } + if (state->duplex == DUPLEX_FULL) { + mcr_new |= MAC_MCR_FORCE_DPX; + if (state->pause & MLO_PAUSE_TX) + mcr_new |= MAC_MCR_FORCE_TX_FC; + if (state->pause & MLO_PAUSE_RX) + mcr_new |= MAC_MCR_FORCE_RX_FC; } - if (dev->phydev->link) - mcr |= MAC_MCR_FORCE_LINK; + /* Only update control register when needed! */ + if (mcr_new != mcr_cur) + mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id)); - if (dev->phydev->duplex) { - mcr |= MAC_MCR_FORCE_DPX; + return; - if (dev->phydev->pause) - rmt_adv = LPA_PAUSE_CAP; - if (dev->phydev->asym_pause) - rmt_adv |= LPA_PAUSE_ASYM; +err_phy: + dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__, + mac->id, phy_modes(state->interface)); + return; - lcl_adv = linkmode_adv_to_lcl_adv_t(dev->phydev->advertising); - flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); +init_err: + dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__, + mac->id, phy_modes(state->interface), err); +} - if (flowctrl & FLOW_CTRL_TX) - mcr |= MAC_MCR_FORCE_TX_FC; - if (flowctrl & FLOW_CTRL_RX) - mcr |= MAC_MCR_FORCE_RX_FC; +static int mtk_mac_link_state(struct phylink_config *config, + struct phylink_link_state *state) +{ + struct mtk_mac *mac = container_of(config, struct mtk_mac, + phylink_config); + u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id)); - netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n", - flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled", - flowctrl & FLOW_CTRL_TX ? 
"enabled" : "disabled"); + state->link = (pmsr & MAC_MSR_LINK); + state->duplex = (pmsr & MAC_MSR_DPX) >> 1; + + switch (pmsr & (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)) { + case 0: + state->speed = SPEED_10; + break; + case MAC_MSR_SPEED_100: + state->speed = SPEED_100; + break; + case MAC_MSR_SPEED_1000: + state->speed = SPEED_1000; + break; + default: + state->speed = SPEED_UNKNOWN; + break; } - mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); + state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX); + if (pmsr & MAC_MSR_RX_FC) + state->pause |= MLO_PAUSE_RX; + if (pmsr & MAC_MSR_TX_FC) + state->pause |= MLO_PAUSE_TX; - if (!of_phy_is_fixed_link(mac->of_node)) - phy_print_status(dev->phydev); + return 1; } -static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac, - struct device_node *phy_node) +static void mtk_mac_an_restart(struct phylink_config *config) { - struct phy_device *phydev; - int phy_mode; - - phy_mode = of_get_phy_mode(phy_node); - if (phy_mode < 0) { - dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode); - return -EINVAL; - } + struct mtk_mac *mac = container_of(config, struct mtk_mac, + phylink_config); - phydev = of_phy_connect(eth->netdev[mac->id], phy_node, - mtk_phy_link_adjust, 0, phy_mode); - if (!phydev) { - dev_err(eth->dev, "could not connect to PHY\n"); - return -ENODEV; - } + mtk_sgmii_restart_an(mac->hw, mac->id); +} - dev_info(eth->dev, - "connected mac %d to PHY at %s [uid=%08x, driver=%s]\n", - mac->id, phydev_name(phydev), phydev->phy_id, - phydev->drv->name); +static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode, + phy_interface_t interface) +{ + struct mtk_mac *mac = container_of(config, struct mtk_mac, + phylink_config); + u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); - return 0; + mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN); + mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); } -static int mtk_phy_connect(struct net_device *dev) +static void mtk_mac_link_up(struct phylink_config *config, unsigned int mode, + phy_interface_t interface, + struct phy_device *phy) { - struct mtk_mac *mac = netdev_priv(dev); - struct mtk_eth *eth; - struct device_node *np; - u32 val; - int err; + struct mtk_mac *mac = container_of(config, struct mtk_mac, + phylink_config); + u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); - eth = mac->hw; - np = of_parse_phandle(mac->of_node, "phy-handle", 0); - if (!np && of_phy_is_fixed_link(mac->of_node)) - if (!of_phy_register_fixed_link(mac->of_node)) - np = of_node_get(mac->of_node); - if (!np) - return -ENODEV; + mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN; + mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); +} - err = mtk_setup_hw_path(eth, mac->id, of_get_phy_mode(np)); - if (err) - goto err_phy; +static void mtk_validate(struct phylink_config *config, + unsigned long *supported, + struct phylink_link_state *state) +{ + struct mtk_mac *mac = container_of(config, struct mtk_mac, + phylink_config); + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + + if (state->interface != PHY_INTERFACE_MODE_NA && + state->interface != PHY_INTERFACE_MODE_MII && + state->interface != PHY_INTERFACE_MODE_GMII && + !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII) && + phy_interface_mode_is_rgmii(state->interface)) && + !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && + !mac->id && state->interface == PHY_INTERFACE_MODE_TRGMII) && + !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII) && + (state->interface == PHY_INTERFACE_MODE_SGMII || + phy_interface_mode_is_8023z(state->interface)))) { + linkmode_zero(supported); + return; + } + + 
phylink_set_port_modes(mask); + phylink_set(mask, Autoneg); - mac->ge_mode = 0; - switch (of_get_phy_mode(np)) { + switch (state->interface) { case PHY_INTERFACE_MODE_TRGMII: - mac->trgmii = true; - case PHY_INTERFACE_MODE_RGMII_TXID: - case PHY_INTERFACE_MODE_RGMII_RXID: - case PHY_INTERFACE_MODE_RGMII_ID: + phylink_set(mask, 1000baseT_Full); + break; + case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_2500BASEX: + phylink_set(mask, 1000baseX_Full); + phylink_set(mask, 2500baseX_Full); + break; + case PHY_INTERFACE_MODE_GMII: case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + phylink_set(mask, 1000baseT_Half); + /* fall through */ case PHY_INTERFACE_MODE_SGMII: - break; + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseX_Full); + /* fall through */ case PHY_INTERFACE_MODE_MII: - case PHY_INTERFACE_MODE_GMII: - mac->ge_mode = 1; - break; - case PHY_INTERFACE_MODE_REVMII: - mac->ge_mode = 2; - break; case PHY_INTERFACE_MODE_RMII: - if (!mac->id) - goto err_phy; - mac->ge_mode = 3; - break; + case PHY_INTERFACE_MODE_REVMII: + case PHY_INTERFACE_MODE_NA: default: - goto err_phy; + phylink_set(mask, 10baseT_Half); + phylink_set(mask, 10baseT_Full); + phylink_set(mask, 100baseT_Half); + phylink_set(mask, 100baseT_Full); + break; } - /* put the gmac into the right mode */ - regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); - val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id); - val |= SYSCFG0_GE_MODE(mac->ge_mode, mac->id); - regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val); - - /* couple phydev to net_device */ - if (mtk_phy_connect_node(eth, mac, np)) - goto err_phy; + if (state->interface == PHY_INTERFACE_MODE_NA) { + if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) { + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseX_Full); + phylink_set(mask, 2500baseX_Full); + } + if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) { + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseT_Half); + phylink_set(mask, 1000baseX_Full); + } + if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GEPHY)) { + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseT_Half); + } + } - of_node_put(np); + phylink_set(mask, Pause); + phylink_set(mask, Asym_Pause); - return 0; + linkmode_and(supported, supported, mask); + linkmode_and(state->advertising, state->advertising, mask); -err_phy: - if (of_phy_is_fixed_link(mac->of_node)) - of_phy_deregister_fixed_link(mac->of_node); - of_node_put(np); - dev_err(eth->dev, "%s: invalid phy\n", __func__); - return -EINVAL; + /* We can only operate at 2500BaseX or 1000BaseX. If requested + * to advertise both, only report advertising at 2500BaseX. 
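
mtk_validate() above follows the standard contract for a phylink validate() op in this kernel generation: start from an all-zero mask, set what the hardware supports for the requested interface, then AND the mask into both the supported and advertising linkmode bitmaps. A minimal skeleton:

        static void demo_validate(struct phylink_config *config,
                                  unsigned long *supported,
                                  struct phylink_link_state *state)
        {
                __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

                phylink_set(mask, Autoneg);
                phylink_set(mask, Pause);
                phylink_set(mask, Asym_Pause);
                phylink_set(mask, 1000baseT_Full);

                linkmode_and(supported, supported, mask);
                linkmode_and(state->advertising, state->advertising, mask);
        }
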
+ */ + phylink_helper_basex_speed(state); } +static const struct phylink_mac_ops mtk_phylink_ops = { + .validate = mtk_validate, + .mac_link_state = mtk_mac_link_state, + .mac_an_restart = mtk_mac_an_restart, + .mac_config = mtk_mac_config, + .mac_link_down = mtk_mac_link_down, + .mac_link_up = mtk_mac_link_up, +}; + static int mtk_mdio_init(struct mtk_eth *eth) { struct device_node *mii_np; @@ -395,8 +570,8 @@ static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask) u32 val; spin_lock_irqsave(ð->tx_irq_lock, flags); - val = mtk_r32(eth, MTK_QDMA_INT_MASK); - mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK); + val = mtk_r32(eth, eth->tx_int_mask_reg); + mtk_w32(eth, val & ~mask, eth->tx_int_mask_reg); spin_unlock_irqrestore(ð->tx_irq_lock, flags); } @@ -406,8 +581,8 @@ static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask) u32 val; spin_lock_irqsave(ð->tx_irq_lock, flags); - val = mtk_r32(eth, MTK_QDMA_INT_MASK); - mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK); + val = mtk_r32(eth, eth->tx_int_mask_reg); + mtk_w32(eth, val | mask, eth->tx_int_mask_reg); spin_unlock_irqrestore(ð->tx_irq_lock, flags); } @@ -437,6 +612,7 @@ static int mtk_set_mac_address(struct net_device *dev, void *p) { int ret = eth_mac_addr(dev, p); struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; const char *macaddr = dev->dev_addr; if (ret) @@ -446,11 +622,19 @@ static int mtk_set_mac_address(struct net_device *dev, void *p) return -EBUSY; spin_lock_bh(&mac->hw->page_lock); - mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1], - MTK_GDMA_MAC_ADRH(mac->id)); - mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) | - (macaddr[4] << 8) | macaddr[5], - MTK_GDMA_MAC_ADRL(mac->id)); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { + mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1], + MT7628_SDM_MAC_ADRH); + mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) | + (macaddr[4] << 8) | macaddr[5], + MT7628_SDM_MAC_ADRL); + } else { + mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1], + MTK_GDMA_MAC_ADRH(mac->id)); + mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) | + (macaddr[4] << 8) | macaddr[5], + MTK_GDMA_MAC_ADRL(mac->id)); + } spin_unlock_bh(&mac->hw->page_lock); return 0; @@ -626,19 +810,47 @@ static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring, return &ring->buf[idx]; } +static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring, + struct mtk_tx_dma *dma) +{ + return ring->dma_pdma - ring->dma + dma; +} + +static int txd_to_idx(struct mtk_tx_ring *ring, struct mtk_tx_dma *dma) +{ + return ((void *)dma - (void *)ring->dma) / sizeof(*dma); +} + static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf) { - if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) { - dma_unmap_single(eth->dev, - dma_unmap_addr(tx_buf, dma_addr0), - dma_unmap_len(tx_buf, dma_len0), - DMA_TO_DEVICE); - } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) { - dma_unmap_page(eth->dev, - dma_unmap_addr(tx_buf, dma_addr0), - dma_unmap_len(tx_buf, dma_len0), - DMA_TO_DEVICE); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) { + dma_unmap_single(eth->dev, + dma_unmap_addr(tx_buf, dma_addr0), + dma_unmap_len(tx_buf, dma_len0), + DMA_TO_DEVICE); + } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) { + dma_unmap_page(eth->dev, + dma_unmap_addr(tx_buf, dma_addr0), + dma_unmap_len(tx_buf, dma_len0), + DMA_TO_DEVICE); + } + } else { + if (dma_unmap_len(tx_buf, dma_len0)) { + dma_unmap_page(eth->dev, + dma_unmap_addr(tx_buf, dma_addr0), 
+ dma_unmap_len(tx_buf, dma_len0), + DMA_TO_DEVICE); + } + + if (dma_unmap_len(tx_buf, dma_len1)) { + dma_unmap_page(eth->dev, + dma_unmap_addr(tx_buf, dma_addr1), + dma_unmap_len(tx_buf, dma_len1), + DMA_TO_DEVICE); + } } + tx_buf->flags = 0; if (tx_buf->skb && (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) @@ -646,19 +858,45 @@ static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf) tx_buf->skb = NULL; } +static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf, + struct mtk_tx_dma *txd, dma_addr_t mapped_addr, + size_t size, int idx) +{ + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr); + dma_unmap_len_set(tx_buf, dma_len0, size); + } else { + if (idx & 1) { + txd->txd3 = mapped_addr; + txd->txd2 |= TX_DMA_PLEN1(size); + dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr); + dma_unmap_len_set(tx_buf, dma_len1, size); + } else { + tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC; + txd->txd1 = mapped_addr; + txd->txd2 = TX_DMA_PLEN0(size); + dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr); + dma_unmap_len_set(tx_buf, dma_len0, size); + } + } +} + static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, int tx_num, struct mtk_tx_ring *ring, bool gso) { struct mtk_mac *mac = netdev_priv(dev); struct mtk_eth *eth = mac->hw; struct mtk_tx_dma *itxd, *txd; + struct mtk_tx_dma *itxd_pdma, *txd_pdma; struct mtk_tx_buf *itx_buf, *tx_buf; dma_addr_t mapped_addr; unsigned int nr_frags; int i, n_desc = 1; u32 txd4 = 0, fport; + int k = 0; itxd = ring->next_free; + itxd_pdma = qdma_to_pdma(ring, itxd); if (itxd == ring->last_free) return -ENOMEM; @@ -689,26 +927,37 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, itx_buf->flags |= MTK_TX_FLAGS_SINGLE0; itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1; - dma_unmap_addr_set(itx_buf, dma_addr0, mapped_addr); - dma_unmap_len_set(itx_buf, dma_len0, skb_headlen(skb)); + setup_tx_buf(eth, itx_buf, itxd_pdma, mapped_addr, skb_headlen(skb), + k++); /* TX SG offload */ txd = itxd; + txd_pdma = qdma_to_pdma(ring, txd); nr_frags = skb_shinfo(skb)->nr_frags; + for (i = 0; i < nr_frags; i++) { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; unsigned int offset = 0; int frag_size = skb_frag_size(frag); while (frag_size) { bool last_frag = false; unsigned int frag_map_size; + bool new_desc = true; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA) || + (i & 0x1)) { + txd = mtk_qdma_phys_to_virt(ring, txd->txd2); + txd_pdma = qdma_to_pdma(ring, txd); + if (txd == ring->last_free) + goto err_dma; + + n_desc++; + } else { + new_desc = false; + } - txd = mtk_qdma_phys_to_virt(ring, txd->txd2); - if (txd == ring->last_free) - goto err_dma; - n_desc++; frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN); mapped_addr = skb_frag_dma_map(eth->dev, frag, offset, frag_map_size, @@ -727,14 +976,16 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, WRITE_ONCE(txd->txd4, fport); tx_buf = mtk_desc_to_tx_buf(ring, txd); - memset(tx_buf, 0, sizeof(*tx_buf)); + if (new_desc) + memset(tx_buf, 0, sizeof(*tx_buf)); tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC; tx_buf->flags |= MTK_TX_FLAGS_PAGE0; tx_buf->flags |= (!mac->id) ? 
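
The PDMA support above packs two buffers into each TX descriptor: an even buffer index lands in txd1 with its length in the PLEN0 field of txd2, an odd index in txd3 with its length in PLEN1. That is why setup_tx_buf() keys off (idx & 1), why the unmap path checks dma_len0 and dma_len1 separately, and why mtk_tx_map() only advances to a fresh descriptor on every second buffer in the PDMA case. In outline:

        /* even k: txd->txd1 = addr; txd->txd2  = TX_DMA_PLEN0(len); */
        /* odd  k: txd->txd3 = addr; txd->txd2 |= TX_DMA_PLEN1(len); */
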
MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1; - dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr); - dma_unmap_len_set(tx_buf, dma_len0, frag_map_size); + setup_tx_buf(eth, tx_buf, txd_pdma, mapped_addr, + frag_map_size, k++); + frag_size -= frag_map_size; offset += frag_map_size; } @@ -746,6 +997,12 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, WRITE_ONCE(itxd->txd4, txd4); WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) | (!nr_frags * TX_DMA_LS0))); + if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + if (k & 0x1) + txd_pdma->txd2 |= TX_DMA_LS0; + else + txd_pdma->txd2 |= TX_DMA_LS1; + } netdev_sent_queue(dev, skb->len); skb_tx_timestamp(skb); @@ -758,9 +1015,15 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, */ wmb(); - if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || - !netdev_xmit_more()) - mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || + !netdev_xmit_more()) + mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR); + } else { + int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd), + ring->dma_size); + mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0); + } return 0; @@ -772,7 +1035,11 @@ err_dma: mtk_tx_unmap(eth, tx_buf); itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; + if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) + itxd_pdma->txd2 = TX_DMA_DESP2_DEF; + itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2); + itxd_pdma = qdma_to_pdma(ring, itxd); } while (itxd != txd); return -ENOMEM; @@ -781,13 +1048,14 @@ err_dma: static inline int mtk_cal_txd_req(struct sk_buff *skb) { int i, nfrags; - struct skb_frag_struct *frag; + skb_frag_t *frag; nfrags = 1; if (skb_is_gso(skb)) { for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { frag = &skb_shinfo(skb)->frags[i]; - nfrags += DIV_ROUND_UP(frag->size, MTK_TX_DMA_BUF_LEN); + nfrags += DIV_ROUND_UP(skb_frag_size(frag), + MTK_TX_DMA_BUF_LEN); } } else { nfrags += skb_shinfo(skb)->nr_frags; @@ -902,7 +1170,7 @@ static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth) for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) { ring = ð->rx_ring[i]; - idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size); + idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size); if (ring->dma[idx].rxd2 & RX_DMA_DONE) { ring->calc_idx_update = true; return ring; @@ -945,13 +1213,13 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, struct net_device *netdev; unsigned int pktlen; dma_addr_t dma_addr; - int mac = 0; + int mac; ring = mtk_get_rx_ring(eth); if (unlikely(!ring)) goto rx_done; - idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size); + idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size); rxd = &ring->dma[idx]; data = ring->data[idx]; @@ -960,9 +1228,13 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, break; /* find out which mac the packet come from. 
values start at 1 */ - mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) & - RX_DMA_FPORT_MASK; - mac--; + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { + mac = 0; + } else { + mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) & + RX_DMA_FPORT_MASK; + mac--; + } if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT || !eth->netdev[mac])) @@ -980,7 +1252,8 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, goto release_desc; } dma_addr = dma_map_single(eth->dev, - new_data + NET_SKB_PAD, + new_data + NET_SKB_PAD + + eth->ip_align, ring->buf_size, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(eth->dev, dma_addr))) { @@ -1003,7 +1276,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, pktlen = RX_DMA_GET_PLEN0(trxd.rxd2); skb->dev = netdev; skb_put(skb, pktlen); - if (trxd.rxd4 & RX_DMA_L4_VALID) + if (trxd.rxd4 & eth->rx_dma_l4_valid) skb->ip_summed = CHECKSUM_UNNECESSARY; else skb_checksum_none_assert(skb); @@ -1020,7 +1293,10 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, rxd->rxd1 = (unsigned int)dma_addr; release_desc: - rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) + rxd->rxd2 = RX_DMA_LSO; + else + rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size); ring->calc_idx = idx; @@ -1039,19 +1315,14 @@ rx_done: return done; } -static int mtk_poll_tx(struct mtk_eth *eth, int budget) +static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget, + unsigned int *done, unsigned int *bytes) { struct mtk_tx_ring *ring = ð->tx_ring; struct mtk_tx_dma *desc; struct sk_buff *skb; struct mtk_tx_buf *tx_buf; - unsigned int done[MTK_MAX_DEVS]; - unsigned int bytes[MTK_MAX_DEVS]; u32 cpu, dma; - int total = 0, i; - - memset(done, 0, sizeof(done)); - memset(bytes, 0, sizeof(bytes)); cpu = mtk_r32(eth, MTK_QTX_CRX_PTR); dma = mtk_r32(eth, MTK_QTX_DRX_PTR); @@ -1089,6 +1360,62 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget) mtk_w32(eth, cpu, MTK_QTX_CRX_PTR); + return budget; +} + +static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget, + unsigned int *done, unsigned int *bytes) +{ + struct mtk_tx_ring *ring = ð->tx_ring; + struct mtk_tx_dma *desc; + struct sk_buff *skb; + struct mtk_tx_buf *tx_buf; + u32 cpu, dma; + + cpu = ring->cpu_idx; + dma = mtk_r32(eth, MT7628_TX_DTX_IDX0); + + while ((cpu != dma) && budget) { + tx_buf = &ring->buf[cpu]; + skb = tx_buf->skb; + if (!skb) + break; + + if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) { + bytes[0] += skb->len; + done[0]++; + budget--; + } + + mtk_tx_unmap(eth, tx_buf); + + desc = &ring->dma[cpu]; + ring->last_free = desc; + atomic_inc(&ring->free_count); + + cpu = NEXT_DESP_IDX(cpu, ring->dma_size); + } + + ring->cpu_idx = cpu; + + return budget; +} + +static int mtk_poll_tx(struct mtk_eth *eth, int budget) +{ + struct mtk_tx_ring *ring = ð->tx_ring; + unsigned int done[MTK_MAX_DEVS]; + unsigned int bytes[MTK_MAX_DEVS]; + int total = 0, i; + + memset(done, 0, sizeof(done)); + memset(bytes, 0, sizeof(bytes)); + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) + budget = mtk_poll_tx_qdma(eth, budget, done, bytes); + else + budget = mtk_poll_tx_pdma(eth, budget, done, bytes); + for (i = 0; i < MTK_MAC_COUNT; i++) { if (!eth->netdev[i] || !done[i]) continue; @@ -1120,13 +1447,14 @@ static int mtk_napi_tx(struct napi_struct *napi, int budget) u32 status, mask; int tx_done = 0; - mtk_handle_status_irq(eth); - mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) + mtk_handle_status_irq(eth); + mtk_w32(eth, MTK_TX_DONE_INT, 
eth->tx_int_status_reg); tx_done = mtk_poll_tx(eth, budget); if (unlikely(netif_msg_intr(eth))) { - status = mtk_r32(eth, MTK_QMTK_INT_STATUS); - mask = mtk_r32(eth, MTK_QDMA_INT_MASK); + status = mtk_r32(eth, eth->tx_int_status_reg); + mask = mtk_r32(eth, eth->tx_int_mask_reg); dev_info(eth->dev, "done tx %d, intr 0x%08x/0x%x\n", tx_done, status, mask); @@ -1135,7 +1463,7 @@ static int mtk_napi_tx(struct napi_struct *napi, int budget) if (tx_done == budget) return budget; - status = mtk_r32(eth, MTK_QMTK_INT_STATUS); + status = mtk_r32(eth, eth->tx_int_status_reg); if (status & MTK_TX_DONE_INT) return budget; @@ -1202,6 +1530,24 @@ static int mtk_tx_alloc(struct mtk_eth *eth) ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; } + /* On MT7688 (PDMA only) this driver uses the ring->dma structs + * only as the framework. The real HW descriptors are the PDMA + * descriptors in ring->dma_pdma. + */ + if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz, + &ring->phys_pdma, + GFP_ATOMIC); + if (!ring->dma_pdma) + goto no_tx_mem; + + for (i = 0; i < MTK_DMA_SIZE; i++) { + ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF; + ring->dma_pdma[i].txd4 = 0; + } + } + + ring->dma_size = MTK_DMA_SIZE; atomic_set(&ring->free_count, MTK_DMA_SIZE - 2); ring->next_free = &ring->dma[0]; ring->last_free = &ring->dma[MTK_DMA_SIZE - 1]; @@ -1212,15 +1558,23 @@ static int mtk_tx_alloc(struct mtk_eth *eth) */ wmb(); - mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR); - mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR); - mtk_w32(eth, - ring->phys + ((MTK_DMA_SIZE - 1) * sz), - MTK_QTX_CRX_PTR); - mtk_w32(eth, - ring->phys + ((MTK_DMA_SIZE - 1) * sz), - MTK_QTX_DRX_PTR); - mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0)); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR); + mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR); + mtk_w32(eth, + ring->phys + ((MTK_DMA_SIZE - 1) * sz), + MTK_QTX_CRX_PTR); + mtk_w32(eth, + ring->phys + ((MTK_DMA_SIZE - 1) * sz), + MTK_QTX_DRX_PTR); + mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, + MTK_QTX_CFG(0)); + } else { + mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0); + mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0); + mtk_w32(eth, 0, MT7628_TX_CTX_IDX0); + mtk_w32(eth, MT7628_PST_DTX_IDX0, MTK_PDMA_RST_IDX); + } return 0; @@ -1247,6 +1601,14 @@ static void mtk_tx_clean(struct mtk_eth *eth) ring->phys); ring->dma = NULL; } + + if (ring->dma_pdma) { + dma_free_coherent(eth->dev, + MTK_DMA_SIZE * sizeof(*ring->dma_pdma), + ring->dma_pdma, + ring->phys_pdma); + ring->dma_pdma = NULL; + } } static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) @@ -1294,14 +1656,17 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) for (i = 0; i < rx_dma_size; i++) { dma_addr_t dma_addr = dma_map_single(eth->dev, - ring->data[i] + NET_SKB_PAD, + ring->data[i] + NET_SKB_PAD + eth->ip_align, ring->buf_size, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(eth->dev, dma_addr))) return -ENOMEM; ring->dma[i].rxd1 = (unsigned int)dma_addr; - ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) + ring->dma[i].rxd2 = RX_DMA_LSO; + else + ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size); } ring->dma_size = rx_dma_size; ring->calc_idx_update = false; @@ -1617,9 +1982,16 @@ static int mtk_dma_busy_wait(struct mtk_eth *eth) unsigned long t_start = jiffies; while (1) { - if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) & - (MTK_RX_DMA_BUSY 
| MTK_TX_DMA_BUSY))) - return 0; + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) & + (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY))) + return 0; + } else { + if (!(mtk_r32(eth, MTK_PDMA_GLO_CFG) & + (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY))) + return 0; + } + if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT)) break; } @@ -1636,20 +2008,24 @@ static int mtk_dma_init(struct mtk_eth *eth) if (mtk_dma_busy_wait(eth)) return -EBUSY; - /* QDMA needs scratch memory for internal reordering of the - * descriptors - */ - err = mtk_init_fq_dma(eth); - if (err) - return err; + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + /* QDMA needs scratch memory for internal reordering of the + * descriptors + */ + err = mtk_init_fq_dma(eth); + if (err) + return err; + } err = mtk_tx_alloc(eth); if (err) return err; - err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA); - if (err) - return err; + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA); + if (err) + return err; + } err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL); if (err) @@ -1666,10 +2042,14 @@ static int mtk_dma_init(struct mtk_eth *eth) return err; } - /* Enable random early drop and set drop threshold automatically */ - mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | FC_THRES_MIN, - MTK_QDMA_FC_THRES); - mtk_w32(eth, 0x0, MTK_QDMA_HRED2); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + /* Enable random early drop and set drop threshold + * automatically + */ + mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | + FC_THRES_MIN, MTK_QDMA_FC_THRES); + mtk_w32(eth, 0x0, MTK_QDMA_HRED2); + } return 0; } @@ -1745,8 +2125,8 @@ static irqreturn_t mtk_handle_irq(int irq, void *_eth) if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT) mtk_handle_irq_rx(irq, _eth); } - if (mtk_r32(eth, MTK_QDMA_INT_MASK) & MTK_TX_DONE_INT) { - if (mtk_r32(eth, MTK_QMTK_INT_STATUS) & MTK_TX_DONE_INT) + if (mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) { + if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT) mtk_handle_irq_tx(irq, _eth); } @@ -1778,17 +2158,23 @@ static int mtk_start_dma(struct mtk_eth *eth) return err; } - mtk_w32(eth, - MTK_TX_WB_DDONE | MTK_TX_DMA_EN | - MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO | - MTK_RX_DMA_EN | MTK_RX_2B_OFFSET | - MTK_RX_BT_32DWORDS, - MTK_QDMA_GLO_CFG); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + mtk_w32(eth, + MTK_TX_WB_DDONE | MTK_TX_DMA_EN | + MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO | + MTK_RX_DMA_EN | MTK_RX_2B_OFFSET | + MTK_RX_BT_32DWORDS, + MTK_QDMA_GLO_CFG); - mtk_w32(eth, - MTK_RX_DMA_EN | rx_2b_offset | - MTK_RX_BT_32DWORDS | MTK_MULTI_EN, - MTK_PDMA_GLO_CFG); + mtk_w32(eth, + MTK_RX_DMA_EN | rx_2b_offset | + MTK_RX_BT_32DWORDS | MTK_MULTI_EN, + MTK_PDMA_GLO_CFG); + } else { + mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN | + MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS, + MTK_PDMA_GLO_CFG); + } return 0; } @@ -1797,6 +2183,14 @@ static int mtk_open(struct net_device *dev) { struct mtk_mac *mac = netdev_priv(dev); struct mtk_eth *eth = mac->hw; + int err; + + err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0); + if (err) { + netdev_err(dev, "%s: could not attach PHY: %d\n", __func__, + err); + return err; + } /* we run 2 netdevs on the same dma ring so we only bring it up once */ if (!refcount_read(ð->dma_refcnt)) { @@ -1814,9 +2208,8 @@ static int mtk_open(struct net_device *dev) else refcount_inc(ð->dma_refcnt); - phy_start(dev->phydev); + phylink_start(mac->phylink); netif_start_queue(dev); - return 0; } @@ 
-1848,8 +2241,11 @@ static int mtk_stop(struct net_device *dev) struct mtk_mac *mac = netdev_priv(dev); struct mtk_eth *eth = mac->hw; + phylink_stop(mac->phylink); + netif_tx_disable(dev); - phy_stop(dev->phydev); + + phylink_disconnect_phy(mac->phylink); /* only shutdown DMA if this is the last user */ if (!refcount_dec_and_test(ð->dma_refcnt)) @@ -1860,7 +2256,8 @@ static int mtk_stop(struct net_device *dev) napi_disable(ð->tx_napi); napi_disable(ð->rx_napi); - mtk_stop_dma(eth, MTK_QDMA_GLO_CFG); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) + mtk_stop_dma(eth, MTK_QDMA_GLO_CFG); mtk_stop_dma(eth, MTK_PDMA_GLO_CFG); mtk_dma_free(eth); @@ -1922,17 +2319,26 @@ static int mtk_hw_init(struct mtk_eth *eth) if (ret) goto err_disable_pm; - ethsys_reset(eth, RSTCTRL_FE); - ethsys_reset(eth, RSTCTRL_PPE); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { + ret = device_reset(eth->dev); + if (ret) { + dev_err(eth->dev, "MAC reset failed!\n"); + goto err_disable_pm; + } - regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); - for (i = 0; i < MTK_MAC_COUNT; i++) { - if (!eth->mac[i]) - continue; - val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, eth->mac[i]->id); - val |= SYSCFG0_GE_MODE(eth->mac[i]->ge_mode, eth->mac[i]->id); + /* enable interrupt delay for RX */ + mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT); + + /* disable delay and normal interrupt */ + mtk_tx_irq_disable(eth, ~0); + mtk_rx_irq_disable(eth, ~0); + + return 0; } - regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val); + + /* Non-MT7628 handling... */ + ethsys_reset(eth, RSTCTRL_FE); + ethsys_reset(eth, RSTCTRL_PPE); if (eth->pctl) { /* Set GE2 driving and slew rate */ @@ -1946,11 +2352,11 @@ static int mtk_hw_init(struct mtk_eth *eth) } /* Set linkdown as the default for each GMAC. Its own MCR would be set - * up with the more appropriate value when mtk_phy_link_adjust call is - * being invoked. + * up with the more appropriate value when mtk_mac_config call is being + * invoked. */ for (i = 0; i < MTK_MAC_COUNT; i++) - mtk_w32(eth, 0, MTK_MAC_MCR(i)); + mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i)); /* Indicates CDM to parse the MTK special tag from CPU * which also is working out for untag packets. 
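All of the MT7628/88 special-casing above hangs off the SoC capability bitmap rather than repeated compatible-string checks: MTK_HAS_CAPS(), defined in mtk_eth_soc.h further down in this diff, is true only when every requested bit is set, which is what lets composite flags such as MT7628_CAPS gate whole code paths. A minimal, self-contained sketch of the pattern (the flag values here are abbreviated stand-ins; the real bits come from enum mkt_eth_capabilities):

#include <stdint.h>
#include <stdio.h>

/* Abbreviated stand-ins; the driver derives these from
 * enum mkt_eth_capabilities. */
#define MTK_QDMA	(1u << 0)
#define MTK_SHARED_INT	(1u << 1)
#define MTK_SOC_MT7628	(1u << 2)

/* Same test as the driver: true only if *all* bits of _x are set. */
#define MTK_HAS_CAPS(caps, _x)	(((caps) & (_x)) == (_x))

int main(void)
{
	uint32_t mt7621_caps = MTK_QDMA | MTK_SHARED_INT;
	uint32_t mt7628_caps = MTK_SOC_MT7628 | MTK_SHARED_INT;

	/* QDMA-only paths (FQ scratch memory, QTX ring registers) run on
	 * MT7621 but are skipped on MT7628, which falls back to PDMA. */
	printf("mt7621 uses QDMA: %d\n", MTK_HAS_CAPS(mt7621_caps, MTK_QDMA));
	printf("mt7628 uses QDMA: %d\n", MTK_HAS_CAPS(mt7628_caps, MTK_QDMA));

	return 0;
}

In the patch this is exactly the shape of every if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) branch at points where the QDMA and PDMA register layouts diverge.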
@@ -1978,7 +2384,7 @@ static int mtk_hw_init(struct mtk_eth *eth) mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2); mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP); - for (i = 0; i < 2; i++) { + for (i = 0; i < MTK_MAC_COUNT; i++) { u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i)); /* setup the forward port to send frame to PDMA */ @@ -2030,7 +2436,7 @@ static int __init mtk_init(struct net_device *dev) dev->dev_addr); } - return mtk_phy_connect(dev); + return 0; } static void mtk_uninit(struct net_device *dev) @@ -2038,20 +2444,20 @@ static void mtk_uninit(struct net_device *dev) struct mtk_mac *mac = netdev_priv(dev); struct mtk_eth *eth = mac->hw; - phy_disconnect(dev->phydev); - if (of_phy_is_fixed_link(mac->of_node)) - of_phy_deregister_fixed_link(mac->of_node); + phylink_disconnect_phy(mac->phylink); mtk_tx_irq_disable(eth, ~0); mtk_rx_irq_disable(eth, ~0); } static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { + struct mtk_mac *mac = netdev_priv(dev); + switch (cmd) { case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSMIIREG: - return phy_mii_ioctl(dev->phydev, ifr, cmd); + return phylink_mii_ioctl(mac->phylink, ifr, cmd); default: break; } @@ -2092,16 +2498,6 @@ static void mtk_pending_work(struct work_struct *work) eth->dev->pins->default_state); mtk_hw_init(eth); - for (i = 0; i < MTK_MAC_COUNT; i++) { - if (!eth->mac[i] || - of_phy_is_fixed_link(eth->mac[i]->of_node)) - continue; - err = phy_init_hw(eth->netdev[i]->phydev); - if (err) - dev_err(eth->dev, "%s: PHY init failed.\n", - eth->netdev[i]->name); - } - /* restart DMA and enable IRQs */ for (i = 0; i < MTK_MAC_COUNT; i++) { if (!test_bit(i, &restart)) @@ -2164,9 +2560,7 @@ static int mtk_get_link_ksettings(struct net_device *ndev, if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) return -EBUSY; - phy_ethtool_ksettings_get(ndev->phydev, cmd); - - return 0; + return phylink_ethtool_ksettings_get(mac->phylink, cmd); } static int mtk_set_link_ksettings(struct net_device *ndev, @@ -2177,7 +2571,7 @@ static int mtk_set_link_ksettings(struct net_device *ndev, if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) return -EBUSY; - return phy_ethtool_ksettings_set(ndev->phydev, cmd); + return phylink_ethtool_ksettings_set(mac->phylink, cmd); } static void mtk_get_drvinfo(struct net_device *dev, @@ -2211,22 +2605,10 @@ static int mtk_nway_reset(struct net_device *dev) if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) return -EBUSY; - return genphy_restart_aneg(dev->phydev); -} + if (!mac->phylink) + return -ENOTSUPP; -static u32 mtk_get_link(struct net_device *dev) -{ - struct mtk_mac *mac = netdev_priv(dev); - int err; - - if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) - return -EBUSY; - - err = genphy_update_link(dev->phydev); - if (err) - return ethtool_op_get_link(dev); - - return dev->phydev->link; + return phylink_ethtool_nway_reset(mac->phylink); } static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data) @@ -2346,7 +2728,7 @@ static const struct ethtool_ops mtk_ethtool_ops = { .get_msglevel = mtk_get_msglevel, .set_msglevel = mtk_set_msglevel, .nway_reset = mtk_nway_reset, - .get_link = mtk_get_link, + .get_link = ethtool_op_get_link, .get_strings = mtk_get_strings, .get_sset_count = mtk_get_sset_count, .get_ethtool_stats = mtk_get_ethtool_stats, @@ -2374,9 +2756,10 @@ static const struct net_device_ops mtk_netdev_ops = { static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) { - struct mtk_mac *mac; const __be32 *_id = of_get_property(np, "reg", NULL); - int id, 
err; + struct phylink *phylink; + int phy_mode, id, err; + struct mtk_mac *mac; if (!_id) { dev_err(eth->dev, "missing mac id\n"); @@ -2420,18 +2803,44 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) u64_stats_init(&mac->hw_stats->syncp); mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET; + /* phylink create */ + phy_mode = of_get_phy_mode(np); + if (phy_mode < 0) { + dev_err(eth->dev, "incorrect phy-mode\n"); + err = -EINVAL; + goto free_netdev; + } + + /* mac config is not set */ + mac->interface = PHY_INTERFACE_MODE_NA; + mac->mode = MLO_AN_PHY; + mac->speed = SPEED_UNKNOWN; + + mac->phylink_config.dev = ð->netdev[id]->dev; + mac->phylink_config.type = PHYLINK_NETDEV; + + phylink = phylink_create(&mac->phylink_config, + of_fwnode_handle(mac->of_node), + phy_mode, &mtk_phylink_ops); + if (IS_ERR(phylink)) { + err = PTR_ERR(phylink); + goto free_netdev; + } + + mac->phylink = phylink; + SET_NETDEV_DEV(eth->netdev[id], eth->dev); eth->netdev[id]->watchdog_timeo = 5 * HZ; eth->netdev[id]->netdev_ops = &mtk_netdev_ops; eth->netdev[id]->base_addr = (unsigned long)eth->base; - eth->netdev[id]->hw_features = MTK_HW_FEATURES; + eth->netdev[id]->hw_features = eth->soc->hw_features; if (eth->hwlro) eth->netdev[id]->hw_features |= NETIF_F_LRO; - eth->netdev[id]->vlan_features = MTK_HW_FEATURES & + eth->netdev[id]->vlan_features = eth->soc->hw_features & ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX); - eth->netdev[id]->features |= MTK_HW_FEATURES; + eth->netdev[id]->features |= eth->soc->hw_features; eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops; eth->netdev[id]->irq = eth->irq[0]; @@ -2446,11 +2855,9 @@ free_netdev: static int mtk_probe(struct platform_device *pdev) { - struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); struct device_node *mac_np; struct mtk_eth *eth; - int err; - int i; + int err, i; eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL); if (!eth) @@ -2459,19 +2866,36 @@ static int mtk_probe(struct platform_device *pdev) eth->soc = of_device_get_match_data(&pdev->dev); eth->dev = &pdev->dev; - eth->base = devm_ioremap_resource(&pdev->dev, res); + eth->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(eth->base)) return PTR_ERR(eth->base); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + eth->tx_int_mask_reg = MTK_QDMA_INT_MASK; + eth->tx_int_status_reg = MTK_QDMA_INT_STATUS; + } else { + eth->tx_int_mask_reg = MTK_PDMA_INT_MASK; + eth->tx_int_status_reg = MTK_PDMA_INT_STATUS; + } + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { + eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA; + eth->ip_align = NET_IP_ALIGN; + } else { + eth->rx_dma_l4_valid = RX_DMA_L4_VALID; + } + spin_lock_init(ð->page_lock); spin_lock_init(ð->tx_irq_lock); spin_lock_init(ð->rx_irq_lock); - eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, - "mediatek,ethsys"); - if (IS_ERR(eth->ethsys)) { - dev_err(&pdev->dev, "no ethsys regmap found\n"); - return PTR_ERR(eth->ethsys); + if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { + eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, + "mediatek,ethsys"); + if (IS_ERR(eth->ethsys)) { + dev_err(&pdev->dev, "no ethsys regmap found\n"); + return PTR_ERR(eth->ethsys); + } } if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) { @@ -2572,9 +2996,12 @@ static int mtk_probe(struct platform_device *pdev) if (err) goto err_free_dev; - err = mtk_mdio_init(eth); - if (err) - goto err_free_dev; + /* No MT7628/88 support yet */ + if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { + err = 
mtk_mdio_init(eth); + if (err) + goto err_free_dev; + } for (i = 0; i < MTK_MAX_DEVS; i++) { if (!eth->netdev[i]) @@ -2616,6 +3043,7 @@ err_deinit_hw: static int mtk_remove(struct platform_device *pdev) { struct mtk_eth *eth = platform_get_drvdata(pdev); + struct mtk_mac *mac; int i; /* stop all devices to make sure that dma is properly shut down */ @@ -2623,6 +3051,8 @@ static int mtk_remove(struct platform_device *pdev) if (!eth->netdev[i]) continue; mtk_stop(eth->netdev[i]); + mac = netdev_priv(eth->netdev[i]); + phylink_disconnect_phy(mac->phylink); } mtk_hw_deinit(eth); @@ -2637,12 +3067,14 @@ static int mtk_remove(struct platform_device *pdev) static const struct mtk_soc_data mt2701_data = { .caps = MT7623_CAPS | MTK_HWLRO, + .hw_features = MTK_HW_FEATURES, .required_clks = MT7623_CLKS_BITMAP, .required_pctl = true, }; static const struct mtk_soc_data mt7621_data = { .caps = MT7621_CAPS, + .hw_features = MTK_HW_FEATURES, .required_clks = MT7621_CLKS_BITMAP, .required_pctl = false, }; @@ -2650,12 +3082,14 @@ static const struct mtk_soc_data mt7621_data = { static const struct mtk_soc_data mt7622_data = { .ana_rgc3 = 0x2028, .caps = MT7622_CAPS | MTK_HWLRO, + .hw_features = MTK_HW_FEATURES, .required_clks = MT7622_CLKS_BITMAP, .required_pctl = false, }; static const struct mtk_soc_data mt7623_data = { .caps = MT7623_CAPS | MTK_HWLRO, + .hw_features = MTK_HW_FEATURES, .required_clks = MT7623_CLKS_BITMAP, .required_pctl = true, }; @@ -2663,16 +3097,25 @@ static const struct mtk_soc_data mt7623_data = { static const struct mtk_soc_data mt7629_data = { .ana_rgc3 = 0x128, .caps = MT7629_CAPS | MTK_HWLRO, + .hw_features = MTK_HW_FEATURES, .required_clks = MT7629_CLKS_BITMAP, .required_pctl = false, }; +static const struct mtk_soc_data rt5350_data = { + .caps = MT7628_CAPS, + .hw_features = MTK_HW_FEATURES_MT7628, + .required_clks = MT7628_CLKS_BITMAP, + .required_pctl = false, +}; + const struct of_device_id of_mtk_match[] = { { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data}, { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data}, { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data}, { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data}, { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data}, + { .compatible = "ralink,rt5350-eth", .data = &rt5350_data}, {}, }; MODULE_DEVICE_TABLE(of, of_mtk_match); diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index bab94f763e2c..76bd12cb8150 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -14,6 +14,7 @@ #include <linux/of_net.h> #include <linux/u64_stats_sync.h> #include <linux/refcount.h> +#include <linux/phylink.h> #define MTK_QDMA_PAGE_SIZE 2048 #define MTK_MAX_RX_LENGTH 1536 @@ -39,7 +40,8 @@ NETIF_F_SG | NETIF_F_TSO | \ NETIF_F_TSO6 | \ NETIF_F_IPV6_CSUM) -#define NEXT_RX_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1)) +#define MTK_HW_FEATURES_MT7628 (NETIF_F_SG | NETIF_F_RXCSUM) +#define NEXT_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1)) #define MTK_MAX_RX_RING_NUM 4 #define MTK_HW_LRO_DMA_SIZE 8 @@ -118,6 +120,7 @@ /* PDMA Global Configuration Register */ #define MTK_PDMA_GLO_CFG 0xa04 #define MTK_MULTI_EN BIT(10) +#define MTK_PDMA_SIZE_8DWORDS (1 << 4) /* PDMA Reset Index Register */ #define MTK_PDMA_RST_IDX 0xa08 @@ -212,7 +215,7 @@ #define FC_THRES_MIN 0x4444 /* QDMA Interrupt Status Register */ -#define MTK_QMTK_INT_STATUS 0x1A18 +#define MTK_QDMA_INT_STATUS 0x1A18 #define MTK_RX_DONE_DLY BIT(30) #define 
MTK_RX_DONE_INT3 BIT(19) #define MTK_RX_DONE_INT2 BIT(18) @@ -276,11 +279,18 @@ #define TX_DMA_OWNER_CPU BIT(31) #define TX_DMA_LS0 BIT(30) #define TX_DMA_PLEN0(_x) (((_x) & MTK_TX_DMA_BUF_LEN) << 16) +#define TX_DMA_PLEN1(_x) ((_x) & MTK_TX_DMA_BUF_LEN) #define TX_DMA_SWC BIT(14) #define TX_DMA_SDL(_x) (((_x) & 0x3fff) << 16) +/* PDMA on MT7628 */ +#define TX_DMA_DONE BIT(31) +#define TX_DMA_LS1 BIT(14) +#define TX_DMA_DESP2_DEF (TX_DMA_LS0 | TX_DMA_DONE) + /* QDMA descriptor rxd2 */ #define RX_DMA_DONE BIT(31) +#define RX_DMA_LSO BIT(30) #define RX_DMA_PLEN0(_x) (((_x) & 0x3fff) << 16) #define RX_DMA_GET_PLEN0(_x) (((_x) >> 16) & 0x3fff) @@ -289,6 +299,7 @@ /* QDMA descriptor rxd4 */ #define RX_DMA_L4_VALID BIT(24) +#define RX_DMA_L4_VALID_PDMA BIT(30) /* when PDMA is used */ #define RX_DMA_FPORT_SHIFT 19 #define RX_DMA_FPORT_MASK 0x7 @@ -320,12 +331,19 @@ #define MAC_MCR_SPEED_100 BIT(2) #define MAC_MCR_FORCE_DPX BIT(1) #define MAC_MCR_FORCE_LINK BIT(0) -#define MAC_MCR_FIXED_LINK (MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | \ - MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN | \ - MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN | \ - MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_RX_FC | \ - MAC_MCR_FORCE_TX_FC | MAC_MCR_SPEED_1000 | \ - MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_LINK) +#define MAC_MCR_FORCE_LINK_DOWN (MAC_MCR_FORCE_MODE) + +/* Mac status registers */ +#define MTK_MAC_MSR(x) (0x10108 + (x * 0x100)) +#define MAC_MSR_EEE1G BIT(7) +#define MAC_MSR_EEE100M BIT(6) +#define MAC_MSR_RX_FC BIT(5) +#define MAC_MSR_TX_FC BIT(4) +#define MAC_MSR_SPEED_1000 BIT(3) +#define MAC_MSR_SPEED_100 BIT(2) +#define MAC_MSR_SPEED_MASK (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100) +#define MAC_MSR_DPX BIT(1) +#define MAC_MSR_LINK BIT(0) /* TRGMII RXC control register */ #define TRGMII_RCK_CTRL 0x10300 @@ -394,14 +412,38 @@ /* Register to auto-negotiation restart */ #define SGMSYS_PCS_CONTROL_1 0x0 #define SGMII_AN_RESTART BIT(9) +#define SGMII_ISOLATE BIT(10) +#define SGMII_AN_ENABLE BIT(12) +#define SGMII_LINK_STATYS BIT(18) +#define SGMII_AN_ABILITY BIT(19) +#define SGMII_AN_COMPLETE BIT(21) +#define SGMII_PCS_FAULT BIT(23) +#define SGMII_AN_EXPANSION_CLR BIT(30) /* Register to programmable link timer, the unit in 2 * 8ns */ #define SGMSYS_PCS_LINK_TIMER 0x18 #define SGMII_LINK_TIMER_DEFAULT (0x186a0 & GENMASK(19, 0)) /* Register to control remote fault */ -#define SGMSYS_SGMII_MODE 0x20 -#define SGMII_REMOTE_FAULT_DIS BIT(8) +#define SGMSYS_SGMII_MODE 0x20 +#define SGMII_IF_MODE_BIT0 BIT(0) +#define SGMII_SPEED_DUPLEX_AN BIT(1) +#define SGMII_SPEED_10 0x0 +#define SGMII_SPEED_100 BIT(2) +#define SGMII_SPEED_1000 BIT(3) +#define SGMII_DUPLEX_FULL BIT(4) +#define SGMII_IF_MODE_BIT5 BIT(5) +#define SGMII_REMOTE_FAULT_DIS BIT(8) +#define SGMII_CODE_SYNC_SET_VAL BIT(9) +#define SGMII_CODE_SYNC_SET_EN BIT(10) +#define SGMII_SEND_AN_ERROR_EN BIT(11) +#define SGMII_IF_MODE_MASK GENMASK(5, 1) + +/* Register to set SGMII speed, ANA RG_ Control Signals III*/ +#define SGMSYS_ANA_RG_CS3 0x2028 +#define RG_PHY_SPEED_MASK (BIT(2) | BIT(3)) +#define RG_PHY_SPEED_1_25G 0x0 +#define RG_PHY_SPEED_3_125G BIT(2) /* Register to power up QPHY */ #define SGMSYS_QPHY_PWR_STATE_CTRL 0xe8 @@ -412,6 +454,19 @@ #define CO_QPHY_SEL BIT(0) #define GEPHY_MAC_SEL BIT(1) +/* MT7628/88 specific stuff */ +#define MT7628_PDMA_OFFSET 0x0800 +#define MT7628_SDM_OFFSET 0x0c00 + +#define MT7628_TX_BASE_PTR0 (MT7628_PDMA_OFFSET + 0x00) +#define MT7628_TX_MAX_CNT0 (MT7628_PDMA_OFFSET + 0x04) +#define MT7628_TX_CTX_IDX0 (MT7628_PDMA_OFFSET + 0x08) +#define MT7628_TX_DTX_IDX0 
(MT7628_PDMA_OFFSET + 0x0c) +#define MT7628_PST_DTX_IDX0 BIT(0) + +#define MT7628_SDM_MAC_ADRL (MT7628_SDM_OFFSET + 0x0c) +#define MT7628_SDM_MAC_ADRH (MT7628_SDM_OFFSET + 0x10) + struct mtk_rx_dma { unsigned int rxd1; unsigned int rxd2; @@ -509,6 +564,7 @@ enum mtk_clks_map { BIT(MTK_CLK_SGMII_CK) | \ BIT(MTK_CLK_ETH2PLL)) #define MT7621_CLKS_BITMAP (0) +#define MT7628_CLKS_BITMAP (0) #define MT7629_CLKS_BITMAP (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \ BIT(MTK_CLK_GP0) | BIT(MTK_CLK_GP1) | \ BIT(MTK_CLK_GP2) | BIT(MTK_CLK_FE) | \ @@ -563,6 +619,10 @@ struct mtk_tx_ring { struct mtk_tx_dma *last_free; u16 thresh; atomic_t free_count; + int dma_size; + struct mtk_tx_dma *dma_pdma; /* For MT7628/88 PDMA handling */ + dma_addr_t phys_pdma; + int cpu_idx; }; /* PDMA rx ring mode */ @@ -604,6 +664,8 @@ enum mkt_eth_capabilities { MTK_HWLRO_BIT, MTK_SHARED_INT_BIT, MTK_TRGMII_MT7621_CLK_BIT, + MTK_QDMA_BIT, + MTK_SOC_MT7628_BIT, /* MUX BITS*/ MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT, @@ -634,6 +696,8 @@ enum mkt_eth_capabilities { #define MTK_HWLRO BIT(MTK_HWLRO_BIT) #define MTK_SHARED_INT BIT(MTK_SHARED_INT_BIT) #define MTK_TRGMII_MT7621_CLK BIT(MTK_TRGMII_MT7621_CLK_BIT) +#define MTK_QDMA BIT(MTK_QDMA_BIT) +#define MTK_SOC_MT7628 BIT(MTK_SOC_MT7628_BIT) #define MTK_ETH_MUX_GDM1_TO_GMAC1_ESW \ BIT(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT) @@ -687,26 +751,31 @@ enum mkt_eth_capabilities { #define MTK_HAS_CAPS(caps, _x) (((caps) & (_x)) == (_x)) #define MT7621_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | \ - MTK_GMAC2_RGMII | MTK_SHARED_INT | MTK_TRGMII_MT7621_CLK) + MTK_GMAC2_RGMII | MTK_SHARED_INT | \ + MTK_TRGMII_MT7621_CLK | MTK_QDMA) #define MT7622_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_SGMII | MTK_GMAC2_RGMII | \ MTK_GMAC2_SGMII | MTK_GDM1_ESW | \ MTK_MUX_GDM1_TO_GMAC1_ESW | \ - MTK_MUX_GMAC1_GMAC2_TO_SGMII_RGMII) + MTK_MUX_GMAC1_GMAC2_TO_SGMII_RGMII | MTK_QDMA) -#define MT7623_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | MTK_GMAC2_RGMII) +#define MT7623_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | MTK_GMAC2_RGMII | \ + MTK_QDMA) + +#define MT7628_CAPS (MTK_SHARED_INT | MTK_SOC_MT7628) #define MT7629_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | MTK_GMAC2_GEPHY | \ MTK_GDM1_ESW | MTK_MUX_GDM1_TO_GMAC1_ESW | \ MTK_MUX_GMAC2_GMAC0_TO_GEPHY | \ MTK_MUX_U3_GMAC2_TO_QPHY | \ - MTK_MUX_GMAC12_TO_GEPHY_SGMII) + MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA) /* struct mtk_eth_data - This is the structure holding all differences * among various plaforms * @ana_rgc3: The offset for register ANA_RGC3 related to * sgmiisys syscon * @caps Flags shown the extra capability for the SoC + * @hw_features Flags shown HW features * @required_clks Flags shown the bitmap for required clocks on * the target SoC * @required_pctl A bool value to show whether the SoC requires @@ -717,6 +786,7 @@ struct mtk_soc_data { u32 caps; u32 required_clks; bool required_pctl; + netdev_features_t hw_features; }; /* currently no SoC has more than 2 macs */ @@ -810,27 +880,33 @@ struct mtk_eth { unsigned long state; const struct mtk_soc_data *soc; + + u32 tx_int_mask_reg; + u32 tx_int_status_reg; + u32 rx_dma_l4_valid; + int ip_align; }; /* struct mtk_mac - the structure that holds the info about the MACs of the * SoC * @id: The number of the MAC - * @ge_mode: Interface mode kept for setup restoring + * @interface: Interface mode kept for detecting change in hw settings * @of_node: Our devicetree node * @hw: Backpointer to our main datastruture * @hw_stats: Packet statistics counter - * @trgmii Indicate if the MAC uses TRGMII connected to internal - switch */ 
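/* With the phylink conversion, per-MAC link-management state moves into
 * struct mtk_mac below: @interface and @speed cache what was last
 * programmed so a change in hw settings can be detected from
 * mtk_mac_config(), while @phylink and @phylink_config are the handle
 * and config that mtk_add_mac() creates for each MAC via
 * phylink_create(). The old @ge_mode/@trgmii bookkeeping they replace
 * is removed in the same hunk. */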
struct mtk_mac { int id; - int ge_mode; + phy_interface_t interface; + unsigned int mode; + int speed; struct device_node *of_node; + struct phylink *phylink; + struct phylink_config phylink_config; struct mtk_eth *hw; struct mtk_hw_stats *hw_stats; __be32 hwlro_ip[MTK_MAX_LRO_IP_CNT]; int hwlro_ip_cnt; - bool trgmii; }; /* the struct describing the SoC. these are declared in the soc_xyz.c files */ @@ -845,7 +921,12 @@ u32 mtk_r32(struct mtk_eth *eth, unsigned reg); int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *np, u32 ana_rgc3); int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id); -int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id); -int mtk_setup_hw_path(struct mtk_eth *eth, int mac_id, int phymode); +int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id, + const struct phylink_link_state *state); +void mtk_sgmii_restart_an(struct mtk_eth *eth, int mac_id); + +int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id); +int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id); +int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id); #endif /* MTK_ETH_H */ diff --git a/drivers/net/ethernet/mediatek/mtk_sgmii.c b/drivers/net/ethernet/mediatek/mtk_sgmii.c index ff509d42d818..4db27dfc7ec1 100644 --- a/drivers/net/ethernet/mediatek/mtk_sgmii.c +++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c @@ -16,8 +16,7 @@ int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3) { struct device_node *np; - const char *str; - int i, err; + int i; ss->ana_rgc3 = ana_rgc3; @@ -29,19 +28,6 @@ int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3) ss->regmap[i] = syscon_node_to_regmap(np); if (IS_ERR(ss->regmap[i])) return PTR_ERR(ss->regmap[i]); - - err = of_property_read_string(np, "mediatek,physpeed", &str); - if (err) - return err; - - if (!strcmp(str, "2500")) - ss->flags[i] |= MTK_SGMII_PHYSPEED_2500; - else if (!strcmp(str, "1000")) - ss->flags[i] |= MTK_SGMII_PHYSPEED_1000; - else if (!strcmp(str, "auto")) - ss->flags[i] |= MTK_SGMII_PHYSPEED_AN; - else - return -EINVAL; } return 0; @@ -73,27 +59,45 @@ int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id) return 0; } -int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id) +int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id, + const struct phylink_link_state *state) { unsigned int val; - int mode; if (!ss->regmap[id]) return -EINVAL; regmap_read(ss->regmap[id], ss->ana_rgc3, &val); - val &= ~GENMASK(3, 2); - mode = ss->flags[id] & MTK_SGMII_PHYSPEED_MASK; - val |= (mode == MTK_SGMII_PHYSPEED_1000) ? 
0 : BIT(2); + val &= ~RG_PHY_SPEED_MASK; + if (state->interface == PHY_INTERFACE_MODE_2500BASEX) + val |= RG_PHY_SPEED_3_125G; regmap_write(ss->regmap[id], ss->ana_rgc3, val); /* Disable SGMII AN */ regmap_read(ss->regmap[id], SGMSYS_PCS_CONTROL_1, &val); - val &= ~BIT(12); + val &= ~SGMII_AN_ENABLE; regmap_write(ss->regmap[id], SGMSYS_PCS_CONTROL_1, val); /* SGMII force mode setting */ - val = 0x31120019; + regmap_read(ss->regmap[id], SGMSYS_SGMII_MODE, &val); + val &= ~SGMII_IF_MODE_MASK; + + switch (state->speed) { + case SPEED_10: + val |= SGMII_SPEED_10; + break; + case SPEED_100: + val |= SGMII_SPEED_100; + break; + case SPEED_2500: + case SPEED_1000: + val |= SGMII_SPEED_1000; + break; + }; + + if (state->duplex == DUPLEX_FULL) + val |= SGMII_DUPLEX_FULL; + regmap_write(ss->regmap[id], SGMSYS_SGMII_MODE, val); /* Release PHYA power down state */ @@ -103,3 +107,20 @@ int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id) return 0; } + +void mtk_sgmii_restart_an(struct mtk_eth *eth, int mac_id) +{ + struct mtk_sgmii *ss = eth->sgmii; + unsigned int val, sid; + + /* Decide how GMAC and SGMIISYS be mapped */ + sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ? + 0 : mac_id; + + if (!ss->regmap[sid]) + return; + + regmap_read(ss->regmap[sid], SGMSYS_PCS_CONTROL_1, &val); + val |= SGMII_AN_RESTART; + regmap_write(ss->regmap[sid], SGMSYS_PCS_CONTROL_1, val); +} diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c index 87e90b5d4d7d..5b11557f1ae4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/catas.c +++ b/drivers/net/ethernet/mellanox/mlx4/catas.c @@ -210,7 +210,7 @@ static void mlx4_handle_error_state(struct mlx4_dev_persistent *persist) mutex_lock(&persist->interface_state_mutex); if (persist->interface_state & MLX4_INTERFACE_STATE_UP && !(persist->interface_state & MLX4_INTERFACE_STATE_DELETION)) { - err = mlx4_restart_one(persist->pdev, false, NULL); + err = mlx4_restart_one(persist->pdev); mlx4_info(persist->dev, "mlx4_restart_one was ended, ret=%d\n", err); } diff --git a/drivers/net/ethernet/mellanox/mlx4/crdump.c b/drivers/net/ethernet/mellanox/mlx4/crdump.c index 88316c743820..eaf08f7ad128 100644 --- a/drivers/net/ethernet/mellanox/mlx4/crdump.c +++ b/drivers/net/ethernet/mellanox/mlx4/crdump.c @@ -99,8 +99,7 @@ static void mlx4_crdump_collect_crspace(struct mlx4_dev *dev, readl(cr_space + offset); err = devlink_region_snapshot_create(crdump->region_crspace, - cr_res_size, crspace_data, - id, &kvfree); + crspace_data, id, &kvfree); if (err) { kvfree(crspace_data); mlx4_warn(dev, "crdump: devlink create %s snapshot id %d err %d\n", @@ -139,9 +138,7 @@ static void mlx4_crdump_collect_fw_health(struct mlx4_dev *dev, readl(health_buf_start + offset); err = devlink_region_snapshot_create(crdump->region_fw_health, - HEALTH_BUFFER_SIZE, - health_data, - id, &kvfree); + health_data, id, &kvfree); if (err) { kvfree(health_data); mlx4_warn(dev, "crdump: devlink create %s snapshot id %d err %d\n", diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index 94c59939a8cf..d8313e2ee600 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -639,7 +639,7 @@ static unsigned long *ptys2ethtool_link_mode(struct ptys2ethtool_config *cfg, #define MLX4_BUILD_PTYS2ETHTOOL_CONFIG(reg_, speed_, ...) 
\ ({ \ struct ptys2ethtool_config *cfg; \ - const unsigned int modes[] = { __VA_ARGS__ }; \ + static const unsigned int modes[] = { __VA_ARGS__ }; \ unsigned int i; \ cfg = &ptys2ethtool_map[reg_]; \ cfg->speed = speed_; \ diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index c1438ae52a11..40ec5acf79c0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2645,14 +2645,6 @@ out: en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret); return; } - - /* set offloads */ - priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_RXCSUM | - NETIF_F_TSO | NETIF_F_TSO6 | - NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM | - NETIF_F_GSO_PARTIAL; } static void mlx4_en_del_vxlan_offloads(struct work_struct *work) @@ -2660,14 +2652,6 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work) int ret; struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, vxlan_del_task); - /* unset offloads */ - priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_RXCSUM | - NETIF_F_TSO | NETIF_F_TSO6 | - NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM | - NETIF_F_GSO_PARTIAL); - ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 0); if (ret) @@ -3415,6 +3399,23 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, if (mdev->LSO_support) dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; + if (mdev->dev->caps.tunnel_offload_mode == + MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { + dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_PARTIAL; + dev->features |= NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_PARTIAL; + dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM; + dev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_RXCSUM | + NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_PARTIAL; + } + dev->vlan_features = dev->hw_features; dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH; @@ -3483,16 +3484,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, priv->rss_hash_fn = ETH_RSS_HASH_TOP; } - if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { - dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM | - NETIF_F_GSO_PARTIAL; - dev->features |= NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM | - NETIF_F_GSO_PARTIAL; - dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM; - } - /* MTU range: 68 - hw-specific max */ dev->min_mtu = ETH_MIN_MTU; dev->max_mtu = priv->max_mtu; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 36a92b19e613..4d5ca302c067 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -772,9 +772,7 @@ static bool mlx4_en_build_dma_wqe(struct mlx4_en_priv *priv, /* Map fragments if any */ for (i_frag = shinfo->nr_frags - 1; i_frag >= 0; i_frag--) { - const struct skb_frag_struct *frag; - - frag = &shinfo->frags[i_frag]; + const skb_frag_t *frag = &shinfo->frags[i_frag]; byte_count = skb_frag_size(frag); dma = skb_frag_dma_map(ddev, frag, 0, byte_count, diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 309470ec0219..fce9b3a24347 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ 
b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -2292,23 +2292,31 @@ static int mlx4_init_fw(struct mlx4_dev *dev) static int mlx4_init_hca(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_init_hca_param *init_hca = NULL; + struct mlx4_dev_cap *dev_cap = NULL; struct mlx4_adapter adapter; - struct mlx4_dev_cap dev_cap; struct mlx4_profile profile; - struct mlx4_init_hca_param init_hca; u64 icm_size; struct mlx4_config_dev_params params; int err; if (!mlx4_is_slave(dev)) { - err = mlx4_dev_cap(dev, &dev_cap); + dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL); + init_hca = kzalloc(sizeof(*init_hca), GFP_KERNEL); + + if (!dev_cap || !init_hca) { + err = -ENOMEM; + goto out_free; + } + + err = mlx4_dev_cap(dev, dev_cap); if (err) { mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); - return err; + goto out_free; } - choose_steering_mode(dev, &dev_cap); - choose_tunnel_offload_mode(dev, &dev_cap); + choose_steering_mode(dev, dev_cap); + choose_tunnel_offload_mode(dev, dev_cap); if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC && mlx4_is_master(dev)) @@ -2331,48 +2339,48 @@ static int mlx4_init_hca(struct mlx4_dev *dev) MLX4_STEERING_MODE_DEVICE_MANAGED) profile.num_mcg = MLX4_FS_NUM_MCG; - icm_size = mlx4_make_profile(dev, &profile, &dev_cap, - &init_hca); + icm_size = mlx4_make_profile(dev, &profile, dev_cap, + init_hca); if ((long long) icm_size < 0) { err = icm_size; - return err; + goto out_free; } dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; if (enable_4k_uar || !dev->persist->num_vfs) { - init_hca.log_uar_sz = ilog2(dev->caps.num_uars) + + init_hca->log_uar_sz = ilog2(dev->caps.num_uars) + PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT; - init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12; + init_hca->uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12; } else { - init_hca.log_uar_sz = ilog2(dev->caps.num_uars); - init_hca.uar_page_sz = PAGE_SHIFT - 12; + init_hca->log_uar_sz = ilog2(dev->caps.num_uars); + init_hca->uar_page_sz = PAGE_SHIFT - 12; } - init_hca.mw_enabled = 0; + init_hca->mw_enabled = 0; if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW || dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) - init_hca.mw_enabled = INIT_HCA_TPT_MW_ENABLE; + init_hca->mw_enabled = INIT_HCA_TPT_MW_ENABLE; - err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); + err = mlx4_init_icm(dev, dev_cap, init_hca, icm_size); if (err) - return err; + goto out_free; - err = mlx4_INIT_HCA(dev, &init_hca); + err = mlx4_INIT_HCA(dev, init_hca); if (err) { mlx4_err(dev, "INIT_HCA command failed, aborting\n"); goto err_free_icm; } - if (dev_cap.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { - err = mlx4_query_func(dev, &dev_cap); + if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { + err = mlx4_query_func(dev, dev_cap); if (err < 0) { mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n"); goto err_close; } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) { - dev->caps.num_eqs = dev_cap.max_eqs; - dev->caps.reserved_eqs = dev_cap.reserved_eqs; - dev->caps.reserved_uars = dev_cap.reserved_uars; + dev->caps.num_eqs = dev_cap->max_eqs; + dev->caps.reserved_eqs = dev_cap->reserved_eqs; + dev->caps.reserved_uars = dev_cap->reserved_uars; } } @@ -2381,14 +2389,13 @@ static int mlx4_init_hca(struct mlx4_dev *dev) * read HCA frequency by QUERY_HCA command */ if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) { - memset(&init_hca, 0, sizeof(init_hca)); - err = mlx4_QUERY_HCA(dev, &init_hca); + err = mlx4_QUERY_HCA(dev, init_hca); if (err) { mlx4_err(dev, "QUERY_HCA command 
failed, disable timestamp\n"); dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; } else { dev->caps.hca_core_clock = - init_hca.hca_core_clock; + init_hca->hca_core_clock; } /* In case we got HCA frequency 0 - disable timestamping @@ -2464,7 +2471,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev) priv->eq_table.inta_pin = adapter.inta_pin; memcpy(dev->board_id, adapter.board_id, sizeof(dev->board_id)); - return 0; + err = 0; + goto out_free; unmap_bf: unmap_internal_clock(dev); @@ -2483,6 +2491,10 @@ err_free_icm: if (!mlx4_is_slave(dev)) mlx4_free_icms(dev); +out_free: + kfree(dev_cap); + kfree(init_hca); + return err; } @@ -3919,26 +3931,43 @@ static void mlx4_devlink_param_load_driverinit_values(struct devlink *devlink) } } -static int mlx4_devlink_reload(struct devlink *devlink, - struct netlink_ext_ack *extack) +static void mlx4_restart_one_down(struct pci_dev *pdev); +static int mlx4_restart_one_up(struct pci_dev *pdev, bool reload, + struct devlink *devlink); + +static int mlx4_devlink_reload_down(struct devlink *devlink, + struct netlink_ext_ack *extack) { struct mlx4_priv *priv = devlink_priv(devlink); struct mlx4_dev *dev = &priv->dev; struct mlx4_dev_persistent *persist = dev->persist; - int err; if (persist->num_vfs) mlx4_warn(persist->dev, "Reload performed on PF, will cause reset on operating Virtual Functions\n"); - err = mlx4_restart_one(persist->pdev, true, devlink); + mlx4_restart_one_down(persist->pdev); + return 0; +} + +static int mlx4_devlink_reload_up(struct devlink *devlink, + struct netlink_ext_ack *extack) +{ + struct mlx4_priv *priv = devlink_priv(devlink); + struct mlx4_dev *dev = &priv->dev; + struct mlx4_dev_persistent *persist = dev->persist; + int err; + + err = mlx4_restart_one_up(persist->pdev, true, devlink); if (err) - mlx4_err(persist->dev, "mlx4_restart_one failed, ret=%d\n", err); + mlx4_err(persist->dev, "mlx4_restart_one_up failed, ret=%d\n", + err); return err; } static const struct devlink_ops mlx4_devlink_ops = { .port_type_set = mlx4_devlink_port_type_set, - .reload = mlx4_devlink_reload, + .reload_down = mlx4_devlink_reload_down, + .reload_up = mlx4_devlink_reload_up, }; static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) @@ -4151,7 +4180,13 @@ static int restore_current_port_types(struct mlx4_dev *dev, return err; } -int mlx4_restart_one(struct pci_dev *pdev, bool reload, struct devlink *devlink) +static void mlx4_restart_one_down(struct pci_dev *pdev) +{ + mlx4_unload_one(pdev); +} + +static int mlx4_restart_one_up(struct pci_dev *pdev, bool reload, + struct devlink *devlink) { struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); struct mlx4_dev *dev = persist->dev; @@ -4163,7 +4198,6 @@ int mlx4_restart_one(struct pci_dev *pdev, bool reload, struct devlink *devlink) total_vfs = dev->persist->num_vfs; memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs)); - mlx4_unload_one(pdev); if (reload) mlx4_devlink_param_load_driverinit_values(devlink); err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1); @@ -4182,6 +4216,12 @@ int mlx4_restart_one(struct pci_dev *pdev, bool reload, struct devlink *devlink) return err; } +int mlx4_restart_one(struct pci_dev *pdev) +{ + mlx4_restart_one_down(pdev); + return mlx4_restart_one_up(pdev, false, NULL); +} + #define MLX_SP(id) { PCI_VDEVICE(MELLANOX, id), MLX4_PCI_DEV_FORCE_SENSE_PORT } #define MLX_VF(id) { PCI_VDEVICE(MELLANOX, id), MLX4_PCI_DEV_IS_VF } #define MLX_GN(id) { PCI_VDEVICE(MELLANOX, id), 0 } diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h 
b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 23f1b5b512c2..527b52e48276 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -1043,8 +1043,7 @@ int mlx4_catas_init(struct mlx4_dev *dev); void mlx4_catas_end(struct mlx4_dev *dev); int mlx4_crdump_init(struct mlx4_dev *dev); void mlx4_crdump_end(struct mlx4_dev *dev); -int mlx4_restart_one(struct pci_dev *pdev, bool reload, - struct devlink *devlink); +int mlx4_restart_one(struct pci_dev *pdev); int mlx4_register_device(struct mlx4_dev *dev); void mlx4_unregister_device(struct mlx4_dev *dev); void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 37fef8cd25e3..0dba272a5b2f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -10,6 +10,7 @@ config MLX5_CORE imply PTP_1588_CLOCK imply VXLAN imply MLXFW + imply PCI_HYPERV_INTERFACE default n ---help--- Core driver for low level functionality of the ConnectX-4 and @@ -32,7 +33,6 @@ config MLX5_FPGA config MLX5_CORE_EN bool "Mellanox 5th generation network adapters (ConnectX series) Ethernet support" depends on NETDEVICES && ETHERNET && INET && PCI && MLX5_CORE - depends on IPV6=y || IPV6=n || MLX5_CORE=m select PAGE_POOL select DIMLIB default n @@ -154,3 +154,10 @@ config MLX5_EN_TLS Build support for TLS cryptography-offload accelaration in the NIC. Note: Support for hardware with this capability needs to be selected for this option to become available. + +config MLX5_SW_STEERING + bool "Mellanox Technologies software-managed steering" + depends on MLX5_CORE_EN && MLX5_ESWITCH + default y + help + Build support for software-managed steering in the NIC. 
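The new MLX5_SW_STEERING option only controls what gets compiled: the steering/dr_*.o objects are added to the Makefile in the next hunk, while the actual dmfs/smfs choice is made at runtime through the devlink "flow_steering_mode" parameter introduced further down in this diff. A sketch of the usual stub pattern that keeps callers free of #ifdefs — the gate shape here is an assumption for illustration; only mlx5_fs_dr_is_supported() is taken from this diff:

#include <stdbool.h>

struct mlx5_core_dev;	/* opaque here; fully defined in the driver */

/* Hypothetical compile-time gate mirroring CONFIG_MLX5_SW_STEERING:
 * when the option is off, the capability query collapses to a constant,
 * so callers such as the devlink "flow_steering_mode" validator need no
 * #ifdefs of their own. */
#ifdef CONFIG_MLX5_SW_STEERING
bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev);
#else
static inline bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev)
{
	return false;	/* SMFS not compiled in; only "dmfs" is valid */
}
#endif

With a stub like this, mlx5_devlink_fs_mode_validate() (added later in this series) can unconditionally call the helper and reject "smfs" with -EOPNOTSUPP when support is absent.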
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 57d2cc666fe3..5708fcc079ca 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -15,7 +15,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ health.o mcg.o cq.o alloc.o qp.o port.o mr.o pd.o \ transobj.o vport.o sriov.o fs_cmd.o fs_core.o pci_irq.o \ fs_counters.o rl.o lag.o dev.o events.o wq.o lib/gid.o \ - lib/devcom.o lib/pci_vsc.o diag/fs_tracepoint.o \ + lib/devcom.o lib/pci_vsc.o lib/dm.o diag/fs_tracepoint.o \ diag/fw_tracer.o diag/crdump.o devlink.o # @@ -23,8 +23,9 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ # mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \ - en_selftest.o en/port.o en/monitor_stats.o en/reporter_tx.o \ - en/params.o en/xsk/umem.o en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o + en_selftest.o en/port.o en/monitor_stats.o en/health.o \ + en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/umem.o \ + en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o # # Netdev extra @@ -34,7 +35,8 @@ mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o lib/port_tun.o lag_mp.o \ lib/geneve.o en/tc_tun_vxlan.o en/tc_tun_gre.o \ - en/tc_tun_geneve.o + en/tc_tun_geneve.o diag/en_tc_tracepoint.o +mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += en/hv_vhca_stats.o # # Core extra @@ -44,6 +46,7 @@ mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offlo mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o +mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += lib/hv.o lib/hv_vhca.o # # Ipoib netdev @@ -64,3 +67,10 @@ mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \ mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o \ en_accel/ktls.o en_accel/ktls_tx.o + +mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o \ + steering/dr_matcher.o steering/dr_rule.o \ + steering/dr_icm_pool.o steering/dr_crc32.o \ + steering/dr_ste.o steering/dr_send.o \ + steering/dr_cmd.o steering/dr_fw.o \ + steering/dr_action.o steering/fs_dr.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 8cdd7e66f8df..ea934cd02448 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -446,6 +446,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_CREATE_UMEM: case MLX5_CMD_OP_DESTROY_UMEM: case MLX5_CMD_OP_ALLOC_MEMIC: + case MLX5_CMD_OP_MODIFY_XRQ: + case MLX5_CMD_OP_RELEASE_XRQ_ERROR: *status = MLX5_DRIVER_STATUS_ABORTED; *synd = MLX5_DRIVER_SYND; return -EIO; @@ -637,6 +639,8 @@ const char *mlx5_command_str(int command) MLX5_COMMAND_STR_CASE(DESTROY_UCTX); MLX5_COMMAND_STR_CASE(CREATE_UMEM); MLX5_COMMAND_STR_CASE(DESTROY_UMEM); + MLX5_COMMAND_STR_CASE(RELEASE_XRQ_ERROR); + MLX5_COMMAND_STR_CASE(MODIFY_XRQ); default: return "unknown command opcode"; } } @@ -1368,49 +1372,19 @@ static void clean_debug_files(struct mlx5_core_dev *dev) debugfs_remove_recursive(dbg->dbg_root); } -static int create_debugfs_files(struct mlx5_core_dev *dev) +static void 
create_debugfs_files(struct mlx5_core_dev *dev) { struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; - int err = -ENOMEM; - - if (!mlx5_debugfs_root) - return 0; dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root); - if (!dbg->dbg_root) - return err; - - dbg->dbg_in = debugfs_create_file("in", 0400, dbg->dbg_root, - dev, &dfops); - if (!dbg->dbg_in) - goto err_dbg; - dbg->dbg_out = debugfs_create_file("out", 0200, dbg->dbg_root, - dev, &dfops); - if (!dbg->dbg_out) - goto err_dbg; - - dbg->dbg_outlen = debugfs_create_file("out_len", 0600, dbg->dbg_root, - dev, &olfops); - if (!dbg->dbg_outlen) - goto err_dbg; - - dbg->dbg_status = debugfs_create_u8("status", 0600, dbg->dbg_root, - &dbg->status); - if (!dbg->dbg_status) - goto err_dbg; - - dbg->dbg_run = debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops); - if (!dbg->dbg_run) - goto err_dbg; + debugfs_create_file("in", 0400, dbg->dbg_root, dev, &dfops); + debugfs_create_file("out", 0200, dbg->dbg_root, dev, &dfops); + debugfs_create_file("out_len", 0600, dbg->dbg_root, dev, &olfops); + debugfs_create_u8("status", 0600, dbg->dbg_root, &dbg->status); + debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops); mlx5_cmdif_debugfs_init(dev); - - return 0; - -err_dbg: - clean_debug_files(dev); - return err; } static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode) @@ -2007,17 +1981,10 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev) goto err_cache; } - err = create_debugfs_files(dev); - if (err) { - err = -ENOMEM; - goto err_wq; - } + create_debugfs_files(dev); return 0; -err_wq: - destroy_workqueue(cmd->wq); - err_cache: destroy_msg_cache(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c index a11e22d0b0cc..04854e5fbcd7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c @@ -92,8 +92,6 @@ EXPORT_SYMBOL(mlx5_debugfs_root); void mlx5_register_debugfs(void) { mlx5_debugfs_root = debugfs_create_dir("mlx5", NULL); - if (IS_ERR_OR_NULL(mlx5_debugfs_root)) - mlx5_debugfs_root = NULL; } void mlx5_unregister_debugfs(void) @@ -101,45 +99,25 @@ void mlx5_unregister_debugfs(void) debugfs_remove(mlx5_debugfs_root); } -int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev) +void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev) { - if (!mlx5_debugfs_root) - return 0; - atomic_set(&dev->num_qps, 0); dev->priv.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg_root); - if (!dev->priv.qp_debugfs) - return -ENOMEM; - - return 0; } void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev) { - if (!mlx5_debugfs_root) - return; - debugfs_remove_recursive(dev->priv.qp_debugfs); } -int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev) +void mlx5_eq_debugfs_init(struct mlx5_core_dev *dev) { - if (!mlx5_debugfs_root) - return 0; - dev->priv.eq_debugfs = debugfs_create_dir("EQs", dev->priv.dbg_root); - if (!dev->priv.eq_debugfs) - return -ENOMEM; - - return 0; } void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev) { - if (!mlx5_debugfs_root) - return; - debugfs_remove_recursive(dev->priv.eq_debugfs); } @@ -183,85 +161,41 @@ static const struct file_operations stats_fops = { .write = average_write, }; -int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev) +void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev) { struct mlx5_cmd_stats *stats; struct dentry **cmd; const char *namep; - int err; int i; - if (!mlx5_debugfs_root) - return 0; - cmd = &dev->priv.cmdif_debugfs; *cmd = debugfs_create_dir("commands", 
dev->priv.dbg_root); - if (!*cmd) - return -ENOMEM; for (i = 0; i < ARRAY_SIZE(dev->cmd.stats); i++) { stats = &dev->cmd.stats[i]; namep = mlx5_command_str(i); if (strcmp(namep, "unknown command opcode")) { stats->root = debugfs_create_dir(namep, *cmd); - if (!stats->root) { - mlx5_core_warn(dev, "failed adding command %d\n", - i); - err = -ENOMEM; - goto out; - } - - stats->avg = debugfs_create_file("average", 0400, - stats->root, stats, - &stats_fops); - if (!stats->avg) { - mlx5_core_warn(dev, "failed creating debugfs file\n"); - err = -ENOMEM; - goto out; - } - - stats->count = debugfs_create_u64("n", 0400, - stats->root, - &stats->n); - if (!stats->count) { - mlx5_core_warn(dev, "failed creating debugfs file\n"); - err = -ENOMEM; - goto out; - } + + debugfs_create_file("average", 0400, stats->root, stats, + &stats_fops); + debugfs_create_u64("n", 0400, stats->root, &stats->n); } } - - return 0; -out: - debugfs_remove_recursive(dev->priv.cmdif_debugfs); - return err; } void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev) { - if (!mlx5_debugfs_root) - return; - debugfs_remove_recursive(dev->priv.cmdif_debugfs); } -int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev) +void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev) { - if (!mlx5_debugfs_root) - return 0; - dev->priv.cq_debugfs = debugfs_create_dir("CQs", dev->priv.dbg_root); - if (!dev->priv.cq_debugfs) - return -ENOMEM; - - return 0; } void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev) { - if (!mlx5_debugfs_root) - return; - debugfs_remove_recursive(dev->priv.cq_debugfs); } @@ -484,7 +418,6 @@ static int add_res_tree(struct mlx5_core_dev *dev, enum dbg_rsc_type type, { struct mlx5_rsc_debug *d; char resn[32]; - int err; int i; d = kzalloc(struct_size(d, fields, nfile), GFP_KERNEL); @@ -496,30 +429,15 @@ static int add_res_tree(struct mlx5_core_dev *dev, enum dbg_rsc_type type, d->type = type; sprintf(resn, "0x%x", rsn); d->root = debugfs_create_dir(resn, root); - if (!d->root) { - err = -ENOMEM; - goto out_free; - } for (i = 0; i < nfile; i++) { d->fields[i].i = i; - d->fields[i].dent = debugfs_create_file(field[i], 0400, - d->root, &d->fields[i], - &fops); - if (!d->fields[i].dent) { - err = -ENOMEM; - goto out_rem; - } + debugfs_create_file(field[i], 0400, d->root, &d->fields[i], + &fops); } *dbg = d; return 0; -out_rem: - debugfs_remove_recursive(d->root); - -out_free: - kfree(d); - return err; } static void rem_res_tree(struct mlx5_rsc_debug *d) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index a400f4430c28..381925c90d94 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -4,6 +4,7 @@ #include <devlink.h> #include "mlx5_core.h" +#include "fs_core.h" #include "eswitch.h" static int mlx5_devlink_flash_update(struct devlink *devlink, @@ -107,12 +108,121 @@ void mlx5_devlink_free(struct devlink *devlink) devlink_free(devlink); } +static int mlx5_devlink_fs_mode_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + char *value = val.vstr; + int err = 0; + + if (!strcmp(value, "dmfs")) { + return 0; + } else if (!strcmp(value, "smfs")) { + u8 eswitch_mode; + bool smfs_cap; + + eswitch_mode = mlx5_eswitch_mode(dev->priv.eswitch); + smfs_cap = mlx5_fs_dr_is_supported(dev); + + if (!smfs_cap) { + err = -EOPNOTSUPP; + NL_SET_ERR_MSG_MOD(extack, + "Software managed steering is 
not supported by current device"); + } + + else if (eswitch_mode == MLX5_ESWITCH_OFFLOADS) { + NL_SET_ERR_MSG_MOD(extack, + "Software managed steering is not supported when eswitch offloads enabled."); + err = -EOPNOTSUPP; + } + } else { + NL_SET_ERR_MSG_MOD(extack, + "Bad parameter: supported values are [\"dmfs\", \"smfs\"]"); + err = -EINVAL; + } + + return err; +} + +static int mlx5_devlink_fs_mode_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + enum mlx5_flow_steering_mode mode; + + if (!strcmp(ctx->val.vstr, "smfs")) + mode = MLX5_FLOW_STEERING_MODE_SMFS; + else + mode = MLX5_FLOW_STEERING_MODE_DMFS; + dev->priv.steering->mode = mode; + + return 0; +} + +static int mlx5_devlink_fs_mode_get(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + + if (dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS) + strcpy(ctx->val.vstr, "smfs"); + else + strcpy(ctx->val.vstr, "dmfs"); + return 0; +} + +enum mlx5_devlink_param_id { + MLX5_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, + MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE, +}; + +static const struct devlink_param mlx5_devlink_params[] = { + DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE, + "flow_steering_mode", DEVLINK_PARAM_TYPE_STRING, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + mlx5_devlink_fs_mode_get, mlx5_devlink_fs_mode_set, + mlx5_devlink_fs_mode_validate), +}; + +static void mlx5_devlink_set_params_init_values(struct devlink *devlink) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + union devlink_param_value value; + + if (dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_DMFS) + strcpy(value.vstr, "dmfs"); + else + strcpy(value.vstr, "smfs"); + devlink_param_driverinit_value_set(devlink, + MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE, + value); +} + int mlx5_devlink_register(struct devlink *devlink, struct device *dev) { - return devlink_register(devlink, dev); + int err; + + err = devlink_register(devlink, dev); + if (err) + return err; + + err = devlink_params_register(devlink, mlx5_devlink_params, + ARRAY_SIZE(mlx5_devlink_params)); + if (err) + goto params_reg_err; + mlx5_devlink_set_params_init_values(devlink); + devlink_params_publish(devlink); + return 0; + +params_reg_err: + devlink_unregister(devlink); + return err; } void mlx5_devlink_unregister(struct devlink *devlink) { + devlink_params_unregister(devlink, mlx5_devlink_params, + ARRAY_SIZE(mlx5_devlink_params)); devlink_unregister(devlink); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h new file mode 100644 index 000000000000..1177860a2ee4 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2019 Mellanox Technologies. 
*/ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM mlx5 + +#if !defined(_MLX5_EN_REP_TP_) || defined(TRACE_HEADER_MULTI_READ) +#define _MLX5_EN_REP_TP_ + +#include <linux/tracepoint.h> +#include <linux/trace_seq.h> +#include "en_rep.h" + +TRACE_EVENT(mlx5e_rep_neigh_update, + TP_PROTO(const struct mlx5e_neigh_hash_entry *nhe, const u8 *ha, + bool neigh_connected), + TP_ARGS(nhe, ha, neigh_connected), + TP_STRUCT__entry(__string(devname, nhe->m_neigh.dev->name) + __array(u8, ha, ETH_ALEN) + __array(u8, v4, 4) + __array(u8, v6, 16) + __field(bool, neigh_connected) + ), + TP_fast_assign(const struct mlx5e_neigh *mn = &nhe->m_neigh; + struct in6_addr *pin6; + __be32 *p32; + + __assign_str(devname, mn->dev->name); + __entry->neigh_connected = neigh_connected; + memcpy(__entry->ha, ha, ETH_ALEN); + + p32 = (__be32 *)__entry->v4; + pin6 = (struct in6_addr *)__entry->v6; + if (mn->family == AF_INET) { + *p32 = mn->dst_ip.v4; + ipv6_addr_set_v4mapped(*p32, pin6); + } else if (mn->family == AF_INET6) { + *pin6 = mn->dst_ip.v6; + } + ), + TP_printk("netdev: %s MAC: %pM IPv4: %pI4 IPv6: %pI6c neigh_connected=%d\n", + __get_str(devname), __entry->ha, + __entry->v4, __entry->v6, __entry->neigh_connected + ) +); + +#endif /* _MLX5_EN_REP_TP_ */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH ./diag +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE en_rep_tracepoint +#include <trace/define_trace.h> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.c new file mode 100644 index 000000000000..c5dc6c50fa87 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.c @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2019 Mellanox Technologies. 
*/ + +#define CREATE_TRACE_POINTS +#include "en_tc_tracepoint.h" + +void put_ids_to_array(int *ids, + const struct flow_action_entry *entries, + unsigned int num) +{ + unsigned int i; + + for (i = 0; i < num; i++) + ids[i] = entries[i].id; +} + +#define NAME_SIZE 16 + +static const char FLOWACT2STR[NUM_FLOW_ACTIONS][NAME_SIZE] = { + [FLOW_ACTION_ACCEPT] = "ACCEPT", + [FLOW_ACTION_DROP] = "DROP", + [FLOW_ACTION_TRAP] = "TRAP", + [FLOW_ACTION_GOTO] = "GOTO", + [FLOW_ACTION_REDIRECT] = "REDIRECT", + [FLOW_ACTION_MIRRED] = "MIRRED", + [FLOW_ACTION_VLAN_PUSH] = "VLAN_PUSH", + [FLOW_ACTION_VLAN_POP] = "VLAN_POP", + [FLOW_ACTION_VLAN_MANGLE] = "VLAN_MANGLE", + [FLOW_ACTION_TUNNEL_ENCAP] = "TUNNEL_ENCAP", + [FLOW_ACTION_TUNNEL_DECAP] = "TUNNEL_DECAP", + [FLOW_ACTION_MANGLE] = "MANGLE", + [FLOW_ACTION_ADD] = "ADD", + [FLOW_ACTION_CSUM] = "CSUM", + [FLOW_ACTION_MARK] = "MARK", + [FLOW_ACTION_WAKE] = "WAKE", + [FLOW_ACTION_QUEUE] = "QUEUE", + [FLOW_ACTION_SAMPLE] = "SAMPLE", + [FLOW_ACTION_POLICE] = "POLICE", + [FLOW_ACTION_CT] = "CT", +}; + +const char *parse_action(struct trace_seq *p, + int *ids, + unsigned int num) +{ + const char *ret = trace_seq_buffer_ptr(p); + unsigned int i; + + for (i = 0; i < num; i++) { + if (ids[i] < NUM_FLOW_ACTIONS) + trace_seq_printf(p, "%s ", FLOWACT2STR[ids[i]]); + else + trace_seq_printf(p, "UNKNOWN "); + } + + trace_seq_putc(p, 0); + return ret; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h new file mode 100644 index 000000000000..d4e6cfaaade3 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2019 Mellanox Technologies. */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM mlx5 + +#if !defined(_MLX5_TC_TP_) || defined(TRACE_HEADER_MULTI_READ) +#define _MLX5_TC_TP_ + +#include <linux/tracepoint.h> +#include <linux/trace_seq.h> +#include <net/flow_offload.h> +#include "en_rep.h" + +#define __parse_action(ids, num) parse_action(p, ids, num) + +void put_ids_to_array(int *ids, + const struct flow_action_entry *entries, + unsigned int num); + +const char *parse_action(struct trace_seq *p, + int *ids, + unsigned int num); + +DECLARE_EVENT_CLASS(mlx5e_flower_template, + TP_PROTO(const struct flow_cls_offload *f), + TP_ARGS(f), + TP_STRUCT__entry(__field(void *, cookie) + __field(unsigned int, num) + __dynamic_array(int, ids, f->rule ? + f->rule->action.num_entries : 0) + ), + TP_fast_assign(__entry->cookie = (void *)f->cookie; + __entry->num = (f->rule ? + f->rule->action.num_entries : 0); + if (__entry->num) + put_ids_to_array(__get_dynamic_array(ids), + f->rule->action.entries, + f->rule->action.num_entries); + ), + TP_printk("cookie=%p actions= %s\n", + __entry->cookie, __entry->num ? 
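+		  /* decode the stored action ids into their names; rules
+		   * that carried no actions print as "NULL" */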
+ __parse_action(__get_dynamic_array(ids), + __entry->num) : "NULL" + ) +); + +DEFINE_EVENT(mlx5e_flower_template, mlx5e_configure_flower, + TP_PROTO(const struct flow_cls_offload *f), + TP_ARGS(f) + ); + +DEFINE_EVENT(mlx5e_flower_template, mlx5e_delete_flower, + TP_PROTO(const struct flow_cls_offload *f), + TP_ARGS(f) + ); + +TRACE_EVENT(mlx5e_stats_flower, + TP_PROTO(const struct flow_cls_offload *f), + TP_ARGS(f), + TP_STRUCT__entry(__field(void *, cookie) + __field(u64, bytes) + __field(u64, packets) + __field(u64, lastused) + ), + TP_fast_assign(__entry->cookie = (void *)f->cookie; + __entry->bytes = f->stats.bytes; + __entry->packets = f->stats.pkts; + __entry->lastused = f->stats.lastused; + ), + TP_printk("cookie=%p bytes=%llu packets=%llu lastused=%llu\n", + __entry->cookie, __entry->bytes, + __entry->packets, __entry->lastused + ) +); + +TRACE_EVENT(mlx5e_tc_update_neigh_used_value, + TP_PROTO(const struct mlx5e_neigh_hash_entry *nhe, bool neigh_used), + TP_ARGS(nhe, neigh_used), + TP_STRUCT__entry(__string(devname, nhe->m_neigh.dev->name) + __array(u8, v4, 4) + __array(u8, v6, 16) + __field(bool, neigh_used) + ), + TP_fast_assign(const struct mlx5e_neigh *mn = &nhe->m_neigh; + struct in6_addr *pin6; + __be32 *p32; + + __assign_str(devname, mn->dev->name); + __entry->neigh_used = neigh_used; + + p32 = (__be32 *)__entry->v4; + pin6 = (struct in6_addr *)__entry->v6; + if (mn->family == AF_INET) { + *p32 = mn->dst_ip.v4; + ipv6_addr_set_v4mapped(*p32, pin6); + } else if (mn->family == AF_INET6) { + *pin6 = mn->dst_ip.v6; + } + ), + TP_printk("netdev: %s IPv4: %pI4 IPv6: %pI6c neigh_used=%d\n", + __get_str(devname), __entry->v4, __entry->v6, + __entry->neigh_used + ) +); + +#endif /* _MLX5_TC_TP_ */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH ./diag +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE en_tc_tracepoint +#include <trace/define_trace.h> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index 8a4930c8bf62..94d7b69a95c7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -546,16 +546,17 @@ static void mlx5_fw_tracer_save_trace(struct mlx5_fw_tracer *tracer, trace_data->timestamp = timestamp; trace_data->lost = lost; trace_data->event_id = event_id; - strncpy(trace_data->msg, msg, TRACE_STR_MSG); + strscpy_pad(trace_data->msg, msg, TRACE_STR_MSG); tracer->st_arr.saved_traces_index = (tracer->st_arr.saved_traces_index + 1) & (SAVED_TRACES_NUM - 1); mutex_unlock(&tracer->st_arr.lock); } -static void mlx5_tracer_print_trace(struct tracer_string_format *str_frmt, - struct mlx5_core_dev *dev, - u64 trace_timestamp) +static noinline +void mlx5_tracer_print_trace(struct tracer_string_format *str_frmt, + struct mlx5_core_dev *dev, + u64 trace_timestamp) { char tmp[512]; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 65bec19a438f..8d76452cacdc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -54,6 +54,7 @@ #include "mlx5_core.h" #include "en_stats.h" #include "en/fs.h" +#include "lib/hv_vhca.h" extern const struct net_device_ops mlx5e_netdev_ops; struct page_pool; @@ -162,6 +163,14 @@ enum mlx5e_rq_group { #define MLX5E_NUM_RQ_GROUPS(g) (1 + MLX5E_RQ_GROUP_##g) }; +static inline u8 mlx5e_get_num_lag_ports(struct mlx5_core_dev *mdev) +{ + 
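+	/* one set of TISes when this device is the LACP owner, otherwise
+	 * one set per advertised lag port; the clamp keeps the result
+	 * inside the tisn[MLX5_MAX_PORTS][] bounds even if the capability
+	 * reports 0 or more ports than the driver supports
+	 */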
if (mlx5_lag_is_lacp_owner(mdev)) + return 1; + + return clamp_t(u8, MLX5_CAP_GEN(mdev, num_lag_ports), 1, MLX5_MAX_PORTS); +} + static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size) { switch (wq_type) { @@ -300,6 +309,7 @@ struct mlx5e_dcbx_dp { enum { MLX5E_RQ_STATE_ENABLED, + MLX5E_RQ_STATE_RECOVERING, MLX5E_RQ_STATE_AM, MLX5E_RQ_STATE_NO_CSUM_COMPLETE, MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */ @@ -356,6 +366,7 @@ enum { MLX5E_SQ_STATE_IPSEC, MLX5E_SQ_STATE_AM, MLX5E_SQ_STATE_TLS, + MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, }; struct mlx5e_sq_wqe_info { @@ -480,8 +491,6 @@ struct mlx5e_xdp_mpwqe { struct mlx5e_tx_wqe *wqe; u8 ds_count; u8 pkt_count; - u8 max_ds_count; - u8 complete; u8 inline_on; }; @@ -552,6 +561,8 @@ struct mlx5e_icosq { /* control path */ struct mlx5_wq_ctrl wq_ctrl; struct mlx5e_channel *channel; + + struct work_struct recover_work; } ____cacheline_aligned_in_smp; struct mlx5e_wqe_frag_info { @@ -671,6 +682,8 @@ struct mlx5e_rq { struct zero_copy_allocator zca; struct xdp_umem *umem; + struct work_struct recover_work; + /* control */ struct mlx5_wq_ctrl wq_ctrl; __be32 mkey_be; @@ -700,6 +713,7 @@ struct mlx5e_channel { struct net_device *netdev; __be32 mkey_be; u8 num_tc; + u8 lag_port; /* XDP_REDIRECT */ struct mlx5e_xdpsq xdpsq; @@ -778,6 +792,15 @@ struct mlx5e_modify_sq_param { int rl_index; }; +#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE) +struct mlx5e_hv_vhca_stats_agent { + struct mlx5_hv_vhca_agent *agent; + struct delayed_work work; + u16 delay; + void *buf; +}; +#endif + struct mlx5e_xsk { /* UMEMs are stored separately from channels, because we don't want to * lose them when channels are recreated. The kernel also stores UMEMs, @@ -804,7 +827,7 @@ struct mlx5e_priv { struct mlx5e_rq drop_rq; struct mlx5e_channels channels; - u32 tisn[MLX5E_MAX_NUM_TC]; + u32 tisn[MLX5_MAX_PORTS][MLX5E_MAX_NUM_TC]; struct mlx5e_rqt indir_rqt; struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS]; struct mlx5e_tir inner_indir_tir[MLX5E_NUM_INDIR_TIRS]; @@ -847,7 +870,11 @@ struct mlx5e_priv { struct mlx5e_tls *tls; #endif struct devlink_health_reporter *tx_reporter; + struct devlink_health_reporter *rx_reporter; struct mlx5e_xsk xsk; +#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE) + struct mlx5e_hv_vhca_stats_agent stats_agent; +#endif }; struct mlx5e_profile { @@ -888,6 +915,26 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget); int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget); void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq); +static inline u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq) +{ + switch (rq->wq_type) { + case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + return mlx5_wq_ll_get_size(&rq->mpwqe.wq); + default: + return mlx5_wq_cyc_get_size(&rq->wqe.wq); + } +} + +static inline u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq) +{ + switch (rq->wq_type) { + case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + return rq->mpwqe.wq.cur_sz; + default: + return rq->wqe.wq.cur_sz; + } +} + bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev); bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev, struct mlx5e_params *params); @@ -1006,18 +1053,18 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params); void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params); +int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state); +void mlx5e_activate_rq(struct mlx5e_rq *rq); +void mlx5e_deactivate_rq(struct 
mlx5e_rq *rq); +void mlx5e_free_rx_descs(struct mlx5e_rq *rq); +void mlx5e_activate_icosq(struct mlx5e_icosq *icosq); +void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq); int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn, struct mlx5e_modify_sq_param *p); void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq); void mlx5e_tx_disable_queue(struct netdev_queue *txq); -static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev) -{ - return (MLX5_CAP_ETH(mdev, tunnel_stateless_gre) && - MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version)); -} - static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev) { return MLX5_CAP_ETH(mdev, swp) && @@ -1063,6 +1110,7 @@ int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn); void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn); int mlx5e_create_tises(struct mlx5e_priv *priv); +void mlx5e_destroy_tises(struct mlx5e_priv *priv); int mlx5e_update_nic_rx(struct mlx5e_priv *priv); void mlx5e_update_carrier(struct mlx5e_priv *priv); int mlx5e_close(struct net_device *netdev); @@ -1135,7 +1183,6 @@ void mlx5e_build_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params); void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params, u16 num_channels); -u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev); void mlx5e_rx_dim_work(struct work_struct *work); void mlx5e_tx_dim_work(struct work_struct *work); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h index be5961ff24cc..68d593074f6c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h @@ -10,11 +10,14 @@ enum { }; struct mlx5e_tc_table { + /* protects flow table */ + struct mutex t_lock; struct mlx5_flow_table *t; struct rhashtable ht; - DECLARE_HASHTABLE(mod_hdr_tbl, 8); + struct mod_hdr_tbl mod_hdr; + struct mutex hairpin_tbl_lock; /* protects hairpin_tbl */ DECLARE_HASHTABLE(hairpin_tbl, 8); struct notifier_block netdevice_nb; @@ -92,9 +95,15 @@ struct mlx5e_tirc_config { enum mlx5e_tunnel_types { MLX5E_TT_IPV4_GRE, MLX5E_TT_IPV6_GRE, + MLX5E_TT_IPV4_IPIP, + MLX5E_TT_IPV6_IPIP, + MLX5E_TT_IPV4_IPV6, + MLX5E_TT_IPV6_IPV6, MLX5E_NUM_TUNNEL_TT, }; +bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev); + /* L3/L4 traffic type classifier */ struct mlx5e_ttc_table { struct mlx5e_flow_table ft; @@ -132,12 +141,17 @@ struct mlx5e_ethtool_steering { void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv); void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv); -int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd); -int mlx5e_get_rxnfc(struct net_device *dev, - struct ethtool_rxnfc *info, u32 *rule_locs); +int mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd); +int mlx5e_ethtool_get_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *info, u32 *rule_locs); #else static inline void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv) { } static inline void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv) { } +static inline int mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ return -EOPNOTSUPP; } +static inline int mlx5e_ethtool_get_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *info, u32 *rule_locs) +{ return -EOPNOTSUPP; } #endif /* CONFIG_MLX5_EN_RXNFC */ #ifdef CONFIG_MLX5_EN_ARFS @@ -224,5 +238,8 @@ void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv); int 
mlx5e_create_flow_steering(struct mlx5e_priv *priv); void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv); +bool mlx5e_tunnel_proto_supported(struct mlx5_core_dev *mdev, u8 proto_type); +bool mlx5e_any_tunnel_proto_supported(struct mlx5_core_dev *mdev); + #endif /* __MLX5E_FLOW_STEER_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c new file mode 100644 index 000000000000..1d6b58860da6 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c @@ -0,0 +1,205 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2019 Mellanox Technologies. + +#include "health.h" +#include "lib/eq.h" + +int mlx5e_reporter_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name) +{ + int err; + + err = devlink_fmsg_pair_nest_start(fmsg, name); + if (err) + return err; + + err = devlink_fmsg_obj_nest_start(fmsg); + if (err) + return err; + + return 0; +} + +int mlx5e_reporter_named_obj_nest_end(struct devlink_fmsg *fmsg) +{ + int err; + + err = devlink_fmsg_obj_nest_end(fmsg); + if (err) + return err; + + err = devlink_fmsg_pair_nest_end(fmsg); + if (err) + return err; + + return 0; +} + +int mlx5e_reporter_cq_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg) +{ + struct mlx5e_priv *priv = cq->channel->priv; + u32 out[MLX5_ST_SZ_DW(query_cq_out)] = {}; + u8 hw_status; + void *cqc; + int err; + + err = mlx5_core_query_cq(priv->mdev, &cq->mcq, out, sizeof(out)); + if (err) + return err; + + cqc = MLX5_ADDR_OF(query_cq_out, out, cq_context); + hw_status = MLX5_GET(cqc, cqc, status); + + err = mlx5e_reporter_named_obj_nest_start(fmsg, "CQ"); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "cqn", cq->mcq.cqn); + if (err) + return err; + + err = devlink_fmsg_u8_pair_put(fmsg, "HW status", hw_status); + if (err) + return err; + + err = mlx5e_reporter_named_obj_nest_end(fmsg); + if (err) + return err; + + return 0; +} + +int mlx5e_reporter_cq_common_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg) +{ + u8 cq_log_stride; + u32 cq_sz; + int err; + + cq_sz = mlx5_cqwq_get_size(&cq->wq); + cq_log_stride = mlx5_cqwq_get_log_stride_size(&cq->wq); + + err = mlx5e_reporter_named_obj_nest_start(fmsg, "CQ"); + if (err) + return err; + + err = devlink_fmsg_u64_pair_put(fmsg, "stride size", BIT(cq_log_stride)); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "size", cq_sz); + if (err) + return err; + + err = mlx5e_reporter_named_obj_nest_end(fmsg); + if (err) + return err; + + return 0; +} + +int mlx5e_health_create_reporters(struct mlx5e_priv *priv) +{ + int err; + + err = mlx5e_reporter_tx_create(priv); + if (err) + return err; + + err = mlx5e_reporter_rx_create(priv); + if (err) + return err; + + return 0; +} + +void mlx5e_health_destroy_reporters(struct mlx5e_priv *priv) +{ + mlx5e_reporter_rx_destroy(priv); + mlx5e_reporter_tx_destroy(priv); +} + +void mlx5e_health_channels_update(struct mlx5e_priv *priv) +{ + if (priv->tx_reporter) + devlink_health_reporter_state_update(priv->tx_reporter, + DEVLINK_HEALTH_REPORTER_STATE_HEALTHY); + if (priv->rx_reporter) + devlink_health_reporter_state_update(priv->rx_reporter, + DEVLINK_HEALTH_REPORTER_STATE_HEALTHY); +} + +int mlx5e_health_sq_to_ready(struct mlx5e_channel *channel, u32 sqn) +{ + struct mlx5_core_dev *mdev = channel->mdev; + struct net_device *dev = channel->netdev; + struct mlx5e_modify_sq_param msp = {}; + int err; + + msp.curr_state = MLX5_SQC_STATE_ERR; + msp.next_state = MLX5_SQC_STATE_RST; + + err = 
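+	      /* step 1 of 2: an errored SQ cannot jump straight to ready,
+	       * so modify ERR -> RST here, then RST -> RDY below */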
mlx5e_modify_sq(mdev, sqn, &msp); + if (err) { + netdev_err(dev, "Failed to move sq 0x%x to reset\n", sqn); + return err; + } + + memset(&msp, 0, sizeof(msp)); + msp.curr_state = MLX5_SQC_STATE_RST; + msp.next_state = MLX5_SQC_STATE_RDY; + + err = mlx5e_modify_sq(mdev, sqn, &msp); + if (err) { + netdev_err(dev, "Failed to move sq 0x%x to ready\n", sqn); + return err; + } + + return 0; +} + +int mlx5e_health_recover_channels(struct mlx5e_priv *priv) +{ + int err = 0; + + rtnl_lock(); + mutex_lock(&priv->state_lock); + + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + goto out; + + err = mlx5e_safe_reopen_channels(priv); + +out: + mutex_unlock(&priv->state_lock); + rtnl_unlock(); + + return err; +} + +int mlx5e_health_channel_eq_recover(struct mlx5_eq_comp *eq, struct mlx5e_channel *channel) +{ + u32 eqe_count; + + netdev_err(channel->netdev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n", + eq->core.eqn, eq->core.cons_index, eq->core.irqn); + + eqe_count = mlx5_eq_poll_irq_disabled(eq); + if (!eqe_count) + return -EIO; + + netdev_err(channel->netdev, "Recovered %d eqes on EQ 0x%x\n", + eqe_count, eq->core.eqn); + + channel->stats->eq_rearm++; + return 0; +} + +int mlx5e_health_report(struct mlx5e_priv *priv, + struct devlink_health_reporter *reporter, char *err_str, + struct mlx5e_err_ctx *err_ctx) +{ + if (!reporter) { + netdev_err(priv->netdev, err_str); + return err_ctx->recover(&err_ctx->ctx); + } + return devlink_health_report(reporter, err_str, err_ctx); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h new file mode 100644 index 000000000000..d3693fa547ac --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2019 Mellanox Technologies. 
*/ + +#ifndef __MLX5E_EN_HEALTH_H +#define __MLX5E_EN_HEALTH_H + +#include "en.h" + +#define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND) + +static inline bool cqe_syndrome_needs_recover(u8 syndrome) +{ + return syndrome == MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR || + syndrome == MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR || + syndrome == MLX5_CQE_SYNDROME_LOCAL_PROT_ERR || + syndrome == MLX5_CQE_SYNDROME_WR_FLUSH_ERR; +} + +int mlx5e_reporter_tx_create(struct mlx5e_priv *priv); +void mlx5e_reporter_tx_destroy(struct mlx5e_priv *priv); +void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq); +int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq); + +int mlx5e_reporter_cq_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg); +int mlx5e_reporter_cq_common_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg); +int mlx5e_reporter_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name); +int mlx5e_reporter_named_obj_nest_end(struct devlink_fmsg *fmsg); + +int mlx5e_reporter_rx_create(struct mlx5e_priv *priv); +void mlx5e_reporter_rx_destroy(struct mlx5e_priv *priv); +void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq); +void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq); +void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq); + +#define MLX5E_REPORTER_PER_Q_MAX_LEN 256 + +struct mlx5e_err_ctx { + int (*recover)(void *ctx); + void *ctx; +}; + +int mlx5e_health_sq_to_ready(struct mlx5e_channel *channel, u32 sqn); +int mlx5e_health_channel_eq_recover(struct mlx5_eq_comp *eq, struct mlx5e_channel *channel); +int mlx5e_health_recover_channels(struct mlx5e_priv *priv); +int mlx5e_health_report(struct mlx5e_priv *priv, + struct devlink_health_reporter *reporter, char *err_str, + struct mlx5e_err_ctx *err_ctx); +int mlx5e_health_create_reporters(struct mlx5e_priv *priv); +void mlx5e_health_destroy_reporters(struct mlx5e_priv *priv); +void mlx5e_health_channels_update(struct mlx5e_priv *priv); + + +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c new file mode 100644 index 000000000000..b3a249b2a482 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c @@ -0,0 +1,161 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2018 Mellanox Technologies + +#include "en.h" +#include "en/hv_vhca_stats.h" +#include "lib/hv_vhca.h" +#include "lib/hv.h" + +struct mlx5e_hv_vhca_per_ring_stats { + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; +}; + +static void +mlx5e_hv_vhca_fill_ring_stats(struct mlx5e_priv *priv, int ch, + struct mlx5e_hv_vhca_per_ring_stats *data) +{ + struct mlx5e_channel_stats *stats; + int tc; + + stats = &priv->channel_stats[ch]; + data->rx_packets = stats->rq.packets; + data->rx_bytes = stats->rq.bytes; + + for (tc = 0; tc < priv->max_opened_tc; tc++) { + data->tx_packets += stats->sq[tc].packets; + data->tx_bytes += stats->sq[tc].bytes; + } +} + +static void mlx5e_hv_vhca_fill_stats(struct mlx5e_priv *priv, void *data, + int buf_len) +{ + int ch, i = 0; + + for (ch = 0; ch < priv->max_nch; ch++) { + void *buf = data + i; + + if (WARN_ON_ONCE(buf + + sizeof(struct mlx5e_hv_vhca_per_ring_stats) > + data + buf_len)) + return; + + mlx5e_hv_vhca_fill_ring_stats(priv, ch, buf); + i += sizeof(struct mlx5e_hv_vhca_per_ring_stats); + } +} + +static int mlx5e_hv_vhca_stats_buf_size(struct mlx5e_priv *priv) +{ + return (sizeof(struct mlx5e_hv_vhca_per_ring_stats) * + priv->max_nch); +} + +static void 
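+/* delayed-work body: snapshot the per-ring counters into the
+ * preallocated buffer, push them to the hypervisor through the
+ * vhca agent, and re-arm while a polling interval is configured */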
mlx5e_hv_vhca_stats_work(struct work_struct *work) +{ + struct mlx5e_hv_vhca_stats_agent *sagent; + struct mlx5_hv_vhca_agent *agent; + struct delayed_work *dwork; + struct mlx5e_priv *priv; + int buf_len, rc; + void *buf; + + dwork = to_delayed_work(work); + sagent = container_of(dwork, struct mlx5e_hv_vhca_stats_agent, work); + priv = container_of(sagent, struct mlx5e_priv, stats_agent); + buf_len = mlx5e_hv_vhca_stats_buf_size(priv); + agent = sagent->agent; + buf = sagent->buf; + + memset(buf, 0, buf_len); + mlx5e_hv_vhca_fill_stats(priv, buf, buf_len); + + rc = mlx5_hv_vhca_agent_write(agent, buf, buf_len); + if (rc) { + mlx5_core_err(priv->mdev, + "%s: Failed to write stats, err = %d\n", + __func__, rc); + return; + } + + if (sagent->delay) + queue_delayed_work(priv->wq, &sagent->work, sagent->delay); +} + +enum { + MLX5_HV_VHCA_STATS_VERSION = 1, + MLX5_HV_VHCA_STATS_UPDATE_ONCE = 0xFFFF, +}; + +static void mlx5e_hv_vhca_stats_control(struct mlx5_hv_vhca_agent *agent, + struct mlx5_hv_vhca_control_block *block) +{ + struct mlx5e_hv_vhca_stats_agent *sagent; + struct mlx5e_priv *priv; + + priv = mlx5_hv_vhca_agent_priv(agent); + sagent = &priv->stats_agent; + + block->version = MLX5_HV_VHCA_STATS_VERSION; + block->rings = priv->max_nch; + + if (!block->command) { + cancel_delayed_work_sync(&priv->stats_agent.work); + return; + } + + sagent->delay = block->command == MLX5_HV_VHCA_STATS_UPDATE_ONCE ? 0 : + msecs_to_jiffies(block->command * 100); + + queue_delayed_work(priv->wq, &sagent->work, sagent->delay); +} + +static void mlx5e_hv_vhca_stats_cleanup(struct mlx5_hv_vhca_agent *agent) +{ + struct mlx5e_priv *priv = mlx5_hv_vhca_agent_priv(agent); + + cancel_delayed_work_sync(&priv->stats_agent.work); +} + +int mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv) +{ + int buf_len = mlx5e_hv_vhca_stats_buf_size(priv); + struct mlx5_hv_vhca_agent *agent; + + priv->stats_agent.buf = kvzalloc(buf_len, GFP_KERNEL); + if (!priv->stats_agent.buf) + return -ENOMEM; + + agent = mlx5_hv_vhca_agent_create(priv->mdev->hv_vhca, + MLX5_HV_VHCA_AGENT_STATS, + mlx5e_hv_vhca_stats_control, NULL, + mlx5e_hv_vhca_stats_cleanup, + priv); + + if (IS_ERR_OR_NULL(agent)) { + if (IS_ERR(agent)) + netdev_warn(priv->netdev, + "Failed to create hv vhca stats agent, err = %ld\n", + PTR_ERR(agent)); + + kfree(priv->stats_agent.buf); + return IS_ERR_OR_NULL(agent); + } + + priv->stats_agent.agent = agent; + INIT_DELAYED_WORK(&priv->stats_agent.work, mlx5e_hv_vhca_stats_work); + + return 0; +} + +void mlx5e_hv_vhca_stats_destroy(struct mlx5e_priv *priv) +{ + if (IS_ERR_OR_NULL(priv->stats_agent.agent)) + return; + + mlx5_hv_vhca_agent_destroy(priv->stats_agent.agent); + kfree(priv->stats_agent.buf); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.h new file mode 100644 index 000000000000..664463faf77b --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2019 Mellanox Technologies. 
 */ + +#ifndef __MLX5_EN_STATS_VHCA_H__ +#define __MLX5_EN_STATS_VHCA_H__ +#include "en.h" + +#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE) + +int mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv); +void mlx5e_hv_vhca_stats_destroy(struct mlx5e_priv *priv); + +#else + +static inline int mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv) +{ + return 0; +} + +static inline void mlx5e_hv_vhca_stats_destroy(struct mlx5e_priv *priv) +{ +} +#endif + +#endif /* __MLX5_EN_STATS_VHCA_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index 79301d116667..eb2e1f2138e4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -25,18 +25,33 @@ u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params, return headroom; } -u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params, - struct mlx5e_xsk_param *xsk) +u32 mlx5e_rx_get_min_frag_sz(struct mlx5e_params *params, + struct mlx5e_xsk_param *xsk) { u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); u16 linear_rq_headroom = mlx5e_get_linear_rq_headroom(params, xsk); - u32 frag_sz = linear_rq_headroom + hw_mtu; + + return linear_rq_headroom + hw_mtu; +} + +u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params, + struct mlx5e_xsk_param *xsk) +{ + u32 frag_sz = mlx5e_rx_get_min_frag_sz(params, xsk); /* AF_XDP doesn't build SKBs in place. */ if (!xsk) frag_sz = MLX5_SKB_FRAG_SZ(frag_sz); - /* XDP in mlx5e doesn't support multiple packets per page. */ + /* XDP in mlx5e doesn't support multiple packets per page. AF_XDP is a + * special case. It can run with frames smaller than a page, as it + * doesn't allocate pages dynamically. However, here we pretend that + * fragments are page-sized: it allows us to treat XSK frames like pages + * by redirecting alloc and free operations to XSK rings and by using + * the fact that there are no multiple packets per "page" (which is a frame). + * The latter is important, because frames may come in a random order, + * and we will have trouble assembling a real page of multiple frames. + */ if (mlx5e_rx_is_xdp(params, xsk)) frag_sz = max_t(u32, frag_sz, PAGE_SIZE); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h index 3a615d663d84..989d8f429438 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h @@ -76,6 +76,8 @@ static inline bool mlx5e_qid_validate(const struct mlx5e_profile *profile, u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk); +u32 mlx5e_rx_get_min_frag_sz(struct mlx5e_params *params, + struct mlx5e_xsk_param *xsk); u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk); u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter.h b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter.h deleted file mode 100644 index e78e92753d73..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter.h +++ /dev/null @@ -1,15 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (c) 2019 Mellanox Technologies. 
*/ - -#ifndef __MLX5E_EN_REPORTER_H -#define __MLX5E_EN_REPORTER_H - -#include <linux/mlx5/driver.h> -#include "en.h" - -int mlx5e_tx_reporter_create(struct mlx5e_priv *priv); -void mlx5e_tx_reporter_destroy(struct mlx5e_priv *priv); -void mlx5e_tx_reporter_err_cqe(struct mlx5e_txqsq *sq); -int mlx5e_tx_reporter_timeout(struct mlx5e_txqsq *sq); - -#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c new file mode 100644 index 000000000000..b860569d4247 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c @@ -0,0 +1,404 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2019 Mellanox Technologies. + +#include "health.h" +#include "params.h" + +static int mlx5e_query_rq_state(struct mlx5_core_dev *dev, u32 rqn, u8 *state) +{ + int outlen = MLX5_ST_SZ_BYTES(query_rq_out); + void *out; + void *rqc; + int err; + + out = kvzalloc(outlen, GFP_KERNEL); + if (!out) + return -ENOMEM; + + err = mlx5_core_query_rq(dev, rqn, out); + if (err) + goto out; + + rqc = MLX5_ADDR_OF(query_rq_out, out, rq_context); + *state = MLX5_GET(rqc, rqc, state); + +out: + kvfree(out); + return err; +} + +static int mlx5e_wait_for_icosq_flush(struct mlx5e_icosq *icosq) +{ + unsigned long exp_time = jiffies + msecs_to_jiffies(2000); + + while (time_before(jiffies, exp_time)) { + if (icosq->cc == icosq->pc) + return 0; + + msleep(20); + } + + netdev_err(icosq->channel->netdev, + "Wait for ICOSQ 0x%x flush timeout (cc = 0x%x, pc = 0x%x)\n", + icosq->sqn, icosq->cc, icosq->pc); + + return -ETIMEDOUT; +} + +static void mlx5e_reset_icosq_cc_pc(struct mlx5e_icosq *icosq) +{ + WARN_ONCE(icosq->cc != icosq->pc, "ICOSQ 0x%x: cc (0x%x) != pc (0x%x)\n", + icosq->sqn, icosq->cc, icosq->pc); + icosq->cc = 0; + icosq->pc = 0; +} + +static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx) +{ + struct mlx5_core_dev *mdev; + struct mlx5e_icosq *icosq; + struct net_device *dev; + struct mlx5e_rq *rq; + u8 state; + int err; + + icosq = ctx; + rq = &icosq->channel->rq; + mdev = icosq->channel->mdev; + dev = icosq->channel->netdev; + err = mlx5_core_query_sq_state(mdev, icosq->sqn, &state); + if (err) { + netdev_err(dev, "Failed to query ICOSQ 0x%x state. 
err = %d\n", + icosq->sqn, err); + goto out; + } + + if (state != MLX5_SQC_STATE_ERR) + goto out; + + mlx5e_deactivate_rq(rq); + err = mlx5e_wait_for_icosq_flush(icosq); + if (err) + goto out; + + mlx5e_deactivate_icosq(icosq); + + /* At this point, both the rq and the icosq are disabled */ + + err = mlx5e_health_sq_to_ready(icosq->channel, icosq->sqn); + if (err) + goto out; + + mlx5e_reset_icosq_cc_pc(icosq); + mlx5e_free_rx_descs(rq); + clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state); + mlx5e_activate_icosq(icosq); + mlx5e_activate_rq(rq); + + rq->stats->recover++; + return 0; +out: + clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state); + return err; +} + +void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq) +{ + struct mlx5e_priv *priv = icosq->channel->priv; + char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN]; + struct mlx5e_err_ctx err_ctx = {}; + + err_ctx.ctx = icosq; + err_ctx.recover = mlx5e_rx_reporter_err_icosq_cqe_recover; + sprintf(err_str, "ERR CQE on ICOSQ: 0x%x", icosq->sqn); + + mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx); +} + +static int mlx5e_rq_to_ready(struct mlx5e_rq *rq, int curr_state) +{ + struct net_device *dev = rq->netdev; + int err; + + err = mlx5e_modify_rq_state(rq, curr_state, MLX5_RQC_STATE_RST); + if (err) { + netdev_err(dev, "Failed to move rq 0x%x to reset\n", rq->rqn); + return err; + } + err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY); + if (err) { + netdev_err(dev, "Failed to move rq 0x%x to ready\n", rq->rqn); + return err; + } + + return 0; +} + +static int mlx5e_rx_reporter_err_rq_cqe_recover(void *ctx) +{ + struct mlx5_core_dev *mdev; + struct net_device *dev; + struct mlx5e_rq *rq; + u8 state; + int err; + + rq = ctx; + mdev = rq->mdev; + dev = rq->netdev; + err = mlx5e_query_rq_state(mdev, rq->rqn, &state); + if (err) { + netdev_err(dev, "Failed to query RQ 0x%x state. 
err = %d\n", + rq->rqn, err); + goto out; + } + + if (state != MLX5_RQC_STATE_ERR) + goto out; + + mlx5e_deactivate_rq(rq); + mlx5e_free_rx_descs(rq); + + err = mlx5e_rq_to_ready(rq, MLX5_RQC_STATE_ERR); + if (err) + goto out; + + clear_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state); + mlx5e_activate_rq(rq); + rq->stats->recover++; + return 0; +out: + clear_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state); + return err; +} + +void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq) +{ + struct mlx5e_priv *priv = rq->channel->priv; + char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN]; + struct mlx5e_err_ctx err_ctx = {}; + + err_ctx.ctx = rq; + err_ctx.recover = mlx5e_rx_reporter_err_rq_cqe_recover; + sprintf(err_str, "ERR CQE on RQ: 0x%x", rq->rqn); + + mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx); +} + +static int mlx5e_rx_reporter_timeout_recover(void *ctx) +{ + struct mlx5e_icosq *icosq; + struct mlx5_eq_comp *eq; + struct mlx5e_rq *rq; + int err; + + rq = ctx; + icosq = &rq->channel->icosq; + eq = rq->cq.mcq.eq; + err = mlx5e_health_channel_eq_recover(eq, rq->channel); + if (err) + clear_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state); + + return err; +} + +void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq) +{ + struct mlx5e_icosq *icosq = &rq->channel->icosq; + struct mlx5e_priv *priv = rq->channel->priv; + char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN]; + struct mlx5e_err_ctx err_ctx = {}; + + err_ctx.ctx = rq; + err_ctx.recover = mlx5e_rx_reporter_timeout_recover; + sprintf(err_str, "RX timeout on channel: %d, ICOSQ: 0x%x RQ: 0x%x, CQ: 0x%x\n", + icosq->channel->ix, icosq->sqn, rq->rqn, rq->cq.mcq.cqn); + + mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx); +} + +static int mlx5e_rx_reporter_recover_from_ctx(struct mlx5e_err_ctx *err_ctx) +{ + return err_ctx->recover(err_ctx->ctx); +} + +static int mlx5e_rx_reporter_recover(struct devlink_health_reporter *reporter, + void *context) +{ + struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter); + struct mlx5e_err_ctx *err_ctx = context; + + return err_ctx ? mlx5e_rx_reporter_recover_from_ctx(err_ctx) : + mlx5e_health_recover_channels(priv); +} + +static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq, + struct devlink_fmsg *fmsg) +{ + struct mlx5e_priv *priv = rq->channel->priv; + struct mlx5e_params *params; + struct mlx5e_icosq *icosq; + u8 icosq_hw_state; + int wqes_sz; + u8 hw_state; + u16 wq_head; + int err; + + params = &priv->channels.params; + icosq = &rq->channel->icosq; + err = mlx5e_query_rq_state(priv->mdev, rq->rqn, &hw_state); + if (err) + return err; + + err = mlx5_core_query_sq_state(priv->mdev, icosq->sqn, &icosq_hw_state); + if (err) + return err; + + wqes_sz = mlx5e_rqwq_get_cur_sz(rq); + wq_head = params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ? 
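+		  /* a striding RQ tracks its head in the linked-list WQ,
+		   * a legacy RQ in the cyclic WQ */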
+ rq->mpwqe.wq.head : mlx5_wq_cyc_get_head(&rq->wqe.wq); + + err = devlink_fmsg_obj_nest_start(fmsg); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", rq->channel->ix); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "rqn", rq->rqn); + if (err) + return err; + + err = devlink_fmsg_u8_pair_put(fmsg, "HW state", hw_state); + if (err) + return err; + + err = devlink_fmsg_u8_pair_put(fmsg, "SW state", rq->state); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "posted WQEs", wqes_sz); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "cc", wq_head); + if (err) + return err; + + err = devlink_fmsg_u8_pair_put(fmsg, "ICOSQ HW state", icosq_hw_state); + if (err) + return err; + + err = mlx5e_reporter_cq_diagnose(&rq->cq, fmsg); + if (err) + return err; + + err = devlink_fmsg_obj_nest_end(fmsg); + if (err) + return err; + + return 0; +} + +static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter, + struct devlink_fmsg *fmsg) +{ + struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter); + struct mlx5e_params *params = &priv->channels.params; + struct mlx5e_rq *generic_rq; + u32 rq_stride, rq_sz; + int i, err = 0; + + mutex_lock(&priv->state_lock); + + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + goto unlock; + + generic_rq = &priv->channels.c[0]->rq; + rq_sz = mlx5e_rqwq_get_size(generic_rq); + rq_stride = BIT(mlx5e_mpwqe_get_log_stride_size(priv->mdev, params, NULL)); + + err = mlx5e_reporter_named_obj_nest_start(fmsg, "Common config"); + if (err) + goto unlock; + + err = mlx5e_reporter_named_obj_nest_start(fmsg, "RQ"); + if (err) + goto unlock; + + err = devlink_fmsg_u8_pair_put(fmsg, "type", params->rq_wq_type); + if (err) + goto unlock; + + err = devlink_fmsg_u64_pair_put(fmsg, "stride size", rq_stride); + if (err) + goto unlock; + + err = devlink_fmsg_u32_pair_put(fmsg, "size", rq_sz); + if (err) + goto unlock; + + err = mlx5e_reporter_named_obj_nest_end(fmsg); + if (err) + goto unlock; + + err = mlx5e_reporter_cq_common_diagnose(&generic_rq->cq, fmsg); + if (err) + goto unlock; + + err = mlx5e_reporter_named_obj_nest_end(fmsg); + if (err) + goto unlock; + + err = devlink_fmsg_arr_pair_nest_start(fmsg, "RQs"); + if (err) + goto unlock; + + for (i = 0; i < priv->channels.num; i++) { + struct mlx5e_rq *rq = &priv->channels.c[i]->rq; + + err = mlx5e_rx_reporter_build_diagnose_output(rq, fmsg); + if (err) + goto unlock; + } + err = devlink_fmsg_arr_pair_nest_end(fmsg); + if (err) + goto unlock; +unlock: + mutex_unlock(&priv->state_lock); + return err; +} + +static const struct devlink_health_reporter_ops mlx5_rx_reporter_ops = { + .name = "rx", + .recover = mlx5e_rx_reporter_recover, + .diagnose = mlx5e_rx_reporter_diagnose, +}; + +#define MLX5E_REPORTER_RX_GRACEFUL_PERIOD 500 + +int mlx5e_reporter_rx_create(struct mlx5e_priv *priv) +{ + struct devlink *devlink = priv_to_devlink(priv->mdev); + struct devlink_health_reporter *reporter; + + reporter = devlink_health_reporter_create(devlink, + &mlx5_rx_reporter_ops, + MLX5E_REPORTER_RX_GRACEFUL_PERIOD, + true, priv); + if (IS_ERR(reporter)) { + netdev_warn(priv->netdev, "Failed to create rx reporter, err = %ld\n", + PTR_ERR(reporter)); + return PTR_ERR(reporter); + } + priv->rx_reporter = reporter; + return 0; +} + +void mlx5e_reporter_rx_destroy(struct mlx5e_priv *priv) +{ + if (!priv->rx_reporter) + return; + + devlink_health_reporter_destroy(priv->rx_reporter); +} diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c index c7f86453c638..bfed558637c2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c @@ -1,16 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2019 Mellanox Technologies. */ -#include <net/devlink.h> -#include "reporter.h" -#include "lib/eq.h" - -#define MLX5E_TX_REPORTER_PER_SQ_MAX_LEN 256 - -struct mlx5e_tx_err_ctx { - int (*recover)(struct mlx5e_txqsq *sq); - struct mlx5e_txqsq *sq; -}; +#include "health.h" static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq) { @@ -40,41 +31,20 @@ static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq) sq->pc = 0; } -static int mlx5e_sq_to_ready(struct mlx5e_txqsq *sq, int curr_state) +static int mlx5e_tx_reporter_err_cqe_recover(void *ctx) { - struct mlx5_core_dev *mdev = sq->channel->mdev; - struct net_device *dev = sq->channel->netdev; - struct mlx5e_modify_sq_param msp = {0}; + struct mlx5_core_dev *mdev; + struct net_device *dev; + struct mlx5e_txqsq *sq; + u8 state; int err; - msp.curr_state = curr_state; - msp.next_state = MLX5_SQC_STATE_RST; - - err = mlx5e_modify_sq(mdev, sq->sqn, &msp); - if (err) { - netdev_err(dev, "Failed to move sq 0x%x to reset\n", sq->sqn); - return err; - } - - memset(&msp, 0, sizeof(msp)); - msp.curr_state = MLX5_SQC_STATE_RST; - msp.next_state = MLX5_SQC_STATE_RDY; - - err = mlx5e_modify_sq(mdev, sq->sqn, &msp); - if (err) { - netdev_err(dev, "Failed to move sq 0x%x to ready\n", sq->sqn); - return err; - } - - return 0; -} + sq = ctx; + mdev = sq->channel->mdev; + dev = sq->channel->netdev; -static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq) -{ - struct mlx5_core_dev *mdev = sq->channel->mdev; - struct net_device *dev = sq->channel->netdev; - u8 state; - int err; + if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) + return 0; err = mlx5_core_query_sq_state(mdev, sq->sqn, &state); if (err) { @@ -97,7 +67,7 @@ static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq) * pending WQEs. SQ can safely reset the SQ. 
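 * The actual reset sequence is shared with the RX reporter through
 * mlx5e_health_sq_to_ready(), which walks the SQ from ERR through
 * RST back to RDY.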
*/ - err = mlx5e_sq_to_ready(sq, state); + err = mlx5e_health_sq_to_ready(sq->channel, sq->sqn); if (err) goto out; @@ -112,115 +82,98 @@ out: return err; } -static int mlx5_tx_health_report(struct devlink_health_reporter *tx_reporter, - char *err_str, - struct mlx5e_tx_err_ctx *err_ctx) +void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq) { - if (IS_ERR_OR_NULL(tx_reporter)) { - netdev_err(err_ctx->sq->channel->netdev, err_str); - return err_ctx->recover(err_ctx->sq); - } - - return devlink_health_report(tx_reporter, err_str, err_ctx); -} + struct mlx5e_priv *priv = sq->channel->priv; + char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN]; + struct mlx5e_err_ctx err_ctx = {0}; -void mlx5e_tx_reporter_err_cqe(struct mlx5e_txqsq *sq) -{ - char err_str[MLX5E_TX_REPORTER_PER_SQ_MAX_LEN]; - struct mlx5e_tx_err_ctx err_ctx = {0}; - - err_ctx.sq = sq; - err_ctx.recover = mlx5e_tx_reporter_err_cqe_recover; + err_ctx.ctx = sq; + err_ctx.recover = mlx5e_tx_reporter_err_cqe_recover; sprintf(err_str, "ERR CQE on SQ: 0x%x", sq->sqn); - mlx5_tx_health_report(sq->channel->priv->tx_reporter, err_str, - &err_ctx); + mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx); } -static int mlx5e_tx_reporter_timeout_recover(struct mlx5e_txqsq *sq) +static int mlx5e_tx_reporter_timeout_recover(void *ctx) { - struct mlx5_eq_comp *eq = sq->cq.mcq.eq; - u32 eqe_count; - - netdev_err(sq->channel->netdev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n", - eq->core.eqn, eq->core.cons_index, eq->core.irqn); + struct mlx5_eq_comp *eq; + struct mlx5e_txqsq *sq; + int err; - eqe_count = mlx5_eq_poll_irq_disabled(eq); - if (!eqe_count) { + sq = ctx; + eq = sq->cq.mcq.eq; + err = mlx5e_health_channel_eq_recover(eq, sq->channel); + if (err) clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); - return -EIO; - } - netdev_err(sq->channel->netdev, "Recover %d eqes on EQ 0x%x\n", - eqe_count, eq->core.eqn); - sq->channel->stats->eq_rearm++; - return 0; + return err; } -int mlx5e_tx_reporter_timeout(struct mlx5e_txqsq *sq) +int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq) { - char err_str[MLX5E_TX_REPORTER_PER_SQ_MAX_LEN]; - struct mlx5e_tx_err_ctx err_ctx; + struct mlx5e_priv *priv = sq->channel->priv; + char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN]; + struct mlx5e_err_ctx err_ctx; - err_ctx.sq = sq; - err_ctx.recover = mlx5e_tx_reporter_timeout_recover; + err_ctx.ctx = sq; + err_ctx.recover = mlx5e_tx_reporter_timeout_recover; sprintf(err_str, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n", sq->channel->ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc, jiffies_to_usecs(jiffies - sq->txq->trans_start)); - return mlx5_tx_health_report(sq->channel->priv->tx_reporter, err_str, - &err_ctx); + return mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx); } /* state lock cannot be grabbed within this function. * It can cause a deadlock or a read-after-free. 
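 * (The recover callback may be invoked synchronously from contexts
 * that already hold the state lock, and the queue objects referenced
 * by the error context may be freed once that lock is released.)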
*/ -static int mlx5e_tx_reporter_recover_from_ctx(struct mlx5e_tx_err_ctx *err_ctx) -{ - return err_ctx->recover(err_ctx->sq); -} - -static int mlx5e_tx_reporter_recover_all(struct mlx5e_priv *priv) +static int mlx5e_tx_reporter_recover_from_ctx(struct mlx5e_err_ctx *err_ctx) { - int err = 0; - - rtnl_lock(); - mutex_lock(&priv->state_lock); - - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) - goto out; - - err = mlx5e_safe_reopen_channels(priv); - -out: - mutex_unlock(&priv->state_lock); - rtnl_unlock(); - - return err; + return err_ctx->recover(err_ctx->ctx); } static int mlx5e_tx_reporter_recover(struct devlink_health_reporter *reporter, void *context) { struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter); - struct mlx5e_tx_err_ctx *err_ctx = context; + struct mlx5e_err_ctx *err_ctx = context; return err_ctx ? mlx5e_tx_reporter_recover_from_ctx(err_ctx) : - mlx5e_tx_reporter_recover_all(priv); + mlx5e_health_recover_channels(priv); } static int mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg, - u32 sqn, u8 state, bool stopped) + struct mlx5e_txqsq *sq, int tc) { + struct mlx5e_priv *priv = sq->channel->priv; + bool stopped = netif_xmit_stopped(sq->txq); + u8 state; int err; + err = mlx5_core_query_sq_state(priv->mdev, sq->sqn, &state); + if (err) + return err; + err = devlink_fmsg_obj_nest_start(fmsg); if (err) return err; - err = devlink_fmsg_u32_pair_put(fmsg, "sqn", sqn); + err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", sq->ch_ix); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "tc", tc); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "txq ix", sq->txq_ix); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "sqn", sq->sqn); if (err) return err; @@ -232,6 +185,18 @@ mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg, if (err) return err; + err = devlink_fmsg_u32_pair_put(fmsg, "cc", sq->cc); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "pc", sq->pc); + if (err) + return err; + + err = mlx5e_reporter_cq_diagnose(&sq->cq, fmsg); + if (err) + return err; + err = devlink_fmsg_obj_nest_end(fmsg); if (err) return err; @@ -243,31 +208,61 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg) { struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter); - int i, err = 0; + struct mlx5e_txqsq *generic_sq = priv->txq2sq[0]; + u32 sq_stride, sq_sz; + + int i, tc, err = 0; mutex_lock(&priv->state_lock); if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) goto unlock; + sq_sz = mlx5_wq_cyc_get_size(&generic_sq->wq); + sq_stride = MLX5_SEND_WQE_BB; + + err = mlx5e_reporter_named_obj_nest_start(fmsg, "Common Config"); + if (err) + goto unlock; + + err = mlx5e_reporter_named_obj_nest_start(fmsg, "SQ"); + if (err) + goto unlock; + + err = devlink_fmsg_u64_pair_put(fmsg, "stride size", sq_stride); + if (err) + goto unlock; + + err = devlink_fmsg_u32_pair_put(fmsg, "size", sq_sz); + if (err) + goto unlock; + + err = mlx5e_reporter_cq_common_diagnose(&generic_sq->cq, fmsg); + if (err) + goto unlock; + + err = mlx5e_reporter_named_obj_nest_end(fmsg); + if (err) + goto unlock; + + err = mlx5e_reporter_named_obj_nest_end(fmsg); + if (err) + goto unlock; + err = devlink_fmsg_arr_pair_nest_start(fmsg, "SQs"); if (err) goto unlock; - for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; - i++) { - struct mlx5e_txqsq *sq = priv->txq2sq[i]; - u8 state; + for (i = 0; i < priv->channels.num; i++) { + struct 
mlx5e_channel *c = priv->channels.c[i]; - err = mlx5_core_query_sq_state(priv->mdev, sq->sqn, &state); - if (err) - goto unlock; + for (tc = 0; tc < priv->channels.params.num_tc; tc++) { + struct mlx5e_txqsq *sq = &c->sq[tc]; - err = mlx5e_tx_reporter_build_diagnose_output(fmsg, sq->sqn, - state, - netif_xmit_stopped(sq->txq)); - if (err) - goto unlock; + err = mlx5e_tx_reporter_build_diagnose_output(fmsg, sq, tc); + if (err) + goto unlock; + } } err = devlink_fmsg_arr_pair_nest_end(fmsg); if (err) @@ -286,25 +281,30 @@ static const struct devlink_health_reporter_ops mlx5_tx_reporter_ops = { #define MLX5_REPORTER_TX_GRACEFUL_PERIOD 500 -int mlx5e_tx_reporter_create(struct mlx5e_priv *priv) +int mlx5e_reporter_tx_create(struct mlx5e_priv *priv) { + struct devlink_health_reporter *reporter; struct mlx5_core_dev *mdev = priv->mdev; - struct devlink *devlink = priv_to_devlink(mdev); + struct devlink *devlink; - priv->tx_reporter = + devlink = priv_to_devlink(mdev); + reporter = devlink_health_reporter_create(devlink, &mlx5_tx_reporter_ops, MLX5_REPORTER_TX_GRACEFUL_PERIOD, true, priv); - if (IS_ERR(priv->tx_reporter)) + if (IS_ERR(reporter)) { netdev_warn(priv->netdev, "Failed to create tx reporter, err = %ld\n", - PTR_ERR(priv->tx_reporter)); - return IS_ERR_OR_NULL(priv->tx_reporter); + PTR_ERR(reporter)); + return PTR_ERR(reporter); + } + priv->tx_reporter = reporter; + return 0; } -void mlx5e_tx_reporter_destroy(struct mlx5e_priv *priv) +void mlx5e_reporter_tx_destroy(struct mlx5e_priv *priv) { - if (IS_ERR_OR_NULL(priv->tx_reporter)) + if (!priv->tx_reporter) return; devlink_health_reporter_destroy(priv->tx_reporter); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c index a6a52806be45..f8ee18b4da6f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c @@ -31,29 +31,36 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv, real_dev = is_vlan_dev(dev) ? vlan_dev_real_dev(dev) : dev; uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH); - uplink_upper = netdev_master_upper_dev_get(uplink_dev); + + rcu_read_lock(); + uplink_upper = netdev_master_upper_dev_get_rcu(uplink_dev); + /* mlx5_lag_is_sriov() is a blocking function which can't be called + * while holding rcu read lock. Take the net_device for correctness + * sake. 
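+	 * (dev_hold() pins uplink_upper so it stays valid between
+	 * dropping the RCU read lock and the dev_put() below.)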
+ */ + if (uplink_upper) + dev_hold(uplink_upper); + rcu_read_unlock(); + dst_is_lag_dev = (uplink_upper && netif_is_lag_master(uplink_upper) && real_dev == uplink_upper && mlx5_lag_is_sriov(priv->mdev)); + if (uplink_upper) + dev_put(uplink_upper); /* if the egress device isn't on the same HW e-switch or * it's a LAG device, use the uplink */ + *route_dev = dev; if (!netdev_port_same_parent_id(priv->netdev, real_dev) || - dst_is_lag_dev) { - *route_dev = dev; + dst_is_lag_dev || is_vlan_dev(*route_dev)) *out_dev = uplink_dev; - } else { - *route_dev = dev; - if (is_vlan_dev(*route_dev)) - *out_dev = uplink_dev; - else if (mlx5e_eswitch_rep(dev) && - mlx5e_is_valid_eswitch_fwd_dev(priv, dev)) - *out_dev = *route_dev; - else - return -EOPNOTSUPP; - } + else if (mlx5e_eswitch_rep(dev) && + mlx5e_is_valid_eswitch_fwd_dev(priv, dev)) + *out_dev = *route_dev; + else + return -EOPNOTSUPP; if (!(mlx5e_eswitch_rep(*out_dev) && mlx5e_is_uplink_rep(netdev_priv(*out_dev)))) @@ -284,14 +291,14 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv, */ goto out; } - - err = mlx5_packet_reformat_alloc(priv->mdev, - e->reformat_type, - ipv4_encap_size, encap_header, - MLX5_FLOW_NAMESPACE_FDB, - &e->encap_id); - if (err) + e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, + e->reformat_type, + ipv4_encap_size, encap_header, + MLX5_FLOW_NAMESPACE_FDB); + if (IS_ERR(e->pkt_reformat)) { + err = PTR_ERR(e->pkt_reformat); goto destroy_neigh_entry; + } e->flags |= MLX5_ENCAP_ENTRY_VALID; mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev)); @@ -400,13 +407,14 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, goto out; } - err = mlx5_packet_reformat_alloc(priv->mdev, - e->reformat_type, - ipv6_encap_size, encap_header, - MLX5_FLOW_NAMESPACE_FDB, - &e->encap_id); - if (err) + e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, + e->reformat_type, + ipv6_encap_size, encap_header, + MLX5_FLOW_NAMESPACE_FDB); + if (IS_ERR(e->pkt_reformat)) { + err = PTR_ERR(e->pkt_reformat); goto destroy_neigh_entry; + } e->flags |= MLX5_ENCAP_ENTRY_VALID; mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index ddfe19adb3d9..87be96747902 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -6,7 +6,7 @@ #include "en.h" -#define MLX5E_SQ_NOPS_ROOM MLX5_SEND_WQE_MAX_WQEBBS +#define MLX5E_SQ_NOPS_ROOM (MLX5_SEND_WQE_MAX_WQEBBS - 1) #define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\ MLX5E_SQ_NOPS_ROOM) @@ -117,9 +117,27 @@ mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map, mlx5_write64((__be32 *)ctrl, uar_map); } -static inline bool mlx5e_transport_inline_tx_wqe(struct mlx5e_tx_wqe *wqe) +static inline bool mlx5e_transport_inline_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg) { - return !!wqe->ctrl.tisn; + return cseg && !!cseg->tisn; +} + +static inline u8 +mlx5e_tx_wqe_inline_mode(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg, + struct sk_buff *skb) +{ + u8 mode; + + if (mlx5e_transport_inline_tx_wqe(cseg)) + return MLX5_INLINE_MODE_TCP_UDP; + + mode = sq->min_inline_mode; + + if (skb_vlan_tag_present(skb) && + test_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state)) + mode = max_t(u8, MLX5_INLINE_MODE_L2, mode); + + return mode; } static inline void mlx5e_cq_arm(struct mlx5e_cq *cq) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c 
b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index b0b982cf69bb..f049e0ac308a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -122,6 +122,7 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, void *va, u16 *rx_headroom, u32 *len, bool xsk) { struct bpf_prog *prog = READ_ONCE(rq->xdp_prog); + struct xdp_umem *umem = rq->umem; struct xdp_buff xdp; u32 act; int err; @@ -138,8 +139,11 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, xdp.rxq = &rq->xdp_rxq; act = bpf_prog_run_xdp(prog, &xdp); - if (xsk) - xdp.handle += xdp.data - xdp.data_hard_start; + if (xsk) { + u64 off = xdp.data - xdp.data_hard_start; + + xdp.handle = xsk_umem_adjust_offset(umem, xdp.handle, off); + } switch (act) { case XDP_PASS: *rx_headroom = xdp.data - xdp.data_hard_start; @@ -179,33 +183,19 @@ static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq) struct mlx5e_xdp_mpwqe *session = &sq->mpwqe; struct mlx5e_xdpsq_stats *stats = sq->stats; struct mlx5_wq_cyc *wq = &sq->wq; - u8 wqebbs; - u16 pi; - - mlx5e_xdpsq_fetch_wqe(sq, &session->wqe); - - prefetchw(session->wqe->data); - session->ds_count = MLX5E_XDP_TX_EMPTY_DS_COUNT; - session->pkt_count = 0; - session->complete = 0; + u16 pi, contig_wqebbs; pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); + contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi); -/* The mult of MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS - * (16 * 4 == 64) does not fit in the 6-bit DS field of Ctrl Segment. - * We use a bound lower that MLX5_SEND_WQE_MAX_WQEBBS to let a - * full-session WQE be cache-aligned. - */ -#if L1_CACHE_BYTES < 128 -#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 1) -#else -#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 2) -#endif + if (unlikely(contig_wqebbs < MLX5_SEND_WQE_MAX_WQEBBS)) + mlx5e_fill_xdpsq_frag_edge(sq, wq, pi, contig_wqebbs); - wqebbs = min_t(u16, mlx5_wq_cyc_get_contig_wqebbs(wq, pi), - MLX5E_XDP_MPW_MAX_WQEBBS); + session->wqe = mlx5e_xdpsq_fetch_wqe(sq, &pi); - session->max_ds_count = MLX5_SEND_WQEBB_NUM_DS * wqebbs; + prefetchw(session->wqe->data); + session->ds_count = MLX5E_XDP_TX_EMPTY_DS_COUNT; + session->pkt_count = 0; mlx5e_xdp_update_inline_state(sq); @@ -244,7 +234,7 @@ static int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq) { if (unlikely(!sq->mpwqe.wqe)) { if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, - MLX5_SEND_WQE_MAX_WQEBBS))) { + MLX5E_XDPSQ_STOP_ROOM))) { /* SQ is full, ring doorbell */ mlx5e_xmit_xdp_doorbell(sq); sq->stats->full++; @@ -285,8 +275,8 @@ static bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, mlx5e_xdp_mpwqe_add_dseg(sq, xdptxd, stats); - if (unlikely(session->complete || - session->ds_count == session->max_ds_count)) + if (unlikely(mlx5e_xdp_no_room_for_inline_pkt(session) || + session->ds_count == MLX5E_XDP_MPW_MAX_NUM_DS)) mlx5e_xdp_mpwqe_complete(sq); mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h index b90923932668..36ac1e3816b9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h @@ -40,6 +40,26 @@ (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) #define MLX5E_XDP_TX_DS_COUNT (MLX5E_XDP_TX_EMPTY_DS_COUNT + 1 /* SG DS */) +#define MLX5E_XDPSQ_STOP_ROOM (MLX5E_SQ_STOP_ROOM) + +#define MLX5E_XDP_INLINE_WQE_SZ_THRSD (256 - sizeof(struct mlx5_wqe_inline_seg)) 
+#define MLX5E_XDP_INLINE_WQE_MAX_DS_CNT \ + DIV_ROUND_UP(MLX5E_XDP_INLINE_WQE_SZ_THRSD, MLX5_SEND_WQE_DS) + +/* The mult of MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS + * (16 * 4 == 64) does not fit in the 6-bit DS field of Ctrl Segment. + * We use a bound lower that MLX5_SEND_WQE_MAX_WQEBBS to let a + * full-session WQE be cache-aligned. + */ +#if L1_CACHE_BYTES < 128 +#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 1) +#else +#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 2) +#endif + +#define MLX5E_XDP_MPW_MAX_NUM_DS \ + (MLX5E_XDP_MPW_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS) + struct mlx5e_xsk_param; int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk); bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, @@ -114,6 +134,30 @@ static inline void mlx5e_xdp_update_inline_state(struct mlx5e_xdpsq *sq) session->inline_on = 1; } +static inline bool +mlx5e_xdp_no_room_for_inline_pkt(struct mlx5e_xdp_mpwqe *session) +{ + return session->inline_on && + session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT > MLX5E_XDP_MPW_MAX_NUM_DS; +} + +static inline void +mlx5e_fill_xdpsq_frag_edge(struct mlx5e_xdpsq *sq, struct mlx5_wq_cyc *wq, + u16 pi, u16 nnops) +{ + struct mlx5e_xdp_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi]; + + edge_wi = wi + nnops; + /* fill sq frag edge with nops to avoid wqe wrapping two pages */ + for (; wi < edge_wi; wi++) { + wi->num_wqebbs = 1; + wi->num_pkts = 0; + mlx5e_post_nop(wq, sq->sqn, &sq->pc); + } + + sq->stats->nops += nnops; +} + static inline void mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_xmit_data *xdptxd, @@ -126,20 +170,12 @@ mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq, session->pkt_count++; -#define MLX5E_XDP_INLINE_WQE_SZ_THRSD (256 - sizeof(struct mlx5_wqe_inline_seg)) - if (session->inline_on && dma_len <= MLX5E_XDP_INLINE_WQE_SZ_THRSD) { struct mlx5_wqe_inline_seg *inline_dseg = (struct mlx5_wqe_inline_seg *)dseg; u16 ds_len = sizeof(*inline_dseg) + dma_len; u16 ds_cnt = DIV_ROUND_UP(ds_len, MLX5_SEND_WQE_DS); - if (unlikely(session->ds_count + ds_cnt > session->max_ds_count)) { - /* Not enough space for inline wqe, send with memory pointer */ - session->complete = true; - goto no_inline; - } - inline_dseg->byte_count = cpu_to_be32(dma_len | MLX5_INLINE_SEG); memcpy(inline_dseg->data, xdptxd->data, dma_len); @@ -148,21 +184,23 @@ mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq, return; } -no_inline: dseg->addr = cpu_to_be64(xdptxd->dma_addr); dseg->byte_count = cpu_to_be32(dma_len); dseg->lkey = sq->mkey_be; session->ds_count++; } -static inline void mlx5e_xdpsq_fetch_wqe(struct mlx5e_xdpsq *sq, - struct mlx5e_tx_wqe **wqe) +static inline struct mlx5e_tx_wqe * +mlx5e_xdpsq_fetch_wqe(struct mlx5e_xdpsq *sq, u16 *pi) { struct mlx5_wq_cyc *wq = &sq->wq; - u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); + struct mlx5e_tx_wqe *wqe; + + *pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); + wqe = mlx5_wq_cyc_get_wqe(wq, *pi); + memset(wqe, 0, sizeof(*wqe)); - *wqe = mlx5_wq_cyc_get_wqe(wq, pi); - memset(*wqe, 0, sizeof(**wqe)); + return wqe; } static inline void diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c index 6a55573ec8f2..475b6bd5d29b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c @@ -24,7 +24,8 @@ int mlx5e_xsk_page_alloc_umem(struct mlx5e_rq *rq, if (!xsk_umem_peek_addr_rq(umem, &handle)) return -ENOMEM; - dma_info->xsk.handle = 
handle + rq->buff.umem_headroom; + dma_info->xsk.handle = xsk_umem_adjust_offset(umem, handle, + rq->buff.umem_headroom); dma_info->xsk.data = xdp_umem_get_data(umem, dma_info->xsk.handle); /* No need to add headroom to the DMA address. In striding RQ case, we @@ -104,7 +105,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, /* head_offset is not used in this function, because di->xsk.data and * di->addr point directly to the necessary place. Furthermore, in the - * current implementation, one page = one packet = one frame, so + * current implementation, UMR pages are mapped to XSK frames, so * head_offset should always be 0. */ WARN_ON_ONCE(head_offset); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h index 307b923a1361..cab0e93497ae 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h @@ -5,6 +5,7 @@ #define __MLX5_EN_XSK_RX_H__ #include "en.h" +#include <net/xdp_sock.h> /* RX data path */ @@ -24,4 +25,17 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt); +static inline bool mlx5e_xsk_update_rx_wakeup(struct mlx5e_rq *rq, bool alloc_err) +{ + if (!xsk_umem_uses_need_wakeup(rq->umem)) + return alloc_err; + + if (unlikely(alloc_err)) + xsk_set_rx_need_wakeup(rq->umem); + else + xsk_clear_rx_need_wakeup(rq->umem); + + return false; +} + #endif /* __MLX5_EN_XSK_RX_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c index 7f78c004d12f..631af8dee517 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c @@ -4,18 +4,23 @@ #include "setup.h" #include "en/params.h" +/* It matches XDP_UMEM_MIN_CHUNK_SIZE, but as this constant is private and may + * change unexpectedly, and mlx5e has a minimum valid stride size for striding + * RQ, keep this check in the driver. + */ +#define MLX5E_MIN_XSK_CHUNK_SIZE 2048 + bool mlx5e_validate_xsk_param(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk, struct mlx5_core_dev *mdev) { - /* AF_XDP doesn't support frames larger than PAGE_SIZE, and the current - * mlx5e XDP implementation doesn't support multiple packets per page. - */ - if (xsk->chunk_size != PAGE_SIZE) + /* AF_XDP doesn't support frames larger than PAGE_SIZE. */ + if (xsk->chunk_size > PAGE_SIZE || + xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE) return false; /* Current MTU and XSK headroom don't allow packets to fit the frames. 
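Both xsk_umem_adjust_offset() call sites above replace what used to be plain pointer arithmetic. The helper exists because AF_XDP gained an unaligned-chunk mode in this kernel generation, where the offset is carried in the upper bits of the handle rather than added to the address. Schematically, and assuming the v5.4-era definitions of the flag and shift constant, it behaves like this sketch (not the authoritative implementation):

	static u64 adjust_offset(struct xdp_umem *umem, u64 address, u64 offset)
	{
		if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG)
			return address + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);

		return address + offset;	/* aligned mode: plain add */
	}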
*/ - if (mlx5e_rx_get_linear_frag_sz(params, xsk) > xsk->chunk_size) + if (mlx5e_rx_get_min_frag_sz(params, xsk) > xsk->chunk_size) return false; /* frag_sz is different for regular and XSK RQs, so ensure that linear @@ -60,24 +65,28 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params, struct mlx5e_xsk_param *xsk, struct xdp_umem *umem, struct mlx5e_channel *c) { - struct mlx5e_channel_param cparam = {}; + struct mlx5e_channel_param *cparam; struct dim_cq_moder icocq_moder = {}; int err; if (!mlx5e_validate_xsk_param(params, xsk, priv->mdev)) return -EINVAL; - mlx5e_build_xsk_cparam(priv, params, xsk, &cparam); + cparam = kvzalloc(sizeof(*cparam), GFP_KERNEL); + if (!cparam) + return -ENOMEM; + + mlx5e_build_xsk_cparam(priv, params, xsk, cparam); - err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam.rx_cq, &c->xskrq.cq); + err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->xskrq.cq); if (unlikely(err)) - return err; + goto err_free_cparam; - err = mlx5e_open_rq(c, params, &cparam.rq, xsk, umem, &c->xskrq); + err = mlx5e_open_rq(c, params, &cparam->rq, xsk, umem, &c->xskrq); if (unlikely(err)) goto err_close_rx_cq; - err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam.tx_cq, &c->xsksq.cq); + err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->tx_cq, &c->xsksq.cq); if (unlikely(err)) goto err_close_rq; @@ -87,21 +96,23 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params, * is disabled and then reenabled, but the SQ continues receiving CQEs * from the old UMEM. */ - err = mlx5e_open_xdpsq(c, params, &cparam.xdp_sq, umem, &c->xsksq, true); + err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, umem, &c->xsksq, true); if (unlikely(err)) goto err_close_tx_cq; - err = mlx5e_open_cq(c, icocq_moder, &cparam.icosq_cq, &c->xskicosq.cq); + err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->xskicosq.cq); if (unlikely(err)) goto err_close_sq; /* Create a dedicated SQ for posting NOPs whenever we need an IRQ to be * triggered and NAPI to be called on the correct CPU. */ - err = mlx5e_open_icosq(c, params, &cparam.icosq, &c->xskicosq); + err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->xskicosq); if (unlikely(err)) goto err_close_icocq; + kvfree(cparam); + spin_lock_init(&c->xskicosq_lock); set_bit(MLX5E_CHANNEL_STATE_XSK, c->state); @@ -123,6 +134,9 @@ err_close_rq: err_close_rx_cq: mlx5e_close_cq(&c->xskrq.cq); +err_free_cparam: + kvfree(cparam); + return err; } @@ -141,6 +155,7 @@ void mlx5e_close_xsk(struct mlx5e_channel *c) void mlx5e_activate_xsk(struct mlx5e_channel *c) { + mlx5e_activate_icosq(&c->xskicosq); set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state); /* TX queue is created active. */ @@ -153,6 +168,7 @@ void mlx5e_deactivate_xsk(struct mlx5e_channel *c) { mlx5e_deactivate_rq(&c->xskrq); /* TX queue is disabled on close. 
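Note the shape of the reworked mlx5e_open_xsk() above: struct mlx5e_channel_param is large enough to risk stack-frame warnings, so it is now kvzalloc()ed, freed early on success (it is only needed while the queues are being created), and freed on failure through the new err_free_cparam label. As a generic sketch, with open_foo()/open_bar() as placeholder setup steps:

	cparam = kvzalloc(sizeof(*cparam), GFP_KERNEL);
	if (!cparam)
		return -ENOMEM;

	err = open_foo(cparam);
	if (err)
		goto err_free_cparam;

	err = open_bar(cparam);
	if (err)
		goto err_close_foo;

	kvfree(cparam);		/* only needed during setup */
	return 0;

err_close_foo:
	close_foo();
err_free_cparam:
	kvfree(cparam);
	return err;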
*/ + mlx5e_deactivate_icosq(&c->xskicosq); } static int mlx5e_redirect_xsk_rqt(struct mlx5e_priv *priv, u16 ix, u32 rqn) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c index 35e188cf4ea4..87827477d38c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c @@ -7,7 +7,7 @@ #include "en/params.h" #include <net/xdp_sock.h> -int mlx5e_xsk_async_xmit(struct net_device *dev, u32 qid) +int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) { struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5e_params *params = &priv->channels.params; @@ -26,6 +26,13 @@ int mlx5e_xsk_async_xmit(struct net_device *dev, u32 qid) return -ENXIO; if (!napi_if_scheduled_mark_missed(&c->napi)) { + /* To avoid WQE overrun, don't post a NOP if XSKICOSQ is not + * active and not polled by NAPI. Return 0, because the upcoming + * activate will trigger the IRQ for us. + */ + if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &c->xskicosq.state))) + return 0; + spin_lock(&c->xskicosq_lock); mlx5e_trigger_irq(&c->xskicosq); spin_unlock(&c->xskicosq_lock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h index 7add18bf78d8..79b487d89757 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h @@ -5,11 +5,23 @@ #define __MLX5_EN_XSK_TX_H__ #include "en.h" +#include <net/xdp_sock.h> /* TX data path */ -int mlx5e_xsk_async_xmit(struct net_device *dev, u32 qid); +int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags); bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget); +static inline void mlx5e_xsk_update_tx_wakeup(struct mlx5e_xdpsq *sq) +{ + if (!xsk_umem_uses_need_wakeup(sq->umem)) + return; + + if (sq->pc != sq->cc) + xsk_clear_tx_need_wakeup(sq->umem); + else + xsk_set_tx_need_wakeup(sq->umem); +} + #endif /* __MLX5_EN_XSK_TX_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c index 7833ddef0427..d195366461c9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c @@ -256,8 +256,7 @@ struct mlx5e_dump_wqe { }; static int -tx_post_resync_dump(struct mlx5e_txqsq *sq, struct sk_buff *skb, - skb_frag_t *frag, u32 tisn, bool first) +tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool first) { struct mlx5_wqe_ctrl_seg *cseg; struct mlx5_wqe_data_seg *dseg; @@ -371,8 +370,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx, tx_post_resync_params(sq, priv_tx, info.rcd_sn); for (i = 0; i < info.nr_frags; i++) - if (tx_post_resync_dump(sq, skb, info.frags[i], - priv_tx->tisn, !i)) + if (tx_post_resync_dump(sq, info.frags[i], priv_tx->tisn, !i)) goto err_out; /* If no dump WQE was sent, we need to have a fence NOP WQE before the @@ -408,7 +406,7 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev, goto out; tls_ctx = tls_get_ctx(skb->sk); - if (unlikely(WARN_ON_ONCE(tls_ctx->netdev != netdev))) + if (WARN_ON_ONCE(tls_ctx->netdev != netdev)) goto err_out; priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c index 1539cf3de5dc..f7890e0ce96c 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c @@ -180,15 +180,3 @@ out: return err; } - -u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev) -{ - u8 min_inline_mode; - - mlx5_query_min_inline(mdev, &min_inline_mode); - if (min_inline_mode == MLX5_INLINE_MODE_NONE && - !MLX5_CAP_ETH(mdev, wqe_vlan_insert)) - min_inline_mode = MLX5_INLINE_MODE_L2; - - return min_inline_mode; -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index 8dd31b5c740c..01f2918063af 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -1101,7 +1101,7 @@ void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv) static void mlx5e_trust_update_tx_min_inline_mode(struct mlx5e_priv *priv, struct mlx5e_params *params) { - params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(priv->mdev); + mlx5_query_min_inline(priv->mdev, ¶ms->tx_min_inline_mode); if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP && params->tx_min_inline_mode == MLX5_INLINE_MODE_L2) params->tx_min_inline_mode = MLX5_INLINE_MODE_IP; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 20e628c907e5..c5a9c20d7f00 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1431,7 +1431,7 @@ static __u32 mlx5e_get_wol_supported(struct mlx5_core_dev *mdev) return ret; } -static __u32 mlx5e_refomrat_wol_mode_mlx5_to_linux(u8 mode) +static __u32 mlx5e_reformat_wol_mode_mlx5_to_linux(u8 mode) { __u32 ret = 0; @@ -1459,7 +1459,7 @@ static __u32 mlx5e_refomrat_wol_mode_mlx5_to_linux(u8 mode) return ret; } -static u8 mlx5e_refomrat_wol_mode_linux_to_mlx5(__u32 mode) +static u8 mlx5e_reformat_wol_mode_linux_to_mlx5(__u32 mode) { u8 ret = 0; @@ -1505,7 +1505,7 @@ static void mlx5e_get_wol(struct net_device *netdev, if (err) return; - wol->wolopts = mlx5e_refomrat_wol_mode_mlx5_to_linux(mlx5_wol_mode); + wol->wolopts = mlx5e_reformat_wol_mode_mlx5_to_linux(mlx5_wol_mode); } static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) @@ -1521,7 +1521,7 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) if (wol->wolopts & ~wol_supported) return -EINVAL; - mlx5_wol_mode = mlx5e_refomrat_wol_mode_linux_to_mlx5(wol->wolopts); + mlx5_wol_mode = mlx5e_reformat_wol_mode_linux_to_mlx5(wol->wolopts); return mlx5_set_port_wol(mdev, mlx5_wol_mode); } @@ -1958,21 +1958,27 @@ static u32 mlx5e_get_priv_flags(struct net_device *netdev) return priv->channels.params.pflags; } -#ifndef CONFIG_MLX5_EN_RXNFC -/* When CONFIG_MLX5_EN_RXNFC=n we only support ETHTOOL_GRXRINGS - * otherwise this function will be defined from en_fs_ethtool.c - */ static int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs) { struct mlx5e_priv *priv = netdev_priv(dev); - if (info->cmd != ETHTOOL_GRXRINGS) - return -EOPNOTSUPP; - /* ring_count is needed by ethtool -x */ - info->data = priv->channels.params.num_channels; - return 0; + /* ETHTOOL_GRXRINGS is needed by ethtool -x which is not part + * of rxnfc. We keep this logic out of mlx5e_ethtool_get_rxnfc, + * to avoid breaking "ethtool -x" when mlx5e_ethtool_get_rxnfc + * is compiled out via CONFIG_MLX5_EN_RXNFC=n. 
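For the now-unconditional .get_rxnfc/.set_rxnfc hooks below to build with CONFIG_MLX5_EN_RXNFC=n, the mlx5e_ethtool_{get,set}_rxnfc() declarations presumably come with inline stubs in the flow-steering header. That stub code is outside this hunk, so treat the following as an assumption about the usual kernel idiom rather than the exact header contents:

	#ifdef CONFIG_MLX5_EN_RXNFC
	int mlx5e_ethtool_get_rxnfc(struct net_device *dev,
				    struct ethtool_rxnfc *info, u32 *rule_locs);
	int mlx5e_ethtool_set_rxnfc(struct net_device *dev,
				    struct ethtool_rxnfc *cmd);
	#else
	static inline int
	mlx5e_ethtool_get_rxnfc(struct net_device *dev,
				struct ethtool_rxnfc *info, u32 *rule_locs)
	{
		return -EOPNOTSUPP;
	}

	static inline int
	mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
	{
		return -EOPNOTSUPP;
	}
	#endif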
+ */ + if (info->cmd == ETHTOOL_GRXRINGS) { + info->data = priv->channels.params.num_channels; + return 0; + } + + return mlx5e_ethtool_get_rxnfc(dev, info, rule_locs); +} + +static int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + return mlx5e_ethtool_set_rxnfc(dev, cmd); } -#endif const struct ethtool_ops mlx5e_ethtool_ops = { .get_drvinfo = mlx5e_get_drvinfo, @@ -1993,9 +1999,7 @@ const struct ethtool_ops mlx5e_ethtool_ops = { .get_rxfh = mlx5e_get_rxfh, .set_rxfh = mlx5e_set_rxfh, .get_rxnfc = mlx5e_get_rxnfc, -#ifdef CONFIG_MLX5_EN_RXNFC .set_rxnfc = mlx5e_set_rxnfc, -#endif .get_tunable = mlx5e_get_tunable, .set_tunable = mlx5e_set_tunable, .get_pauseparam = mlx5e_get_pauseparam, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 76cc10e44080..15b7f0f1427c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -747,8 +747,55 @@ static struct mlx5e_etype_proto ttc_tunnel_rules[] = { .etype = ETH_P_IPV6, .proto = IPPROTO_GRE, }, + [MLX5E_TT_IPV4_IPIP] = { + .etype = ETH_P_IP, + .proto = IPPROTO_IPIP, + }, + [MLX5E_TT_IPV6_IPIP] = { + .etype = ETH_P_IPV6, + .proto = IPPROTO_IPIP, + }, + [MLX5E_TT_IPV4_IPV6] = { + .etype = ETH_P_IP, + .proto = IPPROTO_IPV6, + }, + [MLX5E_TT_IPV6_IPV6] = { + .etype = ETH_P_IPV6, + .proto = IPPROTO_IPV6, + }, + }; +bool mlx5e_tunnel_proto_supported(struct mlx5_core_dev *mdev, u8 proto_type) +{ + switch (proto_type) { + case IPPROTO_GRE: + return MLX5_CAP_ETH(mdev, tunnel_stateless_gre); + case IPPROTO_IPIP: + case IPPROTO_IPV6: + return MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip); + default: + return false; + } +} + +bool mlx5e_any_tunnel_proto_supported(struct mlx5_core_dev *mdev) +{ + int tt; + + for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) { + if (mlx5e_tunnel_proto_supported(mdev, ttc_tunnel_rules[tt].proto)) + return true; + } + return false; +} + +bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev) +{ + return (mlx5e_any_tunnel_proto_supported(mdev) && + MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version)); +} + static u8 mlx5e_etype_to_ipv(u16 ethertype) { if (ethertype == ETH_P_IP) @@ -838,6 +885,9 @@ static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv, dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; dest.ft = params->inner_ttc->ft.t; for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) { + if (!mlx5e_tunnel_proto_supported(priv->mdev, + ttc_tunnel_rules[tt].proto)) + continue; rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest, ttc_tunnel_rules[tt].etype, ttc_tunnel_rules[tt].proto); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c index 94304abc49e9..eed7101e8bb7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c @@ -888,10 +888,10 @@ static int mlx5e_get_rss_hash_opt(struct mlx5e_priv *priv, return 0; } -int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +int mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) { - int err = 0; struct mlx5e_priv *priv = netdev_priv(dev); + int err = 0; switch (cmd->cmd) { case ETHTOOL_SRXCLSRLINS: @@ -911,16 +911,13 @@ int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) return err; } -int mlx5e_get_rxnfc(struct net_device *dev, - struct ethtool_rxnfc *info, u32 *rule_locs) +int 
mlx5e_ethtool_get_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *info, u32 *rule_locs) { struct mlx5e_priv *priv = netdev_priv(dev); int err = 0; switch (info->cmd) { - case ETHTOOL_GRXRINGS: - info->data = priv->channels.params.num_channels; - break; case ETHTOOL_GRXCLSRLCNT: info->rule_cnt = priv->fs.ethtool.tot_num_rules; break; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 9d5f6e56188f..7569287f8f3c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -56,12 +56,13 @@ #include "en/xdp.h" #include "lib/eq.h" #include "en/monitor_stats.h" -#include "en/reporter.h" +#include "en/health.h" #include "en/params.h" #include "en/xsk/umem.h" #include "en/xsk/setup.h" #include "en/xsk/rx.h" #include "en/xsk/tx.h" +#include "en/hv_vhca_stats.h" bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) @@ -247,26 +248,6 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE); } -static u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq) -{ - switch (rq->wq_type) { - case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - return mlx5_wq_ll_get_size(&rq->mpwqe.wq); - default: - return mlx5_wq_cyc_get_size(&rq->wqe.wq); - } -} - -static u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq) -{ - switch (rq->wq_type) { - case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - return rq->mpwqe.wq.cur_sz; - default: - return rq->wqe.wq.cur_sz; - } -} - static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, struct mlx5e_channel *c) { @@ -382,6 +363,13 @@ static void mlx5e_free_di_list(struct mlx5e_rq *rq) kvfree(rq->wqe.di); } +static void mlx5e_rq_err_cqe_work(struct work_struct *recover_work) +{ + struct mlx5e_rq *rq = container_of(recover_work, struct mlx5e_rq, recover_work); + + mlx5e_reporter_rq_cqe_err(rq); +} + static int mlx5e_alloc_rq(struct mlx5e_channel *c, struct mlx5e_params *params, struct mlx5e_xsk_param *xsk, @@ -418,6 +406,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, rq->stats = &c->priv->channel_stats[c->ix].xskrq; else rq->stats = &c->priv->channel_stats[c->ix].rq; + INIT_WORK(&rq->recover_work, mlx5e_rq_err_cqe_work); rq->xdp_prog = params->xdp_prog ? 
bpf_prog_inc(params->xdp_prog) : NULL; if (IS_ERR(rq->xdp_prog)) { @@ -720,8 +709,7 @@ static int mlx5e_create_rq(struct mlx5e_rq *rq, return err; } -static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, - int next_state) +int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state) { struct mlx5_core_dev *mdev = rq->mdev; @@ -829,10 +817,11 @@ int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time) netdev_warn(c->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n", c->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes); + mlx5e_reporter_rx_timeout(rq); return -ETIMEDOUT; } -static void mlx5e_free_rx_descs(struct mlx5e_rq *rq) +void mlx5e_free_rx_descs(struct mlx5e_rq *rq) { __be16 wqe_ix_be; u16 wqe_ix; @@ -911,7 +900,7 @@ err_free_rq: return err; } -static void mlx5e_activate_rq(struct mlx5e_rq *rq) +void mlx5e_activate_rq(struct mlx5e_rq *rq) { set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state); mlx5e_trigger_irq(&rq->channel->icosq); @@ -926,6 +915,8 @@ void mlx5e_deactivate_rq(struct mlx5e_rq *rq) void mlx5e_close_rq(struct mlx5e_rq *rq) { cancel_work_sync(&rq->dim.work); + cancel_work_sync(&rq->channel->icosq.recover_work); + cancel_work_sync(&rq->recover_work); mlx5e_destroy_rq(rq); mlx5e_free_rx_descs(rq); mlx5e_free_rq(rq); @@ -1042,6 +1033,14 @@ static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa) return 0; } +static void mlx5e_icosq_err_cqe_work(struct work_struct *recover_work) +{ + struct mlx5e_icosq *sq = container_of(recover_work, struct mlx5e_icosq, + recover_work); + + mlx5e_reporter_icosq_cqe_err(sq); +} + static int mlx5e_alloc_icosq(struct mlx5e_channel *c, struct mlx5e_sq_param *param, struct mlx5e_icosq *sq) @@ -1064,6 +1063,8 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c, if (err) goto err_sq_wq_destroy; + INIT_WORK(&sq->recover_work, mlx5e_icosq_err_cqe_work); + return 0; err_sq_wq_destroy: @@ -1130,6 +1131,8 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, sq->stats = &c->priv->channel_stats[c->ix].sq[tc]; sq->stop_room = MLX5E_SQ_STOP_ROOM; INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work); + if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert)) + set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state); if (MLX5_IPSEC_DEV(c->priv->mdev)) set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state); if (mlx5_accel_is_tls_device(c->priv->mdev)) { @@ -1312,7 +1315,6 @@ static int mlx5e_open_txqsq(struct mlx5e_channel *c, return 0; err_free_txqsq: - clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); mlx5e_free_txqsq(sq); return err; @@ -1377,7 +1379,7 @@ static void mlx5e_tx_err_cqe_work(struct work_struct *recover_work) struct mlx5e_txqsq *sq = container_of(recover_work, struct mlx5e_txqsq, recover_work); - mlx5e_tx_reporter_err_cqe(sq); + mlx5e_reporter_tx_err_cqe(sq); } int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params, @@ -1393,7 +1395,6 @@ int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params, csp.cqn = sq->cq.mcq.cqn; csp.wq_ctrl = &sq->wq_ctrl; csp.min_inline_mode = params->tx_min_inline_mode; - set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn); if (err) goto err_free_icosq; @@ -1401,18 +1402,27 @@ int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params, return 0; err_free_icosq: - clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); mlx5e_free_icosq(sq); return err; } -void mlx5e_close_icosq(struct mlx5e_icosq *sq) +void mlx5e_activate_icosq(struct mlx5e_icosq *icosq) { - struct 
mlx5e_channel *c = sq->channel; + set_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state); +} - clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); +void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq) +{ + struct mlx5e_channel *c = icosq->channel; + + clear_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state); napi_synchronize(&c->napi); +} + +void mlx5e_close_icosq(struct mlx5e_icosq *sq) +{ + struct mlx5e_channel *c = sq->channel; mlx5e_destroy_sq(c->mdev, sq->sqn); mlx5e_free_icosq(sq); @@ -1430,7 +1440,7 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params, return err; csp.tis_lst_sz = 1; - csp.tisn = c->priv->tisn[0]; /* tc = 0 */ + csp.tisn = c->priv->tisn[c->lag_port][0]; /* tc = 0 */ csp.cqn = sq->cq.mcq.cqn; csp.wq_ctrl = &sq->wq_ctrl; csp.min_inline_mode = sq->min_inline_mode; @@ -1680,7 +1690,7 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c, for (tc = 0; tc < params->num_tc; tc++) { int txq_ix = c->ix + tc * priv->max_nch; - err = mlx5e_open_txqsq(c, c->priv->tisn[tc], txq_ix, + err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix, params, &cparam->sq, &c->sq[tc], tc); if (err) goto err_close_sqs; @@ -1914,6 +1924,13 @@ static void mlx5e_close_queues(struct mlx5e_channel *c) mlx5e_close_cq(&c->icosq.cq); } +static u8 mlx5e_enumerate_lag_port(struct mlx5_core_dev *mdev, int ix) +{ + u16 port_aff_bias = mlx5_core_is_pf(mdev) ? 0 : MLX5_CAP_GEN(mdev, vhca_id); + + return (ix + port_aff_bias) % mlx5e_get_num_lag_ports(mdev); +} + static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, struct mlx5e_params *params, struct mlx5e_channel_param *cparam, @@ -1948,6 +1965,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, c->xdp = !!params->xdp_prog; c->stats = &priv->channel_stats[ix].ch; c->irq_desc = irq_to_desc(irq); + c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix); err = mlx5e_alloc_xps_cpumask(c, params); if (err) @@ -1989,6 +2007,7 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c) for (tc = 0; tc < c->num_tc; tc++) mlx5e_activate_txqsq(&c->sq[tc]); + mlx5e_activate_icosq(&c->icosq); mlx5e_activate_rq(&c->rq); netif_set_xps_queue(c->netdev, c->xps_cpumask, c->ix); @@ -2004,6 +2023,7 @@ static void mlx5e_deactivate_channel(struct mlx5e_channel *c) mlx5e_deactivate_xsk(c); mlx5e_deactivate_rq(&c->rq); + mlx5e_deactivate_icosq(&c->icosq); for (tc = 0; tc < c->num_tc; tc++) mlx5e_deactivate_txqsq(&c->sq[tc]); } @@ -2321,10 +2341,7 @@ int mlx5e_open_channels(struct mlx5e_priv *priv, goto err_close_channels; } - if (!IS_ERR_OR_NULL(priv->tx_reporter)) - devlink_health_reporter_state_update(priv->tx_reporter, - DEVLINK_HEALTH_REPORTER_STATE_HEALTHY); - + mlx5e_health_channels_update(priv); kvfree(cparam); return 0; @@ -3168,40 +3185,58 @@ void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn) mlx5_core_destroy_tis(mdev, tisn); } +void mlx5e_destroy_tises(struct mlx5e_priv *priv) +{ + int tc, i; + + for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++) + for (tc = 0; tc < priv->profile->max_tc; tc++) + mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]); +} + +static bool mlx5e_lag_should_assign_affinity(struct mlx5_core_dev *mdev) +{ + return MLX5_CAP_GEN(mdev, lag_tx_port_affinity) && mlx5e_get_num_lag_ports(mdev) > 1; +} + int mlx5e_create_tises(struct mlx5e_priv *priv) { + int tc, i; int err; - int tc; - for (tc = 0; tc < priv->profile->max_tc; tc++) { - u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {}; - void *tisc; + for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++) { + for (tc = 0; tc < priv->profile->max_tc; 
tc++) { + u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {}; + void *tisc; - tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); + tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); - MLX5_SET(tisc, tisc, prio, tc << 1); + MLX5_SET(tisc, tisc, prio, tc << 1); - err = mlx5e_create_tis(priv->mdev, in, &priv->tisn[tc]); - if (err) - goto err_close_tises; + if (mlx5e_lag_should_assign_affinity(priv->mdev)) + MLX5_SET(tisc, tisc, lag_tx_port_affinity, i + 1); + + err = mlx5e_create_tis(priv->mdev, in, &priv->tisn[i][tc]); + if (err) + goto err_close_tises; + } } return 0; err_close_tises: - for (tc--; tc >= 0; tc--) - mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]); + for (; i >= 0; i--) { + for (tc--; tc >= 0; tc--) + mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]); + tc = priv->profile->max_tc; + } return err; } static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv) { - int tc; - - mlx5e_tx_reporter_destroy(priv); - for (tc = 0; tc < priv->profile->max_tc; tc++) - mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]); + mlx5e_destroy_tises(priv); } static void mlx5e_build_indir_tir_ctx_common(struct mlx5e_priv *priv, @@ -3422,7 +3457,7 @@ out: #ifdef CONFIG_MLX5_ESWITCH static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv, struct flow_cls_offload *cls_flower, - int flags) + unsigned long flags) { switch (cls_flower->command) { case FLOW_CLS_REPLACE: @@ -3442,12 +3477,12 @@ static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv, static int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) { + unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(NIC_OFFLOAD); struct mlx5e_priv *priv = cb_priv; switch (type) { case TC_SETUP_CLSFLOWER: - return mlx5e_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS | - MLX5E_TC_NIC_OFFLOAD); + return mlx5e_setup_tc_cls_flower(priv, type_data, flags); default: return -EOPNOTSUPP; } @@ -3463,11 +3498,15 @@ static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type, switch (type) { #ifdef CONFIG_MLX5_ESWITCH - case TC_SETUP_BLOCK: + case TC_SETUP_BLOCK: { + struct flow_block_offload *f = type_data; + + f->unlocked_driver_cb = true; return flow_block_cb_setup_simple(type_data, &mlx5e_block_cb_list, mlx5e_setup_tc_block_cb, priv, priv, true); + } #endif case TC_SETUP_QDISC_MQPRIO: return mlx5e_setup_tc_mqprio(priv, type_data); @@ -3640,7 +3679,7 @@ static int set_feature_tc_num_filters(struct net_device *netdev, bool enable) { struct mlx5e_priv *priv = netdev_priv(netdev); - if (!enable && mlx5e_tc_num_filters(priv, MLX5E_TC_NIC_OFFLOAD)) { + if (!enable && mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD))) { netdev_err(netdev, "Active offloaded tc filters, can't turn hw_tc_offload off\n"); return -EINVAL; @@ -3781,9 +3820,10 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev, netdev_warn(netdev, "Dropping C-tag vlan stripping offload due to S-tag vlan\n"); } if (!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ)) { - features &= ~NETIF_F_LRO; - if (params->lro_en) + if (features & NETIF_F_LRO) { netdev_warn(netdev, "Disabling LRO, not supported in legacy RQ\n"); + features &= ~NETIF_F_LRO; + } } if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) { @@ -3950,7 +3990,8 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: case HWTSTAMP_FILTER_NTP_ALL: /* Disable CQE compression */ - netdev_warn(priv->netdev, "Disabling cqe compression"); + if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS)) + netdev_warn(priv->netdev, 
"Disabling RX cqe compression\n"); err = mlx5e_modify_rx_cqe_compression_locked(priv, false); if (err) { netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err); @@ -4202,6 +4243,8 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv, switch (proto) { case IPPROTO_GRE: + case IPPROTO_IPIP: + case IPPROTO_IPV6: return features; case IPPROTO_UDP: udph = udp_hdr(skb); @@ -4267,7 +4310,7 @@ static void mlx5e_tx_timeout_work(struct work_struct *work) if (!netif_xmit_stopped(dev_queue)) continue; - if (mlx5e_tx_reporter_timeout(sq)) + if (mlx5e_reporter_tx_timeout(sq)) report_failed = true; } @@ -4535,7 +4578,7 @@ const struct net_device_ops mlx5e_netdev_ops = { .ndo_tx_timeout = mlx5e_tx_timeout, .ndo_bpf = mlx5e_xdp, .ndo_xdp_xmit = mlx5e_xdp_xmit, - .ndo_xsk_async_xmit = mlx5e_xsk_async_xmit, + .ndo_xsk_wakeup = mlx5e_xsk_wakeup, #ifdef CONFIG_MLX5_EN_ARFS .ndo_rx_flow_steer = mlx5e_rx_flow_steer, #endif @@ -4768,7 +4811,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); /* TX inline */ - params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev); + mlx5_query_min_inline(mdev, ¶ms->tx_min_inline_mode); /* RSS */ mlx5e_build_rss_params(rss_params, params->num_channels); @@ -4838,7 +4881,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX; if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev) || - MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) { + mlx5e_any_tunnel_proto_supported(mdev)) { netdev->hw_enc_features |= NETIF_F_HW_CSUM; netdev->hw_enc_features |= NETIF_F_TSO; netdev->hw_enc_features |= NETIF_F_TSO6; @@ -4853,7 +4896,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM; } - if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) { + if (mlx5e_tunnel_proto_supported(mdev, IPPROTO_GRE)) { netdev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM; netdev->hw_enc_features |= NETIF_F_GSO_GRE | @@ -4862,6 +4905,15 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) NETIF_F_GSO_GRE_CSUM; } + if (mlx5e_tunnel_proto_supported(mdev, IPPROTO_IPIP)) { + netdev->hw_features |= NETIF_F_GSO_IPXIP4 | + NETIF_F_GSO_IPXIP6; + netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4 | + NETIF_F_GSO_IPXIP6; + netdev->gso_partial_features |= NETIF_F_GSO_IPXIP4 | + NETIF_F_GSO_IPXIP6; + } + netdev->hw_features |= NETIF_F_GSO_PARTIAL; netdev->gso_partial_features |= NETIF_F_GSO_UDP_L4; netdev->hw_features |= NETIF_F_GSO_UDP_L4; @@ -4965,12 +5017,14 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev, mlx5_core_err(mdev, "TLS initialization failed, %d\n", err); mlx5e_build_nic_netdev(netdev); mlx5e_build_tc2txq_maps(priv); + mlx5e_health_create_reporters(priv); return 0; } static void mlx5e_nic_cleanup(struct mlx5e_priv *priv) { + mlx5e_health_destroy_reporters(priv); mlx5e_tls_cleanup(priv); mlx5e_ipsec_cleanup(priv); mlx5e_netdev_cleanup(priv->netdev, priv); @@ -5073,7 +5127,6 @@ static int mlx5e_init_nic_tx(struct mlx5e_priv *priv) #ifdef CONFIG_MLX5_CORE_EN_DCB mlx5e_dcbnl_initialize(priv); #endif - mlx5e_tx_reporter_create(priv); return 0; } @@ -5097,6 +5150,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) if (mlx5e_monitor_counter_supported(priv)) mlx5e_monitor_counter_init(priv); + mlx5e_hv_vhca_stats_create(priv); if (netdev->reg_state != NETREG_REGISTERED) return; #ifdef CONFIG_MLX5_CORE_EN_DCB @@ -5129,6 
+5183,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv) queue_work(priv->wq, &priv->set_rx_mode_work); + mlx5e_hv_vhca_stats_destroy(priv); if (mlx5e_monitor_counter_supported(priv)) mlx5e_monitor_counter_cleanup(priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index d0684fdb69e1..95892a3b63a1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -38,6 +38,7 @@ #include <net/netevent.h> #include <net/arp.h> #include <net/devlink.h> +#include <net/ipv6_stubs.h> #include "eswitch.h" #include "en.h" @@ -46,6 +47,8 @@ #include "en/tc_tun.h" #include "fs_core.h" #include "lib/port_tun.h" +#define CREATE_TRACE_POINTS +#include "diag/en_rep_tracepoint.h" #define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \ max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE) @@ -389,24 +392,17 @@ static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = { .set_pauseparam = mlx5e_uplink_rep_set_pauseparam, }; -static int mlx5e_rep_get_port_parent_id(struct net_device *dev, - struct netdev_phys_item_id *ppid) +static void mlx5e_rep_get_port_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) { - struct mlx5_eswitch *esw; struct mlx5e_priv *priv; u64 parent_id; priv = netdev_priv(dev); - esw = priv->mdev->priv.eswitch; - - if (esw->mode == MLX5_ESWITCH_NONE) - return -EOPNOTSUPP; parent_id = mlx5_query_nic_system_image_guid(priv->mdev); ppid->id_len = sizeof(parent_id); memcpy(ppid->id, &parent_id, sizeof(parent_id)); - - return 0; } static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw, @@ -504,16 +500,18 @@ void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv) mlx5e_sqs2vport_stop(esw, rep); } +static unsigned long mlx5e_rep_ipv6_interval(void) +{ + if (IS_ENABLED(CONFIG_IPV6) && ipv6_stub->nd_tbl) + return NEIGH_VAR(&ipv6_stub->nd_tbl->parms, DELAY_PROBE_TIME); + + return ~0UL; +} + static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv) { -#if IS_ENABLED(CONFIG_IPV6) - unsigned long ipv6_interval = NEIGH_VAR(&nd_tbl.parms, - DELAY_PROBE_TIME); -#else - unsigned long ipv6_interval = ~0UL; -#endif - unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms, - DELAY_PROBE_TIME); + unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME); + unsigned long ipv6_interval = mlx5e_rep_ipv6_interval(); struct net_device *netdev = rpriv->netdev; struct mlx5e_priv *priv = netdev_priv(netdev); @@ -531,47 +529,97 @@ void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv) neigh_update->min_interval); } +static bool mlx5e_rep_neigh_entry_hold(struct mlx5e_neigh_hash_entry *nhe) +{ + return refcount_inc_not_zero(&nhe->refcnt); +} + +static void mlx5e_rep_neigh_entry_remove(struct mlx5e_neigh_hash_entry *nhe); + +static void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe) +{ + if (refcount_dec_and_test(&nhe->refcnt)) { + mlx5e_rep_neigh_entry_remove(nhe); + kfree_rcu(nhe, rcu); + } +} + +static struct mlx5e_neigh_hash_entry * +mlx5e_get_next_nhe(struct mlx5e_rep_priv *rpriv, + struct mlx5e_neigh_hash_entry *nhe) +{ + struct mlx5e_neigh_hash_entry *next = NULL; + + rcu_read_lock(); + + for (next = nhe ? 
+ list_next_or_null_rcu(&rpriv->neigh_update.neigh_list, + &nhe->neigh_list, + struct mlx5e_neigh_hash_entry, + neigh_list) : + list_first_or_null_rcu(&rpriv->neigh_update.neigh_list, + struct mlx5e_neigh_hash_entry, + neigh_list); + next; + next = list_next_or_null_rcu(&rpriv->neigh_update.neigh_list, + &next->neigh_list, + struct mlx5e_neigh_hash_entry, + neigh_list)) + if (mlx5e_rep_neigh_entry_hold(next)) + break; + + rcu_read_unlock(); + + if (nhe) + mlx5e_rep_neigh_entry_release(nhe); + + return next; +} + static void mlx5e_rep_neigh_stats_work(struct work_struct *work) { struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv, neigh_update.neigh_stats_work.work); struct net_device *netdev = rpriv->netdev; struct mlx5e_priv *priv = netdev_priv(netdev); - struct mlx5e_neigh_hash_entry *nhe; + struct mlx5e_neigh_hash_entry *nhe = NULL; rtnl_lock(); if (!list_empty(&rpriv->neigh_update.neigh_list)) mlx5e_rep_queue_neigh_stats_work(priv); - list_for_each_entry(nhe, &rpriv->neigh_update.neigh_list, neigh_list) + while ((nhe = mlx5e_get_next_nhe(rpriv, nhe)) != NULL) mlx5e_tc_update_neigh_used_value(nhe); rtnl_unlock(); } -static void mlx5e_rep_neigh_entry_hold(struct mlx5e_neigh_hash_entry *nhe) -{ - refcount_inc(&nhe->refcnt); -} - -static void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe) -{ - if (refcount_dec_and_test(&nhe->refcnt)) - kfree(nhe); -} - static void mlx5e_rep_update_flows(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e, bool neigh_connected, unsigned char ha[ETH_ALEN]) { struct ethhdr *eth = (struct ethhdr *)e->encap_header; + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + bool encap_connected; + LIST_HEAD(flow_list); ASSERT_RTNL(); + /* wait for encap to be fully initialized */ + wait_for_completion(&e->res_ready); + + mutex_lock(&esw->offloads.encap_tbl_lock); + encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID); + if (e->compl_result || (encap_connected == neigh_connected && + ether_addr_equal(e->h_dest, ha))) + goto unlock; + + mlx5e_take_all_encap_flows(e, &flow_list); + if ((e->flags & MLX5_ENCAP_ENTRY_VALID) && (!neigh_connected || !ether_addr_equal(e->h_dest, ha))) - mlx5e_tc_encap_flows_del(priv, e); + mlx5e_tc_encap_flows_del(priv, e, &flow_list); if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) { ether_addr_copy(e->h_dest, ha); @@ -581,8 +629,11 @@ static void mlx5e_rep_update_flows(struct mlx5e_priv *priv, */ ether_addr_copy(eth->h_source, e->route_dev->dev_addr); - mlx5e_tc_encap_flows_add(priv, e); + mlx5e_tc_encap_flows_add(priv, e, &flow_list); } +unlock: + mutex_unlock(&esw->offloads.encap_tbl_lock); + mlx5e_put_encap_flow_list(priv, &flow_list); } static void mlx5e_rep_neigh_update(struct work_struct *work) @@ -594,7 +645,6 @@ static void mlx5e_rep_neigh_update(struct work_struct *work) unsigned char ha[ETH_ALEN]; struct mlx5e_priv *priv; bool neigh_connected; - bool encap_connected; u8 nud_state, dead; rtnl_lock(); @@ -612,13 +662,15 @@ static void mlx5e_rep_neigh_update(struct work_struct *work) neigh_connected = (nud_state & NUD_VALID) && !dead; + trace_mlx5e_rep_neigh_update(nhe, ha, neigh_connected); + list_for_each_entry(e, &nhe->encap_list, encap_list) { - encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID); - priv = netdev_priv(e->out_dev); + if (!mlx5e_encap_take(e)) + continue; - if (encap_connected != neigh_connected || - !ether_addr_equal(e->h_dest, ha)) - mlx5e_rep_update_flows(priv, e, neigh_connected, ha); + priv = netdev_priv(e->out_dev); + mlx5e_rep_update_flows(priv, 
e, neigh_connected, ha); + mlx5e_encap_put(priv, e); } mlx5e_rep_neigh_entry_release(nhe); rtnl_unlock(); @@ -659,8 +711,8 @@ mlx5e_rep_indr_offload(struct net_device *netdev, struct flow_cls_offload *flower, struct mlx5e_rep_indr_block_priv *indr_priv) { + unsigned long flags = MLX5_TC_FLAG(EGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD); struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev); - int flags = MLX5E_TC_EGRESS | MLX5E_TC_ESW_OFFLOAD; int err = 0; switch (flower->command) { @@ -714,6 +766,7 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev, if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) return -EOPNOTSUPP; + f->unlocked_driver_cb = true; f->driver_block_list = &mlx5e_block_cb_list; switch (f->command) { @@ -722,10 +775,6 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev, if (indr_priv) return -EEXIST; - if (flow_block_cb_is_busy(mlx5e_rep_indr_setup_block_cb, - indr_priv, &mlx5e_block_cb_list)) - return -EBUSY; - indr_priv = kmalloc(sizeof(*indr_priv), GFP_KERNEL); if (!indr_priv) return -ENOMEM; @@ -785,9 +834,9 @@ static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv, { int err; - err = __tc_indr_block_cb_register(netdev, rpriv, - mlx5e_rep_indr_setup_tc_cb, - rpriv); + err = __flow_indr_block_cb_register(netdev, rpriv, + mlx5e_rep_indr_setup_tc_cb, + rpriv); if (err) { struct mlx5e_priv *priv = netdev_priv(rpriv->netdev); @@ -800,8 +849,8 @@ static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv, static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv, struct net_device *netdev) { - __tc_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb, - rpriv); + __flow_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb, + rpriv); } static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb, @@ -827,6 +876,28 @@ static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb, return NOTIFY_OK; } +static void +mlx5e_rep_queue_neigh_update_work(struct mlx5e_priv *priv, + struct mlx5e_neigh_hash_entry *nhe, + struct neighbour *n) +{ + /* Take a reference to ensure the neighbour and mlx5 encap + * entry won't be destroyed until we drop the reference in + * delayed work. + */ + neigh_hold(n); + + /* This assignment is valid as long as the neigh reference + * is taken + */ + nhe->n = n; + + if (!queue_work(priv->wq, &nhe->neigh_update_work)) { + mlx5e_rep_neigh_entry_release(nhe); + neigh_release(n); + } +} + static struct mlx5e_neigh_hash_entry * mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv, struct mlx5e_neigh *m_neigh); @@ -849,7 +920,7 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb, case NETEVENT_NEIGH_UPDATE: n = ptr; #if IS_ENABLED(CONFIG_IPV6) - if (n->tbl != &nd_tbl && n->tbl != &arp_tbl) + if (n->tbl != ipv6_stub->nd_tbl && n->tbl != &arp_tbl) #else if (n->tbl != &arp_tbl) #endif @@ -859,34 +930,13 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb, m_neigh.family = n->ops->family; memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len); - /* We are in atomic context and can't take RTNL mutex, so use - * spin_lock_bh to lookup the neigh table. bh is used since - * netevent can be called from a softirq context.
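mlx5e_rep_queue_neigh_update_work() above is the standard queue-or-undo idiom for handing references to deferred work: the references are taken on the worker's behalf, and if queue_work() reports the item was already pending, this caller drops them again because the pending instance already owns its own. Reduced to a sketch, with take_ref()/drop_ref() standing in for neigh_hold()/neigh_release() and the nhe refcount:

	take_ref(obj);		/* ref to be consumed by the worker */
	if (!queue_work(wq, &obj->work))
		drop_ref(obj);	/* already queued: undo this caller's ref */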
- */ - spin_lock_bh(&neigh_update->encap_lock); + rcu_read_lock(); nhe = mlx5e_rep_neigh_entry_lookup(priv, &m_neigh); - if (!nhe) { - spin_unlock_bh(&neigh_update->encap_lock); + rcu_read_unlock(); + if (!nhe) return NOTIFY_DONE; - } - - /* This assignment is valid as long as the the neigh reference - * is taken - */ - nhe->n = n; - - /* Take a reference to ensure the neighbour and mlx5 encap - * entry won't be destructed until we drop the reference in - * delayed work. - */ - neigh_hold(n); - mlx5e_rep_neigh_entry_hold(nhe); - if (!queue_work(priv->wq, &nhe->neigh_update_work)) { - mlx5e_rep_neigh_entry_release(nhe); - neigh_release(n); - } - spin_unlock_bh(&neigh_update->encap_lock); + mlx5e_rep_queue_neigh_update_work(priv, nhe, n); break; case NETEVENT_DELAY_PROBE_TIME_UPDATE: @@ -897,25 +947,21 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb, * done per device delay prob time parameter. */ #if IS_ENABLED(CONFIG_IPV6) - if (!p->dev || (p->tbl != &nd_tbl && p->tbl != &arp_tbl)) + if (!p->dev || (p->tbl != ipv6_stub->nd_tbl && p->tbl != &arp_tbl)) #else if (!p->dev || p->tbl != &arp_tbl) #endif return NOTIFY_DONE; - /* We are in atomic context and can't take RTNL mutex, - * so use spin_lock_bh to walk the neigh list and look for - * the relevant device. bh is used since netevent can be - * called from a softirq context. - */ - spin_lock_bh(&neigh_update->encap_lock); - list_for_each_entry(nhe, &neigh_update->neigh_list, neigh_list) { + rcu_read_lock(); + list_for_each_entry_rcu(nhe, &neigh_update->neigh_list, + neigh_list) { if (p->dev == nhe->m_neigh.dev) { found = true; break; } } - spin_unlock_bh(&neigh_update->encap_lock); + rcu_read_unlock(); if (!found) return NOTIFY_DONE; @@ -946,7 +992,7 @@ static int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv) return err; INIT_LIST_HEAD(&neigh_update->neigh_list); - spin_lock_init(&neigh_update->encap_lock); + mutex_init(&neigh_update->encap_lock); INIT_DELAYED_WORK(&neigh_update->neigh_stats_work, mlx5e_rep_neigh_stats_work); mlx5e_rep_neigh_update_init_interval(rpriv); @@ -973,6 +1019,7 @@ static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv) cancel_delayed_work_sync(&rpriv->neigh_update.neigh_stats_work); + mutex_destroy(&neigh_update->encap_lock); rhashtable_destroy(&neigh_update->neigh_ht); } @@ -988,28 +1035,27 @@ static int mlx5e_rep_neigh_entry_insert(struct mlx5e_priv *priv, if (err) return err; - list_add(&nhe->neigh_list, &rpriv->neigh_update.neigh_list); + list_add_rcu(&nhe->neigh_list, &rpriv->neigh_update.neigh_list); return err; } -static void mlx5e_rep_neigh_entry_remove(struct mlx5e_priv *priv, - struct mlx5e_neigh_hash_entry *nhe) +static void mlx5e_rep_neigh_entry_remove(struct mlx5e_neigh_hash_entry *nhe) { - struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5e_rep_priv *rpriv = nhe->priv->ppriv; - spin_lock_bh(&rpriv->neigh_update.encap_lock); + mutex_lock(&rpriv->neigh_update.encap_lock); - list_del(&nhe->neigh_list); + list_del_rcu(&nhe->neigh_list); rhashtable_remove_fast(&rpriv->neigh_update.neigh_ht, &nhe->rhash_node, mlx5e_neigh_ht_params); - spin_unlock_bh(&rpriv->neigh_update.encap_lock); + mutex_unlock(&rpriv->neigh_update.encap_lock); } -/* This function must only be called under RTNL lock or under the - * representor's encap_lock in case RTNL mutex can't be held. +/* This function must only be called under the representor's encap_lock or + * inside rcu read lock section. 
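The lookup that follows only returns entries it managed to pin, which is what makes its RCU callers safe against concurrent teardown: refcount_inc_not_zero() fails once release of the entry has begun, so a dying entry is simply treated as absent. The core of the idiom, as a sketch:

	rcu_read_lock();
	e = rhashtable_lookup_fast(&ht, &key, ht_params);
	if (e && !refcount_inc_not_zero(&e->refcnt))
		e = NULL;	/* found, but already being freed */
	rcu_read_unlock();
	/* a non-NULL e is now held and must be released by the caller */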
*/ static struct mlx5e_neigh_hash_entry * mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv, @@ -1017,9 +1063,11 @@ mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv, { struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update; + struct mlx5e_neigh_hash_entry *nhe; - return rhashtable_lookup_fast(&neigh_update->neigh_ht, m_neigh, - mlx5e_neigh_ht_params); + nhe = rhashtable_lookup_fast(&neigh_update->neigh_ht, m_neigh, + mlx5e_neigh_ht_params); + return nhe && mlx5e_rep_neigh_entry_hold(nhe) ? nhe : NULL; } static int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv, @@ -1032,8 +1080,10 @@ static int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv, if (!*nhe) return -ENOMEM; + (*nhe)->priv = priv; memcpy(&(*nhe)->m_neigh, &e->m_neigh, sizeof(e->m_neigh)); INIT_WORK(&(*nhe)->neigh_update_work, mlx5e_rep_neigh_update); + spin_lock_init(&(*nhe)->encap_list_lock); INIT_LIST_HEAD(&(*nhe)->encap_list); refcount_set(&(*nhe)->refcnt, 1); @@ -1047,19 +1097,6 @@ out_free: return err; } -static void mlx5e_rep_neigh_entry_destroy(struct mlx5e_priv *priv, - struct mlx5e_neigh_hash_entry *nhe) -{ - /* The neigh hash entry must be removed from the hash table regardless - * of the reference count value, so it won't be found by the next - * neigh notification call. The neigh hash entry reference count is - * incremented only during creation and neigh notification calls and - * protects from freeing the nhe struct. - */ - mlx5e_rep_neigh_entry_remove(priv, nhe); - mlx5e_rep_neigh_entry_release(nhe); -} - int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e) { @@ -1072,16 +1109,26 @@ int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv, err = mlx5_tun_entropy_refcount_inc(tun_entropy, e->reformat_type); if (err) return err; + + mutex_lock(&rpriv->neigh_update.encap_lock); nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh); if (!nhe) { err = mlx5e_rep_neigh_entry_create(priv, e, &nhe); if (err) { + mutex_unlock(&rpriv->neigh_update.encap_lock); mlx5_tun_entropy_refcount_dec(tun_entropy, e->reformat_type); return err; } } - list_add(&e->encap_list, &nhe->encap_list); + + e->nhe = nhe; + spin_lock(&nhe->encap_list_lock); + list_add_rcu(&e->encap_list, &nhe->encap_list); + spin_unlock(&nhe->encap_list_lock); + + mutex_unlock(&rpriv->neigh_update.encap_lock); + return 0; } @@ -1091,13 +1138,16 @@ void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv, struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv; struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy; - struct mlx5e_neigh_hash_entry *nhe; - list_del(&e->encap_list); - nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh); + if (!e->nhe) + return; + + spin_lock(&e->nhe->encap_list_lock); + list_del_rcu(&e->encap_list); + spin_unlock(&e->nhe->encap_list_lock); - if (list_empty(&nhe->encap_list)) - mlx5e_rep_neigh_entry_destroy(priv, nhe); + mlx5e_rep_neigh_entry_release(e->nhe); + e->nhe = NULL; mlx5_tun_entropy_refcount_dec(tun_entropy, e->reformat_type); } @@ -1160,15 +1210,34 @@ mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv, } } +static +int mlx5e_rep_setup_tc_cls_matchall(struct mlx5e_priv *priv, + struct tc_cls_matchall_offload *ma) +{ + switch (ma->command) { + case TC_CLSMATCHALL_REPLACE: + return mlx5e_tc_configure_matchall(priv, ma); + case TC_CLSMATCHALL_DESTROY: + return mlx5e_tc_delete_matchall(priv, ma); + case TC_CLSMATCHALL_STATS: + 
mlx5e_tc_stats_matchall(priv, ma); + return 0; + default: + return -EOPNOTSUPP; + } +} + static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data, void *cb_priv) { + unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD); struct mlx5e_priv *priv = cb_priv; switch (type) { case TC_SETUP_CLSFLOWER: - return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS | - MLX5E_TC_ESW_OFFLOAD); + return mlx5e_rep_setup_tc_cls_flower(priv, type_data, flags); + case TC_SETUP_CLSMATCHALL: + return mlx5e_rep_setup_tc_cls_matchall(priv, type_data); default: return -EOPNOTSUPP; } @@ -1180,9 +1249,11 @@ static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { struct mlx5e_priv *priv = netdev_priv(dev); + struct flow_block_offload *f = type_data; switch (type) { case TC_SETUP_BLOCK: + f->unlocked_driver_cb = true; return flow_block_cb_setup_simple(type_data, &mlx5e_rep_block_cb_list, mlx5e_rep_setup_tc_cb, @@ -1553,7 +1624,7 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv) { struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_rep_uplink_priv *uplink_priv; - int tc, err; + int err; err = mlx5e_create_tises(priv); if (err) { @@ -1564,6 +1635,7 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv) if (rpriv->rep->vport == MLX5_VPORT_UPLINK) { uplink_priv = &rpriv->uplink_priv; + mutex_init(&uplink_priv->unready_flows_lock); INIT_LIST_HEAD(&uplink_priv->unready_flows); /* init shared tc flow table */ @@ -1588,18 +1660,15 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv) tc_esw_cleanup: mlx5e_tc_esw_cleanup(&uplink_priv->tc_ht); destroy_tises: - for (tc = 0; tc < priv->profile->max_tc; tc++) - mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]); + mlx5e_destroy_tises(priv); return err; } static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv) { struct mlx5e_rep_priv *rpriv = priv->ppriv; - int tc; - for (tc = 0; tc < priv->profile->max_tc; tc++) - mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]); + mlx5e_destroy_tises(priv); if (rpriv->rep->vport == MLX5_VPORT_UPLINK) { /* clean indirect TC block notifications */ @@ -1608,6 +1677,7 @@ static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv) /* delete shared tc flow table */ mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht); + mutex_destroy(&rpriv->uplink_priv.unready_flows_lock); } } @@ -1731,37 +1801,46 @@ is_devlink_port_supported(const struct mlx5_core_dev *dev, mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport); } +static unsigned int +vport_to_devlink_port_index(const struct mlx5_core_dev *dev, u16 vport_num) +{ + return (MLX5_CAP_GEN(dev, vhca_id) << 16) | vport_num; +} + static int register_devlink_port(struct mlx5_core_dev *dev, struct mlx5e_rep_priv *rpriv) { struct devlink *devlink = priv_to_devlink(dev); struct mlx5_eswitch_rep *rep = rpriv->rep; struct netdev_phys_item_id ppid = {}; - int ret; + unsigned int dl_port_index = 0; if (!is_devlink_port_supported(dev, rpriv)) return 0; - ret = mlx5e_rep_get_port_parent_id(rpriv->netdev, &ppid); - if (ret) - return ret; + mlx5e_rep_get_port_parent_id(rpriv->netdev, &ppid); - if (rep->vport == MLX5_VPORT_UPLINK) + if (rep->vport == MLX5_VPORT_UPLINK) { devlink_port_attrs_set(&rpriv->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL, PCI_FUNC(dev->pdev->devfn), false, 0, &ppid.id[0], ppid.id_len); - else if (rep->vport == MLX5_VPORT_PF) + dl_port_index = vport_to_devlink_port_index(dev, rep->vport); + } else if (rep->vport == MLX5_VPORT_PF) { devlink_port_attrs_pci_pf_set(&rpriv->dl_port, &ppid.id[0], 
ppid.id_len, dev->pdev->devfn); - else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport)) + dl_port_index = rep->vport; + } else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch, + rpriv->rep->vport)) { devlink_port_attrs_pci_vf_set(&rpriv->dl_port, &ppid.id[0], ppid.id_len, dev->pdev->devfn, rep->vport - 1); + dl_port_index = vport_to_devlink_port_index(dev, rep->vport); + } - return devlink_port_register(devlink, &rpriv->dl_port, rep->vport); + return devlink_port_register(devlink, &rpriv->dl_port, dl_port_index); } static void unregister_devlink_port(struct mlx5_core_dev *dev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h index c56e6ee4350c..31f83c8adcc9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h @@ -35,6 +35,7 @@ #include <net/ip_tunnels.h> #include <linux/rhashtable.h> +#include <linux/mutex.h> #include "eswitch.h" #include "en.h" #include "lib/port_tun.h" @@ -48,7 +49,7 @@ struct mlx5e_neigh_update_table { */ struct list_head neigh_list; /* protect lookup/remove operations */ - spinlock_t encap_lock; + struct mutex encap_lock; struct notifier_block netevent_nb; struct delayed_work neigh_stats_work; unsigned long min_interval; /* jiffies */ @@ -75,6 +76,8 @@ struct mlx5_rep_uplink_priv { struct mlx5_tun_entropy tun_entropy; + /* protects unready_flows */ + struct mutex unready_flows_lock; struct list_head unready_flows; struct work_struct reoffload_flows_work; }; @@ -86,6 +89,7 @@ struct mlx5e_rep_priv { struct mlx5_flow_handle *vport_rx_rule; struct list_head vport_sqs_list; struct mlx5_rep_uplink_priv uplink_priv; /* valid for uplink rep */ + struct rtnl_link_stats64 prev_vf_vport_stats; struct devlink_port dl_port; }; @@ -107,6 +111,7 @@ struct mlx5e_neigh { struct mlx5e_neigh_hash_entry { struct rhash_head rhash_node; struct mlx5e_neigh m_neigh; + struct mlx5e_priv *priv; /* Save the neigh hash entry in a list on the representor in * addition to the hash table. In order to iterate easily over the @@ -114,6 +119,8 @@ struct mlx5e_neigh_hash_entry { */ struct list_head neigh_list; + /* protects encap list */ + spinlock_t encap_list_lock; /* encap list sharing the same neigh */ struct list_head encap_list; @@ -134,6 +141,8 @@ struct mlx5e_neigh_hash_entry { * 'used' value and avoid neigh deleting by the kernel. 
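
The vport_to_devlink_port_index() helper introduced in this hunk packs the function's vhca_id into the upper 16 bits of the devlink port index and the vport number into the lower 16, keeping indices unique when several PFs register ports on one devlink instance. With illustrative values:

    /* vhca_id = 5, vport = 2 (example values only) */
    unsigned int idx = (5 << 16) | 2;       /* 0x00050002 = 327682 */
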
*/ unsigned long reported_lastuse; + + struct rcu_head rcu; }; enum { @@ -142,6 +151,8 @@ enum { }; struct mlx5e_encap_entry { + /* attached neigh hash entry */ + struct mlx5e_neigh_hash_entry *nhe; /* neigh hash entry list of encaps sharing the same neigh */ struct list_head encap_list; struct mlx5e_neigh m_neigh; @@ -150,7 +161,7 @@ struct mlx5e_encap_entry { */ struct hlist_node encap_hlist; struct list_head flows; - u32 encap_id; + struct mlx5_pkt_reformat *pkt_reformat; const struct ip_tunnel_info *tun_info; unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ @@ -161,6 +172,10 @@ struct mlx5e_encap_entry { u8 flags; char *encap_header; int encap_size; + refcount_t refcnt; + struct completion res_ready; + int compl_result; + struct rcu_head rcu; }; struct mlx5e_rep_sq { @@ -168,7 +183,6 @@ struct mlx5e_rep_sq { struct list_head list; }; -void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev); void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev); void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev); bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index ac6e586d403d..d6a547238de0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -48,6 +48,7 @@ #include "lib/clock.h" #include "en/xdp.h" #include "en/xsk/rx.h" +#include "en/health.h" static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config) { @@ -615,6 +616,8 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq) if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) { netdev_WARN_ONCE(cq->channel->netdev, "Bad OP in ICOSQ CQE: 0x%x\n", get_cqe_opcode(cqe)); + if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) + queue_work(cq->channel->priv->wq, &sq->recover_work); break; } do { @@ -692,8 +695,11 @@ bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq) rq->mpwqe.umr_in_progress += rq->mpwqe.umr_last_bulk; rq->mpwqe.actual_wq_head = head; - /* If XSK Fill Ring doesn't have enough frames, busy poll by - * rescheduling the NAPI poll. + /* If XSK Fill Ring doesn't have enough frames, report the error, so + * that one of the actions can be performed: + * 1. If need_wakeup is used, signal that the application has to kick + * the driver when it refills the Fill Ring. + * 2. Otherwise, busy poll by rescheduling the NAPI poll. */ if (unlikely(alloc_err == -ENOMEM && rq->umem)) return true; @@ -859,13 +865,24 @@ tail_padding_csum(struct sk_buff *skb, int offset, } static void -mlx5e_skb_padding_csum(struct sk_buff *skb, int network_depth, __be16 proto, - struct mlx5e_rq_stats *stats) +mlx5e_skb_csum_fixup(struct sk_buff *skb, int network_depth, __be16 proto, + struct mlx5e_rq_stats *stats) { struct ipv6hdr *ip6; struct iphdr *ip4; int pkt_len; + /* Fixup vlan headers, if any */ + if (network_depth > ETH_HLEN) + /* CQE csum is calculated from the IP header and does + * not cover VLAN headers (if present). This will add + * the checksum manually. 
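
The ICOSQ error path in the en_rx.c hunk above, like the trigger_report() helper added further down in the same file, uses a single-shot scheduling idiom: test_and_set_bit() is an atomic read-modify-write, so the recovery work is queued at most once until the handler clears the bit. A generic sketch, with hypothetical names:

    #include <linux/bitops.h>
    #include <linux/workqueue.h>

    #define OBJ_STATE_RECOVERING 0  /* bit number in obj->state */

    struct obj {
            unsigned long state;
            struct work_struct recover_work;
    };

    static void obj_report_error(struct obj *o, struct workqueue_struct *wq)
    {
            /* First error wins; repeats are no-ops until the
             * recovery handler clears the bit again.
             */
            if (!test_and_set_bit(OBJ_STATE_RECOVERING, &o->state))
                    queue_work(wq, &o->recover_work);
    }
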
+ */ + skb->csum = csum_partial(skb->data + ETH_HLEN, + network_depth - ETH_HLEN, + skb->csum); + + /* Fixup tail padding, if any */ switch (proto) { case htons(ETH_P_IP): ip4 = (struct iphdr *)(skb->data + network_depth); @@ -931,16 +948,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, return; /* CQE csum covers all received bytes */ /* csum might need some fixups ...*/ - if (network_depth > ETH_HLEN) - /* CQE csum is calculated from the IP header and does - * not cover VLAN headers (if present). This will add - * the checksum manually. - */ - skb->csum = csum_partial(skb->data + ETH_HLEN, - network_depth - ETH_HLEN, - skb->csum); - - mlx5e_skb_padding_csum(skb, network_depth, proto, stats); + mlx5e_skb_csum_fixup(skb, network_depth, proto, stats); return; } @@ -1065,11 +1073,6 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, prefetchw(va); /* xdp_frame data area */ prefetch(data); - if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) { - rq->stats->wqe_err++; - return NULL; - } - rcu_read_lock(); consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt, false); rcu_read_unlock(); @@ -1097,11 +1100,6 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, u16 byte_cnt = cqe_bcnt - headlen; struct sk_buff *skb; - if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) { - rq->stats->wqe_err++; - return NULL; - } - /* XDP is not supported in this configuration, as incoming packets * might spread among multiple pages. */ @@ -1135,6 +1133,15 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, return skb; } +static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) +{ + struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe; + + if (cqe_syndrome_needs_recover(err_cqe->syndrome) && + !test_and_set_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state)) + queue_work(rq->channel->priv->wq, &rq->recover_work); +} + void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) { struct mlx5_wq_cyc *wq = &rq->wqe.wq; @@ -1147,6 +1154,12 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) wi = get_frag(rq, ci); cqe_bcnt = be32_to_cpu(cqe->byte_cnt); + if (unlikely(MLX5E_RX_ERR_CQE(cqe))) { + trigger_report(rq, cqe); + rq->stats->wqe_err++; + goto free_wqe; + } + skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe, mlx5e_skb_from_cqe_linear, mlx5e_skb_from_cqe_nonlinear, @@ -1188,6 +1201,11 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) wi = get_frag(rq, ci); cqe_bcnt = be32_to_cpu(cqe->byte_cnt); + if (unlikely(MLX5E_RX_ERR_CQE(cqe))) { + rq->stats->wqe_err++; + goto free_wqe; + } + skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt); if (!skb) { /* probably for XDP */ @@ -1322,7 +1340,8 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) wi->consumed_strides += cstrides; - if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) { + if (unlikely(MLX5E_RX_ERR_CQE(cqe))) { + trigger_report(rq, cqe); rq->stats->wqe_err++; goto mpwrq_cqe_out; } @@ -1498,6 +1517,11 @@ void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) wi = get_frag(rq, ci); cqe_bcnt = be32_to_cpu(cqe->byte_cnt); + if (unlikely(MLX5E_RX_ERR_CQE(cqe))) { + rq->stats->wqe_err++; + goto wq_free_wqe; + } + skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe, mlx5e_skb_from_cqe_linear, mlx5e_skb_from_cqe_nonlinear, @@ -1533,26 +1557,27 @@ void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) wi = get_frag(rq, ci); cqe_bcnt = 
be32_to_cpu(cqe->byte_cnt); + if (unlikely(MLX5E_RX_ERR_CQE(cqe))) { + rq->stats->wqe_err++; + goto wq_free_wqe; + } + skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe, mlx5e_skb_from_cqe_linear, mlx5e_skb_from_cqe_nonlinear, rq, cqe, wi, cqe_bcnt); - if (unlikely(!skb)) { - /* a DROP, save the page-reuse checks */ - mlx5e_free_rx_wqe(rq, wi, true); - goto wq_cyc_pop; - } + if (unlikely(!skb)) /* a DROP, save the page-reuse checks */ + goto wq_free_wqe; + skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb, &cqe_bcnt); - if (unlikely(!skb)) { - mlx5e_free_rx_wqe(rq, wi, true); - goto wq_cyc_pop; - } + if (unlikely(!skb)) + goto wq_free_wqe; mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); napi_gro_receive(rq->cq.napi, skb); +wq_free_wqe: mlx5e_free_rx_wqe(rq, wi, true); -wq_cyc_pop: mlx5_wq_cyc_pop(wq); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 57f9f346d213..ac6fdcda7019 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -74,6 +74,7 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_mpwqe) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_inlnw) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_nops) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) }, @@ -90,6 +91,7 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_mpwqe) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_inlnw) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_nops) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) }, @@ -107,6 +109,7 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) }, @@ -200,6 +203,7 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->rx_xdp_tx_xmit += xdpsq_stats->xmit; s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe; s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw; + s->rx_xdp_tx_nops += xdpsq_stats->nops; s->rx_xdp_tx_full += xdpsq_stats->full; s->rx_xdp_tx_err += xdpsq_stats->err; s->rx_xdp_tx_cqe += xdpsq_stats->cqes; @@ -217,6 +221,7 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->rx_cache_waive += rq_stats->cache_waive; s->rx_congst_umr += rq_stats->congst_umr; s->rx_arfs_err += rq_stats->arfs_err; + s->rx_recover += rq_stats->recover; s->ch_events += ch_stats->events; s->ch_poll += ch_stats->poll; s->ch_arm += ch_stats->arm; @@ -227,6 +232,7 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->tx_xdp_xmit += xdpsq_red_stats->xmit; s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe; s->tx_xdp_inlnw += xdpsq_red_stats->inlnw; + s->tx_xdp_nops += xdpsq_red_stats->nops; s->tx_xdp_full += 
xdpsq_red_stats->full; s->tx_xdp_err += xdpsq_red_stats->err; s->tx_xdp_cqes += xdpsq_red_stats->cqes; @@ -363,17 +369,27 @@ static void mlx5e_grp_q_update_stats(struct mlx5e_priv *priv) } #define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c) -static const struct counter_desc vnic_env_stats_desc[] = { +static const struct counter_desc vnic_env_stats_steer_desc[] = { { "rx_steer_missed_packets", VNIC_ENV_OFF(vport_env.nic_receive_steering_discard) }, }; -#define NUM_VNIC_ENV_COUNTERS ARRAY_SIZE(vnic_env_stats_desc) +static const struct counter_desc vnic_env_stats_dev_oob_desc[] = { + { "dev_internal_queue_oob", + VNIC_ENV_OFF(vport_env.internal_rq_out_of_buffer) }, +}; + +#define NUM_VNIC_ENV_STEER_COUNTERS(dev) \ + (MLX5_CAP_GEN(dev, nic_receive_steering_discard) ? \ + ARRAY_SIZE(vnic_env_stats_steer_desc) : 0) +#define NUM_VNIC_ENV_DEV_OOB_COUNTERS(dev) \ + (MLX5_CAP_GEN(dev, vnic_env_int_rq_oob) ? \ + ARRAY_SIZE(vnic_env_stats_dev_oob_desc) : 0) static int mlx5e_grp_vnic_env_get_num_stats(struct mlx5e_priv *priv) { - return MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard) ? - NUM_VNIC_ENV_COUNTERS : 0; + return NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev) + + NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); } static int mlx5e_grp_vnic_env_fill_strings(struct mlx5e_priv *priv, u8 *data, @@ -381,12 +397,13 @@ static int mlx5e_grp_vnic_env_fill_strings(struct mlx5e_priv *priv, u8 *data, { int i; - if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard)) - return idx; + for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + vnic_env_stats_steer_desc[i].format); - for (i = 0; i < NUM_VNIC_ENV_COUNTERS; i++) + for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++) strcpy(data + (idx++) * ETH_GSTRING_LEN, - vnic_env_stats_desc[i].format); + vnic_env_stats_dev_oob_desc[i].format); return idx; } @@ -395,12 +412,13 @@ static int mlx5e_grp_vnic_env_fill_stats(struct mlx5e_priv *priv, u64 *data, { int i; - if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard)) - return idx; - - for (i = 0; i < NUM_VNIC_ENV_COUNTERS; i++) + for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++) data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out, - vnic_env_stats_desc, i); + vnic_env_stats_steer_desc, i); + + for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++) + data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out, + vnic_env_stats_dev_oob_desc, i); return idx; } @@ -963,6 +981,147 @@ static void mlx5e_grp_pcie_update_stats(struct mlx5e_priv *priv) mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0); } +#define PPORT_PER_TC_PRIO_OFF(c) \ + MLX5_BYTE_OFF(ppcnt_reg, \ + counter_set.eth_per_tc_prio_grp_data_layout.c##_high) + +static const struct counter_desc pport_per_tc_prio_stats_desc[] = { + { "rx_prio%d_buf_discard", PPORT_PER_TC_PRIO_OFF(no_buffer_discard_uc) }, +}; + +#define NUM_PPORT_PER_TC_PRIO_COUNTERS ARRAY_SIZE(pport_per_tc_prio_stats_desc) + +#define PPORT_PER_TC_CONGEST_PRIO_OFF(c) \ + MLX5_BYTE_OFF(ppcnt_reg, \ + counter_set.eth_per_tc_congest_prio_grp_data_layout.c##_high) + +static const struct counter_desc pport_per_tc_congest_prio_stats_desc[] = { + { "rx_prio%d_cong_discard", PPORT_PER_TC_CONGEST_PRIO_OFF(wred_discard) }, + { "rx_prio%d_marked", PPORT_PER_TC_CONGEST_PRIO_OFF(ecn_marked_tc) }, +}; + +#define NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS \ + ARRAY_SIZE(pport_per_tc_congest_prio_stats_desc) + +static int mlx5e_grp_per_tc_prio_get_num_stats(struct 
mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; + + if (!MLX5_CAP_GEN(mdev, sbcam_reg)) + return 0; + + return NUM_PPORT_PER_TC_PRIO_COUNTERS * NUM_PPORT_PRIO; +} + +static int mlx5e_grp_per_port_buffer_congest_fill_strings(struct mlx5e_priv *priv, + u8 *data, int idx) +{ + struct mlx5_core_dev *mdev = priv->mdev; + int i, prio; + + if (!MLX5_CAP_GEN(mdev, sbcam_reg)) + return idx; + + for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { + for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++) + sprintf(data + (idx++) * ETH_GSTRING_LEN, + pport_per_tc_prio_stats_desc[i].format, prio); + for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++) + sprintf(data + (idx++) * ETH_GSTRING_LEN, + pport_per_tc_congest_prio_stats_desc[i].format, prio); + } + + return idx; +} + +static int mlx5e_grp_per_port_buffer_congest_fill_stats(struct mlx5e_priv *priv, + u64 *data, int idx) +{ + struct mlx5e_pport_stats *pport = &priv->stats.pport; + struct mlx5_core_dev *mdev = priv->mdev; + int i, prio; + + if (!MLX5_CAP_GEN(mdev, sbcam_reg)) + return idx; + + for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { + for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++) + data[idx++] = + MLX5E_READ_CTR64_BE(&pport->per_tc_prio_counters[prio], + pport_per_tc_prio_stats_desc, i); + for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS ; i++) + data[idx++] = + MLX5E_READ_CTR64_BE(&pport->per_tc_congest_prio_counters[prio], + pport_per_tc_congest_prio_stats_desc, i); + } + + return idx; +} + +static void mlx5e_grp_per_tc_prio_update_stats(struct mlx5e_priv *priv) +{ + struct mlx5e_pport_stats *pstats = &priv->stats.pport; + struct mlx5_core_dev *mdev = priv->mdev; + u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {}; + int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); + void *out; + int prio; + + if (!MLX5_CAP_GEN(mdev, sbcam_reg)) + return; + + MLX5_SET(ppcnt_reg, in, pnat, 2); + MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP); + for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { + out = pstats->per_tc_prio_counters[prio]; + MLX5_SET(ppcnt_reg, in, prio_tc, prio); + mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); + } +} + +static int mlx5e_grp_per_tc_congest_prio_get_num_stats(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; + + if (!MLX5_CAP_GEN(mdev, sbcam_reg)) + return 0; + + return NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS * NUM_PPORT_PRIO; +} + +static void mlx5e_grp_per_tc_congest_prio_update_stats(struct mlx5e_priv *priv) +{ + struct mlx5e_pport_stats *pstats = &priv->stats.pport; + struct mlx5_core_dev *mdev = priv->mdev; + u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {}; + int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); + void *out; + int prio; + + if (!MLX5_CAP_GEN(mdev, sbcam_reg)) + return; + + MLX5_SET(ppcnt_reg, in, pnat, 2); + MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP); + for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { + out = pstats->per_tc_congest_prio_counters[prio]; + MLX5_SET(ppcnt_reg, in, prio_tc, prio); + mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); + } +} + +static int mlx5e_grp_per_port_buffer_congest_get_num_stats(struct mlx5e_priv *priv) +{ + return mlx5e_grp_per_tc_prio_get_num_stats(priv) + + mlx5e_grp_per_tc_congest_prio_get_num_stats(priv); +} + +static void mlx5e_grp_per_port_buffer_congest_update_stats(struct mlx5e_priv *priv) +{ + mlx5e_grp_per_tc_prio_update_stats(priv); + mlx5e_grp_per_tc_congest_prio_update_stats(priv); +} + #define PPORT_PER_PRIO_OFF(c) \ MLX5_BYTE_OFF(ppcnt_reg, \ 
counter_set.eth_per_prio_grp_data_layout.c##_high) @@ -1294,6 +1453,7 @@ static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) }, }; static const struct counter_desc sq_stats_desc[] = { @@ -1331,6 +1491,7 @@ static const struct counter_desc rq_xdpsq_stats_desc[] = { { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) }, { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) }, { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) }, + { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) }, { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) }, { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) }, { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) }, @@ -1340,6 +1501,7 @@ static const struct counter_desc xdpsq_stats_desc[] = { { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) }, { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) }, { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) }, + { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) }, { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) }, { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) }, { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) }, @@ -1589,7 +1751,13 @@ const struct mlx5e_stats_grp mlx5e_stats_grps[] = { .get_num_stats = mlx5e_grp_channels_get_num_stats, .fill_strings = mlx5e_grp_channels_fill_strings, .fill_stats = mlx5e_grp_channels_fill_stats, - } + }, + { + .get_num_stats = mlx5e_grp_per_port_buffer_congest_get_num_stats, + .fill_strings = mlx5e_grp_per_port_buffer_congest_fill_strings, + .fill_stats = mlx5e_grp_per_port_buffer_congest_fill_stats, + .update_stats = mlx5e_grp_per_port_buffer_congest_update_stats, + }, }; const int mlx5e_num_stats_grps = ARRAY_SIZE(mlx5e_stats_grps); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 76ac111e14d0..79f261bf86ac 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -81,6 +81,7 @@ struct mlx5e_sw_stats { u64 rx_xdp_tx_xmit; u64 rx_xdp_tx_mpwqe; u64 rx_xdp_tx_inlnw; + u64 rx_xdp_tx_nops; u64 rx_xdp_tx_full; u64 rx_xdp_tx_err; u64 rx_xdp_tx_cqe; @@ -97,6 +98,7 @@ struct mlx5e_sw_stats { u64 tx_xdp_xmit; u64 tx_xdp_mpwqe; u64 tx_xdp_inlnw; + u64 tx_xdp_nops; u64 tx_xdp_full; u64 tx_xdp_err; u64 tx_xdp_cqes; @@ -114,6 +116,7 @@ struct mlx5e_sw_stats { u64 rx_cache_waive; u64 rx_congst_umr; u64 rx_arfs_err; + u64 rx_recover; u64 ch_events; u64 ch_poll; u64 ch_arm; @@ -204,6 +207,8 @@ struct mlx5e_pport_stats { __be64 phy_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; __be64 phy_statistical_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; __be64 eth_ext_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; + __be64 per_tc_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)]; + __be64 per_tc_congest_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)]; }; #define PCIE_PERF_GET(pcie_stats, c) \ @@ -247,6 +252,7 @@ struct mlx5e_rq_stats { u64 cache_waive; u64 congst_umr; u64 arfs_err; + u64 recover; }; struct mlx5e_sq_stats { @@ -288,6 +294,7 @@ struct mlx5e_xdpsq_stats { u64 xmit; u64 mpwqe; u64 inlnw; + u64 nops; u64 full; u64 err; /* dirtied @completion */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 
b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 00b2d4a86159..da7555fdb4d5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -38,6 +38,8 @@ #include <linux/mlx5/fs.h> #include <linux/mlx5/device.h> #include <linux/rhashtable.h> +#include <linux/refcount.h> +#include <linux/completion.h> #include <net/tc_act/tc_mirred.h> #include <net/tc_act/tc_vlan.h> #include <net/tc_act/tc_tunnel_key.h> @@ -54,30 +56,32 @@ #include "en/tc_tun.h" #include "lib/devcom.h" #include "lib/geneve.h" +#include "diag/en_tc_tracepoint.h" struct mlx5_nic_flow_attr { u32 action; u32 flow_tag; - u32 mod_hdr_id; + struct mlx5_modify_hdr *modify_hdr; u32 hairpin_tirn; u8 match_level; struct mlx5_flow_table *hairpin_ft; struct mlx5_fc *counter; }; -#define MLX5E_TC_FLOW_BASE (MLX5E_TC_LAST_EXPORTED_BIT + 1) +#define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1) enum { - MLX5E_TC_FLOW_INGRESS = MLX5E_TC_INGRESS, - MLX5E_TC_FLOW_EGRESS = MLX5E_TC_EGRESS, - MLX5E_TC_FLOW_ESWITCH = MLX5E_TC_ESW_OFFLOAD, - MLX5E_TC_FLOW_NIC = MLX5E_TC_NIC_OFFLOAD, - MLX5E_TC_FLOW_OFFLOADED = BIT(MLX5E_TC_FLOW_BASE), - MLX5E_TC_FLOW_HAIRPIN = BIT(MLX5E_TC_FLOW_BASE + 1), - MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(MLX5E_TC_FLOW_BASE + 2), - MLX5E_TC_FLOW_SLOW = BIT(MLX5E_TC_FLOW_BASE + 3), - MLX5E_TC_FLOW_DUP = BIT(MLX5E_TC_FLOW_BASE + 4), - MLX5E_TC_FLOW_NOT_READY = BIT(MLX5E_TC_FLOW_BASE + 5), + MLX5E_TC_FLOW_FLAG_INGRESS = MLX5E_TC_FLAG_INGRESS_BIT, + MLX5E_TC_FLOW_FLAG_EGRESS = MLX5E_TC_FLAG_EGRESS_BIT, + MLX5E_TC_FLOW_FLAG_ESWITCH = MLX5E_TC_FLAG_ESW_OFFLOAD_BIT, + MLX5E_TC_FLOW_FLAG_NIC = MLX5E_TC_FLAG_NIC_OFFLOAD_BIT, + MLX5E_TC_FLOW_FLAG_OFFLOADED = MLX5E_TC_FLOW_BASE, + MLX5E_TC_FLOW_FLAG_HAIRPIN = MLX5E_TC_FLOW_BASE + 1, + MLX5E_TC_FLOW_FLAG_HAIRPIN_RSS = MLX5E_TC_FLOW_BASE + 2, + MLX5E_TC_FLOW_FLAG_SLOW = MLX5E_TC_FLOW_BASE + 3, + MLX5E_TC_FLOW_FLAG_DUP = MLX5E_TC_FLOW_BASE + 4, + MLX5E_TC_FLOW_FLAG_NOT_READY = MLX5E_TC_FLOW_BASE + 5, + MLX5E_TC_FLOW_FLAG_DELETED = MLX5E_TC_FLOW_BASE + 6, }; #define MLX5E_TC_MAX_SPLITS 1 @@ -100,6 +104,7 @@ enum { * container_of(helper item, containing struct type, helper field[index]) */ struct encap_flow_item { + struct mlx5e_encap_entry *e; /* attached encap instance */ struct list_head list; int index; }; @@ -108,7 +113,7 @@ struct mlx5e_tc_flow { struct rhash_head node; struct mlx5e_priv *priv; u64 cookie; - u16 flags; + unsigned long flags; struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1]; /* Flow can be associated with multiple encap IDs. 
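
The flag rework in this hunk is what unlocks most of the concurrency changes that follow: flow state stops being a u16 of masks updated under a lock and becomes bit numbers in an unsigned long, so individual flags can be flipped atomically with set_bit()/clear_bit()/test_bit() from concurrent contexts. Reduced to its core, with simplified names:

    #include <linux/bitops.h>

    enum {
            FLOW_BIT_OFFLOADED,     /* bit numbers, not masks */
            FLOW_BIT_SLOW,
    };

    struct flow {
            unsigned long flags;
    };

    static void flow_mark_offloaded(struct flow *f)
    {
            set_bit(FLOW_BIT_OFFLOADED, &f->flags); /* atomic RMW, no lock */
    }

    static bool flow_is_offloaded(const struct flow *f)
    {
            return test_bit(FLOW_BIT_OFFLOADED, &f->flags);
    }
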
* The number of encaps is bounded by the number of supported @@ -116,10 +121,17 @@ struct mlx5e_tc_flow { */ struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS]; struct mlx5e_tc_flow *peer_flow; + struct mlx5e_mod_hdr_entry *mh; /* attached mod header instance */ struct list_head mod_hdr; /* flows sharing the same mod hdr ID */ + struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */ struct list_head hairpin; /* flows sharing the same hairpin */ struct list_head peer; /* flows with peer flow */ struct list_head unready; /* flows not ready to be offloaded (e.g due to missing route) */ + int tmp_efi_index; + struct list_head tmp_list; /* temporary flow list used by neigh update */ + refcount_t refcnt; + struct rcu_head rcu_head; + struct completion init_done; union { struct mlx5_esw_flow_attr esw_attr[0]; struct mlx5_nic_flow_attr nic_attr[0]; @@ -157,12 +169,20 @@ struct mlx5e_hairpin_entry { /* a node of a hash table which keeps all the hairpin entries */ struct hlist_node hairpin_hlist; + /* protects flows list */ + spinlock_t flows_lock; /* flows sharing the same hairpin */ struct list_head flows; + /* hpe's that were not fully initialized when dead peer update event + * function traversed them. + */ + struct list_head dead_peer_wait_list; u16 peer_vhca_id; u8 prio; struct mlx5e_hairpin *hp; + refcount_t refcnt; + struct completion res_ready; }; struct mod_hdr_key { @@ -174,16 +194,93 @@ struct mlx5e_mod_hdr_entry { /* a node of a hash table which keeps all the mod_hdr entries */ struct hlist_node mod_hdr_hlist; + /* protects flows list */ + spinlock_t flows_lock; /* flows sharing the same mod_hdr entry */ struct list_head flows; struct mod_hdr_key key; - u32 mod_hdr_id; + struct mlx5_modify_hdr *modify_hdr; + + refcount_t refcnt; + struct completion res_ready; + int compl_result; }; #define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto) +static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow); + +static struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow) +{ + if (!flow || !refcount_inc_not_zero(&flow->refcnt)) + return ERR_PTR(-EINVAL); + return flow; +} + +static void mlx5e_flow_put(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow) +{ + if (refcount_dec_and_test(&flow->refcnt)) { + mlx5e_tc_del_flow(priv, flow); + kfree_rcu(flow, rcu_head); + } +} + +static void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag) +{ + /* Complete all memory stores before setting bit. */ + smp_mb__before_atomic(); + set_bit(flag, &flow->flags); +} + +#define flow_flag_set(flow, flag) __flow_flag_set(flow, MLX5E_TC_FLOW_FLAG_##flag) + +static bool __flow_flag_test_and_set(struct mlx5e_tc_flow *flow, + unsigned long flag) +{ + /* test_and_set_bit() provides all necessary barriers */ + return test_and_set_bit(flag, &flow->flags); +} + +#define flow_flag_test_and_set(flow, flag) \ + __flow_flag_test_and_set(flow, \ + MLX5E_TC_FLOW_FLAG_##flag) + +static void __flow_flag_clear(struct mlx5e_tc_flow *flow, unsigned long flag) +{ + /* Complete all memory stores before clearing bit. */ + smp_mb__before_atomic(); + clear_bit(flag, &flow->flags); +} + +#define flow_flag_clear(flow, flag) __flow_flag_clear(flow, \ + MLX5E_TC_FLOW_FLAG_##flag) + +static bool __flow_flag_test(struct mlx5e_tc_flow *flow, unsigned long flag) +{ + bool ret = test_bit(flag, &flow->flags); + + /* Read fields of flow structure only after checking flags. 
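
The barrier mentioned in the comment above pairs with the smp_mb__before_atomic() in __flow_flag_set(): writes to flow fields are ordered before the flag store, and the flag load is ordered before later field reads. The companion mlx5e_flow_get()/mlx5e_flow_put() helpers are the standard refcount-plus-RCU lifetime idiom; the release side, sketched with hypothetical names:

    #include <linux/rcupdate.h>
    #include <linux/refcount.h>
    #include <linux/slab.h>

    struct flow {
            refcount_t refcnt;
            struct rcu_head rcu_head;
    };

    static void flow_put(struct flow *f)
    {
            if (refcount_dec_and_test(&f->refcnt)) {
                    /* hardware teardown would go here */
                    kfree_rcu(f, rcu_head); /* free after a grace period */
            }
    }

kfree_rcu() keeps the memory valid for concurrent RCU readers that found the flow in a list but whose refcount_inc_not_zero() is about to fail.
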
*/ + smp_mb__after_atomic(); + return ret; +} + +#define flow_flag_test(flow, flag) __flow_flag_test(flow, \ + MLX5E_TC_FLOW_FLAG_##flag) + +static bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow) +{ + return flow_flag_test(flow, ESWITCH); +} + +static bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow) +{ + return flow_flag_test(flow, OFFLOADED); +} + static inline u32 hash_mod_hdr_info(struct mod_hdr_key *key) { return jhash(key->actions, @@ -199,15 +296,62 @@ static inline int cmp_mod_hdr_info(struct mod_hdr_key *a, return memcmp(a->actions, b->actions, a->num_actions * MLX5_MH_ACT_SZ); } +static struct mod_hdr_tbl * +get_mod_hdr_table(struct mlx5e_priv *priv, int namespace) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + + return namespace == MLX5_FLOW_NAMESPACE_FDB ? &esw->offloads.mod_hdr : + &priv->fs.tc.mod_hdr; +} + +static struct mlx5e_mod_hdr_entry * +mlx5e_mod_hdr_get(struct mod_hdr_tbl *tbl, struct mod_hdr_key *key, u32 hash_key) +{ + struct mlx5e_mod_hdr_entry *mh, *found = NULL; + + hash_for_each_possible(tbl->hlist, mh, mod_hdr_hlist, hash_key) { + if (!cmp_mod_hdr_info(&mh->key, key)) { + refcount_inc(&mh->refcnt); + found = mh; + break; + } + } + + return found; +} + +static void mlx5e_mod_hdr_put(struct mlx5e_priv *priv, + struct mlx5e_mod_hdr_entry *mh, + int namespace) +{ + struct mod_hdr_tbl *tbl = get_mod_hdr_table(priv, namespace); + + if (!refcount_dec_and_mutex_lock(&mh->refcnt, &tbl->lock)) + return; + hash_del(&mh->mod_hdr_hlist); + mutex_unlock(&tbl->lock); + + WARN_ON(!list_empty(&mh->flows)); + if (mh->compl_result > 0) + mlx5_modify_header_dealloc(priv->mdev, mh->modify_hdr); + + kfree(mh); +} + +static int get_flow_name_space(struct mlx5e_tc_flow *flow) +{ + return mlx5e_is_eswitch_flow(flow) ? 
+ MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL; +} static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow, struct mlx5e_tc_flow_parse_attr *parse_attr) { - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; int num_actions, actions_size, namespace, err; struct mlx5e_mod_hdr_entry *mh; + struct mod_hdr_tbl *tbl; struct mod_hdr_key key; - bool found = false; u32 hash_key; num_actions = parse_attr->num_mod_hdr_actions; @@ -218,80 +362,82 @@ static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv, hash_key = hash_mod_hdr_info(&key); - if (flow->flags & MLX5E_TC_FLOW_ESWITCH) { - namespace = MLX5_FLOW_NAMESPACE_FDB; - hash_for_each_possible(esw->offloads.mod_hdr_tbl, mh, - mod_hdr_hlist, hash_key) { - if (!cmp_mod_hdr_info(&mh->key, &key)) { - found = true; - break; - } - } - } else { - namespace = MLX5_FLOW_NAMESPACE_KERNEL; - hash_for_each_possible(priv->fs.tc.mod_hdr_tbl, mh, - mod_hdr_hlist, hash_key) { - if (!cmp_mod_hdr_info(&mh->key, &key)) { - found = true; - break; - } - } - } + namespace = get_flow_name_space(flow); + tbl = get_mod_hdr_table(priv, namespace); + + mutex_lock(&tbl->lock); + mh = mlx5e_mod_hdr_get(tbl, &key, hash_key); + if (mh) { + mutex_unlock(&tbl->lock); + wait_for_completion(&mh->res_ready); - if (found) + if (mh->compl_result < 0) { + err = -EREMOTEIO; + goto attach_header_err; + } goto attach_flow; + } mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL); - if (!mh) + if (!mh) { + mutex_unlock(&tbl->lock); return -ENOMEM; + } mh->key.actions = (void *)mh + sizeof(*mh); memcpy(mh->key.actions, key.actions, actions_size); mh->key.num_actions = num_actions; + spin_lock_init(&mh->flows_lock); INIT_LIST_HEAD(&mh->flows); - - err = mlx5_modify_header_alloc(priv->mdev, namespace, - mh->key.num_actions, - mh->key.actions, - &mh->mod_hdr_id); - if (err) - goto out_err; - - if (flow->flags & MLX5E_TC_FLOW_ESWITCH) - hash_add(esw->offloads.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key); - else - hash_add(priv->fs.tc.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key); + refcount_set(&mh->refcnt, 1); + init_completion(&mh->res_ready); + + hash_add(tbl->hlist, &mh->mod_hdr_hlist, hash_key); + mutex_unlock(&tbl->lock); + + mh->modify_hdr = mlx5_modify_header_alloc(priv->mdev, namespace, + mh->key.num_actions, + mh->key.actions); + if (IS_ERR(mh->modify_hdr)) { + err = PTR_ERR(mh->modify_hdr); + mh->compl_result = err; + goto alloc_header_err; + } + mh->compl_result = 1; + complete_all(&mh->res_ready); attach_flow: + flow->mh = mh; + spin_lock(&mh->flows_lock); list_add(&flow->mod_hdr, &mh->flows); - if (flow->flags & MLX5E_TC_FLOW_ESWITCH) - flow->esw_attr->mod_hdr_id = mh->mod_hdr_id; + spin_unlock(&mh->flows_lock); + if (mlx5e_is_eswitch_flow(flow)) + flow->esw_attr->modify_hdr = mh->modify_hdr; else - flow->nic_attr->mod_hdr_id = mh->mod_hdr_id; + flow->nic_attr->modify_hdr = mh->modify_hdr; return 0; -out_err: - kfree(mh); +alloc_header_err: + complete_all(&mh->res_ready); +attach_header_err: + mlx5e_mod_hdr_put(priv, mh, namespace); return err; } static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow) { - struct list_head *next = flow->mod_hdr.next; + /* flow wasn't fully initialized */ + if (!flow->mh) + return; + spin_lock(&flow->mh->flows_lock); list_del(&flow->mod_hdr); + spin_unlock(&flow->mh->flows_lock); - if (list_empty(next)) { - struct mlx5e_mod_hdr_entry *mh; - - mh = list_entry(next, struct mlx5e_mod_hdr_entry, flows); - - mlx5_modify_header_dealloc(priv->mdev, mh->mod_hdr_id); - 
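
The rewritten attach path above is the clearest instance of a pattern this patch applies to mod_hdr, hairpin and encap entries alike: publish a placeholder into the table under the lock, drop the lock, perform the slow firmware allocation, then wake waiters, who judge the outcome from compl_result. Condensed, with hypothetical names:

    #include <linux/completion.h>
    #include <linux/errno.h>

    struct cached_res {
            struct completion ready;
            int compl_result;       /* >0 on success, <0 is a -errno */
    };

    /* Creator: has already inserted 'r' into the table under the
     * table mutex, which it has since released.
     */
    static void creator_finish(struct cached_res *r, int alloc_status)
    {
            r->compl_result = alloc_status ? alloc_status : 1;
            complete_all(&r->ready);
    }

    /* Any thread that found the existing entry in the table: */
    static int user_wait(struct cached_res *r)
    {
            wait_for_completion(&r->ready);
            return r->compl_result < 0 ? -EREMOTEIO : 0;
    }

Publishing before initializing is what lets the hash table be consulted without holding the lock across a firmware command.
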
hash_del(&mh->mod_hdr_hlist); - kfree(mh); - } + mlx5e_mod_hdr_put(priv, flow->mh, get_flow_name_space(flow)); + flow->mh = NULL; } static @@ -555,13 +701,35 @@ static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv, hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe, hairpin_hlist, hash_key) { - if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) + if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) { + refcount_inc(&hpe->refcnt); return hpe; + } } return NULL; } +static void mlx5e_hairpin_put(struct mlx5e_priv *priv, + struct mlx5e_hairpin_entry *hpe) +{ + /* no more hairpin flows for us, release the hairpin pair */ + if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs.tc.hairpin_tbl_lock)) + return; + hash_del(&hpe->hairpin_hlist); + mutex_unlock(&priv->fs.tc.hairpin_tbl_lock); + + if (!IS_ERR_OR_NULL(hpe->hp)) { + netdev_dbg(priv->netdev, "del hairpin: peer %s\n", + dev_name(hpe->hp->pair->peer_mdev->device)); + + mlx5e_hairpin_destroy(hpe->hp); + } + + WARN_ON(!list_empty(&hpe->flows)); + kfree(hpe); +} + #define UNKNOWN_MATCH_PRIO 8 static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv, @@ -627,17 +795,37 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv, extack); if (err) return err; + + mutex_lock(&priv->fs.tc.hairpin_tbl_lock); hpe = mlx5e_hairpin_get(priv, peer_id, match_prio); - if (hpe) + if (hpe) { + mutex_unlock(&priv->fs.tc.hairpin_tbl_lock); + wait_for_completion(&hpe->res_ready); + + if (IS_ERR(hpe->hp)) { + err = -EREMOTEIO; + goto out_err; + } goto attach_flow; + } hpe = kzalloc(sizeof(*hpe), GFP_KERNEL); - if (!hpe) + if (!hpe) { + mutex_unlock(&priv->fs.tc.hairpin_tbl_lock); return -ENOMEM; + } + spin_lock_init(&hpe->flows_lock); INIT_LIST_HEAD(&hpe->flows); + INIT_LIST_HEAD(&hpe->dead_peer_wait_list); hpe->peer_vhca_id = peer_id; hpe->prio = match_prio; + refcount_set(&hpe->refcnt, 1); + init_completion(&hpe->res_ready); + + hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist, + hash_hairpin_info(peer_id, match_prio)); + mutex_unlock(&priv->fs.tc.hairpin_tbl_lock); params.log_data_size = 15; params.log_data_size = min_t(u8, params.log_data_size, @@ -659,9 +847,11 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv, params.num_channels = link_speed64; hp = mlx5e_hairpin_create(priv, ¶ms, peer_ifindex); + hpe->hp = hp; + complete_all(&hpe->res_ready); if (IS_ERR(hp)) { err = PTR_ERR(hp); - goto create_hairpin_err; + goto out_err; } netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n", @@ -669,46 +859,39 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv, dev_name(hp->pair->peer_mdev->device), hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets); - hpe->hp = hp; - hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist, - hash_hairpin_info(peer_id, match_prio)); - attach_flow: if (hpe->hp->num_channels > 1) { - flow->flags |= MLX5E_TC_FLOW_HAIRPIN_RSS; + flow_flag_set(flow, HAIRPIN_RSS); flow->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t; } else { flow->nic_attr->hairpin_tirn = hpe->hp->tirn; } + + flow->hpe = hpe; + spin_lock(&hpe->flows_lock); list_add(&flow->hairpin, &hpe->flows); + spin_unlock(&hpe->flows_lock); return 0; -create_hairpin_err: - kfree(hpe); +out_err: + mlx5e_hairpin_put(priv, hpe); return err; } static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow) { - struct list_head *next = flow->hairpin.next; + /* flow wasn't fully initialized */ + if (!flow->hpe) + return; + 
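
mlx5e_hairpin_put() above, like mlx5e_mod_hdr_put() and the mlx5e_encap_put() added later, relies on refcount_dec_and_mutex_lock(): the mutex is taken only when the count actually drops to zero, so ordinary puts stay lock-free while unhashing remains atomic with the final reference drop. A stripped-down sketch:

    #include <linux/hashtable.h>
    #include <linux/mutex.h>
    #include <linux/refcount.h>
    #include <linux/slab.h>

    static DEFINE_MUTEX(table_lock);

    struct res {
            refcount_t refcnt;
            struct hlist_node hnode;        /* hashtable membership */
    };

    static void res_put(struct res *r)
    {
            /* Returns true, with table_lock held, only if refcnt hit 0. */
            if (!refcount_dec_and_mutex_lock(&r->refcnt, &table_lock))
                    return;
            hash_del(&r->hnode);
            mutex_unlock(&table_lock);
            kfree(r);
    }
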
spin_lock(&flow->hpe->flows_lock); list_del(&flow->hairpin); + spin_unlock(&flow->hpe->flows_lock); - /* no more hairpin flows for us, release the hairpin pair */ - if (list_empty(next)) { - struct mlx5e_hairpin_entry *hpe; - - hpe = list_entry(next, struct mlx5e_hairpin_entry, flows); - - netdev_dbg(priv->netdev, "del hairpin: peer %s\n", - dev_name(hpe->hp->pair->peer_mdev->device)); - - mlx5e_hairpin_destroy(hpe->hp); - hash_del(&hpe->hairpin_hlist); - kfree(hpe); - } + mlx5e_hairpin_put(priv, flow->hpe); + flow->hpe = NULL; } static int @@ -723,22 +906,20 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, struct mlx5_flow_destination dest[2] = {}; struct mlx5_flow_act flow_act = { .action = attr->action, - .reformat_id = 0, .flags = FLOW_ACT_NO_APPEND, }; struct mlx5_fc *counter = NULL; - bool table_created = false; int err, dest_ix = 0; flow_context->flags |= FLOW_CONTEXT_HAS_TAG; flow_context->flow_tag = attr->flow_tag; - if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) { + if (flow_flag_test(flow, HAIRPIN)) { err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack); - if (err) { - goto err_add_hairpin_flow; - } - if (flow->flags & MLX5E_TC_FLOW_HAIRPIN_RSS) { + if (err) + return err; + + if (flow_flag_test(flow, HAIRPIN_RSS)) { dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; dest[dest_ix].ft = attr->hairpin_ft; } else { @@ -754,10 +935,9 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { counter = mlx5_fc_create(dev, true); - if (IS_ERR(counter)) { - err = PTR_ERR(counter); - goto err_fc_create; - } + if (IS_ERR(counter)) + return PTR_ERR(counter); + dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; dest[dest_ix].counter_id = mlx5_fc_id(counter); dest_ix++; @@ -766,12 +946,13 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) { err = mlx5e_attach_mod_hdr(priv, flow, parse_attr); - flow_act.modify_id = attr->mod_hdr_id; + flow_act.modify_hdr = attr->modify_hdr; kfree(parse_attr->mod_hdr_actions); if (err) - goto err_create_mod_hdr_id; + return err; } + mutex_lock(&priv->fs.tc.t_lock); if (IS_ERR_OR_NULL(priv->fs.tc.t)) { int tc_grp_size, tc_tbl_size; u32 max_flow_counter; @@ -791,15 +972,13 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, MLX5E_TC_TABLE_NUM_GROUPS, MLX5E_TC_FT_LEVEL, 0); if (IS_ERR(priv->fs.tc.t)) { + mutex_unlock(&priv->fs.tc.t_lock); NL_SET_ERR_MSG_MOD(extack, "Failed to create tc offload table\n"); netdev_err(priv->netdev, "Failed to create tc offload table\n"); - err = PTR_ERR(priv->fs.tc.t); - goto err_create_ft; + return PTR_ERR(priv->fs.tc.t); } - - table_created = true; } if (attr->match_level != MLX5_MATCH_NONE) @@ -807,29 +986,9 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, flow->rule[0] = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec, &flow_act, dest, dest_ix); + mutex_unlock(&priv->fs.tc.t_lock); - if (IS_ERR(flow->rule[0])) { - err = PTR_ERR(flow->rule[0]); - goto err_add_rule; - } - - return 0; - -err_add_rule: - if (table_created) { - mlx5_destroy_flow_table(priv->fs.tc.t); - priv->fs.tc.t = NULL; - } -err_create_ft: - if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) - mlx5e_detach_mod_hdr(priv, flow); -err_create_mod_hdr_id: - mlx5_fc_destroy(dev, counter); -err_fc_create: - if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) - mlx5e_hairpin_flow_del(priv, flow); -err_add_hairpin_flow: - return err; + return PTR_ERR_OR_ZERO(flow->rule[0]); } static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv, @@ -839,18 +998,21 @@ static 
void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv, struct mlx5_fc *counter = NULL; counter = attr->counter; - mlx5_del_flow_rules(flow->rule[0]); + if (!IS_ERR_OR_NULL(flow->rule[0])) + mlx5_del_flow_rules(flow->rule[0]); mlx5_fc_destroy(priv->mdev, counter); - if (!mlx5e_tc_num_filters(priv, MLX5E_TC_NIC_OFFLOAD) && priv->fs.tc.t) { + mutex_lock(&priv->fs.tc.t_lock); + if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) && priv->fs.tc.t) { mlx5_destroy_flow_table(priv->fs.tc.t); priv->fs.tc.t = NULL; } + mutex_unlock(&priv->fs.tc.t_lock); if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) mlx5e_detach_mod_hdr(priv, flow); - if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) + if (flow_flag_test(flow, HAIRPIN)) mlx5e_hairpin_flow_del(priv, flow); } @@ -885,7 +1047,6 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw, } } - flow->flags |= MLX5E_TC_FLOW_OFFLOADED; return rule; } @@ -894,7 +1055,7 @@ mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw, struct mlx5e_tc_flow *flow, struct mlx5_esw_flow_attr *attr) { - flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED; + flow_flag_clear(flow, OFFLOADED); if (attr->split_count) mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr); @@ -917,7 +1078,7 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw, rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr); if (!IS_ERR(rule)) - flow->flags |= MLX5E_TC_FLOW_SLOW; + flow_flag_set(flow, SLOW); return rule; } @@ -932,7 +1093,26 @@ mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw, slow_attr->split_count = 0; slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN; mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr); - flow->flags &= ~MLX5E_TC_FLOW_SLOW; + flow_flag_clear(flow, SLOW); +} + +/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this + * function. + */ +static void unready_flow_add(struct mlx5e_tc_flow *flow, + struct list_head *unready_flows) +{ + flow_flag_set(flow, NOT_READY); + list_add_tail(&flow->unready, unready_flows); +} + +/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this + * function. 
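
Contracts like the one documented above, where the caller must already hold unready_flows_lock, can also be enforced mechanically rather than only in a comment. A hedged sketch of that option (not part of this patch; lockdep_assert_held() compiles away without CONFIG_LOCKDEP):

    #include <linux/list.h>
    #include <linux/lockdep.h>
    #include <linux/mutex.h>

    static void unready_flow_del_checked(struct mlx5e_tc_flow *flow,
                                         struct mutex *unready_flows_lock)
    {
            lockdep_assert_held(unready_flows_lock);
            list_del(&flow->unready);
    }
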
+ */ +static void unready_flow_del(struct mlx5e_tc_flow *flow) +{ + list_del(&flow->unready); + flow_flag_clear(flow, NOT_READY); } static void add_unready_flow(struct mlx5e_tc_flow *flow) @@ -945,14 +1125,24 @@ static void add_unready_flow(struct mlx5e_tc_flow *flow) rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); uplink_priv = &rpriv->uplink_priv; - flow->flags |= MLX5E_TC_FLOW_NOT_READY; - list_add_tail(&flow->unready, &uplink_priv->unready_flows); + mutex_lock(&uplink_priv->unready_flows_lock); + unready_flow_add(flow, &uplink_priv->unready_flows); + mutex_unlock(&uplink_priv->unready_flows_lock); } static void remove_unready_flow(struct mlx5e_tc_flow *flow) { - list_del(&flow->unready); - flow->flags &= ~MLX5E_TC_FLOW_NOT_READY; + struct mlx5_rep_uplink_priv *uplink_priv; + struct mlx5e_rep_priv *rpriv; + struct mlx5_eswitch *esw; + + esw = flow->priv->mdev->priv.eswitch; + rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); + uplink_priv = &rpriv->uplink_priv; + + mutex_lock(&uplink_priv->unready_flows_lock); + unready_flow_del(flow); + mutex_unlock(&uplink_priv->unready_flows_lock); } static int @@ -980,14 +1170,12 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, if (attr->chain > max_chain) { NL_SET_ERR_MSG(extack, "Requested chain is out of supported range"); - err = -EOPNOTSUPP; - goto err_max_prio_chain; + return -EOPNOTSUPP; } if (attr->prio > max_prio) { NL_SET_ERR_MSG(extack, "Requested priority is out of supported range"); - err = -EOPNOTSUPP; - goto err_max_prio_chain; + return -EOPNOTSUPP; } for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) { @@ -1002,7 +1190,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, err = mlx5e_attach_encap(priv, flow, out_dev, out_index, extack, &encap_dev, &encap_valid); if (err) - goto err_attach_encap; + return err; out_priv = netdev_priv(encap_dev); rpriv = out_priv->ppriv; @@ -1012,21 +1200,19 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, err = mlx5_eswitch_add_vlan_action(esw, attr); if (err) - goto err_add_vlan; + return err; if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) { err = mlx5e_attach_mod_hdr(priv, flow, parse_attr); kfree(parse_attr->mod_hdr_actions); if (err) - goto err_mod_hdr; + return err; } if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { counter = mlx5_fc_create(attr->counter_dev, true); - if (IS_ERR(counter)) { - err = PTR_ERR(counter); - goto err_create_counter; - } + if (IS_ERR(counter)) + return PTR_ERR(counter); attr->counter = counter; } @@ -1044,27 +1230,12 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr); } - if (IS_ERR(flow->rule[0])) { - err = PTR_ERR(flow->rule[0]); - goto err_add_rule; - } + if (IS_ERR(flow->rule[0])) + return PTR_ERR(flow->rule[0]); + else + flow_flag_set(flow, OFFLOADED); return 0; - -err_add_rule: - mlx5_fc_destroy(attr->counter_dev, counter); -err_create_counter: - if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) - mlx5e_detach_mod_hdr(priv, flow); -err_mod_hdr: - mlx5_eswitch_del_vlan_action(esw, attr); -err_add_vlan: - for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) - if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) - mlx5e_detach_encap(priv, flow, out_index); -err_attach_encap: -err_max_prio_chain: - return err; } static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow) @@ -1088,14 +1259,14 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, struct mlx5_esw_flow_attr slow_attr; int out_index; - if (flow->flags & 
MLX5E_TC_FLOW_NOT_READY) { + if (flow_flag_test(flow, NOT_READY)) { remove_unready_flow(flow); kvfree(attr->parse_attr); return; } - if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) { - if (flow->flags & MLX5E_TC_FLOW_SLOW) + if (mlx5e_is_offloaded_flow(flow)) { + if (flow_flag_test(flow, SLOW)) mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr); else mlx5e_tc_unoffload_fdb_rules(esw, flow, attr); @@ -1119,39 +1290,39 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, } void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv, - struct mlx5e_encap_entry *e) + struct mlx5e_encap_entry *e, + struct list_head *flow_list) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5_esw_flow_attr slow_attr, *esw_attr; struct mlx5_flow_handle *rule; struct mlx5_flow_spec *spec; - struct encap_flow_item *efi; struct mlx5e_tc_flow *flow; int err; - err = mlx5_packet_reformat_alloc(priv->mdev, - e->reformat_type, - e->encap_size, e->encap_header, - MLX5_FLOW_NAMESPACE_FDB, - &e->encap_id); - if (err) { - mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %d\n", - err); + e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, + e->reformat_type, + e->encap_size, e->encap_header, + MLX5_FLOW_NAMESPACE_FDB); + if (IS_ERR(e->pkt_reformat)) { + mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %lu\n", + PTR_ERR(e->pkt_reformat)); return; } e->flags |= MLX5_ENCAP_ENTRY_VALID; mlx5e_rep_queue_neigh_stats_work(priv); - list_for_each_entry(efi, &e->flows, list) { + list_for_each_entry(flow, flow_list, tmp_list) { bool all_flow_encaps_valid = true; int i; - flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]); + if (!mlx5e_is_offloaded_flow(flow)) + continue; esw_attr = flow->esw_attr; spec = &esw_attr->parse_attr->spec; - esw_attr->dests[efi->index].encap_id = e->encap_id; - esw_attr->dests[efi->index].flags |= MLX5_ESW_DEST_ENCAP_VALID; + esw_attr->dests[flow->tmp_efi_index].pkt_reformat = e->pkt_reformat; + esw_attr->dests[flow->tmp_efi_index].flags |= MLX5_ESW_DEST_ENCAP_VALID; /* Flow can be associated with multiple encap entries. * Before offloading the flow verify that all of them have * a valid neighbour. 
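
Alongside the locking work, these hunks track a firmware API change: mlx5_packet_reformat_alloc() now returns a struct mlx5_pkt_reformat pointer instead of filling in a u32 encap_id, so failures travel in the pointer itself via the kernel's ERR_PTR convention. The general shape of that conversion, with a stand-in type:

    #include <linux/err.h>
    #include <linux/slab.h>

    struct obj { int hw_id; };      /* stand-in for the firmware object */

    static struct obj *obj_alloc(gfp_t flags)
    {
            struct obj *o = kzalloc(sizeof(*o), flags);

            return o ?: ERR_PTR(-ENOMEM);   /* error encoded in the pointer */
    }

    static int caller(void)
    {
            struct obj *o = obj_alloc(GFP_KERNEL);

            if (IS_ERR(o))
                    return PTR_ERR(o);
            kfree(o);
            return 0;
    }
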
@@ -1177,30 +1348,32 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv, } mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr); - flow->flags |= MLX5E_TC_FLOW_OFFLOADED; /* was unset when slow path rule removed */ flow->rule[0] = rule; + /* was unset when slow path rule removed */ + flow_flag_set(flow, OFFLOADED); } } void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv, - struct mlx5e_encap_entry *e) + struct mlx5e_encap_entry *e, + struct list_head *flow_list) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5_esw_flow_attr slow_attr; struct mlx5_flow_handle *rule; struct mlx5_flow_spec *spec; - struct encap_flow_item *efi; struct mlx5e_tc_flow *flow; int err; - list_for_each_entry(efi, &e->flows, list) { - flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]); + list_for_each_entry(flow, flow_list, tmp_list) { + if (!mlx5e_is_offloaded_flow(flow)) + continue; spec = &flow->esw_attr->parse_attr->spec; /* update from encap rule to slow path rule */ rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec, &slow_attr); /* mark the flow's encap dest as non-valid */ - flow->esw_attr->dests[efi->index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID; + flow->esw_attr->dests[flow->tmp_efi_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID; if (IS_ERR(rule)) { err = PTR_ERR(rule); @@ -1210,28 +1383,102 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv, } mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->esw_attr); - flow->flags |= MLX5E_TC_FLOW_OFFLOADED; /* was unset when fast path rule removed */ flow->rule[0] = rule; + /* was unset when fast path rule removed */ + flow_flag_set(flow, OFFLOADED); } /* we know that the encap is valid */ e->flags &= ~MLX5_ENCAP_ENTRY_VALID; - mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id); + mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat); } static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow) { - if (flow->flags & MLX5E_TC_FLOW_ESWITCH) + if (mlx5e_is_eswitch_flow(flow)) return flow->esw_attr->counter; else return flow->nic_attr->counter; } +/* Takes reference to all flows attached to encap and adds the flows to + * flow_list using 'tmp_list' list_head in mlx5e_tc_flow. + */ +void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list) +{ + struct encap_flow_item *efi; + struct mlx5e_tc_flow *flow; + + list_for_each_entry(efi, &e->flows, list) { + flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]); + if (IS_ERR(mlx5e_flow_get(flow))) + continue; + wait_for_completion(&flow->init_done); + + flow->tmp_efi_index = efi->index; + list_add(&flow->tmp_list, flow_list); + } +} + +/* Iterate over tmp_list of flows attached to flow_list head. */ +void mlx5e_put_encap_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list) +{ + struct mlx5e_tc_flow *flow, *tmp; + + list_for_each_entry_safe(flow, tmp, flow_list, tmp_list) + mlx5e_flow_put(priv, flow); +} + +static struct mlx5e_encap_entry * +mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe, + struct mlx5e_encap_entry *e) +{ + struct mlx5e_encap_entry *next = NULL; + +retry: + rcu_read_lock(); + + /* find encap with non-zero reference counter value */ + for (next = e ? 
+ list_next_or_null_rcu(&nhe->encap_list, + &e->encap_list, + struct mlx5e_encap_entry, + encap_list) : + list_first_or_null_rcu(&nhe->encap_list, + struct mlx5e_encap_entry, + encap_list); + next; + next = list_next_or_null_rcu(&nhe->encap_list, + &next->encap_list, + struct mlx5e_encap_entry, + encap_list)) + if (mlx5e_encap_take(next)) + break; + + rcu_read_unlock(); + + /* release starting encap */ + if (e) + mlx5e_encap_put(netdev_priv(e->out_dev), e); + if (!next) + return next; + + /* wait for encap to be fully initialized */ + wait_for_completion(&next->res_ready); + /* continue searching if encap entry is not in valid state after completion */ + if (!(next->flags & MLX5_ENCAP_ENTRY_VALID)) { + e = next; + goto retry; + } + + return next; +} + void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe) { struct mlx5e_neigh *m_neigh = &nhe->m_neigh; + struct mlx5e_encap_entry *e = NULL; struct mlx5e_tc_flow *flow; - struct mlx5e_encap_entry *e; struct mlx5_fc *counter; struct neigh_table *tbl; bool neigh_used = false; @@ -1242,19 +1489,30 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe) tbl = &arp_tbl; #if IS_ENABLED(CONFIG_IPV6) else if (m_neigh->family == AF_INET6) - tbl = &nd_tbl; + tbl = ipv6_stub->nd_tbl; #endif else return; - list_for_each_entry(e, &nhe->encap_list, encap_list) { - struct encap_flow_item *efi; - if (!(e->flags & MLX5_ENCAP_ENTRY_VALID)) - continue; - list_for_each_entry(efi, &e->flows, list) { + /* mlx5e_get_next_valid_encap() releases previous encap before returning + * next one. + */ + while ((e = mlx5e_get_next_valid_encap(nhe, e)) != NULL) { + struct mlx5e_priv *priv = netdev_priv(e->out_dev); + struct encap_flow_item *efi, *tmp; + struct mlx5_eswitch *esw; + LIST_HEAD(flow_list); + + esw = priv->mdev->priv.eswitch; + mutex_lock(&esw->offloads.encap_tbl_lock); + list_for_each_entry_safe(efi, tmp, &e->flows, list) { flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]); - if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) { + if (IS_ERR(mlx5e_flow_get(flow))) + continue; + list_add(&flow->tmp_list, &flow_list); + + if (mlx5e_is_offloaded_flow(flow)) { counter = mlx5e_tc_get_counter(flow); lastuse = mlx5_fc_query_lastuse(counter); if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) { @@ -1263,10 +1521,18 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe) } } } - if (neigh_used) + mutex_unlock(&esw->offloads.encap_tbl_lock); + + mlx5e_put_encap_flow_list(priv, &flow_list); + if (neigh_used) { + /* release current encap before breaking the loop */ + mlx5e_encap_put(priv, e); break; + } } + trace_mlx5e_tc_update_neigh_used_value(nhe, neigh_used); + if (neigh_used) { nhe->reported_lastuse = jiffies; @@ -1282,40 +1548,69 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe) } } -static void mlx5e_detach_encap(struct mlx5e_priv *priv, - struct mlx5e_tc_flow *flow, int out_index) +static void mlx5e_encap_dealloc(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e) { - struct list_head *next = flow->encaps[out_index].list.next; + WARN_ON(!list_empty(&e->flows)); - list_del(&flow->encaps[out_index].list); - if (list_empty(next)) { - struct mlx5e_encap_entry *e; - - e = list_entry(next, struct mlx5e_encap_entry, flows); + if (e->compl_result > 0) { mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e); if (e->flags & MLX5_ENCAP_ENTRY_VALID) - mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id); + mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat); + 
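
mlx5e_get_next_valid_encap() above combines the earlier idioms into a reference-holding cursor: each step walks the RCU list just far enough to find an element whose refcount can be raised, leaves the read-side critical section, and only then drops the previous position. A skeleton of that cursor, reusing the hypothetical struct entry from the first sketch and an entry_put() assumed to behave like the put helpers in this patch:

    static struct entry *entry_next(struct list_head *head, struct entry *pos)
    {
            struct entry *next;

            rcu_read_lock();
            next = pos ? list_next_or_null_rcu(head, &pos->node,
                                               struct entry, node)
                       : list_first_or_null_rcu(head, struct entry, node);
            /* skip entries whose last reference is already gone */
            while (next && !refcount_inc_not_zero(&next->refcnt))
                    next = list_next_or_null_rcu(head, &next->node,
                                                 struct entry, node);
            rcu_read_unlock();

            if (pos)
                    entry_put(pos); /* drop the previous cursor position */
            return next;
    }

Holding a reference between calls is what keeps &pos->node a valid list cursor even though the caller sits outside any RCU read-side section.
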
} - hash_del_rcu(&e->encap_hlist); - kfree(e->encap_header); - kfree(e); + kfree(e->encap_header); + kfree_rcu(e, rcu); +} + +void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + + if (!refcount_dec_and_mutex_lock(&e->refcnt, &esw->offloads.encap_tbl_lock)) + return; + hash_del_rcu(&e->encap_hlist); + mutex_unlock(&esw->offloads.encap_tbl_lock); + + mlx5e_encap_dealloc(priv, e); +} + +static void mlx5e_detach_encap(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, int out_index) +{ + struct mlx5e_encap_entry *e = flow->encaps[out_index].e; + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + + /* flow wasn't fully initialized */ + if (!e) + return; + + mutex_lock(&esw->offloads.encap_tbl_lock); + list_del(&flow->encaps[out_index].list); + flow->encaps[out_index].e = NULL; + if (!refcount_dec_and_test(&e->refcnt)) { + mutex_unlock(&esw->offloads.encap_tbl_lock); + return; } + hash_del_rcu(&e->encap_hlist); + mutex_unlock(&esw->offloads.encap_tbl_lock); + + mlx5e_encap_dealloc(priv, e); } static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow) { struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch; - if (!(flow->flags & MLX5E_TC_FLOW_ESWITCH) || - !(flow->flags & MLX5E_TC_FLOW_DUP)) + if (!flow_flag_test(flow, ESWITCH) || + !flow_flag_test(flow, DUP)) return; mutex_lock(&esw->offloads.peer_mutex); list_del(&flow->peer); mutex_unlock(&esw->offloads.peer_mutex); - flow->flags &= ~MLX5E_TC_FLOW_DUP; + flow_flag_clear(flow, DUP); mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow); kvfree(flow->peer_flow); @@ -1339,7 +1634,7 @@ static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow) static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow) { - if (flow->flags & MLX5E_TC_FLOW_ESWITCH) { + if (mlx5e_is_eswitch_flow(flow)) { mlx5e_tc_del_fdb_peer_flow(flow); mlx5e_tc_del_fdb_flow(priv, flow); } else { @@ -1596,7 +1891,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, *match_level = MLX5_MATCH_L2; } } else if (*match_level != MLX5_MATCH_NONE) { - MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1); + /* cvlan_tag enabled in match criteria and + * disabled in match value means both S & C tags + * don't exist (untagged of both) + */ MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1); *match_level = MLX5_MATCH_L2; } @@ -1840,6 +2138,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_eswitch *esw = dev->priv.eswitch; struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_eswitch_rep *rep; + bool is_eswitch_flow; int err; inner_match_level = MLX5_MATCH_NONE; @@ -1850,7 +2149,8 @@ static int parse_cls_flower(struct mlx5e_priv *priv, non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ? 
outer_match_level : inner_match_level; - if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) { + is_eswitch_flow = mlx5e_is_eswitch_flow(flow); + if (!err && is_eswitch_flow) { rep = rpriv->rep; if (rep->vport != MLX5_VPORT_UPLINK && (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE && @@ -1864,7 +2164,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv, } } - if (flow->flags & MLX5E_TC_FLOW_ESWITCH) { + if (is_eswitch_flow) { flow->esw_attr->inner_match_level = inner_match_level; flow->esw_attr->outer_match_level = outer_match_level; } else { @@ -2385,14 +2685,15 @@ static bool actions_match_supported(struct mlx5e_priv *priv, { u32 actions; - if (flow->flags & MLX5E_TC_FLOW_ESWITCH) + if (mlx5e_is_eswitch_flow(flow)) actions = flow->esw_attr->action; else actions = flow->nic_attr->action; - if (flow->flags & MLX5E_TC_FLOW_EGRESS && + if (flow_flag_test(flow, EGRESS) && !((actions & MLX5_FLOW_CONTEXT_ACTION_DECAP) || - (actions & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP))) + (actions & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) || + (actions & MLX5_FLOW_CONTEXT_ACTION_DROP))) return false; if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) @@ -2542,7 +2843,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, if (priv->netdev->netdev_ops == peer_dev->netdev_ops && same_hw_devs(priv, netdev_priv(peer_dev))) { parse_attr->mirred_ifindex[0] = peer_dev->ifindex; - flow->flags |= MLX5E_TC_FLOW_HAIRPIN; + flow_flag_set(flow, HAIRPIN); action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT; } else { @@ -2629,6 +2930,31 @@ static bool is_merged_eswitch_dev(struct mlx5e_priv *priv, +bool mlx5e_encap_take(struct mlx5e_encap_entry *e) +{ + return refcount_inc_not_zero(&e->refcnt); +} + +static struct mlx5e_encap_entry * +mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key, + uintptr_t hash_key) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_encap_entry *e; + struct encap_key e_key; + + hash_for_each_possible_rcu(esw->offloads.encap_tbl, e, + encap_hlist, hash_key) { + e_key.ip_tun_key = &e->tun_info->key; + e_key.tc_tunnel = e->tunnel; + if (!cmp_encap_info(&e_key, key) && + mlx5e_encap_take(e)) + return e; + } + + return NULL; +} + static int mlx5e_attach_encap(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow, struct net_device *mirred_dev, @@ -2641,11 +2967,10 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, struct mlx5_esw_flow_attr *attr = flow->esw_attr; struct mlx5e_tc_flow_parse_attr *parse_attr; const struct ip_tunnel_info *tun_info; - struct encap_key key, e_key; + struct encap_key key; struct mlx5e_encap_entry *e; unsigned short family; uintptr_t hash_key; - bool found = false; int err = 0; parse_attr = attr->parse_attr; @@ -2660,57 +2985,78 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, hash_key = hash_encap_info(&key); - hash_for_each_possible_rcu(esw->offloads.encap_tbl, e, - encap_hlist, hash_key) { - e_key.ip_tun_key = &e->tun_info->key; - e_key.tc_tunnel = e->tunnel; - if (!cmp_encap_info(&e_key, &key)) { - found = true; - break; - } - } + mutex_lock(&esw->offloads.encap_tbl_lock); + e = mlx5e_encap_get(priv, &key, hash_key); /* must verify if encap is valid or not */ - if (found) + if (e) { + mutex_unlock(&esw->offloads.encap_tbl_lock); + wait_for_completion(&e->res_ready); + + /* Protect against concurrent neigh update. 
*/ + mutex_lock(&esw->offloads.encap_tbl_lock); + if (e->compl_result < 0) { + err = -EREMOTEIO; + goto out_err; + } goto attach_flow; + } e = kzalloc(sizeof(*e), GFP_KERNEL); - if (!e) - return -ENOMEM; + if (!e) { + err = -ENOMEM; + goto out_err; + } + + refcount_set(&e->refcnt, 1); + init_completion(&e->res_ready); e->tun_info = tun_info; err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack); - if (err) + if (err) { + kfree(e); + e = NULL; goto out_err; + } INIT_LIST_HEAD(&e->flows); + hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key); + mutex_unlock(&esw->offloads.encap_tbl_lock); if (family == AF_INET) err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e); else if (family == AF_INET6) err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e); - if (err) + /* Protect against concurrent neigh update. */ + mutex_lock(&esw->offloads.encap_tbl_lock); + complete_all(&e->res_ready); + if (err) { + e->compl_result = err; goto out_err; - - hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key); + } + e->compl_result = 1; attach_flow: + flow->encaps[out_index].e = e; list_add(&flow->encaps[out_index].list, &e->flows); flow->encaps[out_index].index = out_index; *encap_dev = e->out_dev; if (e->flags & MLX5_ENCAP_ENTRY_VALID) { - attr->dests[out_index].encap_id = e->encap_id; + attr->dests[out_index].pkt_reformat = e->pkt_reformat; attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID; *encap_valid = true; } else { *encap_valid = false; } + mutex_unlock(&esw->offloads.encap_tbl_lock); return err; out_err: - kfree(e); + mutex_unlock(&esw->offloads.encap_tbl_lock); + if (e) + mlx5e_encap_put(priv, e); return err; } @@ -2890,12 +3236,16 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, if (netdev_port_same_parent_id(priv->netdev, out_dev)) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH); - struct net_device *uplink_upper = netdev_master_upper_dev_get(uplink_dev); + struct net_device *uplink_upper; + rcu_read_lock(); + uplink_upper = + netdev_master_upper_dev_get_rcu(uplink_dev); if (uplink_upper && netif_is_lag_master(uplink_upper) && uplink_upper == out_dev) out_dev = uplink_dev; + rcu_read_unlock(); if (is_vlan_dev(out_dev)) { err = add_vlan_push_action(priv, attr, @@ -3066,19 +3416,19 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, return 0; } -static void get_flags(int flags, u16 *flow_flags) +static void get_flags(int flags, unsigned long *flow_flags) { - u16 __flow_flags = 0; + unsigned long __flow_flags = 0; - if (flags & MLX5E_TC_INGRESS) - __flow_flags |= MLX5E_TC_FLOW_INGRESS; - if (flags & MLX5E_TC_EGRESS) - __flow_flags |= MLX5E_TC_FLOW_EGRESS; + if (flags & MLX5_TC_FLAG(INGRESS)) + __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_INGRESS); + if (flags & MLX5_TC_FLAG(EGRESS)) + __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_EGRESS); - if (flags & MLX5E_TC_ESW_OFFLOAD) - __flow_flags |= MLX5E_TC_FLOW_ESWITCH; - if (flags & MLX5E_TC_NIC_OFFLOAD) - __flow_flags |= MLX5E_TC_FLOW_NIC; + if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) + __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH); + if (flags & MLX5_TC_FLAG(NIC_OFFLOAD)) + __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC); *flow_flags = __flow_flags; } @@ -3090,12 +3440,13 @@ static const struct rhashtable_params tc_ht_params = { .automatic_shrinking = true, }; -static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv, int flags) +static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv, + unsigned long flags) { 
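
The attach path above publishes a half-initialized encap entry in the hash table, drops the table lock for the expensive header/route setup, and lets concurrent flows block on res_ready until compl_result records success or failure. A condensed, self-contained sketch of that publish-then-complete handshake, with simplified types and a stand-in slow_init() (not the driver's actual API):

    #include <linux/completion.h>
    #include <linux/err.h>
    #include <linux/hashtable.h>
    #include <linux/mutex.h>
    #include <linux/refcount.h>
    #include <linux/slab.h>

    static DEFINE_MUTEX(tbl_lock);
    static DEFINE_HASHTABLE(tbl, 8);

    struct entry {
            struct hlist_node hnode;
            refcount_t refcnt;
            struct completion res_ready;
            int compl_result;               /* <0: init failed, >0: usable */
            u32 key;
    };

    static int slow_init(struct entry *e)
    {
            return 0;                       /* stand-in for expensive setup */
    }

    static void entry_put(struct entry *e)
    {
            /* Same idiom as mlx5e_encap_put(): the final put unhashes the
             * entry with the table lock held, so lookup and teardown
             * cannot race. */
            if (!refcount_dec_and_mutex_lock(&e->refcnt, &tbl_lock))
                    return;
            hash_del(&e->hnode);
            mutex_unlock(&tbl_lock);
            kfree(e);
    }

    static struct entry *entry_get(u32 key)
    {
            struct entry *e;

            mutex_lock(&tbl_lock);
            hash_for_each_possible(tbl, e, hnode, key) {
                    if (e->key != key || !refcount_inc_not_zero(&e->refcnt))
                            continue;
                    mutex_unlock(&tbl_lock);
                    wait_for_completion(&e->res_ready); /* creator may run */
                    if (e->compl_result > 0)
                            return e;
                    entry_put(e);
                    return ERR_PTR(-EREMOTEIO);
            }

            e = kzalloc(sizeof(*e), GFP_KERNEL);
            if (!e) {
                    mutex_unlock(&tbl_lock);
                    return ERR_PTR(-ENOMEM);
            }
            refcount_set(&e->refcnt, 1);
            init_completion(&e->res_ready);
            e->key = key;
            /* Publish before initializing so waiters can already find it. */
            hash_add(tbl, &e->hnode, key);
            mutex_unlock(&tbl_lock);

            if (slow_init(e)) {
                    e->compl_result = -EREMOTEIO;
                    complete_all(&e->res_ready);
                    entry_put(e);           /* drop the creator's reference */
                    return ERR_PTR(-EREMOTEIO);
            }
            e->compl_result = 1;
            complete_all(&e->res_ready);
            return e;
    }

Publishing before initialization keeps the encap_tbl_lock critical sections short; correctness then hinges on every reader waiting on res_ready before trusting the entry.
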
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5e_rep_priv *uplink_rpriv; - if (flags & MLX5E_TC_ESW_OFFLOAD) { + if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) { uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); return &uplink_rpriv->uplink_priv.tc_ht; } else /* NIC offload */ @@ -3106,7 +3457,7 @@ static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow) { struct mlx5_esw_flow_attr *attr = flow->esw_attr; bool is_rep_ingress = attr->in_rep->vport != MLX5_VPORT_UPLINK && - flow->flags & MLX5E_TC_FLOW_INGRESS; + flow_flag_test(flow, INGRESS); bool act_is_encap = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT); bool esw_paired = mlx5_devcom_is_paired(attr->in_mdev->priv.devcom, @@ -3125,13 +3476,13 @@ static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow) static int mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size, - struct flow_cls_offload *f, u16 flow_flags, + struct flow_cls_offload *f, unsigned long flow_flags, struct mlx5e_tc_flow_parse_attr **__parse_attr, struct mlx5e_tc_flow **__flow) { struct mlx5e_tc_flow_parse_attr *parse_attr; struct mlx5e_tc_flow *flow; - int err; + int out_index, err; flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL); parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL); @@ -3143,6 +3494,12 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size, flow->cookie = f->cookie; flow->flags = flow_flags; flow->priv = priv; + for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) + INIT_LIST_HEAD(&flow->encaps[out_index].list); + INIT_LIST_HEAD(&flow->mod_hdr); + INIT_LIST_HEAD(&flow->hairpin); + refcount_set(&flow->refcnt, 1); + init_completion(&flow->init_done); *__flow = flow; *__parse_attr = parse_attr; @@ -3182,7 +3539,7 @@ mlx5e_flow_esw_attr_init(struct mlx5_esw_flow_attr *esw_attr, static struct mlx5e_tc_flow * __mlx5e_add_fdb_flow(struct mlx5e_priv *priv, struct flow_cls_offload *f, - u16 flow_flags, + unsigned long flow_flags, struct net_device *filter_dev, struct mlx5_eswitch_rep *in_rep, struct mlx5_core_dev *in_mdev) @@ -3193,7 +3550,7 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow; int attr_size, err; - flow_flags |= MLX5E_TC_FLOW_ESWITCH; + flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH); attr_size = sizeof(struct mlx5_esw_flow_attr); err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags, &parse_attr, &flow); @@ -3215,6 +3572,7 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv, goto err_free; err = mlx5e_tc_add_fdb_flow(priv, flow, extack); + complete_all(&flow->init_done); if (err) { if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev))) goto err_free; @@ -3225,15 +3583,14 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv, return flow; err_free: - kfree(flow); - kvfree(parse_attr); + mlx5e_flow_put(priv, flow); out: return ERR_PTR(err); } static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f, struct mlx5e_tc_flow *flow, - u16 flow_flags) + unsigned long flow_flags) { struct mlx5e_priv *priv = flow->priv, *peer_priv; struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw; @@ -3271,7 +3628,7 @@ static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f, } flow->peer_flow = peer_flow; - flow->flags |= MLX5E_TC_FLOW_DUP; + flow_flag_set(flow, DUP); mutex_lock(&esw->offloads.peer_mutex); list_add_tail(&flow->peer, &esw->offloads.peer_flows); mutex_unlock(&esw->offloads.peer_mutex); @@ -3284,7 +3641,7 @@ out: static int mlx5e_add_fdb_flow(struct mlx5e_priv *priv, struct flow_cls_offload *f, - u16 flow_flags, + unsigned long flow_flags, 
struct net_device *filter_dev, struct mlx5e_tc_flow **__flow) { @@ -3318,7 +3675,7 @@ out: static int mlx5e_add_nic_flow(struct mlx5e_priv *priv, struct flow_cls_offload *f, - u16 flow_flags, + unsigned long flow_flags, struct net_device *filter_dev, struct mlx5e_tc_flow **__flow) { @@ -3332,7 +3689,7 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv, if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common)) return -EOPNOTSUPP; - flow_flags |= MLX5E_TC_FLOW_NIC; + flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC); attr_size = sizeof(struct mlx5_nic_flow_attr); err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags, &parse_attr, &flow); @@ -3353,14 +3710,14 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv, if (err) goto err_free; - flow->flags |= MLX5E_TC_FLOW_OFFLOADED; + flow_flag_set(flow, OFFLOADED); kvfree(parse_attr); *__flow = flow; return 0; err_free: - kfree(flow); + mlx5e_flow_put(priv, flow); kvfree(parse_attr); out: return err; @@ -3369,12 +3726,12 @@ out: static int mlx5e_tc_add_flow(struct mlx5e_priv *priv, struct flow_cls_offload *f, - int flags, + unsigned long flags, struct net_device *filter_dev, struct mlx5e_tc_flow **flow) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - u16 flow_flags; + unsigned long flow_flags; int err; get_flags(flags, &flow_flags); @@ -3393,14 +3750,16 @@ mlx5e_tc_add_flow(struct mlx5e_priv *priv, } int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv, - struct flow_cls_offload *f, int flags) + struct flow_cls_offload *f, unsigned long flags) { struct netlink_ext_ack *extack = f->common.extack; struct rhashtable *tc_ht = get_tc_ht(priv, flags); struct mlx5e_tc_flow *flow; int err = 0; - flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params); + rcu_read_lock(); + flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params); + rcu_read_unlock(); if (flow) { NL_SET_ERR_MSG_MOD(extack, "flow cookie already exists, ignoring"); @@ -3411,55 +3770,68 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv, goto out; } + trace_mlx5e_configure_flower(f); err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow); if (err) goto out; - err = rhashtable_insert_fast(tc_ht, &flow->node, tc_ht_params); + err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params); if (err) goto err_free; return 0; err_free: - mlx5e_tc_del_flow(priv, flow); - kfree(flow); + mlx5e_flow_put(priv, flow); out: return err; } -#define DIRECTION_MASK (MLX5E_TC_INGRESS | MLX5E_TC_EGRESS) -#define FLOW_DIRECTION_MASK (MLX5E_TC_FLOW_INGRESS | MLX5E_TC_FLOW_EGRESS) - static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags) { - if ((flow->flags & FLOW_DIRECTION_MASK) == (flags & DIRECTION_MASK)) - return true; + bool dir_ingress = !!(flags & MLX5_TC_FLAG(INGRESS)); + bool dir_egress = !!(flags & MLX5_TC_FLAG(EGRESS)); - return false; + return flow_flag_test(flow, INGRESS) == dir_ingress && + flow_flag_test(flow, EGRESS) == dir_egress; } int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv, - struct flow_cls_offload *f, int flags) + struct flow_cls_offload *f, unsigned long flags) { struct rhashtable *tc_ht = get_tc_ht(priv, flags); struct mlx5e_tc_flow *flow; + int err; + rcu_read_lock(); flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params); - if (!flow || !same_flow_direction(flow, flags)) - return -EINVAL; + if (!flow || !same_flow_direction(flow, flags)) { + err = -EINVAL; + goto errout; + } + /* Only delete the flow if it doesn't have MLX5E_TC_FLOW_DELETED flag + * set. 
+ */ + if (flow_flag_test_and_set(flow, DELETED)) { + err = -EINVAL; + goto errout; + } rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params); + rcu_read_unlock(); - mlx5e_tc_del_flow(priv, flow); - - kfree(flow); + trace_mlx5e_delete_flower(f); + mlx5e_flow_put(priv, flow); return 0; + +errout: + rcu_read_unlock(); + return err; } int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv, - struct flow_cls_offload *f, int flags) + struct flow_cls_offload *f, unsigned long flags) { struct mlx5_devcom *devcom = priv->mdev->priv.devcom; struct rhashtable *tc_ht = get_tc_ht(priv, flags); @@ -3469,15 +3841,24 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv, u64 lastuse = 0; u64 packets = 0; u64 bytes = 0; + int err = 0; - flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params); - if (!flow || !same_flow_direction(flow, flags)) - return -EINVAL; + rcu_read_lock(); + flow = mlx5e_flow_get(rhashtable_lookup(tc_ht, &f->cookie, + tc_ht_params)); + rcu_read_unlock(); + if (IS_ERR(flow)) + return PTR_ERR(flow); - if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) { + if (!same_flow_direction(flow, flags)) { + err = -EINVAL; + goto errout; + } + + if (mlx5e_is_offloaded_flow(flow)) { counter = mlx5e_tc_get_counter(flow); if (!counter) - return 0; + goto errout; mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse); } @@ -3489,8 +3870,8 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv, if (!peer_esw) goto out; - if ((flow->flags & MLX5E_TC_FLOW_DUP) && - (flow->peer_flow->flags & MLX5E_TC_FLOW_OFFLOADED)) { + if (flow_flag_test(flow, DUP) && + flow_flag_test(flow->peer_flow, OFFLOADED)) { u64 bytes2; u64 packets2; u64 lastuse2; @@ -3509,15 +3890,118 @@ no_peer_counter: mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS); out: flow_stats_update(&f->stats, bytes, packets, lastuse); + trace_mlx5e_stats_flower(f); +errout: + mlx5e_flow_put(priv, flow); + return err; +} + +static int apply_police_params(struct mlx5e_priv *priv, u32 rate, + struct netlink_ext_ack *extack) +{ + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5_eswitch *esw; + u16 vport_num; + u32 rate_mbps; + int err; + + esw = priv->mdev->priv.eswitch; + /* rate is given in bytes/sec. + * First convert to bits/sec and then round to the nearest mbit/secs. + * mbit means million bits. + * Moreover, if rate is non zero we choose to configure to a minimum of + * 1 mbit/sec. + */ + rate_mbps = rate ? 
max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0; + vport_num = rpriv->rep->vport; + + err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps); + if (err) + NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware"); + + return err; +} + +static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv, + struct flow_action *flow_action, + struct netlink_ext_ack *extack) +{ + struct mlx5e_rep_priv *rpriv = priv->ppriv; + const struct flow_action_entry *act; + int err; + int i; + + if (!flow_action_has_entries(flow_action)) { + NL_SET_ERR_MSG_MOD(extack, "matchall called with no action"); + return -EINVAL; + } + + if (!flow_offload_has_one_action(flow_action)) { + NL_SET_ERR_MSG_MOD(extack, "matchall policing support only a single action"); + return -EOPNOTSUPP; + } + + flow_action_for_each(i, act, flow_action) { + switch (act->id) { + case FLOW_ACTION_POLICE: + err = apply_police_params(priv, act->police.rate_bytes_ps, extack); + if (err) + return err; + + rpriv->prev_vf_vport_stats = priv->stats.vf_vport; + break; + default: + NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall"); + return -EOPNOTSUPP; + } + } return 0; } +int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv, + struct tc_cls_matchall_offload *ma) +{ + struct netlink_ext_ack *extack = ma->common.extack; + int prio = TC_H_MAJ(ma->common.prio) >> 16; + + if (prio != 1) { + NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported"); + return -EINVAL; + } + + return scan_tc_matchall_fdb_actions(priv, &ma->rule->action, extack); +} + +int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv, + struct tc_cls_matchall_offload *ma) +{ + struct netlink_ext_ack *extack = ma->common.extack; + + return apply_police_params(priv, 0, extack); +} + +void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv, + struct tc_cls_matchall_offload *ma) +{ + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct rtnl_link_stats64 cur_stats; + u64 dbytes; + u64 dpkts; + + cur_stats = priv->stats.vf_vport; + dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets; + dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes; + rpriv->prev_vf_vport_stats = cur_stats; + flow_stats_update(&ma->stats, dpkts, dbytes, jiffies); +} + static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv) { struct mlx5_core_dev *peer_mdev = peer_priv->mdev; - struct mlx5e_hairpin_entry *hpe; + struct mlx5e_hairpin_entry *hpe, *tmp; + LIST_HEAD(init_wait_list); u16 peer_vhca_id; int bkt; @@ -3526,9 +4010,18 @@ static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv, peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id); - hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist) { - if (hpe->peer_vhca_id == peer_vhca_id) + mutex_lock(&priv->fs.tc.hairpin_tbl_lock); + hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist) + if (refcount_inc_not_zero(&hpe->refcnt)) + list_add(&hpe->dead_peer_wait_list, &init_wait_list); + mutex_unlock(&priv->fs.tc.hairpin_tbl_lock); + + list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) { + wait_for_completion(&hpe->res_ready); + if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id) hpe->hp->pair->peer_gone = true; + + mlx5e_hairpin_put(priv, hpe); } } @@ -3564,7 +4057,10 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv) struct mlx5e_tc_table *tc = &priv->fs.tc; int err; - hash_init(tc->mod_hdr_tbl); + mutex_init(&tc->t_lock); + mutex_init(&tc->mod_hdr.lock); + hash_init(tc->mod_hdr.hlist); + 
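
apply_police_params() above folds the unit conversion into one expression: the rate arrives in bytes per second, is converted to bits per second, rounded to the nearest megabit (by adding half of the divisor before dividing), and clamped so any nonzero rate programs at least 1 Mbit/s. The same arithmetic as a hypothetical standalone helper, with a worked example in the comment:

    #include <linux/kernel.h>

    static u32 police_bytes_ps_to_mbps(u64 rate)
    {
            /* 1250000 B/s -> 10000000 bit/s -> (10000000 + 500000) /
             * 1000000 = 10 Mbit/s; 1 B/s -> 8 bit/s rounds to 0 and is
             * clamped to 1 Mbit/s so a nonzero policer never becomes a
             * no-op. */
            return rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0;
    }
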
mutex_init(&tc->hairpin_tbl_lock); hash_init(tc->hairpin_tbl); err = rhashtable_init(&tc->ht, &tc_ht_params); @@ -3596,12 +4092,16 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) if (tc->netdevice_nb.notifier_call) unregister_netdevice_notifier(&tc->netdevice_nb); + mutex_destroy(&tc->mod_hdr.lock); + mutex_destroy(&tc->hairpin_tbl_lock); + rhashtable_destroy(&tc->ht); if (!IS_ERR_OR_NULL(tc->t)) { mlx5_destroy_flow_table(tc->t); tc->t = NULL; } + mutex_destroy(&tc->t_lock); } int mlx5e_tc_esw_init(struct rhashtable *tc_ht) @@ -3614,7 +4114,7 @@ void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht) rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL); } -int mlx5e_tc_num_filters(struct mlx5e_priv *priv, int flags) +int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags) { struct rhashtable *tc_ht = get_tc_ht(priv, flags); @@ -3636,10 +4136,10 @@ void mlx5e_tc_reoffload_flows_work(struct work_struct *work) reoffload_flows_work); struct mlx5e_tc_flow *flow, *tmp; - rtnl_lock(); + mutex_lock(&rpriv->unready_flows_lock); list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) { if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL)) - remove_unready_flow(flow); + unready_flow_del(flow); } - rtnl_unlock(); + mutex_unlock(&rpriv->unready_flows_lock); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index 3ab39275ca7d..924c6ef86a14 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -40,13 +40,15 @@ #ifdef CONFIG_MLX5_ESWITCH enum { - MLX5E_TC_INGRESS = BIT(0), - MLX5E_TC_EGRESS = BIT(1), - MLX5E_TC_NIC_OFFLOAD = BIT(2), - MLX5E_TC_ESW_OFFLOAD = BIT(3), - MLX5E_TC_LAST_EXPORTED_BIT = 3, + MLX5E_TC_FLAG_INGRESS_BIT, + MLX5E_TC_FLAG_EGRESS_BIT, + MLX5E_TC_FLAG_NIC_OFFLOAD_BIT, + MLX5E_TC_FLAG_ESW_OFFLOAD_BIT, + MLX5E_TC_FLAG_LAST_EXPORTED_BIT = MLX5E_TC_FLAG_ESW_OFFLOAD_BIT, }; +#define MLX5_TC_FLAG(flag) BIT(MLX5E_TC_FLAG_##flag##_BIT) + int mlx5e_tc_nic_init(struct mlx5e_priv *priv); void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv); @@ -54,23 +56,37 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht); void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht); int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv, - struct flow_cls_offload *f, int flags); + struct flow_cls_offload *f, unsigned long flags); int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv, - struct flow_cls_offload *f, int flags); + struct flow_cls_offload *f, unsigned long flags); int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv, - struct flow_cls_offload *f, int flags); + struct flow_cls_offload *f, unsigned long flags); + +int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv, + struct tc_cls_matchall_offload *f); +int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv, + struct tc_cls_matchall_offload *f); +void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv, + struct tc_cls_matchall_offload *ma); struct mlx5e_encap_entry; void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv, - struct mlx5e_encap_entry *e); + struct mlx5e_encap_entry *e, + struct list_head *flow_list); void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv, - struct mlx5e_encap_entry *e); + struct mlx5e_encap_entry *e, + struct list_head *flow_list); +bool mlx5e_encap_take(struct mlx5e_encap_entry *e); +void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e); + +void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry 
*e, struct list_head *flow_list); +void mlx5e_put_encap_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list); struct mlx5e_neigh_hash_entry; void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe); -int mlx5e_tc_num_filters(struct mlx5e_priv *priv, int flags); +int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags); void mlx5e_tc_reoffload_flows_work(struct work_struct *work); @@ -80,7 +96,11 @@ bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv, #else /* CONFIG_MLX5_ESWITCH */ static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; } static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {} -static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv, int flags) { return 0; } +static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv, + unsigned long flags) +{ + return 0; +} #endif #endif /* __MLX5_EN_TC_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 600e92cb629a..d3a67a9b4eba 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -210,7 +210,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb, } for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; int fsz = skb_frag_size(frag); dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz, @@ -292,8 +292,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs; stats->packets += skb_shinfo(skb)->gso_segs; } else { - u8 mode = mlx5e_transport_inline_tx_wqe(wqe) ? - MLX5_INLINE_MODE_TCP_UDP : sq->min_inline_mode; + u8 mode = mlx5e_tx_wqe_inline_mode(sq, &wqe->ctrl, skb); opcode = MLX5_OPCODE_SEND; mss = 0; @@ -608,9 +607,11 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs; stats->packets += skb_shinfo(skb)->gso_segs; } else { + u8 mode = mlx5e_tx_wqe_inline_mode(sq, NULL, skb); + opcode = MLX5_OPCODE_SEND; mss = 0; - ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb); + ihs = mlx5e_calc_min_inline(mode, skb); num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN); stats->packets++; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index 49b06b256c92..257a7c9f7a14 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -33,6 +33,7 @@ #include <linux/irq.h> #include "en.h" #include "en/xdp.h" +#include "en/xsk/rx.h" #include "en/xsk/tx.h" static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c) @@ -81,6 +82,29 @@ void mlx5e_trigger_irq(struct mlx5e_icosq *sq) mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl); } +static bool mlx5e_napi_xsk_post(struct mlx5e_xdpsq *xsksq, struct mlx5e_rq *xskrq) +{ + bool busy_xsk = false, xsk_rx_alloc_err; + + /* Handle the race between the application querying need_wakeup and the + * driver setting it: + * 1. Update need_wakeup both before and after the TX. If it goes to + * "yes", it can only happen with the first update. + * 2. If the application queried need_wakeup before we set it, the + * packets will be transmitted anyway, even w/o a wakeup. + * 3. Give a chance to clear need_wakeup after new packets were queued + * for TX. 
+ */ + mlx5e_xsk_update_tx_wakeup(xsksq); + busy_xsk |= mlx5e_xsk_tx(xsksq, MLX5E_TX_XSK_POLL_BUDGET); + mlx5e_xsk_update_tx_wakeup(xsksq); + + xsk_rx_alloc_err = xskrq->post_wqes(xskrq); + busy_xsk |= mlx5e_xsk_update_rx_wakeup(xskrq, xsk_rx_alloc_err); + + return busy_xsk; +} + int mlx5e_napi_poll(struct napi_struct *napi, int budget) { struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel, @@ -122,8 +146,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) if (xsk_open) { mlx5e_poll_ico_cq(&c->xskicosq.cq); busy |= mlx5e_poll_xdpsq_cq(&xsksq->cq); - busy_xsk |= mlx5e_xsk_tx(xsksq, MLX5E_TX_XSK_POLL_BUDGET); - busy_xsk |= xskrq->post_wqes(xskrq); + busy_xsk |= mlx5e_napi_xsk_post(xsksq, xskrq); } busy |= busy_xsk; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 41f25ea2e8d9..580c71cb9dfa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -215,11 +215,7 @@ static int mlx5_eq_async_int(struct notifier_block *nb, */ dma_rmb(); - if (likely(eqe->type < MLX5_EVENT_TYPE_MAX)) - atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe); - else - mlx5_core_warn_once(dev, "notifier_call_chain is not setup for eqe: %d\n", eqe->type); - + atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe); atomic_notifier_call_chain(&eqt->nh[MLX5_EVENT_TYPE_NOTIFY_ANY], eqe->type, eqe); ++eq->cons_index; @@ -328,10 +324,13 @@ err_buf: /** * mlx5_eq_enable - Enable EQ for receiving EQEs - * @dev - Device which owns the eq - * @eq - EQ to enable - * @nb - notifier call block - * mlx5_eq_enable - must be called after EQ is created in device. + * @dev : Device which owns the eq + * @eq : EQ to enable + * @nb : Notifier call block + * + * Must be called after EQ is created in device. + * + * @return: 0 if no error */ int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq, struct notifier_block *nb) @@ -348,11 +347,12 @@ int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq, EXPORT_SYMBOL(mlx5_eq_enable); /** - * mlx5_eq_disable - Enable EQ for receiving EQEs - * @dev - Device which owns the eq - * @eq - EQ to disable - * @nb - notifier call block - * mlx5_eq_disable - must be called before EQ is destroyed. + * mlx5_eq_disable - Disable EQ for receiving EQEs + * @dev : Device which owns the eq + * @eq : EQ to disable + * @nb : Notifier call block + * + * Must be called before EQ is destroyed. 
*/ void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq, struct notifier_block *nb) @@ -415,7 +415,7 @@ void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq) int mlx5_eq_table_init(struct mlx5_core_dev *dev) { struct mlx5_eq_table *eq_table; - int i, err; + int i; eq_table = kvzalloc(sizeof(*eq_table), GFP_KERNEL); if (!eq_table) @@ -423,9 +423,7 @@ int mlx5_eq_table_init(struct mlx5_core_dev *dev) dev->priv.eq_table = eq_table; - err = mlx5_eq_debugfs_init(dev); - if (err) - goto kvfree_eq_table; + mlx5_eq_debugfs_init(dev); mutex_init(&eq_table->lock); for (i = 0; i < MLX5_EVENT_TYPE_MAX; i++) @@ -433,11 +431,6 @@ int mlx5_eq_table_init(struct mlx5_core_dev *dev) eq_table->irq_table = dev->priv.irq_table; return 0; - -kvfree_eq_table: - kvfree(eq_table); - dev->priv.eq_table = NULL; - return err; } void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev) @@ -945,9 +938,6 @@ int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb) { struct mlx5_eq_table *eqt = dev->priv.eq_table; - if (nb->event_type >= MLX5_EVENT_TYPE_MAX) - return -EINVAL; - return atomic_notifier_chain_register(&eqt->nh[nb->event_type], &nb->nb); } EXPORT_SYMBOL(mlx5_eq_notifier_register); @@ -956,9 +946,6 @@ int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb) { struct mlx5_eq_table *eqt = dev->priv.eq_table; - if (nb->event_type >= MLX5_EVENT_TYPE_MAX) - return -EINVAL; - return atomic_notifier_chain_unregister(&eqt->nh[nb->event_type], &nb->nb); } EXPORT_SYMBOL(mlx5_eq_notifier_unregister); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 1f3891fde2eb..30aae76b6a1d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -58,20 +58,9 @@ struct vport_addr { bool mc_promisc; }; -enum { - UC_ADDR_CHANGE = BIT(0), - MC_ADDR_CHANGE = BIT(1), - PROMISC_CHANGE = BIT(3), -}; - static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw); static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw); -/* Vport context events */ -#define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \ - MC_ADDR_CHANGE | \ - PROMISC_CHANGE) - struct mlx5_vport *__must_check mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num) { @@ -108,13 +97,13 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport, MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1); - if (events_mask & UC_ADDR_CHANGE) + if (events_mask & MLX5_VPORT_UC_ADDR_CHANGE) MLX5_SET(nic_vport_context, nic_vport_ctx, event_on_uc_address_change, 1); - if (events_mask & MC_ADDR_CHANGE) + if (events_mask & MLX5_VPORT_MC_ADDR_CHANGE) MLX5_SET(nic_vport_context, nic_vport_ctx, event_on_mc_address_change, 1); - if (events_mask & PROMISC_CHANGE) + if (events_mask & MLX5_VPORT_PROMISC_CHANGE) MLX5_SET(nic_vport_context, nic_vport_ctx, event_on_promisc_change, 1); @@ -463,6 +452,22 @@ static int esw_create_legacy_table(struct mlx5_eswitch *esw) return err; } +#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \ + MLX5_VPORT_MC_ADDR_CHANGE | \ + MLX5_VPORT_PROMISC_CHANGE) + +static int esw_legacy_enable(struct mlx5_eswitch *esw) +{ + int ret; + + ret = esw_create_legacy_table(esw); + if (ret) + return ret; + + mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS); + return 0; +} + static void esw_destroy_legacy_table(struct mlx5_eswitch *esw) { esw_cleanup_vepa_rules(esw); @@ -470,6 +475,19 @@ static void 
esw_destroy_legacy_table(struct mlx5_eswitch *esw) esw_destroy_legacy_vepa_table(esw); } +static void esw_legacy_disable(struct mlx5_eswitch *esw) +{ + struct esw_mc_addr *mc_promisc; + + mlx5_eswitch_disable_pf_vf_vports(esw); + + mc_promisc = &esw->mc_promisc; + if (mc_promisc->uplink_rule) + mlx5_del_flow_rules(mc_promisc->uplink_rule); + + esw_destroy_legacy_table(esw); +} + /* E-Switch vport UC/MC lists management */ typedef int (*vport_addr_action)(struct mlx5_eswitch *esw, struct vport_addr *vaddr); @@ -901,21 +919,21 @@ static void esw_vport_change_handle_locked(struct mlx5_vport *vport) esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n", vport->vport, mac); - if (vport->enabled_events & UC_ADDR_CHANGE) { + if (vport->enabled_events & MLX5_VPORT_UC_ADDR_CHANGE) { esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC); esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC); } - if (vport->enabled_events & MC_ADDR_CHANGE) + if (vport->enabled_events & MLX5_VPORT_MC_ADDR_CHANGE) esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC); - if (vport->enabled_events & PROMISC_CHANGE) { + if (vport->enabled_events & MLX5_VPORT_PROMISC_CHANGE) { esw_update_vport_rx_mode(esw, vport); if (!IS_ERR_OR_NULL(vport->allmulti_rule)) esw_update_vport_mc_promisc(esw, vport); } - if (vport->enabled_events & (PROMISC_CHANGE | MC_ADDR_CHANGE)) + if (vport->enabled_events & (MLX5_VPORT_PROMISC_CHANGE | MLX5_VPORT_MC_ADDR_CHANGE)) esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC); esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport); @@ -1393,18 +1411,49 @@ out: return err; } +static bool element_type_supported(struct mlx5_eswitch *esw, int type) +{ + const struct mlx5_core_dev *dev = esw->dev; + + switch (type) { + case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR: + return MLX5_CAP_QOS(dev, esw_element_type) & + ELEMENT_TYPE_CAP_MASK_TASR; + case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT: + return MLX5_CAP_QOS(dev, esw_element_type) & + ELEMENT_TYPE_CAP_MASK_VPORT; + case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC: + return MLX5_CAP_QOS(dev, esw_element_type) & + ELEMENT_TYPE_CAP_MASK_VPORT_TC; + case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC: + return MLX5_CAP_QOS(dev, esw_element_type) & + ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC; + } + return false; +} + /* Vport QoS management */ -static int esw_create_tsar(struct mlx5_eswitch *esw) +static void esw_create_tsar(struct mlx5_eswitch *esw) { u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0}; struct mlx5_core_dev *dev = esw->dev; + __be32 *attr; int err; if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling)) - return 0; + return; + + if (!element_type_supported(esw, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR)) + return; if (esw->qos.enabled) - return -EEXIST; + return; + + MLX5_SET(scheduling_context, tsar_ctx, element_type, + SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR); + + attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes); + *attr = cpu_to_be32(TSAR_ELEMENT_TSAR_TYPE_DWRR << 16); err = mlx5_create_scheduling_element_cmd(dev, SCHEDULING_HIERARCHY_E_SWITCH, @@ -1412,11 +1461,10 @@ static int esw_create_tsar(struct mlx5_eswitch *esw) &esw->qos.root_tsar_id); if (err) { esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err); - return err; + return; } esw->qos.enabled = true; - return 0; } static void esw_destroy_tsar(struct mlx5_eswitch *esw) @@ -1537,6 +1585,22 @@ static int esw_vport_qos_config(struct mlx5_eswitch *esw, return 0; } +int mlx5_esw_modify_vport_rate(struct 
mlx5_eswitch *esw, u16 vport_num, + u32 rate_mbps) +{ + u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; + struct mlx5_vport *vport; + + vport = mlx5_eswitch_get_vport(esw, vport_num); + MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps); + + return mlx5_modify_scheduling_element_cmd(esw->dev, + SCHEDULING_HIERARCHY_E_SWITCH, + ctx, + vport->qos.esw_tsar_ix, + MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW); +} + static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN]) { ((u8 *)node_guid)[7] = mac[0]; @@ -1619,7 +1683,7 @@ static void esw_vport_destroy_drop_counters(struct mlx5_vport *vport) } static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport, - int enable_events) + enum mlx5_eswitch_vport_event enabled_events) { u16 vport_num = vport->vport; @@ -1641,7 +1705,7 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport, esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter", vport_num); /* Sync with current vport context */ - vport->enabled_events = enable_events; + vport->enabled_events = enabled_events; vport->enabled = true; /* Esw manager is trusted by default. Host PF (vport 0) is trusted as well @@ -1770,11 +1834,46 @@ static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw) /* Public E-Switch API */ #define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev)) -int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode) +/* mlx5_eswitch_enable_pf_vf_vports() enables vports of PF, ECPF and VFs + * whichever are present on the eswitch. + */ +void +mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw, + enum mlx5_eswitch_vport_event enabled_events) { struct mlx5_vport *vport; + int i; + + /* Enable PF vport */ + vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF); + esw_enable_vport(esw, vport, enabled_events); + + /* Enable ECPF vports */ + if (mlx5_ecpf_vport_exists(esw->dev)) { + vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF); + esw_enable_vport(esw, vport, enabled_events); + } + + /* Enable VF vports */ + mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) + esw_enable_vport(esw, vport, enabled_events); +} + +/* mlx5_eswitch_disable_pf_vf_vports() disables vports of PF, ECPF and VFs + * whichever are previously enabled on the eswitch. + */ +void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw) +{ + struct mlx5_vport *vport; + int i; + + mlx5_esw_for_all_vports_reverse(esw, i, vport) + esw_disable_vport(esw, vport); +} + +int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode) +{ int err; - int i, enabled_events; if (!ESW_ALLOWED(esw) || !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) { @@ -1788,44 +1887,23 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode) if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support)) esw_warn(esw->dev, "engress ACL is not supported by FW\n"); + esw_create_tsar(esw); + esw->mode = mode; mlx5_lag_update(esw->dev); if (mode == MLX5_ESWITCH_LEGACY) { - err = esw_create_legacy_table(esw); - if (err) - goto abort; + err = esw_legacy_enable(esw); } else { mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH); mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); - err = esw_offloads_init(esw); + err = esw_offloads_enable(esw); } if (err) goto abort; - err = esw_create_tsar(esw); - if (err) - esw_warn(esw->dev, "Failed to create eswitch TSAR"); - - enabled_events = (mode == MLX5_ESWITCH_LEGACY) ? 
SRIOV_VPORT_EVENTS : - UC_ADDR_CHANGE; - - /* Enable PF vport */ - vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF); - esw_enable_vport(esw, vport, enabled_events); - - /* Enable ECPF vports */ - if (mlx5_ecpf_vport_exists(esw->dev)) { - vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF); - esw_enable_vport(esw, vport, enabled_events); - } - - /* Enable VF vports */ - mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) - esw_enable_vport(esw, vport, enabled_events); - mlx5_eswitch_event_handlers_register(esw); esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n", @@ -1847,10 +1925,7 @@ abort: void mlx5_eswitch_disable(struct mlx5_eswitch *esw) { - struct esw_mc_addr *mc_promisc; - struct mlx5_vport *vport; int old_mode; - int i; if (!ESW_ALLOWED(esw) || esw->mode == MLX5_ESWITCH_NONE) return; @@ -1859,21 +1934,14 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw) esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS", esw->esw_funcs.num_vfs, esw->enabled_vports); - mc_promisc = &esw->mc_promisc; mlx5_eswitch_event_handlers_unregister(esw); - mlx5_esw_for_all_vports(esw, i, vport) - esw_disable_vport(esw, vport); - - if (mc_promisc && mc_promisc->uplink_rule) - mlx5_del_flow_rules(mc_promisc->uplink_rule); - - esw_destroy_tsar(esw); - if (esw->mode == MLX5_ESWITCH_LEGACY) - esw_destroy_legacy_table(esw); + esw_legacy_disable(esw); else if (esw->mode == MLX5_ESWITCH_OFFLOADS) - esw_offloads_cleanup(esw); + esw_offloads_disable(esw); + + esw_destroy_tsar(esw); old_mode = esw->mode; esw->mode = MLX5_ESWITCH_NONE; @@ -1931,8 +1999,11 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) if (err) goto abort; + mutex_init(&esw->offloads.encap_tbl_lock); hash_init(esw->offloads.encap_tbl); - hash_init(esw->offloads.mod_hdr_tbl); + mutex_init(&esw->offloads.mod_hdr.lock); + hash_init(esw->offloads.mod_hdr.hlist); + atomic64_set(&esw->offloads.num_flows, 0); mutex_init(&esw->state_lock); mlx5_esw_for_all_vports(esw, i, vport) { @@ -1968,6 +2039,8 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) esw->dev->priv.eswitch = NULL; destroy_workqueue(esw->work_queue); esw_offloads_cleanup_reps(esw); + mutex_destroy(&esw->offloads.mod_hdr.lock); + mutex_destroy(&esw->offloads.encap_tbl_lock); kfree(esw->vports); kfree(esw); } @@ -2085,23 +2158,19 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, if (vlan > 4095 || qos > 7) return -EINVAL; - mutex_lock(&esw->state_lock); - err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags); if (err) - goto unlock; + return err; evport->info.vlan = vlan; evport->info.qos = qos; if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) { err = esw_vport_ingress_config(esw, evport); if (err) - goto unlock; + return err; err = esw_vport_egress_config(esw, evport); } -unlock: - mutex_unlock(&esw->state_lock); return err; } @@ -2109,11 +2178,16 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, u16 vport, u16 vlan, u8 qos) { u8 set_flags = 0; + int err; if (vlan || qos) set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT; - return __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags); + mutex_lock(&esw->state_lock); + err = __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags); + mutex_unlock(&esw->state_lock); + + return err; } int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 04685dbb280c..6bd6f5895244 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -35,6 +35,7 @@ #include <linux/if_ether.h> #include <linux/if_link.h> +#include <linux/atomic.h> #include <net/devlink.h> #include <linux/mlx5/device.h> #include <linux/mlx5/eswitch.h> @@ -68,7 +69,7 @@ struct vport_ingress { struct mlx5_flow_group *allow_spoofchk_only_grp; struct mlx5_flow_group *allow_untagged_only_grp; struct mlx5_flow_group *drop_grp; - int modify_metadata_id; + struct mlx5_modify_hdr *modify_metadata; struct mlx5_flow_handle *modify_metadata_rule; struct mlx5_flow_handle *allow_rule; struct mlx5_flow_handle *drop_rule; @@ -101,6 +102,13 @@ struct mlx5_vport_info { bool trusted; }; +/* Vport context events */ +enum mlx5_eswitch_vport_event { + MLX5_VPORT_UC_ADDR_CHANGE = BIT(0), + MLX5_VPORT_MC_ADDR_CHANGE = BIT(1), + MLX5_VPORT_PROMISC_CHANGE = BIT(3), +}; + struct mlx5_vport { struct mlx5_core_dev *dev; int vport; @@ -122,7 +130,7 @@ struct mlx5_vport { } qos; bool enabled; - u16 enabled_events; + enum mlx5_eswitch_vport_event enabled_events; }; enum offloads_fdb_flags { @@ -145,6 +153,7 @@ struct mlx5_eswitch_fdb { } legacy; struct offloads_fdb { + struct mlx5_flow_namespace *ns; struct mlx5_flow_table *slow_fdb; struct mlx5_flow_group *send_to_vport_grp; struct mlx5_flow_group *peer_miss_grp; @@ -173,13 +182,14 @@ struct mlx5_esw_offload { struct mlx5_eswitch_rep *vport_reps; struct list_head peer_flows; struct mutex peer_mutex; + struct mutex encap_tbl_lock; /* protects encap_tbl */ DECLARE_HASHTABLE(encap_tbl, 8); - DECLARE_HASHTABLE(mod_hdr_tbl, 8); + struct mod_hdr_tbl mod_hdr; DECLARE_HASHTABLE(termtbl_tbl, 8); struct mutex termtbl_mutex; /* protects termtbl hash */ const struct mlx5_eswitch_rep_ops *rep_ops[NUM_REP_TYPES]; u8 inline_mode; - u64 num_flows; + atomic64_t num_flows; enum devlink_eswitch_encap_mode encap; }; @@ -207,8 +217,11 @@ enum { struct mlx5_eswitch { struct mlx5_core_dev *dev; struct mlx5_nb nb; + /* legacy data structures */ struct mlx5_eswitch_fdb fdb_table; struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE]; + struct esw_mc_addr mc_promisc; + /* end of legacy */ struct workqueue_struct *work_queue; struct mlx5_vport *vports; u32 flags; @@ -218,7 +231,6 @@ struct mlx5_eswitch { * and async SRIOV admin state changes */ struct mutex state_lock; - struct esw_mc_addr mc_promisc; struct { bool enabled; @@ -233,8 +245,8 @@ struct mlx5_eswitch { struct mlx5_esw_functions esw_funcs; }; -void esw_offloads_cleanup(struct mlx5_eswitch *esw); -int esw_offloads_init(struct mlx5_eswitch *esw); +void esw_offloads_disable(struct mlx5_eswitch *esw); +int esw_offloads_enable(struct mlx5_eswitch *esw); void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw); int esw_offloads_init_reps(struct mlx5_eswitch *esw); void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw, @@ -251,6 +263,8 @@ void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw, struct mlx5_vport *vport); void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw, struct mlx5_vport *vport); +int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, + u32 rate_mbps); /* E-Switch API */ int mlx5_eswitch_init(struct mlx5_core_dev *dev); @@ -372,11 +386,11 @@ struct mlx5_esw_flow_attr { struct { u32 flags; struct mlx5_eswitch_rep *rep; + struct mlx5_pkt_reformat *pkt_reformat; struct mlx5_core_dev *mdev; - u32 encap_id; struct mlx5_termtbl_handle *termtbl; } dests[MLX5_MAX_FLOW_FWD_VPORTS]; - u32 mod_hdr_id; + struct mlx5_modify_hdr *modify_hdr; u8 
inner_match_level; u8 outer_match_level; struct mlx5_fc *counter; @@ -513,6 +527,11 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw); (vport) = &(esw)->vports[i], \ (i) < (esw)->total_vports; (i)++) +#define mlx5_esw_for_all_vports_reverse(esw, i, vport) \ + for ((i) = (esw)->total_vports - 1; \ + (vport) = &(esw)->vports[i], \ + (i) >= MLX5_VPORT_PF; (i)--) + #define mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs) \ for ((i) = MLX5_VPORT_FIRST_VF; \ (vport) = &(esw)->vports[(i)], \ @@ -574,6 +593,11 @@ bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num); void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs); int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data); +void +mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw, + enum mlx5_eswitch_vport_event enabled_events); +void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw); + #else /* CONFIG_MLX5_ESWITCH */ /* eswitch API stubs */ static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 0323fd078271..00d71db15f22 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -190,10 +190,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, MLX5_FLOW_DEST_VPORT_VHCA_ID; if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) { flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; - flow_act.reformat_id = attr->dests[j].encap_id; + flow_act.pkt_reformat = attr->dests[j].pkt_reformat; dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID; - dest[i].vport.reformat_id = - attr->dests[j].encap_id; + dest[i].vport.pkt_reformat = + attr->dests[j].pkt_reformat; } i++; } @@ -213,7 +213,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS; if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) - flow_act.modify_id = attr->mod_hdr_id; + flow_act.modify_hdr = attr->modify_hdr; fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!split); if (IS_ERR(fdb)) { @@ -229,7 +229,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, if (IS_ERR(rule)) goto err_add_rule; else - esw->offloads.num_flows++; + atomic64_inc(&esw->offloads.num_flows); return rule; @@ -276,7 +276,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID; if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) { dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID; - dest[i].vport.reformat_id = attr->dests[i].encap_id; + dest[i].vport.pkt_reformat = attr->dests[i].pkt_reformat; } } dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; @@ -294,7 +294,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, if (IS_ERR(rule)) goto add_err; - esw->offloads.num_flows++; + atomic64_inc(&esw->offloads.num_flows); return rule; add_err: @@ -322,7 +322,7 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw, mlx5_eswitch_termtbl_put(esw, attr->dests[i].termtbl); } - esw->offloads.num_flows--; + atomic64_dec(&esw->offloads.num_flows); if (fwd_rule) { esw_put_prio_table(esw, attr->chain, attr->prio, 1); @@ -438,9 +438,11 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw, fwd = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) && !attr->dest_chain); + mutex_lock(&esw->state_lock); + err = 
esw_add_vlan_action_check(attr, push, pop, fwd); if (err) - return err; + goto unlock; attr->vlan_handled = false; @@ -453,11 +455,11 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw, attr->vlan_handled = true; } - return 0; + goto unlock; } if (!push && !pop) - return 0; + goto unlock; if (!(offloads->vlan_push_pop_refcount)) { /* it's the 1st vlan rule, apply global vlan pop policy */ @@ -482,6 +484,8 @@ skip_set_push: out: if (!err) attr->vlan_handled = true; +unlock: + mutex_unlock(&esw->state_lock); return err; } @@ -504,6 +508,8 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw, pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP); fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST); + mutex_lock(&esw->state_lock); + vport = esw_vlan_action_get_vport(attr, push, pop); if (!push && !pop && fwd) { @@ -511,7 +517,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw, if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) vport->vlan_refcount--; - return 0; + goto out; } if (push) { @@ -529,12 +535,13 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw, skip_unset_push: offloads->vlan_push_pop_refcount--; if (offloads->vlan_push_pop_refcount) - return 0; + goto out; /* no more vlan rules, stop global vlan pop policy */ err = esw_set_global_vlan_pop(esw, 0); out: + mutex_unlock(&esw->state_lock); return err; } @@ -583,38 +590,15 @@ void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule) mlx5_del_flow_rules(rule); } -static int mlx5_eswitch_enable_passing_vport_metadata(struct mlx5_eswitch *esw) +static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable) { u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {}; u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {}; u8 fdb_to_vport_reg_c_id; int err; - err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport, - out, sizeof(out)); - if (err) - return err; - - fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out, - esw_vport_context.fdb_to_vport_reg_c_id); - - fdb_to_vport_reg_c_id |= MLX5_FDB_TO_VPORT_REG_C_0; - MLX5_SET(modify_esw_vport_context_in, in, - esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id); - - MLX5_SET(modify_esw_vport_context_in, in, - field_select.fdb_to_vport_reg_c_id, 1); - - return mlx5_eswitch_modify_esw_vport_context(esw, esw->manager_vport, - in, sizeof(in)); -} - -static int mlx5_eswitch_disable_passing_vport_metadata(struct mlx5_eswitch *esw) -{ - u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {}; - u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {}; - u8 fdb_to_vport_reg_c_id; - int err; + if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) + return 0; err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport, out, sizeof(out)); @@ -624,7 +608,10 @@ static int mlx5_eswitch_disable_passing_vport_metadata(struct mlx5_eswitch *esw) fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out, esw_vport_context.fdb_to_vport_reg_c_id); - fdb_to_vport_reg_c_id &= ~MLX5_FDB_TO_VPORT_REG_C_0; + if (enable) + fdb_to_vport_reg_c_id |= MLX5_FDB_TO_VPORT_REG_C_0; + else + fdb_to_vport_reg_c_id &= ~MLX5_FDB_TO_VPORT_REG_C_0; MLX5_SET(modify_esw_vport_context_in, in, esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id); @@ -1081,6 +1068,13 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) err = -EOPNOTSUPP; goto ns_err; } + esw->fdb_table.offloads.ns = root_ns; + err = mlx5_flow_namespace_set_mode(root_ns, + 
esw->dev->priv.steering->mode); + if (err) { + esw_warn(dev, "Failed to set FDB namespace steering mode\n"); + goto ns_err; + } max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) | MLX5_CAP_GEN(dev, max_flow_counter_15_0); @@ -1220,6 +1214,8 @@ send_vport_err: esw_destroy_offloads_fast_fdb_tables(esw); mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb); slow_fdb_err: + /* Holds true only as long as DMFS is the default */ + mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS); ns_err: kvfree(flow_group_in); return err; @@ -1239,6 +1235,9 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw) mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb); esw_destroy_offloads_fast_fdb_tables(esw); + /* Holds true only as long as DMFS is the default */ + mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns, + MLX5_FLOW_STEERING_MODE_DMFS); } static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports) @@ -1402,10 +1401,9 @@ void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw) int esw_offloads_init_reps(struct mlx5_eswitch *esw) { int total_vports = esw->total_vports; - struct mlx5_core_dev *dev = esw->dev; struct mlx5_eswitch_rep *rep; - u8 hw_id[ETH_ALEN], rep_type; int vport_index; + u8 rep_type; esw->offloads.vport_reps = kcalloc(total_vports, sizeof(struct mlx5_eswitch_rep), @@ -1413,12 +1411,9 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw) if (!esw->offloads.vport_reps) return -ENOMEM; - mlx5_query_mac_address(dev, hw_id); - mlx5_esw_for_all_reps(esw, vport_index, rep) { rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport_index); rep->vport_index = vport_index; - ether_addr_copy(rep->hw_id, hw_id); for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) atomic_set(&rep->rep_data[rep_type].state, @@ -1640,13 +1635,42 @@ static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw) esw_del_fdb_peer_miss_rules(esw); } +static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw, + struct mlx5_eswitch *peer_esw, + bool pair) +{ + struct mlx5_flow_root_namespace *peer_ns; + struct mlx5_flow_root_namespace *ns; + int err; + + peer_ns = peer_esw->dev->priv.steering->fdb_root_ns; + ns = esw->dev->priv.steering->fdb_root_ns; + + if (pair) { + err = mlx5_flow_namespace_set_peer(ns, peer_ns); + if (err) + return err; + + err = mlx5_flow_namespace_set_peer(peer_ns, ns); + if (err) { + mlx5_flow_namespace_set_peer(ns, NULL); + return err; + } + } else { + mlx5_flow_namespace_set_peer(ns, NULL); + mlx5_flow_namespace_set_peer(peer_ns, NULL); + } + + return 0; +} + static int mlx5_esw_offloads_devcom_event(int event, void *my_data, void *event_data) { struct mlx5_eswitch *esw = my_data; - struct mlx5_eswitch *peer_esw = event_data; struct mlx5_devcom *devcom = esw->dev->priv.devcom; + struct mlx5_eswitch *peer_esw = event_data; int err; switch (event) { @@ -1655,9 +1679,12 @@ static int mlx5_esw_offloads_devcom_event(int event, mlx5_eswitch_vport_match_metadata_enabled(peer_esw)) break; - err = mlx5_esw_offloads_pair(esw, peer_esw); + err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true); if (err) goto err_out; + err = mlx5_esw_offloads_pair(esw, peer_esw); + if (err) + goto err_peer; err = mlx5_esw_offloads_pair(peer_esw, esw); if (err) @@ -1673,6 +1700,7 @@ static int mlx5_esw_offloads_devcom_event(int event, mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false); mlx5_esw_offloads_unpair(peer_esw); mlx5_esw_offloads_unpair(esw); + mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false); break; } 
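Read together, the devcom-event hunks above establish a strict ordering for cross-eswitch pairing: the FDB root namespaces are peered first, the miss rules are then paired in both directions, and any failure unwinds in reverse. A minimal sketch of that flow, assuming only the helpers introduced in this patch (the wrapper function itself is hypothetical, not part of the patch):

static int esw_offloads_pair_sketch(struct mlx5_eswitch *esw,
				    struct mlx5_eswitch *peer_esw)
{
	int err;

	/* Peer the FDB namespaces before any miss rules are mirrored. */
	err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
	if (err)
		return err;

	err = mlx5_esw_offloads_pair(esw, peer_esw);
	if (err)
		goto err_peer;

	err = mlx5_esw_offloads_pair(peer_esw, esw);
	if (err)
		goto err_pair;

	return 0;

err_pair:
	mlx5_esw_offloads_unpair(esw);
err_peer:
	/* Unwind in reverse: drop the namespace peering set up first. */
	mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
	return err;
}

The reverse-order unwind mirrors the setup order; this is exactly what the err_peer label added by the hunk below implements.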
@@ -1680,7 +1708,8 @@ static int mlx5_esw_offloads_devcom_event(int event, err_pair: mlx5_esw_offloads_unpair(esw); - +err_peer: + mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false); err_out: mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d", event, err); @@ -1751,7 +1780,7 @@ static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw, if (vport->ingress.modify_metadata_rule) { flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; - flow_act.modify_id = vport->ingress.modify_metadata_id; + flow_act.modify_hdr = vport->ingress.modify_metadata; } vport->ingress.allow_rule = @@ -1787,9 +1816,11 @@ static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw, MLX5_SET(set_action_in, action, data, mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport)); - err = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS, - 1, action, &vport->ingress.modify_metadata_id); - if (err) { + vport->ingress.modify_metadata = + mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS, + 1, action); + if (IS_ERR(vport->ingress.modify_metadata)) { + err = PTR_ERR(vport->ingress.modify_metadata); esw_warn(esw->dev, "failed to alloc modify header for vport %d ingress acl (%d)\n", vport->vport, err); @@ -1797,7 +1828,7 @@ static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw, } flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW; - flow_act.modify_id = vport->ingress.modify_metadata_id; + flow_act.modify_hdr = vport->ingress.modify_metadata; vport->ingress.modify_metadata_rule = mlx5_add_flow_rules(vport->ingress.acl, &spec, &flow_act, NULL, 0); if (IS_ERR(vport->ingress.modify_metadata_rule)) { @@ -1811,7 +1842,7 @@ static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw, out: if (err) - mlx5_modify_header_dealloc(esw->dev, vport->ingress.modify_metadata_id); + mlx5_modify_header_dealloc(esw->dev, vport->ingress.modify_metadata); return err; } @@ -1820,7 +1851,7 @@ void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw, { if (vport->ingress.modify_metadata_rule) { mlx5_del_flow_rules(vport->ingress.modify_metadata_rule); - mlx5_modify_header_dealloc(esw->dev, vport->ingress.modify_metadata_id); + mlx5_modify_header_dealloc(esw->dev, vport->ingress.modify_metadata); vport->ingress.modify_metadata_rule = NULL; } @@ -2120,7 +2151,7 @@ int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type return NOTIFY_OK; } -int esw_offloads_init(struct mlx5_eswitch *esw) +int esw_offloads_enable(struct mlx5_eswitch *esw) { int err; @@ -2130,15 +2161,16 @@ int esw_offloads_init(struct mlx5_eswitch *esw) else esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE; + mlx5_rdma_enable_roce(esw->dev); err = esw_offloads_steering_init(esw); if (err) - return err; + goto err_steering_init; - if (mlx5_eswitch_vport_match_metadata_enabled(esw)) { - err = mlx5_eswitch_enable_passing_vport_metadata(esw); - if (err) - goto err_vport_metadata; - } + err = esw_set_passing_vport_metadata(esw, true); + if (err) + goto err_vport_metadata; + + mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE); err = esw_offloads_load_all_reps(esw); if (err) @@ -2147,15 +2179,15 @@ int esw_offloads_init(struct mlx5_eswitch *esw) esw_offloads_devcom_init(esw); mutex_init(&esw->offloads.termtbl_mutex); - mlx5_rdma_enable_roce(esw->dev); - return 0; err_reps: - if (mlx5_eswitch_vport_match_metadata_enabled(esw)) - 
mlx5_eswitch_disable_passing_vport_metadata(esw); + mlx5_eswitch_disable_pf_vf_vports(esw); + esw_set_passing_vport_metadata(esw, false); err_vport_metadata: esw_offloads_steering_cleanup(esw); +err_steering_init: + mlx5_rdma_disable_roce(esw->dev); return err; } @@ -2178,14 +2210,14 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw, return err; } -void esw_offloads_cleanup(struct mlx5_eswitch *esw) +void esw_offloads_disable(struct mlx5_eswitch *esw) { - mlx5_rdma_disable_roce(esw->dev); esw_offloads_devcom_cleanup(esw); esw_offloads_unload_all_reps(esw); - if (mlx5_eswitch_vport_match_metadata_enabled(esw)) - mlx5_eswitch_disable_passing_vport_metadata(esw); + mlx5_eswitch_disable_pf_vf_vports(esw); + esw_set_passing_vport_metadata(esw, false); esw_offloads_steering_cleanup(esw); + mlx5_rdma_disable_roce(esw->dev); esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE; } @@ -2345,7 +2377,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, break; } - if (esw->offloads.num_flows > 0) { + if (atomic64_read(&esw->offloads.num_flows) > 0) { NL_SET_ERR_MSG_MOD(extack, "Can't set inline mode when flows are configured"); return -EOPNOTSUPP; @@ -2455,7 +2487,7 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, if (esw->offloads.encap == encap) return 0; - if (esw->offloads.num_flows > 0) { + if (atomic64_read(&esw->offloads.num_flows) > 0) { NL_SET_ERR_MSG_MOD(extack, "Can't set encapsulation when flows are configured"); return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index 7ac1249eadc3..579c306caa7b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -107,6 +107,50 @@ static int mlx5_cmd_stub_delete_fte(struct mlx5_flow_root_namespace *ns, return 0; } +static int mlx5_cmd_stub_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns, + int reformat_type, + size_t size, + void *reformat_data, + enum mlx5_flow_namespace_type namespace, + struct mlx5_pkt_reformat *pkt_reformat) +{ + return 0; +} + +static void mlx5_cmd_stub_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns, + struct mlx5_pkt_reformat *pkt_reformat) +{ +} + +static int mlx5_cmd_stub_modify_header_alloc(struct mlx5_flow_root_namespace *ns, + u8 namespace, u8 num_actions, + void *modify_actions, + struct mlx5_modify_hdr *modify_hdr) +{ + return 0; +} + +static void mlx5_cmd_stub_modify_header_dealloc(struct mlx5_flow_root_namespace *ns, + struct mlx5_modify_hdr *modify_hdr) +{ +} + +static int mlx5_cmd_stub_set_peer(struct mlx5_flow_root_namespace *ns, + struct mlx5_flow_root_namespace *peer_ns) +{ + return 0; +} + +static int mlx5_cmd_stub_create_ns(struct mlx5_flow_root_namespace *ns) +{ + return 0; +} + +static int mlx5_cmd_stub_destroy_ns(struct mlx5_flow_root_namespace *ns) +{ + return 0; +} + static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, u32 underlay_qpn, bool disconnect) @@ -182,7 +226,7 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns, } else { MLX5_SET(create_flow_table_in, in, flow_table_context.table_miss_action, - ns->def_miss_action); + ft->def_miss_action); } break; @@ -262,7 +306,7 @@ static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns, } else { MLX5_SET(modify_flow_table_in, in, flow_table_context.table_miss_action, - ns->def_miss_action); + ft->def_miss_action); } } @@ -412,11 +456,13 @@ static int 
mlx5_cmd_set_fte(struct mlx5_core_dev *dev, } else { MLX5_SET(flow_context, in_flow_context, action, fte->action.action); - MLX5_SET(flow_context, in_flow_context, packet_reformat_id, - fte->action.reformat_id); + if (fte->action.pkt_reformat) + MLX5_SET(flow_context, in_flow_context, packet_reformat_id, + fte->action.pkt_reformat->id); } - MLX5_SET(flow_context, in_flow_context, modify_header_id, - fte->action.modify_id); + if (fte->action.modify_hdr) + MLX5_SET(flow_context, in_flow_context, modify_header_id, + fte->action.modify_hdr->id); vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan); @@ -468,7 +514,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, MLX5_FLOW_DEST_VPORT_REFORMAT_ID)); MLX5_SET(extended_dest_format, in_dests, packet_reformat_id, - dst->dest_attr.vport.reformat_id); + dst->dest_attr.vport.pkt_reformat->id); } break; default: @@ -566,7 +612,9 @@ static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns, return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } -int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id) +int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, + enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask, + u32 *id) { u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0}; u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0}; @@ -574,6 +622,7 @@ int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id) MLX5_SET(alloc_flow_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_FLOW_COUNTER); + MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, alloc_bitmask); err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); if (!err) @@ -581,6 +630,11 @@ int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id) return err; } +int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id) +{ + return mlx5_cmd_fc_bulk_alloc(dev, 0, id); +} + int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id) { u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {0}; @@ -615,77 +669,35 @@ int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id, return 0; } -struct mlx5_cmd_fc_bulk { - u32 id; - int num; - int outlen; - u32 out[0]; -}; - -struct mlx5_cmd_fc_bulk * -mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u32 id, int num) -{ - struct mlx5_cmd_fc_bulk *b; - int outlen = - MLX5_ST_SZ_BYTES(query_flow_counter_out) + - MLX5_ST_SZ_BYTES(traffic_counter) * num; - - b = kzalloc(sizeof(*b) + outlen, GFP_KERNEL); - if (!b) - return NULL; - - b->id = id; - b->num = num; - b->outlen = outlen; - - return b; -} - -void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b) +int mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len) { - kfree(b); + return MLX5_ST_SZ_BYTES(query_flow_counter_out) + + MLX5_ST_SZ_BYTES(traffic_counter) * bulk_len; } -int -mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b) +int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len, + u32 *out) { + int outlen = mlx5_cmd_fc_get_bulk_query_out_len(bulk_len); u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0}; MLX5_SET(query_flow_counter_in, in, opcode, MLX5_CMD_OP_QUERY_FLOW_COUNTER); MLX5_SET(query_flow_counter_in, in, op_mod, 0); - MLX5_SET(query_flow_counter_in, in, flow_counter_id, b->id); - MLX5_SET(query_flow_counter_in, in, num_of_counters, b->num); - return mlx5_cmd_exec(dev, in, sizeof(in), b->out, b->outlen); + MLX5_SET(query_flow_counter_in, in, flow_counter_id, base_id); + MLX5_SET(query_flow_counter_in, in, num_of_counters, bulk_len); + return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); } -void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev 
*dev, - struct mlx5_cmd_fc_bulk *b, u32 id, - u64 *packets, u64 *bytes) -{ - int index = id - b->id; - void *stats; - - if (index < 0 || index >= b->num) { - mlx5_core_warn(dev, "Flow counter id (0x%x) out of range (0x%x..0x%x). Counter ignored.\n", - id, b->id, b->id + b->num - 1); - return; - } - - stats = MLX5_ADDR_OF(query_flow_counter_out, b->out, - flow_statistics[index]); - *packets = MLX5_GET64(traffic_counter, stats, packets); - *bytes = MLX5_GET64(traffic_counter, stats, octets); -} - -int mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev, - int reformat_type, - size_t size, - void *reformat_data, - enum mlx5_flow_namespace_type namespace, - u32 *packet_reformat_id) +static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns, + int reformat_type, + size_t size, + void *reformat_data, + enum mlx5_flow_namespace_type namespace, + struct mlx5_pkt_reformat *pkt_reformat) { u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)]; + struct mlx5_core_dev *dev = ns->dev; void *packet_reformat_context_in; int max_encap_size; void *reformat; @@ -728,35 +740,36 @@ int mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev, memset(out, 0, sizeof(out)); err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); - *packet_reformat_id = MLX5_GET(alloc_packet_reformat_context_out, - out, packet_reformat_id); + pkt_reformat->id = MLX5_GET(alloc_packet_reformat_context_out, + out, packet_reformat_id); kfree(in); return err; } -EXPORT_SYMBOL(mlx5_packet_reformat_alloc); -void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev, - u32 packet_reformat_id) +static void mlx5_cmd_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns, + struct mlx5_pkt_reformat *pkt_reformat) { u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)]; u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_out)]; + struct mlx5_core_dev *dev = ns->dev; memset(in, 0, sizeof(in)); MLX5_SET(dealloc_packet_reformat_context_in, in, opcode, MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT); MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id, - packet_reformat_id); + pkt_reformat->id); mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } -EXPORT_SYMBOL(mlx5_packet_reformat_dealloc); -int mlx5_modify_header_alloc(struct mlx5_core_dev *dev, - u8 namespace, u8 num_actions, - void *modify_actions, u32 *modify_header_id) +static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns, + u8 namespace, u8 num_actions, + void *modify_actions, + struct mlx5_modify_hdr *modify_hdr) { u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)]; int max_actions, actions_size, inlen, err; + struct mlx5_core_dev *dev = ns->dev; void *actions_in; u8 table_type; u32 *in; @@ -807,26 +820,26 @@ int mlx5_modify_header_alloc(struct mlx5_core_dev *dev, memset(out, 0, sizeof(out)); err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); - *modify_header_id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id); + modify_hdr->id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id); kfree(in); return err; } -EXPORT_SYMBOL(mlx5_modify_header_alloc); -void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id) +static void mlx5_cmd_modify_header_dealloc(struct mlx5_flow_root_namespace *ns, + struct mlx5_modify_hdr *modify_hdr) { u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)]; u32 out[MLX5_ST_SZ_DW(dealloc_modify_header_context_out)]; + struct mlx5_core_dev *dev = ns->dev; memset(in, 0, sizeof(in)); MLX5_SET(dealloc_modify_header_context_in, in, 
opcode, MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT); MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id, - modify_header_id); + modify_hdr->id); mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } -EXPORT_SYMBOL(mlx5_modify_header_dealloc); static const struct mlx5_flow_cmds mlx5_flow_cmds = { .create_flow_table = mlx5_cmd_create_flow_table, @@ -838,6 +851,13 @@ static const struct mlx5_flow_cmds mlx5_flow_cmds = { .update_fte = mlx5_cmd_update_fte, .delete_fte = mlx5_cmd_delete_fte, .update_root_ft = mlx5_cmd_update_root_ft, + .packet_reformat_alloc = mlx5_cmd_packet_reformat_alloc, + .packet_reformat_dealloc = mlx5_cmd_packet_reformat_dealloc, + .modify_header_alloc = mlx5_cmd_modify_header_alloc, + .modify_header_dealloc = mlx5_cmd_modify_header_dealloc, + .set_peer = mlx5_cmd_stub_set_peer, + .create_ns = mlx5_cmd_stub_create_ns, + .destroy_ns = mlx5_cmd_stub_destroy_ns, }; static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = { @@ -850,9 +870,16 @@ static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = { .update_fte = mlx5_cmd_stub_update_fte, .delete_fte = mlx5_cmd_stub_delete_fte, .update_root_ft = mlx5_cmd_stub_update_root_ft, + .packet_reformat_alloc = mlx5_cmd_stub_packet_reformat_alloc, + .packet_reformat_dealloc = mlx5_cmd_stub_packet_reformat_dealloc, + .modify_header_alloc = mlx5_cmd_stub_modify_header_alloc, + .modify_header_dealloc = mlx5_cmd_stub_modify_header_dealloc, + .set_peer = mlx5_cmd_stub_set_peer, + .create_ns = mlx5_cmd_stub_create_ns, + .destroy_ns = mlx5_cmd_stub_destroy_ns, }; -static const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void) +const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void) { return &mlx5_flow_cmds; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h index e340f9af2f5a..d62de642eca9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h @@ -75,24 +75,45 @@ struct mlx5_flow_cmds { struct mlx5_flow_table *ft, u32 underlay_qpn, bool disconnect); + + int (*packet_reformat_alloc)(struct mlx5_flow_root_namespace *ns, + int reformat_type, + size_t size, + void *reformat_data, + enum mlx5_flow_namespace_type namespace, + struct mlx5_pkt_reformat *pkt_reformat); + + void (*packet_reformat_dealloc)(struct mlx5_flow_root_namespace *ns, + struct mlx5_pkt_reformat *pkt_reformat); + + int (*modify_header_alloc)(struct mlx5_flow_root_namespace *ns, + u8 namespace, u8 num_actions, + void *modify_actions, + struct mlx5_modify_hdr *modify_hdr); + + void (*modify_header_dealloc)(struct mlx5_flow_root_namespace *ns, + struct mlx5_modify_hdr *modify_hdr); + + int (*set_peer)(struct mlx5_flow_root_namespace *ns, + struct mlx5_flow_root_namespace *peer_ns); + + int (*create_ns)(struct mlx5_flow_root_namespace *ns); + int (*destroy_ns)(struct mlx5_flow_root_namespace *ns); }; int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id); +int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, + enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask, + u32 *id); int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id); int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id, u64 *packets, u64 *bytes); -struct mlx5_cmd_fc_bulk; - -struct mlx5_cmd_fc_bulk * -mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u32 id, int num); -void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b); -int -mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b); -void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev, - struct 
mlx5_cmd_fc_bulk *b, u32 id, - u64 *packets, u64 *bytes); +int mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len); +int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len, + u32 *out); const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type); +const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void); #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 3e99799bdb40..3bbb49354829 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -60,7 +60,8 @@ ADD_PRIO(num_prios_val, 0, num_levels_val, {},\ __VA_ARGS__)\ -#define ADD_NS(...) {.type = FS_TYPE_NAMESPACE,\ +#define ADD_NS(def_miss_act, ...) {.type = FS_TYPE_NAMESPACE, \ + .def_miss_action = def_miss_act,\ .children = (struct init_tree_node[]) {__VA_ARGS__},\ .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \ } @@ -131,33 +132,41 @@ static struct init_tree_node { int num_leaf_prios; int prio; int num_levels; + enum mlx5_flow_table_miss_action def_miss_action; } root_fs = { .type = FS_TYPE_NAMESPACE, .ar_size = 7, - .children = (struct init_tree_node[]) { - ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, - FS_CHAINING_CAPS, - ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, - BY_PASS_PRIO_NUM_LEVELS))), - ADD_PRIO(0, LAG_MIN_LEVEL, 0, - FS_CHAINING_CAPS, - ADD_NS(ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS, - LAG_PRIO_NUM_LEVELS))), - ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {}, - ADD_NS(ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS, OFFLOADS_MAX_FT))), - ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0, - FS_CHAINING_CAPS, - ADD_NS(ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS, - ETHTOOL_PRIO_NUM_LEVELS))), - ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {}, - ADD_NS(ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS, KERNEL_NIC_TC_NUM_LEVELS), - ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS, - KERNEL_NIC_PRIO_NUM_LEVELS))), - ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, - FS_CHAINING_CAPS, - ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_NUM_LEVELS))), - ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {}, - ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_NUM_LEVELS))), + .children = (struct init_tree_node[]){ + ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS, + ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, + ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, + BY_PASS_PRIO_NUM_LEVELS))), + ADD_PRIO(0, LAG_MIN_LEVEL, 0, FS_CHAINING_CAPS, + ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, + ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS, + LAG_PRIO_NUM_LEVELS))), + ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {}, + ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, + ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS, + OFFLOADS_MAX_FT))), + ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0, FS_CHAINING_CAPS, + ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, + ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS, + ETHTOOL_PRIO_NUM_LEVELS))), + ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {}, + ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, + ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS, + KERNEL_NIC_TC_NUM_LEVELS), + ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS, + KERNEL_NIC_PRIO_NUM_LEVELS))), + ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS, + ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, + ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, + LEFTOVERS_NUM_LEVELS))), + ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {}, + ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, + ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, + ANCHOR_NUM_LEVELS))), } }; @@ -167,8 +176,29 @@ static struct init_tree_node egress_root_fs = { .children = (struct init_tree_node[]) { ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0, FS_CHAINING_CAPS_EGRESS, - 
ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, + ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, + ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, + BY_PASS_PRIO_NUM_LEVELS))), + } +}; + +#define RDMA_RX_BYPASS_PRIO 0 +#define RDMA_RX_KERNEL_PRIO 1 +static struct init_tree_node rdma_rx_root_fs = { + .type = FS_TYPE_NAMESPACE, + .ar_size = 2, + .children = (struct init_tree_node[]) { + [RDMA_RX_BYPASS_PRIO] = + ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS, 0, + FS_CHAINING_CAPS, + ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, + ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_REGULAR_PRIOS, BY_PASS_PRIO_NUM_LEVELS))), + [RDMA_RX_KERNEL_PRIO] = + ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS + 1, 0, + FS_CHAINING_CAPS, + ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN, + ADD_MULTIPLE_PRIO(1, 1))), } }; @@ -1014,6 +1044,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table); log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0; next_ft = find_next_chained_ft(fs_prio); + ft->def_miss_action = ns->def_miss_action; err = root->cmds->create_flow_table(root, ft, log_table_sz, next_ft); if (err) goto free_ft; @@ -1384,7 +1415,8 @@ static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1, ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ? (d1->vport.vhca_id == d2->vport.vhca_id) : true) && ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ? - (d1->vport.reformat_id == d2->vport.reformat_id) : true)) || + (d1->vport.pkt_reformat->id == + d2->vport.pkt_reformat->id) : true)) || (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE && d1->ft == d2->ft) || (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR && @@ -2056,16 +2088,18 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, if (steering->sniffer_tx_root_ns) return &steering->sniffer_tx_root_ns->ns; return NULL; - case MLX5_FLOW_NAMESPACE_RDMA_RX: - if (steering->rdma_rx_root_ns) - return &steering->rdma_rx_root_ns->ns; - return NULL; default: break; } if (type == MLX5_FLOW_NAMESPACE_EGRESS) { root_ns = steering->egress_root_ns; + } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX) { + root_ns = steering->rdma_rx_root_ns; + prio = RDMA_RX_BYPASS_PRIO; + } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL) { + root_ns = steering->rdma_rx_root_ns; + prio = RDMA_RX_KERNEL_PRIO; } else { /* Must be NIC RX */ root_ns = steering->root_ns; prio = type; @@ -2155,7 +2189,8 @@ static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace return ns; } -static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio) +static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio, + int def_miss_act) { struct mlx5_flow_namespace *ns; @@ -2164,6 +2199,7 @@ static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio) return ERR_PTR(-ENOMEM); fs_init_namespace(ns); + ns->def_miss_action = def_miss_act; tree_init_node(&ns->node, NULL, del_sw_ns); tree_add_node(&ns->node, &prio->node); list_add_tail(&ns->node.list, &prio->node.children); @@ -2230,7 +2266,7 @@ static int init_root_tree_recursive(struct mlx5_flow_steering *steering, base = &fs_prio->node; } else if (init_node->type == FS_TYPE_NAMESPACE) { fs_get_obj(fs_prio, fs_parent_node); - fs_ns = fs_create_namespace(fs_prio); + fs_ns = fs_create_namespace(fs_prio, init_node->def_miss_action); if (IS_ERR(fs_ns)) return PTR_ERR(fs_ns); base = &fs_ns->node; @@ -2494,18 +2530,25 @@ static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering) static int 
init_rdma_rx_root_ns(struct mlx5_flow_steering *steering) { - struct fs_prio *prio; + int err; steering->rdma_rx_root_ns = create_root_ns(steering, FS_FT_RDMA_RX); if (!steering->rdma_rx_root_ns) return -ENOMEM; - steering->rdma_rx_root_ns->def_miss_action = - MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN; + err = init_root_tree(steering, &rdma_rx_root_fs, + &steering->rdma_rx_root_ns->ns.node); + if (err) + goto out_err; - /* Create single prio */ - prio = fs_create_prio(&steering->rdma_rx_root_ns->ns, 0, 1); - return PTR_ERR_OR_ZERO(prio); + set_prio_attrs(steering->rdma_rx_root_ns); + + return 0; + +out_err: + cleanup_root_ns(steering->rdma_rx_root_ns); + steering->rdma_rx_root_ns = NULL; + return err; } static int init_fdb_root_ns(struct mlx5_flow_steering *steering) { @@ -2543,7 +2586,7 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering) } for (chain = 0; chain <= FDB_MAX_CHAIN; chain++) { - ns = fs_create_namespace(maj_prio); + ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF); if (IS_ERR(ns)) { err = PTR_ERR(ns); goto out_err; @@ -2846,3 +2889,160 @@ out: return err; } EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn); + +static struct mlx5_flow_root_namespace +*get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type) +{ + struct mlx5_flow_namespace *ns; + + if (ns_type == MLX5_FLOW_NAMESPACE_ESW_EGRESS || + ns_type == MLX5_FLOW_NAMESPACE_ESW_INGRESS) + ns = mlx5_get_flow_vport_acl_namespace(dev, ns_type, 0); + else + ns = mlx5_get_flow_namespace(dev, ns_type); + if (!ns) + return NULL; + + return find_root(&ns->node); +} + +struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev, + u8 ns_type, u8 num_actions, + void *modify_actions) +{ + struct mlx5_flow_root_namespace *root; + struct mlx5_modify_hdr *modify_hdr; + int err; + + root = get_root_namespace(dev, ns_type); + if (!root) + return ERR_PTR(-EOPNOTSUPP); + + modify_hdr = kzalloc(sizeof(*modify_hdr), GFP_KERNEL); + if (!modify_hdr) + return ERR_PTR(-ENOMEM); + + modify_hdr->ns_type = ns_type; + err = root->cmds->modify_header_alloc(root, ns_type, num_actions, + modify_actions, modify_hdr); + if (err) { + kfree(modify_hdr); + return ERR_PTR(err); + } + + return modify_hdr; +} +EXPORT_SYMBOL(mlx5_modify_header_alloc); + +void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, + struct mlx5_modify_hdr *modify_hdr) +{ + struct mlx5_flow_root_namespace *root; + + root = get_root_namespace(dev, modify_hdr->ns_type); + if (WARN_ON(!root)) + return; + root->cmds->modify_header_dealloc(root, modify_hdr); + kfree(modify_hdr); +} +EXPORT_SYMBOL(mlx5_modify_header_dealloc); + +struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev, + int reformat_type, + size_t size, + void *reformat_data, + enum mlx5_flow_namespace_type ns_type) +{ + struct mlx5_pkt_reformat *pkt_reformat; + struct mlx5_flow_root_namespace *root; + int err; + + root = get_root_namespace(dev, ns_type); + if (!root) + return ERR_PTR(-EOPNOTSUPP); + + pkt_reformat = kzalloc(sizeof(*pkt_reformat), GFP_KERNEL); + if (!pkt_reformat) + return ERR_PTR(-ENOMEM); + + pkt_reformat->ns_type = ns_type; + pkt_reformat->reformat_type = reformat_type; + err = root->cmds->packet_reformat_alloc(root, reformat_type, size, + reformat_data, ns_type, + pkt_reformat); + if (err) { + kfree(pkt_reformat); + return ERR_PTR(err); + } + + return pkt_reformat; +} +EXPORT_SYMBOL(mlx5_packet_reformat_alloc); + +void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev, + struct mlx5_pkt_reformat 
*pkt_reformat) +{ + struct mlx5_flow_root_namespace *root; + + root = get_root_namespace(dev, pkt_reformat->ns_type); + if (WARN_ON(!root)) + return; + root->cmds->packet_reformat_dealloc(root, pkt_reformat); + kfree(pkt_reformat); +} +EXPORT_SYMBOL(mlx5_packet_reformat_dealloc); + +int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns, + struct mlx5_flow_root_namespace *peer_ns) +{ + if (peer_ns && ns->mode != peer_ns->mode) { + mlx5_core_err(ns->dev, + "Can't peer namespace of different steering mode\n"); + return -EINVAL; + } + + return ns->cmds->set_peer(ns, peer_ns); +} + +/* This function should be called only at init stage of the namespace. + * It is not safe to call this function while steering operations + * are executed in the namespace. + */ +int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns, + enum mlx5_flow_steering_mode mode) +{ + struct mlx5_flow_root_namespace *root; + const struct mlx5_flow_cmds *cmds; + int err; + + root = find_root(&ns->node); + if (&root->ns != ns) + /* Can't set cmds to non root namespace */ + return -EINVAL; + + if (root->table_type != FS_FT_FDB) + return -EOPNOTSUPP; + + if (root->mode == mode) + return 0; + + if (mode == MLX5_FLOW_STEERING_MODE_SMFS) + cmds = mlx5_fs_cmd_get_dr_cmds(); + else + cmds = mlx5_fs_cmd_get_fw_cmds(); + if (!cmds) + return -EOPNOTSUPP; + + err = cmds->create_ns(root); + if (err) { + mlx5_core_err(root->dev, "Failed to create flow namespace (%d)\n", + err); + return err; + } + + root->cmds->destroy_ns(root); + root->cmds = cmds; + root->mode = mode; + + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index c1252d6be0ef..00717eba2256 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -37,6 +37,24 @@ #include <linux/mlx5/fs.h> #include <linux/rhashtable.h> #include <linux/llist.h> +#include <steering/fs_dr.h> + +struct mlx5_modify_hdr { + enum mlx5_flow_namespace_type ns_type; + union { + struct mlx5_fs_dr_action action; + u32 id; + }; +}; + +struct mlx5_pkt_reformat { + enum mlx5_flow_namespace_type ns_type; + int reformat_type; /* from mlx5_ifc */ + union { + struct mlx5_fs_dr_action action; + u32 id; + }; +}; /* FS_TYPE_PRIO_CHAINS is a PRIO that will have namespaces only, * and those are in parallel to one another when going over them to connect @@ -80,9 +98,15 @@ enum fs_fte_status { FS_FTE_STATUS_EXISTING = 1UL << 0, }; +enum mlx5_flow_steering_mode { + MLX5_FLOW_STEERING_MODE_DMFS, + MLX5_FLOW_STEERING_MODE_SMFS +}; + struct mlx5_flow_steering { struct mlx5_core_dev *dev; - struct kmem_cache *fgs_cache; + enum mlx5_flow_steering_mode mode; + struct kmem_cache *fgs_cache; struct kmem_cache *ftes_cache; struct mlx5_flow_root_namespace *root_ns; struct mlx5_flow_root_namespace *fdb_root_ns; @@ -128,6 +152,7 @@ struct mlx5_flow_handle { /* Type of children is mlx5_flow_group */ struct mlx5_flow_table { struct fs_node node; + struct mlx5_fs_dr_table fs_dr_table; u32 id; u16 vport; unsigned int max_fte; @@ -145,6 +170,7 @@ struct mlx5_flow_table { struct list_head fwd_rules; u32 flags; struct rhltable fgs_hash; + enum mlx5_flow_table_miss_action def_miss_action; }; struct mlx5_ft_underlay_qp { @@ -167,6 +193,7 @@ struct mlx5_ft_underlay_qp { /* Type of children is mlx5_flow_rule */ struct fs_fte { struct fs_node node; + struct mlx5_fs_dr_rule fs_dr_rule; u32 val[MLX5_ST_SZ_DW_MATCH_PARAM]; u32 dests_size; u32 index; @@ -191,6 +218,7 @@ struct 
fs_prio { struct mlx5_flow_namespace { /* parent == NULL => root ns */ struct fs_node node; + enum mlx5_flow_table_miss_action def_miss_action; }; struct mlx5_flow_group_mask { @@ -201,6 +229,7 @@ struct mlx5_flow_group_mask { /* Type of children is fs_fte */ struct mlx5_flow_group { struct fs_node node; + struct mlx5_fs_dr_matcher fs_dr_matcher; struct mlx5_flow_group_mask mask; u32 start_index; u32 max_ftes; @@ -212,6 +241,8 @@ struct mlx5_flow_group { struct mlx5_flow_root_namespace { struct mlx5_flow_namespace ns; + enum mlx5_flow_steering_mode mode; + struct mlx5_fs_dr_domain fs_dr_domain; enum fs_flow_table_type table_type; struct mlx5_core_dev *dev; struct mlx5_flow_table *root_ft; @@ -219,7 +250,6 @@ struct mlx5_flow_root_namespace { struct mutex chain_lock; struct list_head underlay_qpns; const struct mlx5_flow_cmds *cmds; - enum mlx5_flow_table_miss_action def_miss_action; }; int mlx5_init_fc_stats(struct mlx5_core_dev *dev); @@ -230,6 +260,14 @@ void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev, void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev, unsigned long interval); +const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void); + +int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns, + struct mlx5_flow_root_namespace *peer_ns); + +int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns, + enum mlx5_flow_steering_mode mode); + int mlx5_init_fs(struct mlx5_core_dev *dev); void mlx5_cleanup_fs(struct mlx5_core_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c index 1834d9f3aa1c..ab69effb056d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c @@ -40,6 +40,8 @@ #define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000) /* Max number of counters to query in bulk read is 32K */ #define MLX5_SW_MAX_COUNTERS_BULK BIT(15) +#define MLX5_FC_POOL_MAX_THRESHOLD BIT(18) +#define MLX5_FC_POOL_USED_BUFF_RATIO 10 struct mlx5_fc_cache { u64 packets; @@ -58,12 +60,18 @@ struct mlx5_fc { u64 lastpackets; u64 lastbytes; + struct mlx5_fc_bulk *bulk; u32 id; bool aging; struct mlx5_fc_cache cache ____cacheline_aligned_in_smp; }; +static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev); +static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool); +static struct mlx5_fc *mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool); +static void mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc); + /* locking scheme: * * It is the responsibility of the user to prevent concurrent calls or bad @@ -75,7 +83,7 @@ struct mlx5_fc { * access to counter list: * - create (user context) * - mlx5_fc_create() only adds to an addlist to be used by - * mlx5_fc_stats_query_work(). addlist is a lockless single linked list + * mlx5_fc_stats_work(). addlist is a lockless single linked list * that doesn't require any additional synchronization when adding single * node. * - spawn thread to do the actual destroy @@ -136,81 +144,87 @@ static void mlx5_fc_stats_remove(struct mlx5_core_dev *dev, spin_unlock(&fc_stats->counters_idr_lock); } -/* The function returns the last counter that was queried so the caller - * function can continue calling it till all counters are queried. 
- */ -static struct mlx5_fc *mlx5_fc_stats_query(struct mlx5_core_dev *dev, - struct mlx5_fc *first, - u32 last_id) +static int get_max_bulk_query_len(struct mlx5_core_dev *dev) { - struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; - struct mlx5_fc *counter = NULL; - struct mlx5_cmd_fc_bulk *b; - bool more = false; - u32 afirst_id; - int num; - int err; + return min_t(int, MLX5_SW_MAX_COUNTERS_BULK, + (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk))); +} - int max_bulk = min_t(int, MLX5_SW_MAX_COUNTERS_BULK, - (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk))); +static void update_counter_cache(int index, u32 *bulk_raw_data, + struct mlx5_fc_cache *cache) +{ + void *stats = MLX5_ADDR_OF(query_flow_counter_out, bulk_raw_data, + flow_statistics[index]); + u64 packets = MLX5_GET64(traffic_counter, stats, packets); + u64 bytes = MLX5_GET64(traffic_counter, stats, octets); - /* first id must be aligned to 4 when using bulk query */ - afirst_id = first->id & ~0x3; + if (cache->packets == packets) + return; - /* number of counters to query inc. the last counter */ - num = ALIGN(last_id - afirst_id + 1, 4); - if (num > max_bulk) { - num = max_bulk; - last_id = afirst_id + num - 1; - } + cache->packets = packets; + cache->bytes = bytes; + cache->lastuse = jiffies; +} - b = mlx5_cmd_fc_bulk_alloc(dev, afirst_id, num); - if (!b) { - mlx5_core_err(dev, "Error allocating resources for bulk query\n"); - return NULL; - } +static void mlx5_fc_stats_query_counter_range(struct mlx5_core_dev *dev, + struct mlx5_fc *first, + u32 last_id) +{ + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + bool query_more_counters = (first->id <= last_id); + int max_bulk_len = get_max_bulk_query_len(dev); + u32 *data = fc_stats->bulk_query_out; + struct mlx5_fc *counter = first; + u32 bulk_base_id; + int bulk_len; + int err; - err = mlx5_cmd_fc_bulk_query(dev, b); - if (err) { - mlx5_core_err(dev, "Error doing bulk query: %d\n", err); - goto out; - } + while (query_more_counters) { + /* first id must be aligned to 4 when using bulk query */ + bulk_base_id = counter->id & ~0x3; - counter = first; - list_for_each_entry_from(counter, &fc_stats->counters, list) { - struct mlx5_fc_cache *c = &counter->cache; - u64 packets; - u64 bytes; + /* number of counters to query inc. the last counter */ + bulk_len = min_t(int, max_bulk_len, + ALIGN(last_id - bulk_base_id + 1, 4)); - if (counter->id > last_id) { - more = true; - break; + err = mlx5_cmd_fc_bulk_query(dev, bulk_base_id, bulk_len, + data); + if (err) { + mlx5_core_err(dev, "Error doing bulk query: %d\n", err); + return; } + query_more_counters = false; - mlx5_cmd_fc_bulk_get(dev, b, - counter->id, &packets, &bytes); + list_for_each_entry_from(counter, &fc_stats->counters, list) { + int counter_index = counter->id - bulk_base_id; + struct mlx5_fc_cache *cache = &counter->cache; - if (c->packets == packets) - continue; + if (counter->id >= bulk_base_id + bulk_len) { + query_more_counters = true; + break; + } - c->packets = packets; - c->bytes = bytes; - c->lastuse = jiffies; + update_counter_cache(counter_index, data, cache); + } } - -out: - mlx5_cmd_fc_bulk_free(b); - - return more ? 
counter : NULL; } -static void mlx5_free_fc(struct mlx5_core_dev *dev, - struct mlx5_fc *counter) +static void mlx5_fc_free(struct mlx5_core_dev *dev, struct mlx5_fc *counter) { mlx5_cmd_fc_free(dev, counter->id); kfree(counter); } +static void mlx5_fc_release(struct mlx5_core_dev *dev, struct mlx5_fc *counter) +{ + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + + if (counter->bulk) + mlx5_fc_pool_release_counter(&fc_stats->fc_pool, counter); + else + mlx5_fc_free(dev, counter); +} + static void mlx5_fc_stats_work(struct work_struct *work) { struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev, @@ -234,7 +248,7 @@ static void mlx5_fc_stats_work(struct work_struct *work) llist_for_each_entry_safe(counter, tmp, dellist, dellist) { mlx5_fc_stats_remove(dev, counter); - mlx5_free_fc(dev, counter); + mlx5_fc_release(dev, counter); } if (time_before(now, fc_stats->next_query) || @@ -244,32 +258,62 @@ static void mlx5_fc_stats_work(struct work_struct *work) counter = list_first_entry(&fc_stats->counters, struct mlx5_fc, list); - while (counter) - counter = mlx5_fc_stats_query(dev, counter, last->id); + if (counter) + mlx5_fc_stats_query_counter_range(dev, counter, last->id); fc_stats->next_query = now + fc_stats->sampling_interval; } -struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging) +static struct mlx5_fc *mlx5_fc_single_alloc(struct mlx5_core_dev *dev) { - struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; struct mlx5_fc *counter; int err; counter = kzalloc(sizeof(*counter), GFP_KERNEL); if (!counter) return ERR_PTR(-ENOMEM); - INIT_LIST_HEAD(&counter->list); err = mlx5_cmd_fc_alloc(dev, &counter->id); - if (err) - goto err_out; + if (err) { + kfree(counter); + return ERR_PTR(err); + } + + return counter; +} + +static struct mlx5_fc *mlx5_fc_acquire(struct mlx5_core_dev *dev, bool aging) +{ + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + struct mlx5_fc *counter; + + if (aging && MLX5_CAP_GEN(dev, flow_counter_bulk_alloc) != 0) { + counter = mlx5_fc_pool_acquire_counter(&fc_stats->fc_pool); + if (!IS_ERR(counter)) + return counter; + } + + return mlx5_fc_single_alloc(dev); +} + +struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging) +{ + struct mlx5_fc *counter = mlx5_fc_acquire(dev, aging); + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + int err; + + if (IS_ERR(counter)) + return counter; + + INIT_LIST_HEAD(&counter->list); + counter->aging = aging; if (aging) { u32 id = counter->id; counter->cache.lastuse = jiffies; - counter->aging = true; + counter->lastbytes = counter->cache.bytes; + counter->lastpackets = counter->cache.packets; idr_preload(GFP_KERNEL); spin_lock(&fc_stats->counters_idr_lock); @@ -290,10 +334,7 @@ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging) return counter; err_out_alloc: - mlx5_cmd_fc_free(dev, counter->id); -err_out: - kfree(counter); - + mlx5_fc_release(dev, counter); return ERR_PTR(err); } EXPORT_SYMBOL(mlx5_fc_create); @@ -317,13 +358,15 @@ void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter) return; } - mlx5_free_fc(dev, counter); + mlx5_fc_release(dev, counter); } EXPORT_SYMBOL(mlx5_fc_destroy); int mlx5_init_fc_stats(struct mlx5_core_dev *dev) { struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + int max_bulk_len; + int max_out_len; spin_lock_init(&fc_stats->counters_idr_lock); idr_init(&fc_stats->counters_idr); @@ -331,14 +374,25 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev) init_llist_head(&fc_stats->addlist); 
init_llist_head(&fc_stats->dellist); + max_bulk_len = get_max_bulk_query_len(dev); + max_out_len = mlx5_cmd_fc_get_bulk_query_out_len(max_bulk_len); + fc_stats->bulk_query_out = kzalloc(max_out_len, GFP_KERNEL); + if (!fc_stats->bulk_query_out) + return -ENOMEM; + fc_stats->wq = create_singlethread_workqueue("mlx5_fc"); if (!fc_stats->wq) - return -ENOMEM; + goto err_wq_create; fc_stats->sampling_interval = MLX5_FC_STATS_PERIOD; INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work); + mlx5_fc_pool_init(&fc_stats->fc_pool, dev); return 0; + +err_wq_create: + kfree(fc_stats->bulk_query_out); + return -ENOMEM; } void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev) @@ -352,14 +406,16 @@ void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev) destroy_workqueue(dev->priv.fc_stats.wq); dev->priv.fc_stats.wq = NULL; - idr_destroy(&fc_stats->counters_idr); - tmplist = llist_del_all(&fc_stats->addlist); llist_for_each_entry_safe(counter, tmp, tmplist, addlist) - mlx5_free_fc(dev, counter); + mlx5_fc_release(dev, counter); list_for_each_entry_safe(counter, tmp, &fc_stats->counters, list) - mlx5_free_fc(dev, counter); + mlx5_fc_release(dev, counter); + + mlx5_fc_pool_cleanup(&fc_stats->fc_pool); + idr_destroy(&fc_stats->counters_idr); + kfree(fc_stats->bulk_query_out); } int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter, @@ -406,3 +462,243 @@ void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev, fc_stats->sampling_interval = min_t(unsigned long, interval, fc_stats->sampling_interval); } + +/* Flow counter bulks */ + +struct mlx5_fc_bulk { + struct list_head pool_list; + u32 base_id; + int bulk_len; + unsigned long *bitmask; + struct mlx5_fc fcs[0]; +}; + +static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk, + u32 id) +{ + counter->bulk = bulk; + counter->id = id; +} + +static int mlx5_fc_bulk_get_free_fcs_amount(struct mlx5_fc_bulk *bulk) +{ + return bitmap_weight(bulk->bitmask, bulk->bulk_len); +} + +static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev) +{ + enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask; + struct mlx5_fc_bulk *bulk; + int err = -ENOMEM; + int bulk_len; + u32 base_id; + int i; + + alloc_bitmask = MLX5_CAP_GEN(dev, flow_counter_bulk_alloc); + bulk_len = alloc_bitmask > 0 ? 
MLX5_FC_BULK_NUM_FCS(alloc_bitmask) : 1; + + bulk = kzalloc(sizeof(*bulk) + bulk_len * sizeof(struct mlx5_fc), + GFP_KERNEL); + if (!bulk) + goto err_alloc_bulk; + + bulk->bitmask = kcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long), + GFP_KERNEL); + if (!bulk->bitmask) + goto err_alloc_bitmask; + + err = mlx5_cmd_fc_bulk_alloc(dev, alloc_bitmask, &base_id); + if (err) + goto err_mlx5_cmd_bulk_alloc; + + bulk->base_id = base_id; + bulk->bulk_len = bulk_len; + for (i = 0; i < bulk_len; i++) { + mlx5_fc_init(&bulk->fcs[i], bulk, base_id + i); + set_bit(i, bulk->bitmask); + } + + return bulk; + +err_mlx5_cmd_bulk_alloc: + kfree(bulk->bitmask); +err_alloc_bitmask: + kfree(bulk); +err_alloc_bulk: + return ERR_PTR(err); +} + +static int +mlx5_fc_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fc_bulk *bulk) +{ + if (mlx5_fc_bulk_get_free_fcs_amount(bulk) < bulk->bulk_len) { + mlx5_core_err(dev, "Freeing bulk before all counters were released\n"); + return -EBUSY; + } + + mlx5_cmd_fc_free(dev, bulk->base_id); + kfree(bulk->bitmask); + kfree(bulk); + + return 0; +} + +static struct mlx5_fc *mlx5_fc_bulk_acquire_fc(struct mlx5_fc_bulk *bulk) +{ + int free_fc_index = find_first_bit(bulk->bitmask, bulk->bulk_len); + + if (free_fc_index >= bulk->bulk_len) + return ERR_PTR(-ENOSPC); + + clear_bit(free_fc_index, bulk->bitmask); + return &bulk->fcs[free_fc_index]; +} + +static int mlx5_fc_bulk_release_fc(struct mlx5_fc_bulk *bulk, struct mlx5_fc *fc) +{ + int fc_index = fc->id - bulk->base_id; + + if (test_bit(fc_index, bulk->bitmask)) + return -EINVAL; + + set_bit(fc_index, bulk->bitmask); + return 0; +} + +/* Flow counters pool API */ + +static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev) +{ + fc_pool->dev = dev; + mutex_init(&fc_pool->pool_lock); + INIT_LIST_HEAD(&fc_pool->fully_used); + INIT_LIST_HEAD(&fc_pool->partially_used); + INIT_LIST_HEAD(&fc_pool->unused); + fc_pool->available_fcs = 0; + fc_pool->used_fcs = 0; + fc_pool->threshold = 0; +} + +static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool) +{ + struct mlx5_core_dev *dev = fc_pool->dev; + struct mlx5_fc_bulk *bulk; + struct mlx5_fc_bulk *tmp; + + list_for_each_entry_safe(bulk, tmp, &fc_pool->fully_used, pool_list) + mlx5_fc_bulk_destroy(dev, bulk); + list_for_each_entry_safe(bulk, tmp, &fc_pool->partially_used, pool_list) + mlx5_fc_bulk_destroy(dev, bulk); + list_for_each_entry_safe(bulk, tmp, &fc_pool->unused, pool_list) + mlx5_fc_bulk_destroy(dev, bulk); +} + +static void mlx5_fc_pool_update_threshold(struct mlx5_fc_pool *fc_pool) +{ + fc_pool->threshold = min_t(int, MLX5_FC_POOL_MAX_THRESHOLD, + fc_pool->used_fcs / MLX5_FC_POOL_USED_BUFF_RATIO); +} + +static struct mlx5_fc_bulk * +mlx5_fc_pool_alloc_new_bulk(struct mlx5_fc_pool *fc_pool) +{ + struct mlx5_core_dev *dev = fc_pool->dev; + struct mlx5_fc_bulk *new_bulk; + + new_bulk = mlx5_fc_bulk_create(dev); + if (!IS_ERR(new_bulk)) + fc_pool->available_fcs += new_bulk->bulk_len; + mlx5_fc_pool_update_threshold(fc_pool); + return new_bulk; +} + +static void +mlx5_fc_pool_free_bulk(struct mlx5_fc_pool *fc_pool, struct mlx5_fc_bulk *bulk) +{ + struct mlx5_core_dev *dev = fc_pool->dev; + + fc_pool->available_fcs -= bulk->bulk_len; + mlx5_fc_bulk_destroy(dev, bulk); + mlx5_fc_pool_update_threshold(fc_pool); +} + +static struct mlx5_fc * +mlx5_fc_pool_acquire_from_list(struct list_head *src_list, + struct list_head *next_list, + bool move_non_full_bulk) +{ + struct mlx5_fc_bulk *bulk; + struct mlx5_fc *fc; + + if (list_empty(src_list)) + 
return ERR_PTR(-ENODATA); + + bulk = list_first_entry(src_list, struct mlx5_fc_bulk, pool_list); + fc = mlx5_fc_bulk_acquire_fc(bulk); + if (move_non_full_bulk || mlx5_fc_bulk_get_free_fcs_amount(bulk) == 0) + list_move(&bulk->pool_list, next_list); + return fc; +} + +static struct mlx5_fc * +mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool) +{ + struct mlx5_fc_bulk *new_bulk; + struct mlx5_fc *fc; + + mutex_lock(&fc_pool->pool_lock); + + fc = mlx5_fc_pool_acquire_from_list(&fc_pool->partially_used, + &fc_pool->fully_used, false); + if (IS_ERR(fc)) + fc = mlx5_fc_pool_acquire_from_list(&fc_pool->unused, + &fc_pool->partially_used, + true); + if (IS_ERR(fc)) { + new_bulk = mlx5_fc_pool_alloc_new_bulk(fc_pool); + if (IS_ERR(new_bulk)) { + fc = ERR_CAST(new_bulk); + goto out; + } + fc = mlx5_fc_bulk_acquire_fc(new_bulk); + list_add(&new_bulk->pool_list, &fc_pool->partially_used); + } + fc_pool->available_fcs--; + fc_pool->used_fcs++; + +out: + mutex_unlock(&fc_pool->pool_lock); + return fc; +} + +static void +mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc) +{ + struct mlx5_core_dev *dev = fc_pool->dev; + struct mlx5_fc_bulk *bulk = fc->bulk; + int bulk_free_fcs_amount; + + mutex_lock(&fc_pool->pool_lock); + + if (mlx5_fc_bulk_release_fc(bulk, fc)) { + mlx5_core_warn(dev, "Attempted to release a counter which is not acquired\n"); + goto unlock; + } + + fc_pool->available_fcs++; + fc_pool->used_fcs--; + + bulk_free_fcs_amount = mlx5_fc_bulk_get_free_fcs_amount(bulk); + if (bulk_free_fcs_amount == 1) + list_move_tail(&bulk->pool_list, &fc_pool->partially_used); + if (bulk_free_fcs_amount == bulk->bulk_len) { + list_del(&bulk->pool_list); + if (fc_pool->available_fcs > fc_pool->threshold) + mlx5_fc_pool_free_bulk(fc_pool, bulk); + else + list_add(&bulk->pool_list, &fc_pool->unused); + } + +unlock: + mutex_unlock(&fc_pool->pool_lock); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 1a2560e3bf7c..3ed8ab2d703d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -279,7 +279,7 @@ static int mlx5i_init_tx(struct mlx5e_priv *priv) return err; } - err = mlx5i_create_tis(priv->mdev, ipriv->qp.qpn, &priv->tisn[0]); + err = mlx5i_create_tis(priv->mdev, ipriv->qp.qpn, &priv->tisn[0][0]); if (err) { mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err); goto err_destroy_underlay_qp; @@ -296,7 +296,7 @@ static void mlx5i_cleanup_tx(struct mlx5e_priv *priv) { struct mlx5i_priv *ipriv = priv->ppriv; - mlx5e_destroy_tis(priv->mdev, priv->tisn[0]); + mlx5e_destroy_tis(priv->mdev, priv->tisn[0][0]); mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c index c5a491e22e55..96e64187c089 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c @@ -210,7 +210,7 @@ static int mlx5i_pkey_open(struct net_device *netdev) goto err_unint_underlay_qp; } - err = mlx5i_create_tis(mdev, ipriv->qp.qpn, &epriv->tisn[0]); + err = mlx5i_create_tis(mdev, ipriv->qp.qpn, &epriv->tisn[0][0]); if (err) { mlx5_core_warn(mdev, "create child tis failed, %d\n", err); goto err_remove_rx_uderlay_qp; @@ -228,7 +228,7 @@ static int mlx5i_pkey_open(struct net_device *netdev) return 0; err_clear_state_opened_flag: - 
mlx5e_destroy_tis(mdev, epriv->tisn[0]); + mlx5e_destroy_tis(mdev, epriv->tisn[0][0]); err_remove_rx_uderlay_qp: mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn); err_unint_underlay_qp: @@ -257,7 +257,7 @@ static int mlx5i_pkey_close(struct net_device *netdev) mlx5i_uninit_underlay_qp(priv); mlx5e_deactivate_priv_channels(priv); mlx5e_close_channels(&priv->channels); - mlx5e_destroy_tis(mdev, priv->tisn[0]); + mlx5e_destroy_tis(mdev, priv->tisn[0][0]); unlock: mutex_unlock(&priv->state_lock); return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c index e69766393990..5d20d615663e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c @@ -248,6 +248,9 @@ static int mlx5_lag_fib_event(struct notifier_block *nb, struct net_device *fib_dev; struct fib_info *fi; + if (!net_eq(info->net, &init_net)) + return NOTIFY_DONE; + if (info->family != AF_INET) return NOTIFY_DONE; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c index ea1d4d26ece0..3fc575d1c3ec 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c @@ -2,6 +2,7 @@ // Copyright (c) 2019 Mellanox Technologies. #include "mlx5_core.h" +#include "lib/mlx5.h" int mlx5_create_encryption_key(struct mlx5_core_dev *mdev, void *key, u32 sz_bytes, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c new file mode 100644 index 000000000000..e065c2f68f5a --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c @@ -0,0 +1,223 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2019 Mellanox Technologies + +#include <linux/mlx5/driver.h> +#include <linux/mlx5/device.h> + +#include "mlx5_core.h" +#include "lib/mlx5.h" + +struct mlx5_dm { + /* protect access to icm bitmask */ + spinlock_t lock; + unsigned long *steering_sw_icm_alloc_blocks; + unsigned long *header_modify_sw_icm_alloc_blocks; +}; + +struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev) +{ + u64 header_modify_icm_blocks = 0; + u64 steering_icm_blocks = 0; + struct mlx5_dm *dm; + + if (!(MLX5_CAP_GEN_64(dev, general_obj_types) & MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM)) + return 0; + + dm = kzalloc(sizeof(*dm), GFP_KERNEL); + if (!dm) + return ERR_PTR(-ENOMEM); + + spin_lock_init(&dm->lock); + + if (MLX5_CAP64_DEV_MEM(dev, steering_sw_icm_start_address)) { + steering_icm_blocks = + BIT(MLX5_CAP_DEV_MEM(dev, log_steering_sw_icm_size) - + MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)); + + dm->steering_sw_icm_alloc_blocks = + kcalloc(BITS_TO_LONGS(steering_icm_blocks), + sizeof(unsigned long), GFP_KERNEL); + if (!dm->steering_sw_icm_alloc_blocks) + goto err_steering; + } + + if (MLX5_CAP64_DEV_MEM(dev, header_modify_sw_icm_start_address)) { + header_modify_icm_blocks = + BIT(MLX5_CAP_DEV_MEM(dev, log_header_modify_sw_icm_size) - + MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)); + + dm->header_modify_sw_icm_alloc_blocks = + kcalloc(BITS_TO_LONGS(header_modify_icm_blocks), + sizeof(unsigned long), GFP_KERNEL); + if (!dm->header_modify_sw_icm_alloc_blocks) + goto err_modify_hdr; + } + + return dm; + +err_modify_hdr: + kfree(dm->steering_sw_icm_alloc_blocks); + +err_steering: + kfree(dm); + + return ERR_PTR(-ENOMEM); +} + +void mlx5_dm_cleanup(struct mlx5_core_dev *dev) +{ + struct mlx5_dm *dm = dev->dm; + + if (!dev->dm) + return; + + if 
(dm->steering_sw_icm_alloc_blocks) { + WARN_ON(!bitmap_empty(dm->steering_sw_icm_alloc_blocks, + BIT(MLX5_CAP_DEV_MEM(dev, log_steering_sw_icm_size) - + MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)))); + kfree(dm->steering_sw_icm_alloc_blocks); + } + + if (dm->header_modify_sw_icm_alloc_blocks) { + WARN_ON(!bitmap_empty(dm->header_modify_sw_icm_alloc_blocks, + BIT(MLX5_CAP_DEV_MEM(dev, + log_header_modify_sw_icm_size) - + MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)))); + kfree(dm->header_modify_sw_icm_alloc_blocks); + } + + kfree(dm); +} + +int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type, + u64 length, u16 uid, phys_addr_t *addr, u32 *obj_id) +{ + u32 num_blocks = DIV_ROUND_UP_ULL(length, MLX5_SW_ICM_BLOCK_SIZE(dev)); + u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {}; + u32 in[MLX5_ST_SZ_DW(create_sw_icm_in)] = {}; + struct mlx5_dm *dm = dev->dm; + unsigned long *block_map; + u64 icm_start_addr; + u32 log_icm_size; + u32 max_blocks; + u64 block_idx; + void *sw_icm; + int ret; + + if (!dev->dm) + return -EOPNOTSUPP; + + if (!length || (length & (length - 1)) || + length & (MLX5_SW_ICM_BLOCK_SIZE(dev) - 1)) + return -EINVAL; + + MLX5_SET(general_obj_in_cmd_hdr, in, opcode, + MLX5_CMD_OP_CREATE_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_SW_ICM); + MLX5_SET(general_obj_in_cmd_hdr, in, uid, uid); + + switch (type) { + case MLX5_SW_ICM_TYPE_STEERING: + icm_start_addr = MLX5_CAP64_DEV_MEM(dev, steering_sw_icm_start_address); + log_icm_size = MLX5_CAP_DEV_MEM(dev, log_steering_sw_icm_size); + block_map = dm->steering_sw_icm_alloc_blocks; + break; + case MLX5_SW_ICM_TYPE_HEADER_MODIFY: + icm_start_addr = MLX5_CAP64_DEV_MEM(dev, header_modify_sw_icm_start_address); + log_icm_size = MLX5_CAP_DEV_MEM(dev, + log_header_modify_sw_icm_size); + block_map = dm->header_modify_sw_icm_alloc_blocks; + break; + default: + return -EINVAL; + } + + if (!block_map) + return -EOPNOTSUPP; + + max_blocks = BIT(log_icm_size - MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)); + spin_lock(&dm->lock); + block_idx = bitmap_find_next_zero_area(block_map, + max_blocks, + 0, + num_blocks, 0); + + if (block_idx < max_blocks) + bitmap_set(block_map, + block_idx, num_blocks); + + spin_unlock(&dm->lock); + + if (block_idx >= max_blocks) + return -ENOMEM; + + sw_icm = MLX5_ADDR_OF(create_sw_icm_in, in, sw_icm); + icm_start_addr += block_idx << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev); + MLX5_SET64(sw_icm, sw_icm, sw_icm_start_addr, + icm_start_addr); + MLX5_SET(sw_icm, sw_icm, log_sw_icm_size, ilog2(length)); + + ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + if (ret) { + spin_lock(&dm->lock); + bitmap_clear(block_map, + block_idx, num_blocks); + spin_unlock(&dm->lock); + + return ret; + } + + *addr = icm_start_addr; + *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); + + return 0; +} +EXPORT_SYMBOL_GPL(mlx5_dm_sw_icm_alloc); + +int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type, + u64 length, u16 uid, phys_addr_t addr, u32 obj_id) +{ + u32 num_blocks = DIV_ROUND_UP_ULL(length, MLX5_SW_ICM_BLOCK_SIZE(dev)); + u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {}; + u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {}; + struct mlx5_dm *dm = dev->dm; + unsigned long *block_map; + u64 icm_start_addr; + u64 start_idx; + int err; + + if (!dev->dm) + return -EOPNOTSUPP; + + switch (type) { + case MLX5_SW_ICM_TYPE_STEERING: + icm_start_addr = MLX5_CAP64_DEV_MEM(dev, steering_sw_icm_start_address); + block_map = dm->steering_sw_icm_alloc_blocks; + break; + case 
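The allocator above insists that length is a nonzero power of two (the length & (length - 1) test) and a multiple of the device block size. Since mlx5_dm_sw_icm_alloc()/mlx5_dm_sw_icm_dealloc() are exported by this patch, a consumer could pair them roughly as below; the 64 KB size is an example value, not a requirement.

/* Illustrative caller of the exported SW ICM helpers above. A real
 * caller must respect MLX5_SW_ICM_BLOCK_SIZE(dev) alignment, as
 * checked in mlx5_dm_sw_icm_alloc().
 */
static int example_sw_icm_round_trip(struct mlx5_core_dev *dev, u16 uid)
{
	phys_addr_t addr;
	u32 obj_id;
	int err;

	err = mlx5_dm_sw_icm_alloc(dev, MLX5_SW_ICM_TYPE_STEERING,
				   SZ_64K, uid, &addr, &obj_id);
	if (err)
		return err;

	/* ... program steering entries into [addr, addr + SZ_64K) ... */

	return mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_STEERING,
				      SZ_64K, uid, addr, obj_id);
}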
MLX5_SW_ICM_TYPE_HEADER_MODIFY: + icm_start_addr = MLX5_CAP64_DEV_MEM(dev, header_modify_sw_icm_start_address); + block_map = dm->header_modify_sw_icm_alloc_blocks; + break; + default: + return -EINVAL; + } + + MLX5_SET(general_obj_in_cmd_hdr, in, opcode, + MLX5_CMD_OP_DESTROY_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_SW_ICM); + MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id); + MLX5_SET(general_obj_in_cmd_hdr, in, uid, uid); + + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + if (err) + return err; + + start_idx = (addr - icm_start_addr) >> MLX5_LOG_SW_ICM_BLOCK_SIZE(dev); + spin_lock(&dm->lock); + bitmap_clear(block_map, + start_idx, num_blocks); + spin_unlock(&dm->lock); + + return 0; +} +EXPORT_SYMBOL_GPL(mlx5_dm_sw_icm_dealloc); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h index 3dfab91ae5f2..4be4d2d36218 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h @@ -87,7 +87,7 @@ void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev); int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq); void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq); -int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev); +void mlx5_eq_debugfs_init(struct mlx5_core_dev *dev); void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev); /* This function should only be called after mlx5_cmd_force_teardown_hca */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/hv.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv.c new file mode 100644 index 000000000000..583dc7e2aca8 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv.c @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2018 Mellanox Technologies + +#include <linux/hyperv.h> +#include "mlx5_core.h" +#include "lib/hv.h" + +static int mlx5_hv_config_common(struct mlx5_core_dev *dev, void *buf, int len, + int offset, bool read) +{ + int rc = -EOPNOTSUPP; + int bytes_returned; + int block_id; + + if (offset % HV_CONFIG_BLOCK_SIZE_MAX || len != HV_CONFIG_BLOCK_SIZE_MAX) + return -EINVAL; + + block_id = offset / HV_CONFIG_BLOCK_SIZE_MAX; + + rc = read ? + hyperv_read_cfg_blk(dev->pdev, buf, + HV_CONFIG_BLOCK_SIZE_MAX, block_id, + &bytes_returned) : + hyperv_write_cfg_blk(dev->pdev, buf, + HV_CONFIG_BLOCK_SIZE_MAX, block_id); + + /* Make sure len bytes were read successfully */ + if (read && !rc && len != bytes_returned) + rc = -EIO; + + if (rc) { + mlx5_core_err(dev, "Failed to %s hv config, err = %d, len = %d, offset = %d\n", + read ? 
"read" : "write", rc, len, + offset); + return rc; + } + + return 0; +} + +int mlx5_hv_read_config(struct mlx5_core_dev *dev, void *buf, int len, + int offset) +{ + return mlx5_hv_config_common(dev, buf, len, offset, true); +} + +int mlx5_hv_write_config(struct mlx5_core_dev *dev, void *buf, int len, + int offset) +{ + return mlx5_hv_config_common(dev, buf, len, offset, false); +} + +int mlx5_hv_register_invalidate(struct mlx5_core_dev *dev, void *context, + void (*block_invalidate)(void *context, + u64 block_mask)) +{ + return hyperv_reg_block_invalidate(dev->pdev, context, + block_invalidate); +} + +void mlx5_hv_unregister_invalidate(struct mlx5_core_dev *dev) +{ + hyperv_reg_block_invalidate(dev->pdev, NULL, NULL); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/hv.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv.h new file mode 100644 index 000000000000..f9a45573f459 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2019 Mellanox Technologies. */ + +#ifndef __LIB_HV_H__ +#define __LIB_HV_H__ + +#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE) + +#include <linux/hyperv.h> +#include <linux/mlx5/driver.h> + +int mlx5_hv_read_config(struct mlx5_core_dev *dev, void *buf, int len, + int offset); +int mlx5_hv_write_config(struct mlx5_core_dev *dev, void *buf, int len, + int offset); +int mlx5_hv_register_invalidate(struct mlx5_core_dev *dev, void *context, + void (*block_invalidate)(void *context, + u64 block_mask)); +void mlx5_hv_unregister_invalidate(struct mlx5_core_dev *dev); +#endif + +#endif /* __LIB_HV_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c new file mode 100644 index 000000000000..4047629a876b --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c @@ -0,0 +1,371 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2018 Mellanox Technologies + +#include <linux/hyperv.h> +#include "mlx5_core.h" +#include "lib/hv.h" +#include "lib/hv_vhca.h" + +struct mlx5_hv_vhca { + struct mlx5_core_dev *dev; + struct workqueue_struct *work_queue; + struct mlx5_hv_vhca_agent *agents[MLX5_HV_VHCA_AGENT_MAX]; + struct mutex agents_lock; /* Protect agents array */ +}; + +struct mlx5_hv_vhca_work { + struct work_struct invalidate_work; + struct mlx5_hv_vhca *hv_vhca; + u64 block_mask; +}; + +struct mlx5_hv_vhca_data_block { + u16 sequence; + u16 offset; + u8 reserved[4]; + u64 data[15]; +}; + +struct mlx5_hv_vhca_agent { + enum mlx5_hv_vhca_agent_type type; + struct mlx5_hv_vhca *hv_vhca; + void *priv; + u16 seq; + void (*control)(struct mlx5_hv_vhca_agent *agent, + struct mlx5_hv_vhca_control_block *block); + void (*invalidate)(struct mlx5_hv_vhca_agent *agent, + u64 block_mask); + void (*cleanup)(struct mlx5_hv_vhca_agent *agent); +}; + +struct mlx5_hv_vhca *mlx5_hv_vhca_create(struct mlx5_core_dev *dev) +{ + struct mlx5_hv_vhca *hv_vhca = NULL; + + hv_vhca = kzalloc(sizeof(*hv_vhca), GFP_KERNEL); + if (!hv_vhca) + return ERR_PTR(-ENOMEM); + + hv_vhca->work_queue = create_singlethread_workqueue("mlx5_hv_vhca"); + if (!hv_vhca->work_queue) { + kfree(hv_vhca); + return ERR_PTR(-ENOMEM); + } + + hv_vhca->dev = dev; + mutex_init(&hv_vhca->agents_lock); + + return hv_vhca; +} + +void mlx5_hv_vhca_destroy(struct mlx5_hv_vhca *hv_vhca) +{ + if (IS_ERR_OR_NULL(hv_vhca)) + return; + + destroy_workqueue(hv_vhca->work_queue); + kfree(hv_vhca); +} + +static void 
mlx5_hv_vhca_invalidate_work(struct work_struct *work) +{ + struct mlx5_hv_vhca_work *hwork; + struct mlx5_hv_vhca *hv_vhca; + int i; + + hwork = container_of(work, struct mlx5_hv_vhca_work, invalidate_work); + hv_vhca = hwork->hv_vhca; + + mutex_lock(&hv_vhca->agents_lock); + for (i = 0; i < MLX5_HV_VHCA_AGENT_MAX; i++) { + struct mlx5_hv_vhca_agent *agent = hv_vhca->agents[i]; + + if (!agent || !agent->invalidate) + continue; + + if (!(BIT(agent->type) & hwork->block_mask)) + continue; + + agent->invalidate(agent, hwork->block_mask); + } + mutex_unlock(&hv_vhca->agents_lock); + + kfree(hwork); +} + +void mlx5_hv_vhca_invalidate(void *context, u64 block_mask) +{ + struct mlx5_hv_vhca *hv_vhca = (struct mlx5_hv_vhca *)context; + struct mlx5_hv_vhca_work *work; + + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) + return; + + INIT_WORK(&work->invalidate_work, mlx5_hv_vhca_invalidate_work); + work->hv_vhca = hv_vhca; + work->block_mask = block_mask; + + queue_work(hv_vhca->work_queue, &work->invalidate_work); +} + +#define AGENT_MASK(type) (type ? BIT(type - 1) : 0 /* control */) + +static void mlx5_hv_vhca_agents_control(struct mlx5_hv_vhca *hv_vhca, + struct mlx5_hv_vhca_control_block *block) +{ + int i; + + for (i = 0; i < MLX5_HV_VHCA_AGENT_MAX; i++) { + struct mlx5_hv_vhca_agent *agent = hv_vhca->agents[i]; + + if (!agent || !agent->control) + continue; + + if (!(AGENT_MASK(agent->type) & block->control)) + continue; + + agent->control(agent, block); + } +} + +static void mlx5_hv_vhca_capabilities(struct mlx5_hv_vhca *hv_vhca, + u32 *capabilities) +{ + int i; + + for (i = 0; i < MLX5_HV_VHCA_AGENT_MAX; i++) { + struct mlx5_hv_vhca_agent *agent = hv_vhca->agents[i]; + + if (agent) + *capabilities |= AGENT_MASK(agent->type); + } +} + +static void +mlx5_hv_vhca_control_agent_invalidate(struct mlx5_hv_vhca_agent *agent, + u64 block_mask) +{ + struct mlx5_hv_vhca *hv_vhca = agent->hv_vhca; + struct mlx5_core_dev *dev = hv_vhca->dev; + struct mlx5_hv_vhca_control_block *block; + u32 capabilities = 0; + int err; + + block = kzalloc(sizeof(*block), GFP_KERNEL); + if (!block) + return; + + err = mlx5_hv_read_config(dev, block, sizeof(*block), 0); + if (err) + goto free_block; + + mlx5_hv_vhca_capabilities(hv_vhca, &capabilities); + + /* In case no capabilities, send empty block in return */ + if (!capabilities) { + memset(block, 0, sizeof(*block)); + goto write; + } + + if (block->capabilities != capabilities) + block->capabilities = capabilities; + + if (block->control & ~capabilities) + goto free_block; + + mlx5_hv_vhca_agents_control(hv_vhca, block); + block->command_ack = block->command; + +write: + mlx5_hv_write_config(dev, block, sizeof(*block), 0); + +free_block: + kfree(block); +} + +static struct mlx5_hv_vhca_agent * +mlx5_hv_vhca_control_agent_create(struct mlx5_hv_vhca *hv_vhca) +{ + return mlx5_hv_vhca_agent_create(hv_vhca, MLX5_HV_VHCA_AGENT_CONTROL, + NULL, + mlx5_hv_vhca_control_agent_invalidate, + NULL, NULL); +} + +static void mlx5_hv_vhca_control_agent_destroy(struct mlx5_hv_vhca_agent *agent) +{ + mlx5_hv_vhca_agent_destroy(agent); +} + +int mlx5_hv_vhca_init(struct mlx5_hv_vhca *hv_vhca) +{ + struct mlx5_hv_vhca_agent *agent; + int err; + + if (IS_ERR_OR_NULL(hv_vhca)) + return IS_ERR_OR_NULL(hv_vhca); + + err = mlx5_hv_register_invalidate(hv_vhca->dev, hv_vhca, + mlx5_hv_vhca_invalidate); + if (err) + return err; + + agent = mlx5_hv_vhca_control_agent_create(hv_vhca); + if (IS_ERR_OR_NULL(agent)) { + mlx5_hv_unregister_invalidate(hv_vhca->dev); + return 
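The invalidate path above is a standard defer-to-workqueue shape: the callback can fire in a context where sleeping is forbidden, so it allocates with GFP_ATOMIC, stashes the block mask, and lets the single-threaded workqueue take the agents mutex later. A generic sketch of the same pattern, with illustrative names:

/* Defer-to-workqueue pattern as used by mlx5_hv_vhca_invalidate()
 * above: atomic allocation in the notification path, sleeping work
 * (mutexes, config I/O) in the work function.
 */
struct demo_work {
	struct work_struct work;
	u64 mask;
};

static void demo_work_fn(struct work_struct *work)
{
	struct demo_work *dw = container_of(work, struct demo_work, work);

	/* may sleep here */
	kfree(dw);
}

static void demo_notify(struct workqueue_struct *wq, u64 mask)
{
	struct demo_work *dw = kzalloc(sizeof(*dw), GFP_ATOMIC);

	if (!dw)
		return;	/* best effort, as in the patch */

	INIT_WORK(&dw->work, demo_work_fn);
	dw->mask = mask;
	queue_work(wq, &dw->work);
}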
IS_ERR_OR_NULL(agent); + } + + hv_vhca->agents[MLX5_HV_VHCA_AGENT_CONTROL] = agent; + + return 0; +} + +void mlx5_hv_vhca_cleanup(struct mlx5_hv_vhca *hv_vhca) +{ + struct mlx5_hv_vhca_agent *agent; + int i; + + if (IS_ERR_OR_NULL(hv_vhca)) + return; + + agent = hv_vhca->agents[MLX5_HV_VHCA_AGENT_CONTROL]; + if (agent) + mlx5_hv_vhca_control_agent_destroy(agent); + + mutex_lock(&hv_vhca->agents_lock); + for (i = 0; i < MLX5_HV_VHCA_AGENT_MAX; i++) + WARN_ON(hv_vhca->agents[i]); + + mutex_unlock(&hv_vhca->agents_lock); + + mlx5_hv_unregister_invalidate(hv_vhca->dev); +} + +static void mlx5_hv_vhca_agents_update(struct mlx5_hv_vhca *hv_vhca) +{ + mlx5_hv_vhca_invalidate(hv_vhca, BIT(MLX5_HV_VHCA_AGENT_CONTROL)); +} + +struct mlx5_hv_vhca_agent * +mlx5_hv_vhca_agent_create(struct mlx5_hv_vhca *hv_vhca, + enum mlx5_hv_vhca_agent_type type, + void (*control)(struct mlx5_hv_vhca_agent*, + struct mlx5_hv_vhca_control_block *block), + void (*invalidate)(struct mlx5_hv_vhca_agent*, + u64 block_mask), + void (*cleaup)(struct mlx5_hv_vhca_agent *agent), + void *priv) +{ + struct mlx5_hv_vhca_agent *agent; + + if (IS_ERR_OR_NULL(hv_vhca)) + return ERR_PTR(-ENOMEM); + + if (type >= MLX5_HV_VHCA_AGENT_MAX) + return ERR_PTR(-EINVAL); + + mutex_lock(&hv_vhca->agents_lock); + if (hv_vhca->agents[type]) { + mutex_unlock(&hv_vhca->agents_lock); + return ERR_PTR(-EINVAL); + } + mutex_unlock(&hv_vhca->agents_lock); + + agent = kzalloc(sizeof(*agent), GFP_KERNEL); + if (!agent) + return ERR_PTR(-ENOMEM); + + agent->type = type; + agent->hv_vhca = hv_vhca; + agent->priv = priv; + agent->control = control; + agent->invalidate = invalidate; + agent->cleanup = cleaup; + + mutex_lock(&hv_vhca->agents_lock); + hv_vhca->agents[type] = agent; + mutex_unlock(&hv_vhca->agents_lock); + + mlx5_hv_vhca_agents_update(hv_vhca); + + return agent; +} + +void mlx5_hv_vhca_agent_destroy(struct mlx5_hv_vhca_agent *agent) +{ + struct mlx5_hv_vhca *hv_vhca = agent->hv_vhca; + + mutex_lock(&hv_vhca->agents_lock); + + if (WARN_ON(agent != hv_vhca->agents[agent->type])) { + mutex_unlock(&hv_vhca->agents_lock); + return; + } + + hv_vhca->agents[agent->type] = NULL; + mutex_unlock(&hv_vhca->agents_lock); + + if (agent->cleanup) + agent->cleanup(agent); + + kfree(agent); + + mlx5_hv_vhca_agents_update(hv_vhca); +} + +static int mlx5_hv_vhca_data_block_prepare(struct mlx5_hv_vhca_agent *agent, + struct mlx5_hv_vhca_data_block *data_block, + void *src, int len, int *offset) +{ + int bytes = min_t(int, (int)sizeof(data_block->data), len); + + data_block->sequence = agent->seq; + data_block->offset = (*offset)++; + memcpy(data_block->data, src, bytes); + + return bytes; +} + +static void mlx5_hv_vhca_agent_seq_update(struct mlx5_hv_vhca_agent *agent) +{ + agent->seq++; +} + +int mlx5_hv_vhca_agent_write(struct mlx5_hv_vhca_agent *agent, + void *buf, int len) +{ + int offset = agent->type * HV_CONFIG_BLOCK_SIZE_MAX; + int block_offset = 0; + int total = 0; + int err; + + while (len) { + struct mlx5_hv_vhca_data_block data_block = {0}; + int bytes; + + bytes = mlx5_hv_vhca_data_block_prepare(agent, &data_block, + buf + total, + len, &block_offset); + if (!bytes) + return -ENOMEM; + + err = mlx5_hv_write_config(agent->hv_vhca->dev, &data_block, + sizeof(data_block), offset); + if (err) + return err; + + total += bytes; + len -= bytes; + } + + mlx5_hv_vhca_agent_seq_update(agent); + + return 0; +} + +void *mlx5_hv_vhca_agent_priv(struct mlx5_hv_vhca_agent *agent) +{ + return agent->priv; +} diff --git 
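For completeness, here is how a hypothetical consumer would register against the agent API defined above. Only the invalidate callback is populated; a real agent (a stats publisher, say) would typically also supply control and cleanup handlers. All names other than the API itself are stand-ins.

/* Illustrative STATS agent registration against the API above. */
static void demo_agent_invalidate(struct mlx5_hv_vhca_agent *agent,
				  u64 block_mask)
{
	/* re-read whatever blocks block_mask points at */
}

static int demo_register_stats_agent(struct mlx5_hv_vhca *hv_vhca,
				     void *priv)
{
	struct mlx5_hv_vhca_agent *agent;

	agent = mlx5_hv_vhca_agent_create(hv_vhca, MLX5_HV_VHCA_AGENT_STATS,
					  NULL, demo_agent_invalidate,
					  NULL, priv);

	return PTR_ERR_OR_ZERO(agent);
}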
a/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h new file mode 100644 index 000000000000..4bad6a5fde56 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2019 Mellanox Technologies. */ + +#ifndef __LIB_HV_VHCA_H__ +#define __LIB_HV_VHCA_H__ + +#include "en.h" +#include "lib/hv.h" + +struct mlx5_hv_vhca_agent; +struct mlx5_hv_vhca; +struct mlx5_hv_vhca_control_block; + +enum mlx5_hv_vhca_agent_type { + MLX5_HV_VHCA_AGENT_CONTROL = 0, + MLX5_HV_VHCA_AGENT_STATS = 1, + MLX5_HV_VHCA_AGENT_MAX = 32, +}; + +#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE) + +struct mlx5_hv_vhca_control_block { + u32 capabilities; + u32 control; + u16 command; + u16 command_ack; + u16 version; + u16 rings; + u32 reserved1[28]; +}; + +struct mlx5_hv_vhca *mlx5_hv_vhca_create(struct mlx5_core_dev *dev); +void mlx5_hv_vhca_destroy(struct mlx5_hv_vhca *hv_vhca); +int mlx5_hv_vhca_init(struct mlx5_hv_vhca *hv_vhca); +void mlx5_hv_vhca_cleanup(struct mlx5_hv_vhca *hv_vhca); +void mlx5_hv_vhca_invalidate(void *context, u64 block_mask); + +struct mlx5_hv_vhca_agent * +mlx5_hv_vhca_agent_create(struct mlx5_hv_vhca *hv_vhca, + enum mlx5_hv_vhca_agent_type type, + void (*control)(struct mlx5_hv_vhca_agent*, + struct mlx5_hv_vhca_control_block *block), + void (*invalidate)(struct mlx5_hv_vhca_agent*, + u64 block_mask), + void (*cleanup)(struct mlx5_hv_vhca_agent *agent), + void *context); + +void mlx5_hv_vhca_agent_destroy(struct mlx5_hv_vhca_agent *agent); +int mlx5_hv_vhca_agent_write(struct mlx5_hv_vhca_agent *agent, + void *buf, int len); +void *mlx5_hv_vhca_agent_priv(struct mlx5_hv_vhca_agent *agent); + +#else + +static inline struct mlx5_hv_vhca * +mlx5_hv_vhca_create(struct mlx5_core_dev *dev) +{ + return NULL; +} + +static inline void mlx5_hv_vhca_destroy(struct mlx5_hv_vhca *hv_vhca) +{ +} + +static inline int mlx5_hv_vhca_init(struct mlx5_hv_vhca *hv_vhca) +{ + return 0; +} + +static inline void mlx5_hv_vhca_cleanup(struct mlx5_hv_vhca *hv_vhca) +{ +} + +static inline void mlx5_hv_vhca_invalidate(void *context, + u64 block_mask) +{ +} + +static inline struct mlx5_hv_vhca_agent * +mlx5_hv_vhca_agent_create(struct mlx5_hv_vhca *hv_vhca, + enum mlx5_hv_vhca_agent_type type, + void (*control)(struct mlx5_hv_vhca_agent*, + struct mlx5_hv_vhca_control_block *block), + void (*invalidate)(struct mlx5_hv_vhca_agent*, + u64 block_mask), + void (*cleanup)(struct mlx5_hv_vhca_agent *agent), + void *context) +{ + return NULL; +} + +static inline void mlx5_hv_vhca_agent_destroy(struct mlx5_hv_vhca_agent *agent) +{ +} + +static inline int +mlx5_hv_vhca_write_agent(struct mlx5_hv_vhca_agent *agent, + void *buf, int len) +{ + return 0; +} +#endif + +#endif /* __LIB_HV_VHCA_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c index b9d4f4e19ff9..148b55c3db7a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c @@ -32,6 +32,7 @@ #include <linux/kernel.h> #include <linux/module.h> +#include <linux/refcount.h> #include <linux/mlx5/driver.h> #include <net/vxlan.h> #include "mlx5_core.h" @@ -48,7 +49,7 @@ struct mlx5_vxlan { struct mlx5_vxlan_port { struct hlist_node hlist; - atomic_t refcount; + refcount_t refcount; u16 udp_port; }; @@ -113,7 +114,7 @@ int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port) vxlanp = 
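Stepping back to lib/hv_vhca.h above: the header compiles the whole feature away when CONFIG_PCI_HYPERV_INTERFACE is off by providing static inline no-ops, so call sites in main.c stay ifdef-free. The pattern reduced to its skeleton (CONFIG_DEMO_FEATURE and the demo names are made up):

/* The compile-out pattern used by lib/hv_vhca.h above. */
struct demo_dev;

#if IS_ENABLED(CONFIG_DEMO_FEATURE)
int demo_feature_init(struct demo_dev *dev);
#else
static inline int demo_feature_init(struct demo_dev *dev)
{
	return 0;	/* feature compiled out: succeed as a no-op */
}
#endif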
mlx5_vxlan_lookup_port(vxlan, port); if (vxlanp) { - atomic_inc(&vxlanp->refcount); + refcount_inc(&vxlanp->refcount); return 0; } @@ -137,7 +138,7 @@ int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port) } vxlanp->udp_port = port; - atomic_set(&vxlanp->refcount, 1); + refcount_set(&vxlanp->refcount, 1); spin_lock_bh(&vxlan->lock); hash_add(vxlan->htable, &vxlanp->hlist, port); @@ -170,7 +171,7 @@ int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port) goto out_unlock; } - if (atomic_dec_and_test(&vxlanp->refcount)) { + if (refcount_dec_and_test(&vxlanp->refcount)) { hash_del(&vxlanp->hlist); remove = true; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index b15b27a497fc..9648c2297803 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -69,6 +69,7 @@ #include "lib/pci_vsc.h" #include "diag/fw_tracer.h" #include "ecpf.h" +#include "lib/hv_vhca.h" MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>"); MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver"); @@ -495,6 +496,12 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev) ODP_CAP_SET_MAX(dev, xrc_odp_caps.write); ODP_CAP_SET_MAX(dev, xrc_odp_caps.read); ODP_CAP_SET_MAX(dev, xrc_odp_caps.atomic); + ODP_CAP_SET_MAX(dev, dc_odp_caps.srq_receive); + ODP_CAP_SET_MAX(dev, dc_odp_caps.send); + ODP_CAP_SET_MAX(dev, dc_odp_caps.receive); + ODP_CAP_SET_MAX(dev, dc_odp_caps.write); + ODP_CAP_SET_MAX(dev, dc_odp_caps.read); + ODP_CAP_SET_MAX(dev, dc_odp_caps.atomic); if (do_set) err = set_caps(dev, set_ctx, set_sz, @@ -826,11 +833,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev) goto err_eq_cleanup; } - err = mlx5_cq_debugfs_init(dev); - if (err) { - mlx5_core_err(dev, "failed to initialize cq debugfs\n"); - goto err_events_cleanup; - } + mlx5_cq_debugfs_init(dev); mlx5_init_qp_table(dev); @@ -873,7 +876,12 @@ static int mlx5_init_once(struct mlx5_core_dev *dev) goto err_eswitch_cleanup; } + dev->dm = mlx5_dm_create(dev); + if (IS_ERR(dev->dm)) + mlx5_core_warn(dev, "Failed to init device memory%d\n", err); + dev->tracer = mlx5_fw_tracer_create(dev); + dev->hv_vhca = mlx5_hv_vhca_create(dev); return 0; @@ -891,7 +899,6 @@ err_tables_cleanup: mlx5_cleanup_mkey_table(dev); mlx5_cleanup_qp_table(dev); mlx5_cq_debugfs_cleanup(dev); -err_events_cleanup: mlx5_events_cleanup(dev); err_eq_cleanup: mlx5_eq_table_cleanup(dev); @@ -905,7 +912,9 @@ err_devcom: static void mlx5_cleanup_once(struct mlx5_core_dev *dev) { + mlx5_hv_vhca_destroy(dev->hv_vhca); mlx5_fw_tracer_destroy(dev->tracer); + mlx5_dm_cleanup(dev); mlx5_fpga_cleanup(dev); mlx5_eswitch_cleanup(dev->priv.eswitch); mlx5_sriov_cleanup(dev); @@ -1072,6 +1081,8 @@ static int mlx5_load(struct mlx5_core_dev *dev) goto err_fw_tracer; } + mlx5_hv_vhca_init(dev->hv_vhca); + err = mlx5_fpga_device_start(dev); if (err) { mlx5_core_err(dev, "fpga device start failed %d\n", err); @@ -1127,6 +1138,7 @@ err_tls_start: err_ipsec_start: mlx5_fpga_device_stop(dev); err_fpga_start: + mlx5_hv_vhca_cleanup(dev->hv_vhca); mlx5_fw_tracer_cleanup(dev->tracer); err_fw_tracer: mlx5_eq_table_destroy(dev); @@ -1147,6 +1159,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev) mlx5_accel_ipsec_cleanup(dev); mlx5_accel_tls_cleanup(dev); mlx5_fpga_device_stop(dev); + mlx5_hv_vhca_cleanup(dev->hv_vhca); mlx5_fw_tracer_cleanup(dev->tracer); mlx5_eq_table_destroy(dev); mlx5_irq_table_destroy(dev); @@ -1217,8 +1230,10 @@ static int 
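On the atomic_t to refcount_t conversions in vxlan.c and qp.c above: refcount_t WARNs and saturates on increment-from-zero and on overflow, where a plain atomic_t would silently wrap into a potential use-after-free. The canonical get/put shape, with illustrative names:

/* refcount_set(&obj->ref, 1) at creation, then: */
struct demo_obj {
	refcount_t ref;
};

static void demo_obj_get(struct demo_obj *obj)
{
	refcount_inc(&obj->ref);	/* WARNs if ref was already 0 */
}

static void demo_obj_put(struct demo_obj *obj)
{
	if (refcount_dec_and_test(&obj->ref))
		kfree(obj);		/* last reference dropped */
}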
mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup) { int err = 0; - if (cleanup) + if (cleanup) { + mlx5_unregister_device(dev); mlx5_drain_health_wq(dev); + } mutex_lock(&dev->intf_state_mutex); if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) { @@ -1369,7 +1384,6 @@ static void remove_one(struct pci_dev *pdev) mlx5_crdump_disable(dev); mlx5_devlink_unregister(devlink); - mlx5_unregister_device(dev); if (mlx5_unload_one(dev, true)) { mlx5_core_err(dev, "mlx5_unload_one failed\n"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 471bbc48bc1f..b100489dc85c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -146,7 +146,7 @@ u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev, void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev); void mlx5_cmd_flush(struct mlx5_core_dev *dev); -int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev); +void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev); void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev); int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group, @@ -198,6 +198,9 @@ int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size); int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode); int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode); +struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev); +void mlx5_dm_cleanup(struct mlx5_core_dev *dev); + #define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) && \ MLX5_CAP_GEN((mdev), pps_modify) && \ MLX5_CAP_MCAM_FEATURE((mdev), mtpps_fs) && \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c index b8ba74de9555..c3aea4cc2fff 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c @@ -53,7 +53,7 @@ mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn) common = radix_tree_lookup(&table->tree, rsn); if (common) - atomic_inc(&common->refcount); + refcount_inc(&common->refcount); spin_unlock_irqrestore(&table->lock, flags); @@ -62,7 +62,7 @@ mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn) void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common) { - if (atomic_dec_and_test(&common->refcount)) + if (refcount_dec_and_test(&common->refcount)) complete(&common->free); } @@ -162,7 +162,7 @@ static int rsc_event_notifier(struct notifier_block *nb, common = mlx5_get_rsc(table, rsn); if (!common) { - mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n", rsn); + mlx5_core_dbg(dev, "Async event for unknown resource 0x%x\n", rsn); return NOTIFY_OK; } @@ -209,7 +209,7 @@ static int create_resource_common(struct mlx5_core_dev *dev, if (err) return err; - atomic_set(&qp->common.refcount, 1); + refcount_set(&qp->common.refcount, 1); init_completion(&qp->common.free); qp->pid = current->pid; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c index 17ce9dd56b13..0fc7de4aa572 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c @@ -14,9 +14,6 @@ static void mlx5_rdma_disable_roce_steering(struct mlx5_core_dev *dev) { struct mlx5_core_roce *roce = &dev->priv.roce; - if (!roce->ft) - return; - mlx5_del_flow_rules(roce->allow_rule); mlx5_destroy_flow_group(roce->fg); mlx5_destroy_flow_table(roce->ft); @@ -51,7 +48,7 @@ static int 
mlx5_rdma_enable_roce_steering(struct mlx5_core_dev *dev) return -ENOMEM; } - ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_RDMA_RX); + ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL); if (!ns) { mlx5_core_err(dev, "Failed to get RDMA RX namespace"); err = -EOPNOTSUPP; @@ -145,6 +142,11 @@ static int mlx5_rdma_add_roce_addr(struct mlx5_core_dev *dev) void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev) { + struct mlx5_core_roce *roce = &dev->priv.roce; + + if (!roce->ft) + return; + mlx5_rdma_disable_roce_steering(dev); mlx5_rdma_del_roce_addr(dev); mlx5_nic_vport_disable_roce(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/steering/Makefile new file mode 100644 index 000000000000..c78512eed8d7 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +subdir-ccflags-y += -I$(src)/.. diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c new file mode 100644 index 000000000000..7d81a7735de5 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c @@ -0,0 +1,1588 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2019 Mellanox Technologies. */ + +#include "dr_types.h" + +enum dr_action_domain { + DR_ACTION_DOMAIN_NIC_INGRESS, + DR_ACTION_DOMAIN_NIC_EGRESS, + DR_ACTION_DOMAIN_FDB_INGRESS, + DR_ACTION_DOMAIN_FDB_EGRESS, + DR_ACTION_DOMAIN_MAX, +}; + +enum dr_action_valid_state { + DR_ACTION_STATE_ERR, + DR_ACTION_STATE_NO_ACTION, + DR_ACTION_STATE_REFORMAT, + DR_ACTION_STATE_MODIFY_HDR, + DR_ACTION_STATE_MODIFY_VLAN, + DR_ACTION_STATE_NON_TERM, + DR_ACTION_STATE_TERM, + DR_ACTION_STATE_MAX, +}; + +static const enum dr_action_valid_state +next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] = { + [DR_ACTION_DOMAIN_NIC_INGRESS] = { + [DR_ACTION_STATE_NO_ACTION] = { + [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_TAG] = DR_ACTION_STATE_NON_TERM, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM, + [DR_ACTION_TYP_TNL_L2_TO_L2] = DR_ACTION_STATE_REFORMAT, + [DR_ACTION_TYP_TNL_L3_TO_L2] = DR_ACTION_STATE_REFORMAT, + [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + }, + [DR_ACTION_STATE_REFORMAT] = { + [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_TAG] = DR_ACTION_STATE_REFORMAT, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_REFORMAT, + [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + }, + [DR_ACTION_STATE_MODIFY_HDR] = { + [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_TAG] = DR_ACTION_STATE_MODIFY_HDR, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_HDR, + }, + [DR_ACTION_STATE_MODIFY_VLAN] = { + [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_TAG] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_MODIFY_HDR] = 
DR_ACTION_STATE_MODIFY_HDR, + }, + [DR_ACTION_STATE_NON_TERM] = { + [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_TAG] = DR_ACTION_STATE_NON_TERM, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM, + [DR_ACTION_TYP_TNL_L2_TO_L2] = DR_ACTION_STATE_REFORMAT, + [DR_ACTION_TYP_TNL_L3_TO_L2] = DR_ACTION_STATE_REFORMAT, + [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + }, + [DR_ACTION_STATE_TERM] = { + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM, + }, + }, + [DR_ACTION_DOMAIN_NIC_EGRESS] = { + [DR_ACTION_STATE_NO_ACTION] = { + [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM, + [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT, + [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT, + [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + }, + [DR_ACTION_STATE_REFORMAT] = { + [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_REFORMAT, + }, + [DR_ACTION_STATE_MODIFY_HDR] = { + [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_HDR, + [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT, + [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + }, + [DR_ACTION_STATE_MODIFY_VLAN] = { + [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT, + [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT, + }, + [DR_ACTION_STATE_NON_TERM] = { + [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM, + [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT, + [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT, + [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + }, + [DR_ACTION_STATE_TERM] = { + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM, + }, + }, + [DR_ACTION_DOMAIN_FDB_INGRESS] = { + [DR_ACTION_STATE_NO_ACTION] = { + [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM, + [DR_ACTION_TYP_TNL_L2_TO_L2] = DR_ACTION_STATE_REFORMAT, + [DR_ACTION_TYP_TNL_L3_TO_L2] = DR_ACTION_STATE_REFORMAT, + [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, + }, + [DR_ACTION_STATE_REFORMAT] = { + [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_REFORMAT, + [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, + }, + [DR_ACTION_STATE_MODIFY_HDR] = { + [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_HDR, + [DR_ACTION_TYP_VPORT] = 
DR_ACTION_STATE_TERM, + }, + [DR_ACTION_STATE_MODIFY_VLAN] = { + [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, + }, + [DR_ACTION_STATE_NON_TERM] = { + [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM, + [DR_ACTION_TYP_TNL_L2_TO_L2] = DR_ACTION_STATE_REFORMAT, + [DR_ACTION_TYP_TNL_L3_TO_L2] = DR_ACTION_STATE_REFORMAT, + [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, + }, + [DR_ACTION_STATE_TERM] = { + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM, + }, + }, + [DR_ACTION_DOMAIN_FDB_EGRESS] = { + [DR_ACTION_STATE_NO_ACTION] = { + [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM, + [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, + [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT, + [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, + }, + [DR_ACTION_STATE_REFORMAT] = { + [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_REFORMAT, + [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, + }, + [DR_ACTION_STATE_MODIFY_HDR] = { + [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_HDR, + [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT, + [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, + }, + [DR_ACTION_STATE_MODIFY_VLAN] = { + [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT, + [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT, + [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, + }, + [DR_ACTION_STATE_NON_TERM] = { + [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM, + [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, + [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT, + [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, + }, + [DR_ACTION_STATE_TERM] = { + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM, + }, + }, +}; + +struct dr_action_modify_field_conv { + u16 hw_field; + u8 start; + u8 end; + u8 l3_type; + u8 l4_type; +}; + +static const struct dr_action_modify_field_conv dr_action_conv_arr[] = { + [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = { + .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_1, .start = 16, .end = 47, + }, + [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = { + .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_1, .start = 0, .end = 15, + }, + [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = { + .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_2, .start = 32, 
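The next_action_state table above encodes the legal action orderings per domain; DR_ACTION_STATE_ERR is deliberately the first enumerator, so every combination the initializers leave out defaults to the error state. Validation then reduces to a table walk, roughly as sketched here (this mirrors dr_action_validate_and_get_next_state() further below):

/* Walk an action sequence through the state table; any transition
 * not explicitly listed lands on DR_ACTION_STATE_ERR (0).
 */
static int demo_validate_actions(enum dr_action_domain domain,
				 const u32 *action_types, int num)
{
	u32 state = DR_ACTION_STATE_NO_ACTION;
	int i;

	for (i = 0; i < num; i++) {
		state = next_action_state[domain][state][action_types[i]];
		if (state == DR_ACTION_STATE_ERR)
			return -EOPNOTSUPP;
	}

	return 0;
}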
.end = 47, + }, + [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = { + .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_0, .start = 16, .end = 47, + }, + [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = { + .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_0, .start = 0, .end = 15, + }, + [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = { + .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_1, .start = 0, .end = 5, + }, + [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = { + .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 48, .end = 56, + .l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_TCP, + }, + [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = { + .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 0, .end = 15, + .l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_TCP, + }, + [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = { + .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 16, .end = 31, + .l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_TCP, + }, + [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = { + .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_1, .start = 8, .end = 15, + .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV4, + }, + [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = { + .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_1, .start = 8, .end = 15, + .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = { + .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 0, .end = 15, + .l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_UDP, + }, + [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = { + .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 16, .end = 31, + .l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_UDP, + }, + [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = { + .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_3, .start = 32, .end = 63, + .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = { + .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_3, .start = 0, .end = 31, + .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = { + .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_4, .start = 32, .end = 63, + .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = { + .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_4, .start = 0, .end = 31, + .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = { + .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 32, .end = 63, + .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = { + .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 0, .end = 31, + .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = { + .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_2, .start = 32, .end = 63, + .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = { + .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_2, .start = 0, .end = 31, + .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = { + .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 0, .end = 31, + .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV4, + }, + [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = { + .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 32, .end = 63, + .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV4, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = { + .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_METADATA, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = { + .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_METADATA, .start = 32, .end = 63, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = { + .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_0, .start = 32, .end = 63, + }, + 
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = { + .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_0, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = { + .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_1, .start = 32, .end = 63, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = { + .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_1, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = { + .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_2, .start = 32, .end = 63, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = { + .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_2, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = { + .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_1, .start = 32, .end = 63, + }, + [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = { + .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_1, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = { + .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_2, .start = 0, .end = 15, + }, +}; + +#define MAX_VLANS 2 +struct dr_action_vlan_info { + int count; + u32 headers[MAX_VLANS]; +}; + +struct dr_action_apply_attr { + u32 modify_index; + u16 modify_actions; + u32 decap_index; + u16 decap_actions; + u8 decap_with_vlan:1; + u64 final_icm_addr; + u32 flow_tag; + u32 ctr_id; + u16 gvmi; + u16 hit_gvmi; + u32 reformat_id; + u32 reformat_size; + struct dr_action_vlan_info vlans; +}; + +static int +dr_action_reformat_to_action_type(enum mlx5dr_action_reformat_type reformat_type, + enum mlx5dr_action_type *action_type) +{ + switch (reformat_type) { + case DR_ACTION_REFORMAT_TYP_TNL_L2_TO_L2: + *action_type = DR_ACTION_TYP_TNL_L2_TO_L2; + break; + case DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L2: + *action_type = DR_ACTION_TYP_L2_TO_TNL_L2; + break; + case DR_ACTION_REFORMAT_TYP_TNL_L3_TO_L2: + *action_type = DR_ACTION_TYP_TNL_L3_TO_L2; + break; + case DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L3: + *action_type = DR_ACTION_TYP_L2_TO_TNL_L3; + break; + default: + return -EINVAL; + } + + return 0; +} + +static void dr_actions_init_next_ste(u8 **last_ste, + u32 *added_stes, + enum mlx5dr_ste_entry_type entry_type, + u16 gvmi) +{ + (*added_stes)++; + *last_ste += DR_STE_SIZE; + mlx5dr_ste_init(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE, entry_type, gvmi); +} + +static void dr_actions_apply_tx(struct mlx5dr_domain *dmn, + u8 *action_type_set, + u8 *last_ste, + struct dr_action_apply_attr *attr, + u32 *added_stes) +{ + bool encap = action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2] || + action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]; + + /* We want to make sure the modify header comes before L2 + * encapsulation. The reason for that is that we support + * modify headers for outer headers only + */ + if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) { + mlx5dr_ste_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT); + mlx5dr_ste_set_rewrite_actions(last_ste, + attr->modify_actions, + attr->modify_index); + } + + if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) { + int i; + + for (i = 0; i < attr->vlans.count; i++) { + if (i || action_type_set[DR_ACTION_TYP_MODIFY_HDR]) + dr_actions_init_next_ste(&last_ste, + added_stes, + MLX5DR_STE_TYPE_TX, + attr->gvmi); + + mlx5dr_ste_set_tx_push_vlan(last_ste, + attr->vlans.headers[i], + encap); + } + } + + if (encap) { + /* Modify header and encapsulation require a different STEs. + * Since modify header STE format doesn't support encapsulation + * tunneling_action. 
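+	 * In that case the encap fields are written into a fresh TX STE
+	 * allocated via dr_actions_init_next_ste() below.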
+ */ + if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] || + action_type_set[DR_ACTION_TYP_PUSH_VLAN]) + dr_actions_init_next_ste(&last_ste, + added_stes, + MLX5DR_STE_TYPE_TX, + attr->gvmi); + + mlx5dr_ste_set_tx_encap(last_ste, + attr->reformat_id, + attr->reformat_size, + action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]); + /* Whenever prio_tag_required enabled, we can be sure that the + * previous table (ACL) already push vlan to our packet, + * And due to HW limitation we need to set this bit, otherwise + * push vlan + reformat will not work. + */ + if (MLX5_CAP_GEN(dmn->mdev, prio_tag_required)) + mlx5dr_ste_set_go_back_bit(last_ste); + } + + if (action_type_set[DR_ACTION_TYP_CTR]) + mlx5dr_ste_set_counter_id(last_ste, attr->ctr_id); +} + +static void dr_actions_apply_rx(u8 *action_type_set, + u8 *last_ste, + struct dr_action_apply_attr *attr, + u32 *added_stes) +{ + if (action_type_set[DR_ACTION_TYP_CTR]) + mlx5dr_ste_set_counter_id(last_ste, attr->ctr_id); + + if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) { + mlx5dr_ste_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT); + mlx5dr_ste_set_rx_decap_l3(last_ste, attr->decap_with_vlan); + mlx5dr_ste_set_rewrite_actions(last_ste, + attr->decap_actions, + attr->decap_index); + } + + if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2]) + mlx5dr_ste_set_rx_decap(last_ste); + + if (action_type_set[DR_ACTION_TYP_POP_VLAN]) { + int i; + + for (i = 0; i < attr->vlans.count; i++) { + if (i || + action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2] || + action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) + dr_actions_init_next_ste(&last_ste, + added_stes, + MLX5DR_STE_TYPE_RX, + attr->gvmi); + + mlx5dr_ste_set_rx_pop_vlan(last_ste); + } + } + + if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) { + if (mlx5dr_ste_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT) + dr_actions_init_next_ste(&last_ste, + added_stes, + MLX5DR_STE_TYPE_MODIFY_PKT, + attr->gvmi); + else + mlx5dr_ste_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT); + + mlx5dr_ste_set_rewrite_actions(last_ste, + attr->modify_actions, + attr->modify_index); + } + + if (action_type_set[DR_ACTION_TYP_TAG]) { + if (mlx5dr_ste_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT) + dr_actions_init_next_ste(&last_ste, + added_stes, + MLX5DR_STE_TYPE_RX, + attr->gvmi); + + mlx5dr_ste_rx_set_flow_tag(last_ste, attr->flow_tag); + } +} + +/* Apply the actions on the rule STE array starting from the last_ste. + * Actions might require more than one STE, new_num_stes will return + * the new size of the STEs array, rule with actions. 
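+ * For example, a TX rule carrying modify-header, push-vlan and
+ * encap actions ends up spanning three STEs, one per operation
+ * that needs its own entry type.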
+ */ +static void dr_actions_apply(struct mlx5dr_domain *dmn, + enum mlx5dr_ste_entry_type ste_type, + u8 *action_type_set, + u8 *last_ste, + struct dr_action_apply_attr *attr, + u32 *new_num_stes) +{ + u32 added_stes = 0; + + if (ste_type == MLX5DR_STE_TYPE_RX) + dr_actions_apply_rx(action_type_set, last_ste, attr, &added_stes); + else + dr_actions_apply_tx(dmn, action_type_set, last_ste, attr, &added_stes); + + last_ste += added_stes * DR_STE_SIZE; + *new_num_stes += added_stes; + + mlx5dr_ste_set_hit_gvmi(last_ste, attr->hit_gvmi); + mlx5dr_ste_set_hit_addr(last_ste, attr->final_icm_addr, 1); +} + +static enum dr_action_domain +dr_action_get_action_domain(enum mlx5dr_domain_type domain, + enum mlx5dr_ste_entry_type ste_type) +{ + switch (domain) { + case MLX5DR_DOMAIN_TYPE_NIC_RX: + return DR_ACTION_DOMAIN_NIC_INGRESS; + case MLX5DR_DOMAIN_TYPE_NIC_TX: + return DR_ACTION_DOMAIN_NIC_EGRESS; + case MLX5DR_DOMAIN_TYPE_FDB: + if (ste_type == MLX5DR_STE_TYPE_RX) + return DR_ACTION_DOMAIN_FDB_INGRESS; + return DR_ACTION_DOMAIN_FDB_EGRESS; + default: + WARN_ON(true); + return DR_ACTION_DOMAIN_MAX; + } +} + +static +int dr_action_validate_and_get_next_state(enum dr_action_domain action_domain, + u32 action_type, + u32 *state) +{ + u32 cur_state = *state; + + /* Check action state machine is valid */ + *state = next_action_state[action_domain][cur_state][action_type]; + + if (*state == DR_ACTION_STATE_ERR) + return -EOPNOTSUPP; + + return 0; +} + +static int dr_action_handle_cs_recalc(struct mlx5dr_domain *dmn, + struct mlx5dr_action *dest_action, + u64 *final_icm_addr) +{ + int ret; + + switch (dest_action->action_type) { + case DR_ACTION_TYP_FT: + /* Allow destination flow table only if table is a terminating + * table, since there is an *assumption* that in such case FW + * will recalculate the CS. + */ + if (dest_action->dest_tbl.is_fw_tbl) { + *final_icm_addr = dest_action->dest_tbl.fw_tbl.rx_icm_addr; + } else { + mlx5dr_dbg(dmn, + "Destination FT should be terminating when modify TTL is used\n"); + return -EINVAL; + } + break; + + case DR_ACTION_TYP_VPORT: + /* If destination is vport we will get the FW flow table + * that recalculates the CS and forwards to the vport. 
+ */ + ret = mlx5dr_domain_cache_get_recalc_cs_ft_addr(dest_action->vport.dmn, + dest_action->vport.num, + final_icm_addr); + if (ret) { + mlx5dr_err(dmn, "Failed to get FW cs recalc flow table\n"); + return ret; + } + break; + + default: + break; + } + + return 0; +} + +#define WITH_VLAN_NUM_HW_ACTIONS 6 + +int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, + struct mlx5dr_matcher_rx_tx *nic_matcher, + struct mlx5dr_action *actions[], + u32 num_actions, + u8 *ste_arr, + u32 *new_hw_ste_arr_sz) +{ + struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn; + bool rx_rule = nic_dmn->ste_type == MLX5DR_STE_TYPE_RX; + struct mlx5dr_domain *dmn = matcher->tbl->dmn; + u8 action_type_set[DR_ACTION_TYP_MAX] = {}; + struct mlx5dr_action *dest_action = NULL; + u32 state = DR_ACTION_STATE_NO_ACTION; + struct dr_action_apply_attr attr = {}; + enum dr_action_domain action_domain; + bool recalc_cs_required = false; + u8 *last_ste; + int i, ret; + + attr.gvmi = dmn->info.caps.gvmi; + attr.hit_gvmi = dmn->info.caps.gvmi; + attr.final_icm_addr = nic_dmn->default_icm_addr; + action_domain = dr_action_get_action_domain(dmn->type, nic_dmn->ste_type); + + for (i = 0; i < num_actions; i++) { + struct mlx5dr_action *action; + int max_actions_type = 1; + u32 action_type; + + action = actions[i]; + action_type = action->action_type; + + switch (action_type) { + case DR_ACTION_TYP_DROP: + attr.final_icm_addr = nic_dmn->drop_icm_addr; + break; + case DR_ACTION_TYP_FT: + dest_action = action; + if (!action->dest_tbl.is_fw_tbl) { + if (action->dest_tbl.tbl->dmn != dmn) { + mlx5dr_dbg(dmn, + "Destination table belongs to a different domain\n"); + goto out_invalid_arg; + } + if (action->dest_tbl.tbl->level <= matcher->tbl->level) { + mlx5dr_dbg(dmn, + "Destination table level should be higher than source table\n"); + goto out_invalid_arg; + } + attr.final_icm_addr = rx_rule ? + action->dest_tbl.tbl->rx.s_anchor->chunk->icm_addr : + action->dest_tbl.tbl->tx.s_anchor->chunk->icm_addr; + } else { + struct mlx5dr_cmd_query_flow_table_details output; + int ret; + + /* get the relevant addresses */ + if (!action->dest_tbl.fw_tbl.rx_icm_addr) { + ret = mlx5dr_cmd_query_flow_table(action->dest_tbl.fw_tbl.mdev, + action->dest_tbl.fw_tbl.ft->type, + action->dest_tbl.fw_tbl.ft->id, + &output); + if (!ret) { + action->dest_tbl.fw_tbl.tx_icm_addr = + output.sw_owner_icm_root_1; + action->dest_tbl.fw_tbl.rx_icm_addr = + output.sw_owner_icm_root_0; + } else { + mlx5dr_dbg(dmn, + "Failed mlx5_cmd_query_flow_table ret: %d\n", + ret); + return ret; + } + } + attr.final_icm_addr = rx_rule ? 
+ action->dest_tbl.fw_tbl.rx_icm_addr : + action->dest_tbl.fw_tbl.tx_icm_addr; + } + break; + case DR_ACTION_TYP_QP: + mlx5dr_info(dmn, "Domain doesn't support QP\n"); + goto out_invalid_arg; + case DR_ACTION_TYP_CTR: + attr.ctr_id = action->ctr.ctr_id + + action->ctr.offeset; + break; + case DR_ACTION_TYP_TAG: + attr.flow_tag = action->flow_tag; + break; + case DR_ACTION_TYP_TNL_L2_TO_L2: + break; + case DR_ACTION_TYP_TNL_L3_TO_L2: + attr.decap_index = action->rewrite.index; + attr.decap_actions = action->rewrite.num_of_actions; + attr.decap_with_vlan = + attr.decap_actions == WITH_VLAN_NUM_HW_ACTIONS; + break; + case DR_ACTION_TYP_MODIFY_HDR: + attr.modify_index = action->rewrite.index; + attr.modify_actions = action->rewrite.num_of_actions; + recalc_cs_required = action->rewrite.modify_ttl; + break; + case DR_ACTION_TYP_L2_TO_TNL_L2: + case DR_ACTION_TYP_L2_TO_TNL_L3: + attr.reformat_size = action->reformat.reformat_size; + attr.reformat_id = action->reformat.reformat_id; + break; + case DR_ACTION_TYP_VPORT: + attr.hit_gvmi = action->vport.caps->vhca_gvmi; + dest_action = action; + if (rx_rule) { + /* Loopback on WIRE vport is not supported */ + if (action->vport.num == WIRE_PORT) + goto out_invalid_arg; + + attr.final_icm_addr = action->vport.caps->icm_address_rx; + } else { + attr.final_icm_addr = action->vport.caps->icm_address_tx; + } + break; + case DR_ACTION_TYP_POP_VLAN: + max_actions_type = MAX_VLANS; + attr.vlans.count++; + break; + case DR_ACTION_TYP_PUSH_VLAN: + max_actions_type = MAX_VLANS; + if (attr.vlans.count == MAX_VLANS) + return -EINVAL; + + attr.vlans.headers[attr.vlans.count++] = action->push_vlan.vlan_hdr; + break; + default: + goto out_invalid_arg; + } + + /* Check action duplication */ + if (++action_type_set[action_type] > max_actions_type) { + mlx5dr_dbg(dmn, "Action type %d supports only max %d time(s)\n", + action_type, max_actions_type); + goto out_invalid_arg; + } + + /* Check action state machine is valid */ + if (dr_action_validate_and_get_next_state(action_domain, + action_type, + &state)) { + mlx5dr_dbg(dmn, "Invalid action sequence provided\n"); + return -EOPNOTSUPP; + } + } + + *new_hw_ste_arr_sz = nic_matcher->num_of_builders; + last_ste = ste_arr + DR_STE_SIZE * (nic_matcher->num_of_builders - 1); + + /* Due to a HW bug, modifying TTL on RX flows will cause an incorrect + * checksum calculation. In this case we will use a FW table to + * recalculate. 
+ */ + if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB && + rx_rule && recalc_cs_required && dest_action) { + ret = dr_action_handle_cs_recalc(dmn, dest_action, &attr.final_icm_addr); + if (ret) { + mlx5dr_dbg(dmn, + "Failed to handle checksum recalculation err %d\n", + ret); + return ret; + } + } + + dr_actions_apply(dmn, + nic_dmn->ste_type, + action_type_set, + last_ste, + &attr, + new_hw_ste_arr_sz); + + return 0; + +out_invalid_arg: + return -EINVAL; +} + +#define CVLAN_ETHERTYPE 0x8100 +#define SVLAN_ETHERTYPE 0x88a8 +#define HDR_LEN_L2_ONLY 14 +#define HDR_LEN_L2_VLAN 18 +#define REWRITE_HW_ACTION_NUM 6 + +static int dr_actions_l2_rewrite(struct mlx5dr_domain *dmn, + struct mlx5dr_action *action, + void *data, size_t data_sz) +{ + struct mlx5_ifc_l2_hdr_bits *l2_hdr = data; + u64 ops[REWRITE_HW_ACTION_NUM] = {}; + u32 hdr_fld_4b; + u16 hdr_fld_2b; + u16 vlan_type; + bool vlan; + int i = 0; + int ret; + + vlan = (data_sz != HDR_LEN_L2_ONLY); + + /* dmac_47_16 */ + MLX5_SET(dr_action_hw_set, ops + i, + opcode, MLX5DR_ACTION_MDFY_HW_OP_SET); + MLX5_SET(dr_action_hw_set, ops + i, + destination_length, 0); + MLX5_SET(dr_action_hw_set, ops + i, + destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_0); + MLX5_SET(dr_action_hw_set, ops + i, + destination_left_shifter, 16); + hdr_fld_4b = MLX5_GET(l2_hdr, l2_hdr, dmac_47_16); + MLX5_SET(dr_action_hw_set, ops + i, + inline_data, hdr_fld_4b); + i++; + + /* smac_47_16 */ + MLX5_SET(dr_action_hw_set, ops + i, + opcode, MLX5DR_ACTION_MDFY_HW_OP_SET); + MLX5_SET(dr_action_hw_set, ops + i, + destination_length, 0); + MLX5_SET(dr_action_hw_set, ops + i, + destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_1); + MLX5_SET(dr_action_hw_set, ops + i, + destination_left_shifter, 16); + hdr_fld_4b = (MLX5_GET(l2_hdr, l2_hdr, smac_31_0) >> 16 | + MLX5_GET(l2_hdr, l2_hdr, smac_47_32) << 16); + MLX5_SET(dr_action_hw_set, ops + i, + inline_data, hdr_fld_4b); + i++; + + /* dmac_15_0 */ + MLX5_SET(dr_action_hw_set, ops + i, + opcode, MLX5DR_ACTION_MDFY_HW_OP_SET); + MLX5_SET(dr_action_hw_set, ops + i, + destination_length, 16); + MLX5_SET(dr_action_hw_set, ops + i, + destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_0); + MLX5_SET(dr_action_hw_set, ops + i, + destination_left_shifter, 0); + hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, dmac_15_0); + MLX5_SET(dr_action_hw_set, ops + i, + inline_data, hdr_fld_2b); + i++; + + /* ethertype + (optional) vlan */ + MLX5_SET(dr_action_hw_set, ops + i, + opcode, MLX5DR_ACTION_MDFY_HW_OP_SET); + MLX5_SET(dr_action_hw_set, ops + i, + destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_2); + MLX5_SET(dr_action_hw_set, ops + i, + destination_left_shifter, 32); + if (!vlan) { + hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype); + MLX5_SET(dr_action_hw_set, ops + i, inline_data, hdr_fld_2b); + MLX5_SET(dr_action_hw_set, ops + i, destination_length, 16); + } else { + hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype); + vlan_type = hdr_fld_2b == SVLAN_ETHERTYPE ? 
DR_STE_SVLAN : DR_STE_CVLAN; + hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan); + hdr_fld_4b = (vlan_type << 16) | hdr_fld_2b; + MLX5_SET(dr_action_hw_set, ops + i, inline_data, hdr_fld_4b); + MLX5_SET(dr_action_hw_set, ops + i, destination_length, 18); + } + i++; + + /* smac_15_0 */ + MLX5_SET(dr_action_hw_set, ops + i, + opcode, MLX5DR_ACTION_MDFY_HW_OP_SET); + MLX5_SET(dr_action_hw_set, ops + i, + destination_length, 16); + MLX5_SET(dr_action_hw_set, ops + i, + destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_1); + MLX5_SET(dr_action_hw_set, ops + i, + destination_left_shifter, 0); + hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, smac_31_0); + MLX5_SET(dr_action_hw_set, ops + i, + inline_data, hdr_fld_2b); + i++; + + if (vlan) { + MLX5_SET(dr_action_hw_set, ops + i, + opcode, MLX5DR_ACTION_MDFY_HW_OP_SET); + hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan_type); + MLX5_SET(dr_action_hw_set, ops + i, + inline_data, hdr_fld_2b); + MLX5_SET(dr_action_hw_set, ops + i, + destination_length, 16); + MLX5_SET(dr_action_hw_set, ops + i, + destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_2); + MLX5_SET(dr_action_hw_set, ops + i, + destination_left_shifter, 0); + i++; + } + + action->rewrite.data = (void *)ops; + action->rewrite.num_of_actions = i; + action->rewrite.chunk->byte_size = i * sizeof(*ops); + + ret = mlx5dr_send_postsend_action(dmn, action); + if (ret) { + mlx5dr_dbg(dmn, "Writing encapsulation action to ICM failed\n"); + return ret; + } + + return 0; +} + +static struct mlx5dr_action * +dr_action_create_generic(enum mlx5dr_action_type action_type) +{ + struct mlx5dr_action *action; + + action = kzalloc(sizeof(*action), GFP_KERNEL); + if (!action) + return NULL; + + action->action_type = action_type; + refcount_set(&action->refcount, 1); + + return action; +} + +struct mlx5dr_action *mlx5dr_action_create_drop(void) +{ + return dr_action_create_generic(DR_ACTION_TYP_DROP); +} + +struct mlx5dr_action * +mlx5dr_action_create_dest_table(struct mlx5dr_table *tbl) +{ + struct mlx5dr_action *action; + + refcount_inc(&tbl->refcount); + + action = dr_action_create_generic(DR_ACTION_TYP_FT); + if (!action) + goto dec_ref; + + action->dest_tbl.tbl = tbl; + + return action; + +dec_ref: + refcount_dec(&tbl->refcount); + return NULL; +} + +struct mlx5dr_action * +mlx5dr_create_action_dest_flow_fw_table(struct mlx5_flow_table *ft, + struct mlx5_core_dev *mdev) +{ + struct mlx5dr_action *action; + + action = dr_action_create_generic(DR_ACTION_TYP_FT); + if (!action) + return NULL; + + action->dest_tbl.is_fw_tbl = 1; + action->dest_tbl.fw_tbl.ft = ft; + action->dest_tbl.fw_tbl.mdev = mdev; + + return action; +} + +struct mlx5dr_action * +mlx5dr_action_create_flow_counter(u32 counter_id) +{ + struct mlx5dr_action *action; + + action = dr_action_create_generic(DR_ACTION_TYP_CTR); + if (!action) + return NULL; + + action->ctr.ctr_id = counter_id; + + return action; +} + +struct mlx5dr_action *mlx5dr_action_create_tag(u32 tag_value) +{ + struct mlx5dr_action *action; + + action = dr_action_create_generic(DR_ACTION_TYP_TAG); + if (!action) + return NULL; + + action->flow_tag = tag_value & 0xffffff; + + return action; +} + +static int +dr_action_verify_reformat_params(enum mlx5dr_action_type reformat_type, + struct mlx5dr_domain *dmn, + size_t data_sz, + void *data) +{ + if ((!data && data_sz) || (data && !data_sz) || reformat_type > + DR_ACTION_TYP_L2_TO_TNL_L3) { + mlx5dr_dbg(dmn, "Invalid reformat parameter!\n"); + goto out_err; + } + + if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB) + return 0; + + if (dmn->type == 
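+	    /* NIC domains are direction specific:
+	     * RX may only decap, TX may only encap
+	     */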
MLX5DR_DOMAIN_TYPE_NIC_RX) { + if (reformat_type != DR_ACTION_TYP_TNL_L2_TO_L2 && + reformat_type != DR_ACTION_TYP_TNL_L3_TO_L2) { + mlx5dr_dbg(dmn, "Action reformat type not support on RX domain\n"); + goto out_err; + } + } else if (dmn->type == MLX5DR_DOMAIN_TYPE_NIC_TX) { + if (reformat_type != DR_ACTION_TYP_L2_TO_TNL_L2 && + reformat_type != DR_ACTION_TYP_L2_TO_TNL_L3) { + mlx5dr_dbg(dmn, "Action reformat type not support on TX domain\n"); + goto out_err; + } + } + + return 0; + +out_err: + return -EINVAL; +} + +#define ACTION_CACHE_LINE_SIZE 64 + +static int +dr_action_create_reformat_action(struct mlx5dr_domain *dmn, + size_t data_sz, void *data, + struct mlx5dr_action *action) +{ + u32 reformat_id; + int ret; + + switch (action->action_type) { + case DR_ACTION_TYP_L2_TO_TNL_L2: + case DR_ACTION_TYP_L2_TO_TNL_L3: + { + enum mlx5_reformat_ctx_type rt; + + if (action->action_type == DR_ACTION_TYP_L2_TO_TNL_L2) + rt = MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL; + else + rt = MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL; + + ret = mlx5dr_cmd_create_reformat_ctx(dmn->mdev, rt, data_sz, data, + &reformat_id); + if (ret) + return ret; + + action->reformat.reformat_id = reformat_id; + action->reformat.reformat_size = data_sz; + return 0; + } + case DR_ACTION_TYP_TNL_L2_TO_L2: + { + return 0; + } + case DR_ACTION_TYP_TNL_L3_TO_L2: + { + /* Only Ethernet frame is supported, with VLAN (18) or without (14) */ + if (data_sz != HDR_LEN_L2_ONLY && data_sz != HDR_LEN_L2_VLAN) + return -EINVAL; + + action->rewrite.chunk = mlx5dr_icm_alloc_chunk(dmn->action_icm_pool, + DR_CHUNK_SIZE_8); + if (!action->rewrite.chunk) + return -ENOMEM; + + action->rewrite.index = (action->rewrite.chunk->icm_addr - + dmn->info.caps.hdr_modify_icm_addr) / + ACTION_CACHE_LINE_SIZE; + + ret = dr_actions_l2_rewrite(dmn, action, data, data_sz); + if (ret) { + mlx5dr_icm_free_chunk(action->rewrite.chunk); + return ret; + } + return 0; + } + default: + mlx5dr_info(dmn, "Reformat type is not supported %d\n", action->action_type); + return -EINVAL; + } +} + +struct mlx5dr_action *mlx5dr_action_create_pop_vlan(void) +{ + return dr_action_create_generic(DR_ACTION_TYP_POP_VLAN); +} + +struct mlx5dr_action *mlx5dr_action_create_push_vlan(struct mlx5dr_domain *dmn, + __be32 vlan_hdr) +{ + u32 vlan_hdr_h = ntohl(vlan_hdr); + u16 ethertype = vlan_hdr_h >> 16; + struct mlx5dr_action *action; + + if (ethertype != SVLAN_ETHERTYPE && ethertype != CVLAN_ETHERTYPE) { + mlx5dr_dbg(dmn, "Invalid vlan ethertype\n"); + return NULL; + } + + action = dr_action_create_generic(DR_ACTION_TYP_PUSH_VLAN); + if (!action) + return NULL; + + action->push_vlan.vlan_hdr = vlan_hdr_h; + return action; +} + +struct mlx5dr_action * +mlx5dr_action_create_packet_reformat(struct mlx5dr_domain *dmn, + enum mlx5dr_action_reformat_type reformat_type, + size_t data_sz, + void *data) +{ + enum mlx5dr_action_type action_type; + struct mlx5dr_action *action; + int ret; + + refcount_inc(&dmn->refcount); + + /* General checks */ + ret = dr_action_reformat_to_action_type(reformat_type, &action_type); + if (ret) { + mlx5dr_dbg(dmn, "Invalid reformat_type provided\n"); + goto dec_ref; + } + + ret = dr_action_verify_reformat_params(action_type, dmn, data_sz, data); + if (ret) + goto dec_ref; + + action = dr_action_create_generic(action_type); + if (!action) + goto dec_ref; + + action->reformat.dmn = dmn; + + ret = dr_action_create_reformat_action(dmn, + data_sz, + data, + action); + if (ret) { + mlx5dr_dbg(dmn, "Failed creating reformat action %d\n", ret); + goto free_action; + } + + return 
action;
+
+free_action:
+	kfree(action);
+dec_ref:
+	refcount_dec(&dmn->refcount);
+	return NULL;
+}
+
+static const struct dr_action_modify_field_conv *
+dr_action_modify_get_hw_info(u16 sw_field)
+{
+	const struct dr_action_modify_field_conv *hw_action_info;
+
+	if (sw_field >= ARRAY_SIZE(dr_action_conv_arr))
+		goto not_found;
+
+	hw_action_info = &dr_action_conv_arr[sw_field];
+	if (!hw_action_info->end && !hw_action_info->start)
+		goto not_found;
+
+	return hw_action_info;
+
+not_found:
+	return NULL;
+}
+
+static int
+dr_action_modify_sw_to_hw(struct mlx5dr_domain *dmn,
+			  __be64 *sw_action,
+			  __be64 *hw_action,
+			  const struct dr_action_modify_field_conv **ret_hw_info)
+{
+	const struct dr_action_modify_field_conv *hw_action_info;
+	u8 offset, length, max_length, action;
+	u16 sw_field;
+	u8 hw_opcode;
+	u32 data;
+
+	/* Get SW modify action data */
+	action = MLX5_GET(set_action_in, sw_action, action_type);
+	length = MLX5_GET(set_action_in, sw_action, length);
+	offset = MLX5_GET(set_action_in, sw_action, offset);
+	sw_field = MLX5_GET(set_action_in, sw_action, field);
+	data = MLX5_GET(set_action_in, sw_action, data);
+
+	/* Convert SW data to HW modify action format */
+	hw_action_info = dr_action_modify_get_hw_info(sw_field);
+	if (!hw_action_info) {
+		mlx5dr_dbg(dmn, "Invalid field given for modify action\n");
+		return -EINVAL;
+	}
+
+	max_length = hw_action_info->end - hw_action_info->start + 1;
+
+	switch (action) {
+	case MLX5_ACTION_TYPE_SET:
+		hw_opcode = MLX5DR_ACTION_MDFY_HW_OP_SET;
+		/* the PRM defines that a length of zero means all 32 bits */
+		if (!length)
+			length = 32;
+
+		if (length + offset > max_length) {
+			mlx5dr_dbg(dmn, "Modify action length + offset exceeds limit\n");
+			return -EINVAL;
+		}
+		break;
+
+	case MLX5_ACTION_TYPE_ADD:
+		hw_opcode = MLX5DR_ACTION_MDFY_HW_OP_ADD;
+		offset = 0;
+		length = max_length;
+		break;
+
+	default:
+		mlx5dr_info(dmn, "Unsupported action_type for modify action\n");
+		return -EOPNOTSUPP;
+	}
+
+	MLX5_SET(dr_action_hw_set, hw_action, opcode, hw_opcode);
+
+	MLX5_SET(dr_action_hw_set, hw_action, destination_field_code,
+		 hw_action_info->hw_field);
+
+	MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter,
+		 hw_action_info->start + offset);
+
+	MLX5_SET(dr_action_hw_set, hw_action, destination_length,
+		 length == 32 ? 0 : length);
+
+	MLX5_SET(dr_action_hw_set, hw_action, inline_data, data);
+
+	*ret_hw_info = hw_action_info;
+
+	return 0;
+}
+
+static int
+dr_action_modify_check_field_limitation(struct mlx5dr_domain *dmn,
+					const __be64 *sw_action)
+{
+	u16 sw_field;
+	u8 action;
+
+	sw_field = MLX5_GET(set_action_in, sw_action, field);
+	action = MLX5_GET(set_action_in, sw_action, action_type);
+
+	/* Check if SW field is supported in current domain (RX/TX) */
+	if (action == MLX5_ACTION_TYPE_SET) {
+		if (sw_field == MLX5_ACTION_IN_FIELD_METADATA_REG_A) {
+			if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_TX) {
+				mlx5dr_dbg(dmn, "Unsupported field %d for RX/FDB set action\n",
+					   sw_field);
+				return -EINVAL;
+			}
+		}
+
+		if (sw_field == MLX5_ACTION_IN_FIELD_METADATA_REG_B) {
+			if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_RX) {
+				mlx5dr_dbg(dmn, "Unsupported field %d for TX/FDB set action\n",
+					   sw_field);
+				return -EINVAL;
+			}
+		}
+	} else if (action == MLX5_ACTION_TYPE_ADD) {
+		if (sw_field != MLX5_ACTION_IN_FIELD_OUT_IP_TTL &&
+		    sw_field != MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT &&
+		    sw_field != MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM &&
+		    sw_field != MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM) {
+			mlx5dr_dbg(dmn, "Unsupported field %d for add action\n", sw_field);
+			return -EINVAL;
+		}
+	} else {
+		mlx5dr_info(dmn, "Unsupported action %d for modify action\n", action);
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static bool
+dr_action_modify_check_is_ttl_modify(const u64 *sw_action)
+{
+	u16 sw_field = MLX5_GET(set_action_in, sw_action, field);
+
+	return sw_field == MLX5_ACTION_IN_FIELD_OUT_IP_TTL;
+}
+
+static int dr_actions_convert_modify_header(struct mlx5dr_domain *dmn,
+					    u32 max_hw_actions,
+					    u32 num_sw_actions,
+					    __be64 sw_actions[],
+					    __be64 hw_actions[],
+					    u32 *num_hw_actions,
+					    bool *modify_ttl)
+{
+	const struct dr_action_modify_field_conv *hw_action_info;
+	u16 hw_field = MLX5DR_ACTION_MDFY_HW_FLD_RESERVED;
+	u32 l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_NONE;
+	u32 l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_NONE;
+	int ret, i, hw_idx = 0;
+	__be64 *sw_action;
+	__be64 hw_action;
+
+	*modify_ttl = false;
+
+	for (i = 0; i < num_sw_actions; i++) {
+		sw_action = &sw_actions[i];
+
+		ret = dr_action_modify_check_field_limitation(dmn, sw_action);
+		if (ret)
+			return ret;
+
+		if (!(*modify_ttl))
+			*modify_ttl = dr_action_modify_check_is_ttl_modify(sw_action);
+
+		/* Convert SW action to HW action */
+		ret = dr_action_modify_sw_to_hw(dmn,
+						sw_action,
+						&hw_action,
+						&hw_action_info);
+		if (ret)
+			return ret;
+
+		/* Due to a HW limitation we cannot modify two different L3 types */
+		if (l3_type && hw_action_info->l3_type &&
+		    hw_action_info->l3_type != l3_type) {
+			mlx5dr_dbg(dmn, "Action list can't support two different L3 types\n");
+			return -EINVAL;
+		}
+		if (hw_action_info->l3_type)
+			l3_type = hw_action_info->l3_type;
+
+		/* Due to a HW limitation we cannot modify two different L4 types */
+		if (l4_type && hw_action_info->l4_type &&
+		    hw_action_info->l4_type != l4_type) {
+			mlx5dr_dbg(dmn, "Action list can't support two different L4 types\n");
+			return -EINVAL;
+		}
+		if (hw_action_info->l4_type)
+			l4_type = hw_action_info->l4_type;
+
+		/* HW reads and executes two actions at once; this means we
+		 * need to create a gap if two actions access the same field
+		 */
+		if ((hw_idx % 2) && hw_field == hw_action_info->hw_field) {
+			/* Check if after gap insertion the total number of HW
+			 * modify actions doesn't exceed the limit
+			 */
+			hw_idx++;
+			if ((num_sw_actions + hw_idx - i) >= max_hw_actions) {
+				mlx5dr_dbg(dmn, "Modify header 
action number exceeds HW limit\n"); + return -EINVAL; + } + } + hw_field = hw_action_info->hw_field; + + hw_actions[hw_idx] = hw_action; + hw_idx++; + } + + *num_hw_actions = hw_idx; + + return 0; +} + +static int dr_action_create_modify_action(struct mlx5dr_domain *dmn, + size_t actions_sz, + __be64 actions[], + struct mlx5dr_action *action) +{ + struct mlx5dr_icm_chunk *chunk; + u32 max_hw_actions; + u32 num_hw_actions; + u32 num_sw_actions; + __be64 *hw_actions; + bool modify_ttl; + int ret; + + num_sw_actions = actions_sz / DR_MODIFY_ACTION_SIZE; + max_hw_actions = mlx5dr_icm_pool_chunk_size_to_entries(DR_CHUNK_SIZE_16); + + if (num_sw_actions > max_hw_actions) { + mlx5dr_dbg(dmn, "Max number of actions %d exceeds limit %d\n", + num_sw_actions, max_hw_actions); + return -EINVAL; + } + + chunk = mlx5dr_icm_alloc_chunk(dmn->action_icm_pool, DR_CHUNK_SIZE_16); + if (!chunk) + return -ENOMEM; + + hw_actions = kcalloc(1, max_hw_actions * DR_MODIFY_ACTION_SIZE, GFP_KERNEL); + if (!hw_actions) { + ret = -ENOMEM; + goto free_chunk; + } + + ret = dr_actions_convert_modify_header(dmn, + max_hw_actions, + num_sw_actions, + actions, + hw_actions, + &num_hw_actions, + &modify_ttl); + if (ret) + goto free_hw_actions; + + action->rewrite.chunk = chunk; + action->rewrite.modify_ttl = modify_ttl; + action->rewrite.data = (u8 *)hw_actions; + action->rewrite.num_of_actions = num_hw_actions; + action->rewrite.index = (chunk->icm_addr - + dmn->info.caps.hdr_modify_icm_addr) / + ACTION_CACHE_LINE_SIZE; + + ret = mlx5dr_send_postsend_action(dmn, action); + if (ret) + goto free_hw_actions; + + return 0; + +free_hw_actions: + kfree(hw_actions); +free_chunk: + mlx5dr_icm_free_chunk(chunk); + return ret; +} + +struct mlx5dr_action * +mlx5dr_action_create_modify_header(struct mlx5dr_domain *dmn, + u32 flags, + size_t actions_sz, + __be64 actions[]) +{ + struct mlx5dr_action *action; + int ret = 0; + + refcount_inc(&dmn->refcount); + + if (actions_sz % DR_MODIFY_ACTION_SIZE) { + mlx5dr_dbg(dmn, "Invalid modify actions size provided\n"); + goto dec_ref; + } + + action = dr_action_create_generic(DR_ACTION_TYP_MODIFY_HDR); + if (!action) + goto dec_ref; + + action->rewrite.dmn = dmn; + + ret = dr_action_create_modify_action(dmn, + actions_sz, + actions, + action); + if (ret) { + mlx5dr_dbg(dmn, "Failed creating modify header action %d\n", ret); + goto free_action; + } + + return action; + +free_action: + kfree(action); +dec_ref: + refcount_dec(&dmn->refcount); + return NULL; +} + +struct mlx5dr_action * +mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn, + u32 vport, u8 vhca_id_valid, + u16 vhca_id) +{ + struct mlx5dr_cmd_vport_cap *vport_cap; + struct mlx5dr_domain *vport_dmn; + struct mlx5dr_action *action; + u8 peer_vport; + + peer_vport = vhca_id_valid && (vhca_id != dmn->info.caps.gvmi); + vport_dmn = peer_vport ? 
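+		/* a vhca_id other than ours means the vport is reached
+		 * through the peer domain
+		 */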
dmn->peer_dmn : dmn; + if (!vport_dmn) { + mlx5dr_dbg(dmn, "No peer vport domain for given vhca_id\n"); + return NULL; + } + + if (vport_dmn->type != MLX5DR_DOMAIN_TYPE_FDB) { + mlx5dr_dbg(dmn, "Domain doesn't support vport actions\n"); + return NULL; + } + + vport_cap = mlx5dr_get_vport_cap(&vport_dmn->info.caps, vport); + if (!vport_cap) { + mlx5dr_dbg(dmn, "Failed to get vport %d caps\n", vport); + return NULL; + } + + action = dr_action_create_generic(DR_ACTION_TYP_VPORT); + if (!action) + return NULL; + + action->vport.dmn = vport_dmn; + action->vport.caps = vport_cap; + + return action; +} + +int mlx5dr_action_destroy(struct mlx5dr_action *action) +{ + if (refcount_read(&action->refcount) > 1) + return -EBUSY; + + switch (action->action_type) { + case DR_ACTION_TYP_FT: + if (!action->dest_tbl.is_fw_tbl) + refcount_dec(&action->dest_tbl.tbl->refcount); + break; + case DR_ACTION_TYP_TNL_L2_TO_L2: + refcount_dec(&action->reformat.dmn->refcount); + break; + case DR_ACTION_TYP_TNL_L3_TO_L2: + mlx5dr_icm_free_chunk(action->rewrite.chunk); + refcount_dec(&action->reformat.dmn->refcount); + break; + case DR_ACTION_TYP_L2_TO_TNL_L2: + case DR_ACTION_TYP_L2_TO_TNL_L3: + mlx5dr_cmd_destroy_reformat_ctx((action->reformat.dmn)->mdev, + action->reformat.reformat_id); + refcount_dec(&action->reformat.dmn->refcount); + break; + case DR_ACTION_TYP_MODIFY_HDR: + mlx5dr_icm_free_chunk(action->rewrite.chunk); + refcount_dec(&action->rewrite.dmn->refcount); + break; + default: + break; + } + + kfree(action); + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c new file mode 100644 index 000000000000..41662c4e2664 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c @@ -0,0 +1,480 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2019 Mellanox Technologies. 
*/ + +#include "dr_types.h" + +int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev, + bool other_vport, + u16 vport_number, + u64 *icm_address_rx, + u64 *icm_address_tx) +{ + u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {}; + u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {}; + int err; + + MLX5_SET(query_esw_vport_context_in, in, opcode, + MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT); + MLX5_SET(query_esw_vport_context_in, in, other_vport, other_vport); + MLX5_SET(query_esw_vport_context_in, in, vport_number, vport_number); + + err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + if (err) + return err; + + *icm_address_rx = + MLX5_GET64(query_esw_vport_context_out, out, + esw_vport_context.sw_steering_vport_icm_address_rx); + *icm_address_tx = + MLX5_GET64(query_esw_vport_context_out, out, + esw_vport_context.sw_steering_vport_icm_address_tx); + return 0; +} + +int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_vport, + u16 vport_number, u16 *gvmi) +{ + u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {}; + int out_size; + void *out; + int err; + + out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out); + out = kzalloc(out_size, GFP_KERNEL); + if (!out) + return -ENOMEM; + + MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP); + MLX5_SET(query_hca_cap_in, in, other_function, other_vport); + MLX5_SET(query_hca_cap_in, in, function_id, vport_number); + MLX5_SET(query_hca_cap_in, in, op_mod, + MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 | + HCA_CAP_OPMOD_GET_CUR); + + err = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size); + if (err) { + kfree(out); + return err; + } + + *gvmi = MLX5_GET(query_hca_cap_out, out, capability.cmd_hca_cap.vhca_id); + + kfree(out); + return 0; +} + +int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev, + struct mlx5dr_esw_caps *caps) +{ + caps->drop_icm_address_rx = + MLX5_CAP64_ESW_FLOWTABLE(mdev, + sw_steering_fdb_action_drop_icm_address_rx); + caps->drop_icm_address_tx = + MLX5_CAP64_ESW_FLOWTABLE(mdev, + sw_steering_fdb_action_drop_icm_address_tx); + caps->uplink_icm_address_rx = + MLX5_CAP64_ESW_FLOWTABLE(mdev, + sw_steering_uplink_icm_address_rx); + caps->uplink_icm_address_tx = + MLX5_CAP64_ESW_FLOWTABLE(mdev, + sw_steering_uplink_icm_address_tx); + caps->sw_owner = + MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, + sw_owner); + + return 0; +} + +int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev, + struct mlx5dr_cmd_caps *caps) +{ + caps->prio_tag_required = MLX5_CAP_GEN(mdev, prio_tag_required); + caps->eswitch_manager = MLX5_CAP_GEN(mdev, eswitch_manager); + caps->gvmi = MLX5_CAP_GEN(mdev, vhca_id); + caps->flex_protocols = MLX5_CAP_GEN(mdev, flex_parser_protocols); + + if (mlx5dr_matcher_supp_flex_parser_icmp_v4(caps)) { + caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0); + caps->flex_parser_id_icmp_dw1 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw1); + } + + if (mlx5dr_matcher_supp_flex_parser_icmp_v6(caps)) { + caps->flex_parser_id_icmpv6_dw0 = + MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw0); + caps->flex_parser_id_icmpv6_dw1 = + MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw1); + } + + caps->nic_rx_drop_address = + MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_rx_action_drop_icm_address); + caps->nic_tx_drop_address = + MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_tx_action_drop_icm_address); + caps->nic_tx_allow_address = + MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_tx_action_allow_icm_address); + + caps->rx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner); + caps->max_ft_level = 
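+		/* the checksum recalc W/A FW table is created one level below this */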
MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_ft_level); + + caps->tx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner); + + caps->log_icm_size = MLX5_CAP_DEV_MEM(mdev, log_steering_sw_icm_size); + caps->hdr_modify_icm_addr = + MLX5_CAP64_DEV_MEM(mdev, header_modify_sw_icm_start_address); + + caps->roce_min_src_udp = MLX5_CAP_ROCE(mdev, r_roce_min_src_udp_port); + + return 0; +} + +int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev, + enum fs_flow_table_type type, + u32 table_id, + struct mlx5dr_cmd_query_flow_table_details *output) +{ + u32 out[MLX5_ST_SZ_DW(query_flow_table_out)] = {}; + u32 in[MLX5_ST_SZ_DW(query_flow_table_in)] = {}; + int err; + + MLX5_SET(query_flow_table_in, in, opcode, + MLX5_CMD_OP_QUERY_FLOW_TABLE); + + MLX5_SET(query_flow_table_in, in, table_type, type); + MLX5_SET(query_flow_table_in, in, table_id, table_id); + + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + if (err) + return err; + + output->status = MLX5_GET(query_flow_table_out, out, status); + output->level = MLX5_GET(query_flow_table_out, out, flow_table_context.level); + + output->sw_owner_icm_root_1 = MLX5_GET64(query_flow_table_out, out, + flow_table_context.sw_owner_icm_root_1); + output->sw_owner_icm_root_0 = MLX5_GET64(query_flow_table_out, out, + flow_table_context.sw_owner_icm_root_0); + + return 0; +} + +int mlx5dr_cmd_sync_steering(struct mlx5_core_dev *mdev) +{ + u32 out[MLX5_ST_SZ_DW(sync_steering_out)] = {}; + u32 in[MLX5_ST_SZ_DW(sync_steering_in)] = {}; + + MLX5_SET(sync_steering_in, in, opcode, MLX5_CMD_OP_SYNC_STEERING); + + return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); +} + +int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev, + u32 table_type, + u32 table_id, + u32 group_id, + u32 modify_header_id, + u32 vport_id) +{ + u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {}; + void *in_flow_context; + unsigned int inlen; + void *in_dests; + u32 *in; + int err; + + inlen = MLX5_ST_SZ_BYTES(set_fte_in) + + 1 * MLX5_ST_SZ_BYTES(dest_format_struct); /* One destination only */ + + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY); + MLX5_SET(set_fte_in, in, table_type, table_type); + MLX5_SET(set_fte_in, in, table_id, table_id); + + in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context); + MLX5_SET(flow_context, in_flow_context, group_id, group_id); + MLX5_SET(flow_context, in_flow_context, modify_header_id, modify_header_id); + MLX5_SET(flow_context, in_flow_context, destination_list_size, 1); + MLX5_SET(flow_context, in_flow_context, action, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_MOD_HDR); + + in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination); + MLX5_SET(dest_format_struct, in_dests, destination_type, + MLX5_FLOW_DESTINATION_TYPE_VPORT); + MLX5_SET(dest_format_struct, in_dests, destination_id, vport_id); + + err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out)); + kvfree(in); + + return err; +} + +int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev, + u32 table_type, + u32 table_id) +{ + u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {}; + u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {}; + + MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY); + MLX5_SET(delete_fte_in, in, table_type, table_type); + MLX5_SET(delete_fte_in, in, table_id, table_id); + + return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); +} + +int mlx5dr_cmd_alloc_modify_header(struct mlx5_core_dev *mdev, + u32 table_type, + u8 
num_of_actions, + u64 *actions, + u32 *modify_header_id) +{ + u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)] = {}; + void *p_actions; + u32 inlen; + u32 *in; + int err; + + inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) + + num_of_actions * sizeof(u64); + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + MLX5_SET(alloc_modify_header_context_in, in, opcode, + MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT); + MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type); + MLX5_SET(alloc_modify_header_context_in, in, num_of_actions, num_of_actions); + p_actions = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions); + memcpy(p_actions, actions, num_of_actions * sizeof(u64)); + + err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out)); + if (err) + goto out; + + *modify_header_id = MLX5_GET(alloc_modify_header_context_out, out, + modify_header_id); +out: + kvfree(in); + return err; +} + +int mlx5dr_cmd_dealloc_modify_header(struct mlx5_core_dev *mdev, + u32 modify_header_id) +{ + u32 out[MLX5_ST_SZ_DW(dealloc_modify_header_context_out)] = {}; + u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)] = {}; + + MLX5_SET(dealloc_modify_header_context_in, in, opcode, + MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT); + MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id, + modify_header_id); + + return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); +} + +int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev, + u32 table_type, + u32 table_id, + u32 *group_id) +{ + u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {}; + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + u32 *in; + int err; + + in = kzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + MLX5_SET(create_flow_group_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP); + MLX5_SET(create_flow_group_in, in, table_type, table_type); + MLX5_SET(create_flow_group_in, in, table_id, table_id); + + err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out)); + if (err) + goto out; + + *group_id = MLX5_GET(create_flow_group_out, out, group_id); + +out: + kfree(in); + return err; +} + +int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev, + u32 table_type, + u32 table_id, + u32 group_id) +{ + u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {}; + u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {}; + + MLX5_SET(create_flow_group_in, in, opcode, MLX5_CMD_OP_DESTROY_FLOW_GROUP); + MLX5_SET(destroy_flow_group_in, in, table_type, table_type); + MLX5_SET(destroy_flow_group_in, in, table_id, table_id); + MLX5_SET(destroy_flow_group_in, in, group_id, group_id); + + return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); +} + +int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev, + u32 table_type, + u64 icm_addr_rx, + u64 icm_addr_tx, + u8 level, + bool sw_owner, + bool term_tbl, + u64 *fdb_rx_icm_addr, + u32 *table_id) +{ + u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {}; + u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {}; + void *ft_mdev; + int err; + + MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE); + MLX5_SET(create_flow_table_in, in, table_type, table_type); + + ft_mdev = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context); + MLX5_SET(flow_table_context, ft_mdev, termination_table, term_tbl); + MLX5_SET(flow_table_context, ft_mdev, sw_owner, sw_owner); + MLX5_SET(flow_table_context, ft_mdev, level, level); + + if (sw_owner) { + /* icm_addr_0 used for FDB RX / NIC TX / NIC_RX + * icm_addr_1 used for FDB TX + */ + if (table_type == 
MLX5_FLOW_TABLE_TYPE_NIC_RX) {
+			MLX5_SET64(flow_table_context, ft_mdev,
+				   sw_owner_icm_root_0, icm_addr_rx);
+		} else if (table_type == MLX5_FLOW_TABLE_TYPE_NIC_TX) {
+			MLX5_SET64(flow_table_context, ft_mdev,
+				   sw_owner_icm_root_0, icm_addr_tx);
+		} else if (table_type == MLX5_FLOW_TABLE_TYPE_FDB) {
+			MLX5_SET64(flow_table_context, ft_mdev,
+				   sw_owner_icm_root_0, icm_addr_rx);
+			MLX5_SET64(flow_table_context, ft_mdev,
+				   sw_owner_icm_root_1, icm_addr_tx);
+		}
+	}
+
+	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+	if (err)
+		return err;
+
+	*table_id = MLX5_GET(create_flow_table_out, out, table_id);
+	if (!sw_owner && table_type == MLX5_FLOW_TABLE_TYPE_FDB)
+		*fdb_rx_icm_addr =
+			(u64)MLX5_GET(create_flow_table_out, out, icm_address_31_0) |
+			(u64)MLX5_GET(create_flow_table_out, out, icm_address_39_32) << 32 |
+			(u64)MLX5_GET(create_flow_table_out, out, icm_address_63_40) << 40;
+
+	return 0;
+}
+
+int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev,
+				  u32 table_id,
+				  u32 table_type)
+{
+	u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {};
+	u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {};
+
+	MLX5_SET(destroy_flow_table_in, in, opcode,
+		 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
+	MLX5_SET(destroy_flow_table_in, in, table_type, table_type);
+	MLX5_SET(destroy_flow_table_in, in, table_id, table_id);
+
+	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
+				   enum mlx5_reformat_ctx_type rt,
+				   size_t reformat_size,
+				   void *reformat_data,
+				   u32 *reformat_id)
+{
+	u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {};
+	size_t inlen, cmd_data_sz, cmd_total_sz;
+	void *prctx;
+	void *pdata;
+	void *in;
+	int err;
+
+	cmd_total_sz = MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in);
+	cmd_data_sz = MLX5_FLD_SZ_BYTES(alloc_packet_reformat_context_in,
+					packet_reformat_context.reformat_data);
+	inlen = ALIGN(cmd_total_sz + reformat_size - cmd_data_sz, 4);
+	in = kvzalloc(inlen, GFP_KERNEL);
+	if (!in)
+		return -ENOMEM;
+
+	MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
+		 MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
+
+	prctx = MLX5_ADDR_OF(alloc_packet_reformat_context_in, in, packet_reformat_context);
+	pdata = MLX5_ADDR_OF(packet_reformat_context_in, prctx, reformat_data);
+
+	MLX5_SET(packet_reformat_context_in, prctx, reformat_type, rt);
+	MLX5_SET(packet_reformat_context_in, prctx, reformat_data_size, reformat_size);
+	memcpy(pdata, reformat_data, reformat_size);
+
+	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
+	if (err)
+		goto err_free_in;
+
+	*reformat_id = MLX5_GET(alloc_packet_reformat_context_out, out, packet_reformat_id);
+
+err_free_in:
+	kvfree(in);
+	return err;
+}
+
+void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev,
+				     u32 reformat_id)
+{
+	u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_out)] = {};
+	u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {};
+
+	MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
+		 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
+	MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
+		 reformat_id);
+
+	mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
+			 u16 index, struct mlx5dr_cmd_gid_attr *attr)
+{
+	u32 out[MLX5_ST_SZ_DW(query_roce_address_out)] = {};
+	u32 in[MLX5_ST_SZ_DW(query_roce_address_in)] = {};
+	int err;
+
+	MLX5_SET(query_roce_address_in, in, opcode,
+		 MLX5_CMD_OP_QUERY_ROCE_ADDRESS);
+
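+	/* each RoCE address table entry carries the GID, the source MAC and
+	 * the RoCE version, all copied out below
+	 */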
+	MLX5_SET(query_roce_address_in, in, roce_address_index, index);
+	MLX5_SET(query_roce_address_in, in, vhca_port_num, vhca_port_num);
+
+	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+	if (err)
+		return err;
+
+	memcpy(&attr->gid,
+	       MLX5_ADDR_OF(query_roce_address_out,
+			    out, roce_address.source_l3_address),
+	       sizeof(attr->gid));
+	memcpy(attr->mac,
+	       MLX5_ADDR_OF(query_roce_address_out, out,
+			    roce_address.source_mac_47_32),
+	       sizeof(attr->mac));
+
+	if (MLX5_GET(query_roce_address_out, out,
+		     roce_address.roce_version) == MLX5_ROCE_VERSION_2)
+		attr->roce_ver = MLX5_ROCE_VERSION_2;
+	else
+		attr->roce_ver = MLX5_ROCE_VERSION_1;
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_crc32.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_crc32.c
new file mode 100644
index 000000000000..9e2eccbb1eb8
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_crc32.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+/* Copyright (c) 2011-2015 Stephan Brumme. All rights reserved.
+ * Slicing-by-16 contributed by Bulat Ziganshin
+ *
+ * This software is provided 'as-is', without any express or implied warranty.
+ * In no event will the author be held liable for any damages arising from the
+ * use of this software.
+ *
+ * Permission is granted to anyone to use this software for any purpose,
+ * including commercial applications, and to alter it and redistribute it
+ * freely, subject to the following restrictions:
+ *
+ * 1. The origin of this software must not be misrepresented; you must not
+ *    claim that you wrote the original software.
+ * 2. If you use this software in a product, an acknowledgment in the product
+ *    documentation would be appreciated but is not required.
+ * 3. Altered source versions must be plainly marked as such, and must not be
+ *    misrepresented as being the original software.
+ *
+ * Taken from http://create.stephan-brumme.com/crc32/ and adapted. 
+ */ + +#include "dr_types.h" + +#define DR_STE_CRC_POLY 0xEDB88320L + +static u32 dr_ste_crc_tab32[8][256]; + +static void dr_crc32_calc_lookup_entry(u32 (*tbl)[256], u8 i, u8 j) +{ + tbl[i][j] = (tbl[i - 1][j] >> 8) ^ tbl[0][tbl[i - 1][j] & 0xff]; +} + +void mlx5dr_crc32_init_table(void) +{ + u32 crc, i, j; + + for (i = 0; i < 256; i++) { + crc = i; + for (j = 0; j < 8; j++) { + if (crc & 0x00000001L) + crc = (crc >> 1) ^ DR_STE_CRC_POLY; + else + crc = crc >> 1; + } + dr_ste_crc_tab32[0][i] = crc; + } + + /* Init CRC lookup tables according to crc_slice_8 algorithm */ + for (i = 0; i < 256; i++) { + dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 1, i); + dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 2, i); + dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 3, i); + dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 4, i); + dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 5, i); + dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 6, i); + dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 7, i); + } +} + +/* Compute CRC32 (Slicing-by-8 algorithm) */ +u32 mlx5dr_crc32_slice8_calc(const void *input_data, size_t length) +{ + const u32 *curr = (const u32 *)input_data; + const u8 *curr_char; + u32 crc = 0, one, two; + + if (!input_data) + return 0; + + /* Process eight bytes at once (Slicing-by-8) */ + while (length >= 8) { + one = *curr++ ^ crc; + two = *curr++; + + crc = dr_ste_crc_tab32[0][(two >> 24) & 0xff] + ^ dr_ste_crc_tab32[1][(two >> 16) & 0xff] + ^ dr_ste_crc_tab32[2][(two >> 8) & 0xff] + ^ dr_ste_crc_tab32[3][two & 0xff] + ^ dr_ste_crc_tab32[4][(one >> 24) & 0xff] + ^ dr_ste_crc_tab32[5][(one >> 16) & 0xff] + ^ dr_ste_crc_tab32[6][(one >> 8) & 0xff] + ^ dr_ste_crc_tab32[7][one & 0xff]; + + length -= 8; + } + + curr_char = (const u8 *)curr; + /* Remaining 1 to 7 bytes (standard algorithm) */ + while (length-- != 0) + crc = (crc >> 8) ^ dr_ste_crc_tab32[0][(crc & 0xff) + ^ *curr_char++]; + + return ((crc >> 24) & 0xff) | ((crc << 8) & 0xff0000) | + ((crc >> 8) & 0xff00) | ((crc << 24) & 0xff000000); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c new file mode 100644 index 000000000000..5b24732b18c0 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c @@ -0,0 +1,393 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2019 Mellanox Technologies. */ + +#include <linux/mlx5/eswitch.h> +#include "dr_types.h" + +static int dr_domain_init_cache(struct mlx5dr_domain *dmn) +{ + /* Per vport cached FW FT for checksum recalculation, this + * recalculation is needed due to a HW bug. 
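+	 * The tables are created lazily, on the first rule that needs one
+	 * (see mlx5dr_domain_cache_get_recalc_cs_ft_addr), and are freed
+	 * only when the domain is destroyed.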
+ */ + dmn->cache.recalc_cs_ft = kcalloc(dmn->info.caps.num_vports, + sizeof(dmn->cache.recalc_cs_ft[0]), + GFP_KERNEL); + if (!dmn->cache.recalc_cs_ft) + return -ENOMEM; + + return 0; +} + +static void dr_domain_uninit_cache(struct mlx5dr_domain *dmn) +{ + int i; + + for (i = 0; i < dmn->info.caps.num_vports; i++) { + if (!dmn->cache.recalc_cs_ft[i]) + continue; + + mlx5dr_fw_destroy_recalc_cs_ft(dmn, dmn->cache.recalc_cs_ft[i]); + } + + kfree(dmn->cache.recalc_cs_ft); +} + +int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn, + u32 vport_num, + u64 *rx_icm_addr) +{ + struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft; + + recalc_cs_ft = dmn->cache.recalc_cs_ft[vport_num]; + if (!recalc_cs_ft) { + /* Table not in cache, need to allocate a new one */ + recalc_cs_ft = mlx5dr_fw_create_recalc_cs_ft(dmn, vport_num); + if (!recalc_cs_ft) + return -EINVAL; + + dmn->cache.recalc_cs_ft[vport_num] = recalc_cs_ft; + } + + *rx_icm_addr = recalc_cs_ft->rx_icm_addr; + + return 0; +} + +static int dr_domain_init_resources(struct mlx5dr_domain *dmn) +{ + int ret; + + ret = mlx5_core_alloc_pd(dmn->mdev, &dmn->pdn); + if (ret) { + mlx5dr_dbg(dmn, "Couldn't allocate PD\n"); + return ret; + } + + dmn->uar = mlx5_get_uars_page(dmn->mdev); + if (!dmn->uar) { + mlx5dr_err(dmn, "Couldn't allocate UAR\n"); + ret = -ENOMEM; + goto clean_pd; + } + + dmn->ste_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_STE); + if (!dmn->ste_icm_pool) { + mlx5dr_err(dmn, "Couldn't get icm memory\n"); + ret = -ENOMEM; + goto clean_uar; + } + + dmn->action_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_MODIFY_ACTION); + if (!dmn->action_icm_pool) { + mlx5dr_err(dmn, "Couldn't get action icm memory\n"); + ret = -ENOMEM; + goto free_ste_icm_pool; + } + + ret = mlx5dr_send_ring_alloc(dmn); + if (ret) { + mlx5dr_err(dmn, "Couldn't create send-ring\n"); + goto free_action_icm_pool; + } + + return 0; + +free_action_icm_pool: + mlx5dr_icm_pool_destroy(dmn->action_icm_pool); +free_ste_icm_pool: + mlx5dr_icm_pool_destroy(dmn->ste_icm_pool); +clean_uar: + mlx5_put_uars_page(dmn->mdev, dmn->uar); +clean_pd: + mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn); + + return ret; +} + +static void dr_domain_uninit_resources(struct mlx5dr_domain *dmn) +{ + mlx5dr_send_ring_free(dmn, dmn->send_ring); + mlx5dr_icm_pool_destroy(dmn->action_icm_pool); + mlx5dr_icm_pool_destroy(dmn->ste_icm_pool); + mlx5_put_uars_page(dmn->mdev, dmn->uar); + mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn); +} + +static int dr_domain_query_vport(struct mlx5dr_domain *dmn, + bool other_vport, + u16 vport_number) +{ + struct mlx5dr_cmd_vport_cap *vport_caps; + int ret; + + vport_caps = &dmn->info.caps.vports_caps[vport_number]; + + ret = mlx5dr_cmd_query_esw_vport_context(dmn->mdev, + other_vport, + vport_number, + &vport_caps->icm_address_rx, + &vport_caps->icm_address_tx); + if (ret) + return ret; + + ret = mlx5dr_cmd_query_gvmi(dmn->mdev, + other_vport, + vport_number, + &vport_caps->vport_gvmi); + if (ret) + return ret; + + vport_caps->num = vport_number; + vport_caps->vhca_gvmi = dmn->info.caps.gvmi; + + return 0; +} + +static int dr_domain_query_vports(struct mlx5dr_domain *dmn) +{ + struct mlx5dr_esw_caps *esw_caps = &dmn->info.caps.esw_caps; + struct mlx5dr_cmd_vport_cap *wire_vport; + int vport; + int ret; + + /* Query vports (except wire vport) */ + for (vport = 0; vport < dmn->info.caps.num_esw_ports - 1; vport++) { + ret = dr_domain_query_vport(dmn, !!vport, vport); + if (ret) + return ret; + } + + /* Last vport is the wire port */ + wire_vport = 
&dmn->info.caps.vports_caps[vport]; + wire_vport->num = WIRE_PORT; + wire_vport->icm_address_rx = esw_caps->uplink_icm_address_rx; + wire_vport->icm_address_tx = esw_caps->uplink_icm_address_tx; + wire_vport->vport_gvmi = 0; + wire_vport->vhca_gvmi = dmn->info.caps.gvmi; + + return 0; +} + +static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev, + struct mlx5dr_domain *dmn) +{ + int ret; + + if (!dmn->info.caps.eswitch_manager) + return -EOPNOTSUPP; + + ret = mlx5dr_cmd_query_esw_caps(mdev, &dmn->info.caps.esw_caps); + if (ret) + return ret; + + dmn->info.caps.fdb_sw_owner = dmn->info.caps.esw_caps.sw_owner; + dmn->info.caps.esw_rx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_rx; + dmn->info.caps.esw_tx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_tx; + + dmn->info.caps.vports_caps = kcalloc(dmn->info.caps.num_esw_ports, + sizeof(dmn->info.caps.vports_caps[0]), + GFP_KERNEL); + if (!dmn->info.caps.vports_caps) + return -ENOMEM; + + ret = dr_domain_query_vports(dmn); + if (ret) { + mlx5dr_dbg(dmn, "Failed to query vports caps\n"); + goto free_vports_caps; + } + + dmn->info.caps.num_vports = dmn->info.caps.num_esw_ports - 1; + + return 0; + +free_vports_caps: + kfree(dmn->info.caps.vports_caps); + dmn->info.caps.vports_caps = NULL; + return ret; +} + +static int dr_domain_caps_init(struct mlx5_core_dev *mdev, + struct mlx5dr_domain *dmn) +{ + struct mlx5dr_cmd_vport_cap *vport_cap; + int ret; + + if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) { + mlx5dr_dbg(dmn, "Failed to allocate domain, bad link type\n"); + return -EOPNOTSUPP; + } + + dmn->info.caps.num_esw_ports = mlx5_eswitch_get_total_vports(mdev); + + ret = mlx5dr_cmd_query_device(mdev, &dmn->info.caps); + if (ret) + return ret; + + ret = dr_domain_query_fdb_caps(mdev, dmn); + if (ret) + return ret; + + switch (dmn->type) { + case MLX5DR_DOMAIN_TYPE_NIC_RX: + if (!dmn->info.caps.rx_sw_owner) + return -ENOTSUPP; + + dmn->info.supp_sw_steering = true; + dmn->info.rx.ste_type = MLX5DR_STE_TYPE_RX; + dmn->info.rx.default_icm_addr = dmn->info.caps.nic_rx_drop_address; + dmn->info.rx.drop_icm_addr = dmn->info.caps.nic_rx_drop_address; + break; + case MLX5DR_DOMAIN_TYPE_NIC_TX: + if (!dmn->info.caps.tx_sw_owner) + return -ENOTSUPP; + + dmn->info.supp_sw_steering = true; + dmn->info.tx.ste_type = MLX5DR_STE_TYPE_TX; + dmn->info.tx.default_icm_addr = dmn->info.caps.nic_tx_allow_address; + dmn->info.tx.drop_icm_addr = dmn->info.caps.nic_tx_drop_address; + break; + case MLX5DR_DOMAIN_TYPE_FDB: + if (!dmn->info.caps.eswitch_manager) + return -ENOTSUPP; + + if (!dmn->info.caps.fdb_sw_owner) + return -ENOTSUPP; + + dmn->info.rx.ste_type = MLX5DR_STE_TYPE_RX; + dmn->info.tx.ste_type = MLX5DR_STE_TYPE_TX; + vport_cap = mlx5dr_get_vport_cap(&dmn->info.caps, 0); + if (!vport_cap) { + mlx5dr_dbg(dmn, "Failed to get esw manager vport\n"); + return -ENOENT; + } + + dmn->info.supp_sw_steering = true; + dmn->info.tx.default_icm_addr = vport_cap->icm_address_tx; + dmn->info.rx.default_icm_addr = vport_cap->icm_address_rx; + dmn->info.rx.drop_icm_addr = dmn->info.caps.esw_rx_drop_address; + dmn->info.tx.drop_icm_addr = dmn->info.caps.esw_tx_drop_address; + break; + default: + mlx5dr_dbg(dmn, "Invalid domain\n"); + ret = -EINVAL; + break; + } + + return ret; +} + +static void dr_domain_caps_uninit(struct mlx5dr_domain *dmn) +{ + kfree(dmn->info.caps.vports_caps); +} + +struct mlx5dr_domain * +mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type) +{ + struct mlx5dr_domain *dmn; + int ret; + + 
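+	/* NIC_RX, NIC_TX and FDB are the only valid domain types */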
+	if (type > MLX5DR_DOMAIN_TYPE_FDB)
+		return NULL;
+
+	dmn = kzalloc(sizeof(*dmn), GFP_KERNEL);
+	if (!dmn)
+		return NULL;
+
+	dmn->mdev = mdev;
+	dmn->type = type;
+	refcount_set(&dmn->refcount, 1);
+	mutex_init(&dmn->mutex);
+
+	if (dr_domain_caps_init(mdev, dmn)) {
+		mlx5dr_dbg(dmn, "Failed to init domain, no caps\n");
+		goto free_domain;
+	}
+
+	dmn->info.max_log_action_icm_sz = DR_CHUNK_SIZE_4K;
+	dmn->info.max_log_sw_icm_sz = min_t(u32, DR_CHUNK_SIZE_1024K,
+					    dmn->info.caps.log_icm_size);
+
+	if (!dmn->info.supp_sw_steering) {
+		mlx5dr_err(dmn, "SW steering is not supported\n");
+		goto uninit_caps;
+	}
+
+	/* Allocate resources */
+	ret = dr_domain_init_resources(dmn);
+	if (ret) {
+		mlx5dr_err(dmn, "Failed to init domain resources\n");
+		goto uninit_caps;
+	}
+
+	ret = dr_domain_init_cache(dmn);
+	if (ret) {
+		mlx5dr_err(dmn, "Failed to initialize domain cache\n");
+		goto uninit_resources;
+	}
+
+	/* Init CRC table for htbl CRC calculation */
+	mlx5dr_crc32_init_table();
+
+	return dmn;
+
+uninit_resources:
+	dr_domain_uninit_resources(dmn);
+uninit_caps:
+	dr_domain_caps_uninit(dmn);
+free_domain:
+	kfree(dmn);
+	return NULL;
+}
+
+/* Ensure synchronization of the device steering tables with updates made by SW
+ * insertion.
+ */
+int mlx5dr_domain_sync(struct mlx5dr_domain *dmn, u32 flags)
+{
+	int ret = 0;
+
+	if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_SW) {
+		mutex_lock(&dmn->mutex);
+		ret = mlx5dr_send_ring_force_drain(dmn);
+		mutex_unlock(&dmn->mutex);
+		if (ret)
+			return ret;
+	}
+
+	if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_HW)
+		ret = mlx5dr_cmd_sync_steering(dmn->mdev);
+
+	return ret;
+}
+
+int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
+{
+	if (refcount_read(&dmn->refcount) > 1)
+		return -EBUSY;
+
+	/* make sure resources are not used by the hardware */
+	mlx5dr_cmd_sync_steering(dmn->mdev);
+	dr_domain_uninit_cache(dmn);
+	dr_domain_uninit_resources(dmn);
+	dr_domain_caps_uninit(dmn);
+	mutex_destroy(&dmn->mutex);
+	kfree(dmn);
+	return 0;
+}
+
+void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
+			    struct mlx5dr_domain *peer_dmn)
+{
+	mutex_lock(&dmn->mutex);
+
+	if (dmn->peer_dmn)
+		refcount_dec(&dmn->peer_dmn->refcount);
+
+	dmn->peer_dmn = peer_dmn;
+
+	if (dmn->peer_dmn)
+		refcount_inc(&dmn->peer_dmn->refcount);
+
+	mutex_unlock(&dmn->mutex);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
new file mode 100644
index 000000000000..60ef6e6171e3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. 
*/ + +#include <linux/types.h> +#include "dr_types.h" + +struct mlx5dr_fw_recalc_cs_ft * +mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num) +{ + struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft; + u32 table_id, group_id, modify_hdr_id; + u64 rx_icm_addr, modify_ttl_action; + int ret; + + recalc_cs_ft = kzalloc(sizeof(*recalc_cs_ft), GFP_KERNEL); + if (!recalc_cs_ft) + return NULL; + + ret = mlx5dr_cmd_create_flow_table(dmn->mdev, MLX5_FLOW_TABLE_TYPE_FDB, + 0, 0, dmn->info.caps.max_ft_level - 1, + false, true, &rx_icm_addr, &table_id); + if (ret) { + mlx5dr_err(dmn, "Failed creating TTL W/A FW flow table %d\n", ret); + goto free_ttl_tbl; + } + + ret = mlx5dr_cmd_create_empty_flow_group(dmn->mdev, + MLX5_FLOW_TABLE_TYPE_FDB, + table_id, &group_id); + if (ret) { + mlx5dr_err(dmn, "Failed creating TTL W/A FW flow group %d\n", ret); + goto destroy_flow_table; + } + + /* Modify TTL action by adding zero to trigger CS recalculation */ + modify_ttl_action = 0; + MLX5_SET(set_action_in, &modify_ttl_action, action_type, MLX5_ACTION_TYPE_ADD); + MLX5_SET(set_action_in, &modify_ttl_action, field, MLX5_ACTION_IN_FIELD_OUT_IP_TTL); + + ret = mlx5dr_cmd_alloc_modify_header(dmn->mdev, MLX5_FLOW_TABLE_TYPE_FDB, 1, + &modify_ttl_action, + &modify_hdr_id); + if (ret) { + mlx5dr_err(dmn, "Failed modify header TTL %d\n", ret); + goto destroy_flow_group; + } + + ret = mlx5dr_cmd_set_fte_modify_and_vport(dmn->mdev, + MLX5_FLOW_TABLE_TYPE_FDB, + table_id, group_id, modify_hdr_id, + vport_num); + if (ret) { + mlx5dr_err(dmn, "Failed setting TTL W/A flow table entry %d\n", ret); + goto dealloc_modify_header; + } + + recalc_cs_ft->modify_hdr_id = modify_hdr_id; + recalc_cs_ft->rx_icm_addr = rx_icm_addr; + recalc_cs_ft->table_id = table_id; + recalc_cs_ft->group_id = group_id; + + return recalc_cs_ft; + +dealloc_modify_header: + mlx5dr_cmd_dealloc_modify_header(dmn->mdev, modify_hdr_id); +destroy_flow_group: + mlx5dr_cmd_destroy_flow_group(dmn->mdev, + MLX5_FLOW_TABLE_TYPE_FDB, + table_id, group_id); +destroy_flow_table: + mlx5dr_cmd_destroy_flow_table(dmn->mdev, table_id, MLX5_FLOW_TABLE_TYPE_FDB); +free_ttl_tbl: + kfree(recalc_cs_ft); + return NULL; +} + +void mlx5dr_fw_destroy_recalc_cs_ft(struct mlx5dr_domain *dmn, + struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft) +{ + mlx5dr_cmd_del_flow_table_entry(dmn->mdev, + MLX5_FLOW_TABLE_TYPE_FDB, + recalc_cs_ft->table_id); + mlx5dr_cmd_dealloc_modify_header(dmn->mdev, recalc_cs_ft->modify_hdr_id); + mlx5dr_cmd_destroy_flow_group(dmn->mdev, + MLX5_FLOW_TABLE_TYPE_FDB, + recalc_cs_ft->table_id, + recalc_cs_ft->group_id); + mlx5dr_cmd_destroy_flow_table(dmn->mdev, + recalc_cs_ft->table_id, + MLX5_FLOW_TABLE_TYPE_FDB); + + kfree(recalc_cs_ft); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c new file mode 100644 index 000000000000..913f1e5aaaf2 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c @@ -0,0 +1,570 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2019 Mellanox Technologies. 
*/ + +#include "dr_types.h" + +#define DR_ICM_MODIFY_HDR_ALIGN_BASE 64 +#define DR_ICM_SYNC_THRESHOLD (64 * 1024 * 1024) + +struct mlx5dr_icm_pool; + +struct mlx5dr_icm_bucket { + struct mlx5dr_icm_pool *pool; + + /* Chunks that aren't visible to HW not directly and not in cache */ + struct list_head free_list; + unsigned int free_list_count; + + /* Used chunks, HW may be accessing this memory */ + struct list_head used_list; + unsigned int used_list_count; + + /* HW may be accessing this memory but at some future, + * undetermined time, it might cease to do so. Before deciding to call + * sync_ste, this list is moved to sync_list + */ + struct list_head hot_list; + unsigned int hot_list_count; + + /* Pending sync list, entries from the hot list are moved to this list. + * sync_ste is executed and then sync_list is concatenated to the free list + */ + struct list_head sync_list; + unsigned int sync_list_count; + + u32 total_chunks; + u32 num_of_entries; + u32 entry_size; + /* protect the ICM bucket */ + struct mutex mutex; +}; + +struct mlx5dr_icm_pool { + struct mlx5dr_icm_bucket *buckets; + enum mlx5dr_icm_type icm_type; + enum mlx5dr_icm_chunk_size max_log_chunk_sz; + enum mlx5dr_icm_chunk_size num_of_buckets; + struct list_head icm_mr_list; + /* protect the ICM MR list */ + struct mutex mr_mutex; + struct mlx5dr_domain *dmn; +}; + +struct mlx5dr_icm_dm { + u32 obj_id; + enum mlx5_sw_icm_type type; + phys_addr_t addr; + size_t length; +}; + +struct mlx5dr_icm_mr { + struct mlx5dr_icm_pool *pool; + struct mlx5_core_mkey mkey; + struct mlx5dr_icm_dm dm; + size_t used_length; + size_t length; + u64 icm_start_addr; + struct list_head mr_list; +}; + +static int dr_icm_create_dm_mkey(struct mlx5_core_dev *mdev, + u32 pd, u64 length, u64 start_addr, int mode, + struct mlx5_core_mkey *mkey) +{ + u32 inlen = MLX5_ST_SZ_BYTES(create_mkey_in); + u32 in[MLX5_ST_SZ_DW(create_mkey_in)] = {}; + void *mkc; + + mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); + + MLX5_SET(mkc, mkc, access_mode_1_0, mode); + MLX5_SET(mkc, mkc, access_mode_4_2, (mode >> 2) & 0x7); + MLX5_SET(mkc, mkc, lw, 1); + MLX5_SET(mkc, mkc, lr, 1); + if (mode == MLX5_MKC_ACCESS_MODE_SW_ICM) { + MLX5_SET(mkc, mkc, rw, 1); + MLX5_SET(mkc, mkc, rr, 1); + } + + MLX5_SET64(mkc, mkc, len, length); + MLX5_SET(mkc, mkc, pd, pd); + MLX5_SET(mkc, mkc, qpn, 0xffffff); + MLX5_SET64(mkc, mkc, start_addr, start_addr); + + return mlx5_core_create_mkey(mdev, mkey, in, inlen); +} + +static struct mlx5dr_icm_mr * +dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool, + enum mlx5_sw_icm_type type, + size_t align_base) +{ + struct mlx5_core_dev *mdev = pool->dmn->mdev; + struct mlx5dr_icm_mr *icm_mr; + size_t align_diff; + int err; + + icm_mr = kvzalloc(sizeof(*icm_mr), GFP_KERNEL); + if (!icm_mr) + return NULL; + + icm_mr->pool = pool; + INIT_LIST_HEAD(&icm_mr->mr_list); + + icm_mr->dm.type = type; + + /* 2^log_biggest_table * entry-size * double-for-alignment */ + icm_mr->dm.length = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz, + pool->icm_type) * 2; + + err = mlx5_dm_sw_icm_alloc(mdev, icm_mr->dm.type, icm_mr->dm.length, 0, + &icm_mr->dm.addr, &icm_mr->dm.obj_id); + if (err) { + mlx5dr_err(pool->dmn, "Failed to allocate SW ICM memory, err (%d)\n", err); + goto free_icm_mr; + } + + /* Register device memory */ + err = dr_icm_create_dm_mkey(mdev, pool->dmn->pdn, + icm_mr->dm.length, + icm_mr->dm.addr, + MLX5_MKC_ACCESS_MODE_SW_ICM, + &icm_mr->mkey); + if (err) { + mlx5dr_err(pool->dmn, "Failed to create SW ICM MKEY, err 
(%d)\n", err); + goto free_dm; + } + + icm_mr->icm_start_addr = icm_mr->dm.addr; + + align_diff = icm_mr->icm_start_addr % align_base; + if (align_diff) + icm_mr->used_length = align_base - align_diff; + + list_add_tail(&icm_mr->mr_list, &pool->icm_mr_list); + + return icm_mr; + +free_dm: + mlx5_dm_sw_icm_dealloc(mdev, icm_mr->dm.type, icm_mr->dm.length, 0, + icm_mr->dm.addr, icm_mr->dm.obj_id); +free_icm_mr: + kvfree(icm_mr); + return NULL; +} + +static void dr_icm_pool_mr_destroy(struct mlx5dr_icm_mr *icm_mr) +{ + struct mlx5_core_dev *mdev = icm_mr->pool->dmn->mdev; + struct mlx5dr_icm_dm *dm = &icm_mr->dm; + + list_del(&icm_mr->mr_list); + mlx5_core_destroy_mkey(mdev, &icm_mr->mkey); + mlx5_dm_sw_icm_dealloc(mdev, dm->type, dm->length, 0, + dm->addr, dm->obj_id); + kvfree(icm_mr); +} + +static int dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk) +{ + struct mlx5dr_icm_bucket *bucket = chunk->bucket; + + chunk->ste_arr = kvzalloc(bucket->num_of_entries * + sizeof(chunk->ste_arr[0]), GFP_KERNEL); + if (!chunk->ste_arr) + return -ENOMEM; + + chunk->hw_ste_arr = kvzalloc(bucket->num_of_entries * + DR_STE_SIZE_REDUCED, GFP_KERNEL); + if (!chunk->hw_ste_arr) + goto out_free_ste_arr; + + chunk->miss_list = kvmalloc(bucket->num_of_entries * + sizeof(chunk->miss_list[0]), GFP_KERNEL); + if (!chunk->miss_list) + goto out_free_hw_ste_arr; + + return 0; + +out_free_hw_ste_arr: + kvfree(chunk->hw_ste_arr); +out_free_ste_arr: + kvfree(chunk->ste_arr); + return -ENOMEM; +} + +static int dr_icm_chunks_create(struct mlx5dr_icm_bucket *bucket) +{ + size_t mr_free_size, mr_req_size, mr_row_size; + struct mlx5dr_icm_pool *pool = bucket->pool; + struct mlx5dr_icm_mr *icm_mr = NULL; + struct mlx5dr_icm_chunk *chunk; + enum mlx5_sw_icm_type dm_type; + size_t align_base; + int i, err = 0; + + mr_req_size = bucket->num_of_entries * bucket->entry_size; + mr_row_size = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz, + pool->icm_type); + + if (pool->icm_type == DR_ICM_TYPE_STE) { + dm_type = MLX5_SW_ICM_TYPE_STEERING; + /* Align base is the biggest chunk size / row size */ + align_base = mr_row_size; + } else { + dm_type = MLX5_SW_ICM_TYPE_HEADER_MODIFY; + /* Align base is 64B */ + align_base = DR_ICM_MODIFY_HDR_ALIGN_BASE; + } + + mutex_lock(&pool->mr_mutex); + if (!list_empty(&pool->icm_mr_list)) { + icm_mr = list_last_entry(&pool->icm_mr_list, + struct mlx5dr_icm_mr, mr_list); + + if (icm_mr) + mr_free_size = icm_mr->dm.length - icm_mr->used_length; + } + + if (!icm_mr || mr_free_size < mr_row_size) { + icm_mr = dr_icm_pool_mr_create(pool, dm_type, align_base); + if (!icm_mr) { + err = -ENOMEM; + goto out_err; + } + } + + /* Create memory aligned chunks */ + for (i = 0; i < mr_row_size / mr_req_size; i++) { + chunk = kvzalloc(sizeof(*chunk), GFP_KERNEL); + if (!chunk) { + err = -ENOMEM; + goto out_err; + } + + chunk->bucket = bucket; + chunk->rkey = icm_mr->mkey.key; + /* mr start addr is zero based */ + chunk->mr_addr = icm_mr->used_length; + chunk->icm_addr = (uintptr_t)icm_mr->icm_start_addr + icm_mr->used_length; + icm_mr->used_length += mr_req_size; + chunk->num_of_entries = bucket->num_of_entries; + chunk->byte_size = chunk->num_of_entries * bucket->entry_size; + + if (pool->icm_type == DR_ICM_TYPE_STE) { + err = dr_icm_chunk_ste_init(chunk); + if (err) + goto out_free_chunk; + } + + INIT_LIST_HEAD(&chunk->chunk_list); + list_add(&chunk->chunk_list, &bucket->free_list); + bucket->free_list_count++; + bucket->total_chunks++; + } + mutex_unlock(&pool->mr_mutex); + return 0; + 
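+	/* error unwind: only the chunk that failed to init is freed here,
+	 * chunks already linked to the bucket free list remain usable
+	 */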
+out_free_chunk: + kvfree(chunk); +out_err: + mutex_unlock(&pool->mr_mutex); + return err; +} + +static void dr_icm_chunk_ste_cleanup(struct mlx5dr_icm_chunk *chunk) +{ + kvfree(chunk->miss_list); + kvfree(chunk->hw_ste_arr); + kvfree(chunk->ste_arr); +} + +static void dr_icm_chunk_destroy(struct mlx5dr_icm_chunk *chunk) +{ + struct mlx5dr_icm_bucket *bucket = chunk->bucket; + + list_del(&chunk->chunk_list); + bucket->total_chunks--; + + if (bucket->pool->icm_type == DR_ICM_TYPE_STE) + dr_icm_chunk_ste_cleanup(chunk); + + kvfree(chunk); +} + +static void dr_icm_bucket_init(struct mlx5dr_icm_pool *pool, + struct mlx5dr_icm_bucket *bucket, + enum mlx5dr_icm_chunk_size chunk_size) +{ + if (pool->icm_type == DR_ICM_TYPE_STE) + bucket->entry_size = DR_STE_SIZE; + else + bucket->entry_size = DR_MODIFY_ACTION_SIZE; + + bucket->num_of_entries = mlx5dr_icm_pool_chunk_size_to_entries(chunk_size); + bucket->pool = pool; + mutex_init(&bucket->mutex); + INIT_LIST_HEAD(&bucket->free_list); + INIT_LIST_HEAD(&bucket->used_list); + INIT_LIST_HEAD(&bucket->hot_list); + INIT_LIST_HEAD(&bucket->sync_list); +} + +static void dr_icm_bucket_cleanup(struct mlx5dr_icm_bucket *bucket) +{ + struct mlx5dr_icm_chunk *chunk, *next; + + mutex_destroy(&bucket->mutex); + list_splice_tail_init(&bucket->sync_list, &bucket->free_list); + list_splice_tail_init(&bucket->hot_list, &bucket->free_list); + + list_for_each_entry_safe(chunk, next, &bucket->free_list, chunk_list) + dr_icm_chunk_destroy(chunk); + + WARN_ON(bucket->total_chunks != 0); + + /* Cleanup of unreturned chunks */ + list_for_each_entry_safe(chunk, next, &bucket->used_list, chunk_list) + dr_icm_chunk_destroy(chunk); +} + +static u64 dr_icm_hot_mem_size(struct mlx5dr_icm_pool *pool) +{ + u64 hot_size = 0; + int chunk_order; + + for (chunk_order = 0; chunk_order < pool->num_of_buckets; chunk_order++) + hot_size += pool->buckets[chunk_order].hot_list_count * + mlx5dr_icm_pool_chunk_size_to_byte(chunk_order, pool->icm_type); + + return hot_size; +} + +static bool dr_icm_reuse_hot_entries(struct mlx5dr_icm_pool *pool, + struct mlx5dr_icm_bucket *bucket) +{ + u64 bytes_for_sync; + + bytes_for_sync = dr_icm_hot_mem_size(pool); + if (bytes_for_sync < DR_ICM_SYNC_THRESHOLD || !bucket->hot_list_count) + return false; + + return true; +} + +static void dr_icm_chill_bucket_start(struct mlx5dr_icm_bucket *bucket) +{ + list_splice_tail_init(&bucket->hot_list, &bucket->sync_list); + bucket->sync_list_count += bucket->hot_list_count; + bucket->hot_list_count = 0; +} + +static void dr_icm_chill_bucket_end(struct mlx5dr_icm_bucket *bucket) +{ + list_splice_tail_init(&bucket->sync_list, &bucket->free_list); + bucket->free_list_count += bucket->sync_list_count; + bucket->sync_list_count = 0; +} + +static void dr_icm_chill_bucket_abort(struct mlx5dr_icm_bucket *bucket) +{ + list_splice_tail_init(&bucket->sync_list, &bucket->hot_list); + bucket->hot_list_count += bucket->sync_list_count; + bucket->sync_list_count = 0; +} + +static void dr_icm_chill_buckets_start(struct mlx5dr_icm_pool *pool, + struct mlx5dr_icm_bucket *cb, + bool buckets[DR_CHUNK_SIZE_MAX]) +{ + struct mlx5dr_icm_bucket *bucket; + int i; + + for (i = 0; i < pool->num_of_buckets; i++) { + bucket = &pool->buckets[i]; + if (bucket == cb) { + dr_icm_chill_bucket_start(bucket); + continue; + } + + /* Freeing the mutex is done at the end of that process, after + * sync_ste was executed at dr_icm_chill_buckets_end func. 
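+		 * Buckets whose mutex could not be taken are skipped and
+		 * keep their hot list untouched.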
+ */ + if (mutex_trylock(&bucket->mutex)) { + dr_icm_chill_bucket_start(bucket); + buckets[i] = true; + } + } +} + +static void dr_icm_chill_buckets_end(struct mlx5dr_icm_pool *pool, + struct mlx5dr_icm_bucket *cb, + bool buckets[DR_CHUNK_SIZE_MAX]) +{ + struct mlx5dr_icm_bucket *bucket; + int i; + + for (i = 0; i < pool->num_of_buckets; i++) { + bucket = &pool->buckets[i]; + if (bucket == cb) { + dr_icm_chill_bucket_end(bucket); + continue; + } + + if (!buckets[i]) + continue; + + dr_icm_chill_bucket_end(bucket); + mutex_unlock(&bucket->mutex); + } +} + +static void dr_icm_chill_buckets_abort(struct mlx5dr_icm_pool *pool, + struct mlx5dr_icm_bucket *cb, + bool buckets[DR_CHUNK_SIZE_MAX]) +{ + struct mlx5dr_icm_bucket *bucket; + int i; + + for (i = 0; i < pool->num_of_buckets; i++) { + bucket = &pool->buckets[i]; + if (bucket == cb) { + dr_icm_chill_bucket_abort(bucket); + continue; + } + + if (!buckets[i]) + continue; + + dr_icm_chill_bucket_abort(bucket); + mutex_unlock(&bucket->mutex); + } +} + +/* Allocate an ICM chunk, each chunk holds a piece of ICM memory and + * also memory used for HW STE management for optimizations. + */ +struct mlx5dr_icm_chunk * +mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool, + enum mlx5dr_icm_chunk_size chunk_size) +{ + struct mlx5dr_icm_chunk *chunk = NULL; /* Fix compilation warning */ + bool buckets[DR_CHUNK_SIZE_MAX] = {}; + struct mlx5dr_icm_bucket *bucket; + int err; + + if (chunk_size > pool->max_log_chunk_sz) + return NULL; + + bucket = &pool->buckets[chunk_size]; + + mutex_lock(&bucket->mutex); + + /* Take chunk from pool if available, otherwise allocate new chunks */ + if (list_empty(&bucket->free_list)) { + if (dr_icm_reuse_hot_entries(pool, bucket)) { + dr_icm_chill_buckets_start(pool, bucket, buckets); + err = mlx5dr_cmd_sync_steering(pool->dmn->mdev); + if (err) { + dr_icm_chill_buckets_abort(pool, bucket, buckets); + mlx5dr_dbg(pool->dmn, "Sync_steering failed\n"); + chunk = NULL; + goto out; + } + dr_icm_chill_buckets_end(pool, bucket, buckets); + } else { + dr_icm_chunks_create(bucket); + } + } + + if (!list_empty(&bucket->free_list)) { + chunk = list_last_entry(&bucket->free_list, + struct mlx5dr_icm_chunk, + chunk_list); + if (chunk) { + list_del_init(&chunk->chunk_list); + list_add_tail(&chunk->chunk_list, &bucket->used_list); + bucket->free_list_count--; + bucket->used_list_count++; + } + } +out: + mutex_unlock(&bucket->mutex); + return chunk; +} + +void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk) +{ + struct mlx5dr_icm_bucket *bucket = chunk->bucket; + + if (bucket->pool->icm_type == DR_ICM_TYPE_STE) { + memset(chunk->ste_arr, 0, + bucket->num_of_entries * sizeof(chunk->ste_arr[0])); + memset(chunk->hw_ste_arr, 0, + bucket->num_of_entries * DR_STE_SIZE_REDUCED); + } + + mutex_lock(&bucket->mutex); + list_del_init(&chunk->chunk_list); + list_add_tail(&chunk->chunk_list, &bucket->hot_list); + bucket->hot_list_count++; + bucket->used_list_count--; + mutex_unlock(&bucket->mutex); +} + +struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn, + enum mlx5dr_icm_type icm_type) +{ + enum mlx5dr_icm_chunk_size max_log_chunk_sz; + struct mlx5dr_icm_pool *pool; + int i; + + if (icm_type == DR_ICM_TYPE_STE) + max_log_chunk_sz = dmn->info.max_log_sw_icm_sz; + else + max_log_chunk_sz = dmn->info.max_log_action_icm_sz; + + pool = kvzalloc(sizeof(*pool), GFP_KERNEL); + if (!pool) + return NULL; + + pool->buckets = kcalloc(max_log_chunk_sz + 1, + sizeof(pool->buckets[0]), + GFP_KERNEL); + if (!pool->buckets) + goto 
free_pool; + + pool->dmn = dmn; + pool->icm_type = icm_type; + pool->max_log_chunk_sz = max_log_chunk_sz; + pool->num_of_buckets = max_log_chunk_sz + 1; + INIT_LIST_HEAD(&pool->icm_mr_list); + + for (i = 0; i < pool->num_of_buckets; i++) + dr_icm_bucket_init(pool, &pool->buckets[i], i); + + mutex_init(&pool->mr_mutex); + + return pool; + +free_pool: + kvfree(pool); + return NULL; +} + +void mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool *pool) +{ + struct mlx5dr_icm_mr *icm_mr, *next; + int i; + + mutex_destroy(&pool->mr_mutex); + + list_for_each_entry_safe(icm_mr, next, &pool->icm_mr_list, mr_list) + dr_icm_pool_mr_destroy(icm_mr); + + for (i = 0; i < pool->num_of_buckets; i++) + dr_icm_bucket_cleanup(&pool->buckets[i]); + + kfree(pool->buckets); + kvfree(pool); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c new file mode 100644 index 000000000000..01008cd66f75 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c @@ -0,0 +1,770 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2019 Mellanox Technologies. */ + +#include "dr_types.h" + +static bool dr_mask_is_smac_set(struct mlx5dr_match_spec *spec) +{ + return (spec->smac_47_16 || spec->smac_15_0); +} + +static bool dr_mask_is_dmac_set(struct mlx5dr_match_spec *spec) +{ + return (spec->dmac_47_16 || spec->dmac_15_0); +} + +static bool dr_mask_is_src_addr_set(struct mlx5dr_match_spec *spec) +{ + return (spec->src_ip_127_96 || spec->src_ip_95_64 || + spec->src_ip_63_32 || spec->src_ip_31_0); +} + +static bool dr_mask_is_dst_addr_set(struct mlx5dr_match_spec *spec) +{ + return (spec->dst_ip_127_96 || spec->dst_ip_95_64 || + spec->dst_ip_63_32 || spec->dst_ip_31_0); +} + +static bool dr_mask_is_l3_base_set(struct mlx5dr_match_spec *spec) +{ + return (spec->ip_protocol || spec->frag || spec->tcp_flags || + spec->ip_ecn || spec->ip_dscp); +} + +static bool dr_mask_is_tcp_udp_base_set(struct mlx5dr_match_spec *spec) +{ + return (spec->tcp_sport || spec->tcp_dport || + spec->udp_sport || spec->udp_dport); +} + +static bool dr_mask_is_ipv4_set(struct mlx5dr_match_spec *spec) +{ + return (spec->dst_ip_31_0 || spec->src_ip_31_0); +} + +static bool dr_mask_is_ipv4_5_tuple_set(struct mlx5dr_match_spec *spec) +{ + return (dr_mask_is_l3_base_set(spec) || + dr_mask_is_tcp_udp_base_set(spec) || + dr_mask_is_ipv4_set(spec)); +} + +static bool dr_mask_is_eth_l2_tnl_set(struct mlx5dr_match_misc *misc) +{ + return misc->vxlan_vni; +} + +static bool dr_mask_is_ttl_set(struct mlx5dr_match_spec *spec) +{ + return spec->ttl_hoplimit; +} + +#define DR_MASK_IS_L2_DST(_spec, _misc, _inner_outer) (_spec.first_vid || \ + (_spec).first_cfi || (_spec).first_prio || (_spec).cvlan_tag || \ + (_spec).svlan_tag || (_spec).dmac_47_16 || (_spec).dmac_15_0 || \ + (_spec).ethertype || (_spec).ip_version || \ + (_misc)._inner_outer##_second_vid || \ + (_misc)._inner_outer##_second_cfi || \ + (_misc)._inner_outer##_second_prio || \ + (_misc)._inner_outer##_second_cvlan_tag || \ + (_misc)._inner_outer##_second_svlan_tag) + +#define DR_MASK_IS_ETH_L4_SET(_spec, _misc, _inner_outer) ( \ + dr_mask_is_l3_base_set(&(_spec)) || \ + dr_mask_is_tcp_udp_base_set(&(_spec)) || \ + dr_mask_is_ttl_set(&(_spec)) || \ + (_misc)._inner_outer##_ipv6_flow_label) + +#define DR_MASK_IS_ETH_L4_MISC_SET(_misc3, _inner_outer) ( \ + (_misc3)._inner_outer##_tcp_seq_num || \ + (_misc3)._inner_outer##_tcp_ack_num) + +#define DR_MASK_IS_FIRST_MPLS_SET(_misc2, 
_inner_outer) ( \ + (_misc2)._inner_outer##_first_mpls_label || \ + (_misc2)._inner_outer##_first_mpls_exp || \ + (_misc2)._inner_outer##_first_mpls_s_bos || \ + (_misc2)._inner_outer##_first_mpls_ttl) + +static bool dr_mask_is_gre_set(struct mlx5dr_match_misc *misc) +{ + return (misc->gre_key_h || misc->gre_key_l || + misc->gre_protocol || misc->gre_c_present || + misc->gre_k_present || misc->gre_s_present); +} + +#define DR_MASK_IS_OUTER_MPLS_OVER_GRE_UDP_SET(_misc2, gre_udp) ( \ + (_misc2).outer_first_mpls_over_##gre_udp##_label || \ + (_misc2).outer_first_mpls_over_##gre_udp##_exp || \ + (_misc2).outer_first_mpls_over_##gre_udp##_s_bos || \ + (_misc2).outer_first_mpls_over_##gre_udp##_ttl) + +#define DR_MASK_IS_FLEX_PARSER_0_SET(_misc2) ( \ + DR_MASK_IS_OUTER_MPLS_OVER_GRE_UDP_SET((_misc2), gre) || \ + DR_MASK_IS_OUTER_MPLS_OVER_GRE_UDP_SET((_misc2), udp)) + +static bool dr_mask_is_flex_parser_tnl_set(struct mlx5dr_match_misc3 *misc3) +{ + return (misc3->outer_vxlan_gpe_vni || + misc3->outer_vxlan_gpe_next_protocol || + misc3->outer_vxlan_gpe_flags); +} + +static bool dr_mask_is_flex_parser_icmpv6_set(struct mlx5dr_match_misc3 *misc3) +{ + return (misc3->icmpv6_type || misc3->icmpv6_code || + misc3->icmpv6_header_data); +} + +static bool dr_mask_is_wqe_metadata_set(struct mlx5dr_match_misc2 *misc2) +{ + return misc2->metadata_reg_a; +} + +static bool dr_mask_is_reg_c_0_3_set(struct mlx5dr_match_misc2 *misc2) +{ + return (misc2->metadata_reg_c_0 || misc2->metadata_reg_c_1 || + misc2->metadata_reg_c_2 || misc2->metadata_reg_c_3); +} + +static bool dr_mask_is_reg_c_4_7_set(struct mlx5dr_match_misc2 *misc2) +{ + return (misc2->metadata_reg_c_4 || misc2->metadata_reg_c_5 || + misc2->metadata_reg_c_6 || misc2->metadata_reg_c_7); +} + +static bool dr_mask_is_gvmi_or_qpn_set(struct mlx5dr_match_misc *misc) +{ + return (misc->source_sqn || misc->source_port); +} + +static bool +dr_matcher_supp_flex_parser_vxlan_gpe(struct mlx5dr_domain *dmn) +{ + return dmn->info.caps.flex_protocols & + MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED; +} + +int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher, + struct mlx5dr_matcher_rx_tx *nic_matcher, + bool ipv6) +{ + if (ipv6) { + nic_matcher->ste_builder = nic_matcher->ste_builder6; + nic_matcher->num_of_builders = nic_matcher->num_of_builders6; + } else { + nic_matcher->ste_builder = nic_matcher->ste_builder4; + nic_matcher->num_of_builders = nic_matcher->num_of_builders4; + } + + if (!nic_matcher->num_of_builders) { + mlx5dr_dbg(matcher->tbl->dmn, + "Rule not supported on this matcher due to IP related fields\n"); + return -EINVAL; + } + + return 0; +} + +static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, + struct mlx5dr_matcher_rx_tx *nic_matcher, + bool ipv6) +{ + struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn; + struct mlx5dr_domain *dmn = matcher->tbl->dmn; + struct mlx5dr_match_param mask = {}; + struct mlx5dr_match_misc3 *misc3; + struct mlx5dr_ste_build *sb; + u8 *num_of_builders; + bool inner, rx; + int idx = 0; + int ret, i; + + if (ipv6) { + sb = nic_matcher->ste_builder6; + num_of_builders = &nic_matcher->num_of_builders6; + } else { + sb = nic_matcher->ste_builder4; + num_of_builders = &nic_matcher->num_of_builders4; + } + + rx = nic_dmn->ste_type == MLX5DR_STE_TYPE_RX; + + /* Create a temporary mask to track and clear used mask fields */ + if (matcher->match_criteria & DR_MATCHER_CRITERIA_OUTER) + mask.outer = matcher->mask.outer; + + if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC) + mask.misc 
= matcher->mask.misc; + + if (matcher->match_criteria & DR_MATCHER_CRITERIA_INNER) + mask.inner = matcher->mask.inner; + + if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC2) + mask.misc2 = matcher->mask.misc2; + + if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC3) + mask.misc3 = matcher->mask.misc3; + + ret = mlx5dr_ste_build_pre_check(dmn, matcher->match_criteria, + &matcher->mask, NULL); + if (ret) + return ret; + + /* Outer */ + if (matcher->match_criteria & (DR_MATCHER_CRITERIA_OUTER | + DR_MATCHER_CRITERIA_MISC | + DR_MATCHER_CRITERIA_MISC2 | + DR_MATCHER_CRITERIA_MISC3)) { + inner = false; + + if (dr_mask_is_wqe_metadata_set(&mask.misc2)) + mlx5dr_ste_build_general_purpose(&sb[idx++], &mask, inner, rx); + + if (dr_mask_is_reg_c_0_3_set(&mask.misc2)) + mlx5dr_ste_build_register_0(&sb[idx++], &mask, inner, rx); + + if (dr_mask_is_reg_c_4_7_set(&mask.misc2)) + mlx5dr_ste_build_register_1(&sb[idx++], &mask, inner, rx); + + if (dr_mask_is_gvmi_or_qpn_set(&mask.misc) && + (dmn->type == MLX5DR_DOMAIN_TYPE_FDB || + dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX)) { + ret = mlx5dr_ste_build_src_gvmi_qpn(&sb[idx++], &mask, + &dmn->info.caps, + inner, rx); + if (ret) + return ret; + } + + if (dr_mask_is_smac_set(&mask.outer) && + dr_mask_is_dmac_set(&mask.outer)) { + ret = mlx5dr_ste_build_eth_l2_src_des(&sb[idx++], &mask, + inner, rx); + if (ret) + return ret; + } + + if (dr_mask_is_smac_set(&mask.outer)) + mlx5dr_ste_build_eth_l2_src(&sb[idx++], &mask, inner, rx); + + if (DR_MASK_IS_L2_DST(mask.outer, mask.misc, outer)) + mlx5dr_ste_build_eth_l2_dst(&sb[idx++], &mask, inner, rx); + + if (ipv6) { + if (dr_mask_is_dst_addr_set(&mask.outer)) + mlx5dr_ste_build_eth_l3_ipv6_dst(&sb[idx++], &mask, + inner, rx); + + if (dr_mask_is_src_addr_set(&mask.outer)) + mlx5dr_ste_build_eth_l3_ipv6_src(&sb[idx++], &mask, + inner, rx); + + if (DR_MASK_IS_ETH_L4_SET(mask.outer, mask.misc, outer)) + mlx5dr_ste_build_ipv6_l3_l4(&sb[idx++], &mask, + inner, rx); + } else { + if (dr_mask_is_ipv4_5_tuple_set(&mask.outer)) + mlx5dr_ste_build_eth_l3_ipv4_5_tuple(&sb[idx++], &mask, + inner, rx); + + if (dr_mask_is_ttl_set(&mask.outer)) + mlx5dr_ste_build_eth_l3_ipv4_misc(&sb[idx++], &mask, + inner, rx); + } + + if (dr_mask_is_flex_parser_tnl_set(&mask.misc3) && + dr_matcher_supp_flex_parser_vxlan_gpe(dmn)) + mlx5dr_ste_build_flex_parser_tnl(&sb[idx++], &mask, + inner, rx); + + if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, outer)) + mlx5dr_ste_build_eth_l4_misc(&sb[idx++], &mask, inner, rx); + + if (DR_MASK_IS_FIRST_MPLS_SET(mask.misc2, outer)) + mlx5dr_ste_build_mpls(&sb[idx++], &mask, inner, rx); + + if (DR_MASK_IS_FLEX_PARSER_0_SET(mask.misc2)) + mlx5dr_ste_build_flex_parser_0(&sb[idx++], &mask, + inner, rx); + + misc3 = &mask.misc3; + if ((DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(misc3) && + mlx5dr_matcher_supp_flex_parser_icmp_v4(&dmn->info.caps)) || + (dr_mask_is_flex_parser_icmpv6_set(&mask.misc3) && + mlx5dr_matcher_supp_flex_parser_icmp_v6(&dmn->info.caps))) { + ret = mlx5dr_ste_build_flex_parser_1(&sb[idx++], + &mask, &dmn->info.caps, + inner, rx); + if (ret) + return ret; + } + if (dr_mask_is_gre_set(&mask.misc)) + mlx5dr_ste_build_gre(&sb[idx++], &mask, inner, rx); + } + + /* Inner */ + if (matcher->match_criteria & (DR_MATCHER_CRITERIA_INNER | + DR_MATCHER_CRITERIA_MISC | + DR_MATCHER_CRITERIA_MISC2 | + DR_MATCHER_CRITERIA_MISC3)) { + inner = true; + + if (dr_mask_is_eth_l2_tnl_set(&mask.misc)) + mlx5dr_ste_build_eth_l2_tnl(&sb[idx++], &mask, inner, rx); + + if (dr_mask_is_smac_set(&mask.inner) && + 
dr_mask_is_dmac_set(&mask.inner)) { + ret = mlx5dr_ste_build_eth_l2_src_des(&sb[idx++], + &mask, inner, rx); + if (ret) + return ret; + } + + if (dr_mask_is_smac_set(&mask.inner)) + mlx5dr_ste_build_eth_l2_src(&sb[idx++], &mask, inner, rx); + + if (DR_MASK_IS_L2_DST(mask.inner, mask.misc, inner)) + mlx5dr_ste_build_eth_l2_dst(&sb[idx++], &mask, inner, rx); + + if (ipv6) { + if (dr_mask_is_dst_addr_set(&mask.inner)) + mlx5dr_ste_build_eth_l3_ipv6_dst(&sb[idx++], &mask, + inner, rx); + + if (dr_mask_is_src_addr_set(&mask.inner)) + mlx5dr_ste_build_eth_l3_ipv6_src(&sb[idx++], &mask, + inner, rx); + + if (DR_MASK_IS_ETH_L4_SET(mask.inner, mask.misc, inner)) + mlx5dr_ste_build_ipv6_l3_l4(&sb[idx++], &mask, + inner, rx); + } else { + if (dr_mask_is_ipv4_5_tuple_set(&mask.inner)) + mlx5dr_ste_build_eth_l3_ipv4_5_tuple(&sb[idx++], &mask, + inner, rx); + + if (dr_mask_is_ttl_set(&mask.inner)) + mlx5dr_ste_build_eth_l3_ipv4_misc(&sb[idx++], &mask, + inner, rx); + } + + if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, inner)) + mlx5dr_ste_build_eth_l4_misc(&sb[idx++], &mask, inner, rx); + + if (DR_MASK_IS_FIRST_MPLS_SET(mask.misc2, inner)) + mlx5dr_ste_build_mpls(&sb[idx++], &mask, inner, rx); + + if (DR_MASK_IS_FLEX_PARSER_0_SET(mask.misc2)) + mlx5dr_ste_build_flex_parser_0(&sb[idx++], &mask, inner, rx); + } + /* Empty matcher, takes all */ + if (matcher->match_criteria == DR_MATCHER_CRITERIA_EMPTY) + mlx5dr_ste_build_empty_always_hit(&sb[idx++], rx); + + if (idx == 0) { + mlx5dr_dbg(dmn, "Cannot generate any valid rules from mask\n"); + return -EINVAL; + } + + /* Check that all mask fields were consumed */ + for (i = 0; i < sizeof(struct mlx5dr_match_param); i++) { + if (((u8 *)&mask)[i] != 0) { + mlx5dr_info(dmn, "Mask contains unsupported parameters\n"); + return -EOPNOTSUPP; + } + } + + *num_of_builders = idx; + + return 0; +} + +static int dr_matcher_connect(struct mlx5dr_domain *dmn, + struct mlx5dr_matcher_rx_tx *curr_nic_matcher, + struct mlx5dr_matcher_rx_tx *next_nic_matcher, + struct mlx5dr_matcher_rx_tx *prev_nic_matcher) +{ + struct mlx5dr_table_rx_tx *nic_tbl = curr_nic_matcher->nic_tbl; + struct mlx5dr_domain_rx_tx *nic_dmn = nic_tbl->nic_dmn; + struct mlx5dr_htbl_connect_info info; + struct mlx5dr_ste_htbl *prev_htbl; + int ret; + + /* Connect end anchor hash table to next_htbl or to the default address */ + if (next_nic_matcher) { + info.type = CONNECT_HIT; + info.hit_next_htbl = next_nic_matcher->s_htbl; + } else { + info.type = CONNECT_MISS; + info.miss_icm_addr = nic_tbl->default_icm_addr; + } + ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, + curr_nic_matcher->e_anchor, + &info, info.type == CONNECT_HIT); + if (ret) + return ret; + + /* Connect start hash table to end anchor */ + info.type = CONNECT_MISS; + info.miss_icm_addr = curr_nic_matcher->e_anchor->chunk->icm_addr; + ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, + curr_nic_matcher->s_htbl, + &info, false); + if (ret) + return ret; + + /* Connect previous hash table to matcher start hash table */ + if (prev_nic_matcher) + prev_htbl = prev_nic_matcher->e_anchor; + else + prev_htbl = nic_tbl->s_anchor; + + info.type = CONNECT_HIT; + info.hit_next_htbl = curr_nic_matcher->s_htbl; + ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, prev_htbl, + &info, true); + if (ret) + return ret; + + /* Update the pointing ste and next hash table */ + curr_nic_matcher->s_htbl->pointing_ste = prev_htbl->ste_arr; + prev_htbl->ste_arr[0].next_htbl = curr_nic_matcher->s_htbl; + + if (next_nic_matcher) { + 
next_nic_matcher->s_htbl->pointing_ste = curr_nic_matcher->e_anchor->ste_arr; + curr_nic_matcher->e_anchor->ste_arr[0].next_htbl = next_nic_matcher->s_htbl; + } + + return 0; +} + +static int dr_matcher_add_to_tbl(struct mlx5dr_matcher *matcher) +{ + struct mlx5dr_matcher *next_matcher, *prev_matcher, *tmp_matcher; + struct mlx5dr_table *tbl = matcher->tbl; + struct mlx5dr_domain *dmn = tbl->dmn; + bool first = true; + int ret; + + next_matcher = NULL; + if (!list_empty(&tbl->matcher_list)) + list_for_each_entry(tmp_matcher, &tbl->matcher_list, matcher_list) { + if (tmp_matcher->prio >= matcher->prio) { + next_matcher = tmp_matcher; + break; + } + first = false; + } + + prev_matcher = NULL; + if (next_matcher && !first) + prev_matcher = list_entry(next_matcher->matcher_list.prev, + struct mlx5dr_matcher, + matcher_list); + else if (!first) + prev_matcher = list_entry(tbl->matcher_list.prev, + struct mlx5dr_matcher, + matcher_list); + + if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB || + dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX) { + ret = dr_matcher_connect(dmn, &matcher->rx, + next_matcher ? &next_matcher->rx : NULL, + prev_matcher ? &prev_matcher->rx : NULL); + if (ret) + return ret; + } + + if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB || + dmn->type == MLX5DR_DOMAIN_TYPE_NIC_TX) { + ret = dr_matcher_connect(dmn, &matcher->tx, + next_matcher ? &next_matcher->tx : NULL, + prev_matcher ? &prev_matcher->tx : NULL); + if (ret) + return ret; + } + + if (prev_matcher) + list_add(&matcher->matcher_list, &prev_matcher->matcher_list); + else if (next_matcher) + list_add_tail(&matcher->matcher_list, + &next_matcher->matcher_list); + else + list_add(&matcher->matcher_list, &tbl->matcher_list); + + return 0; +} + +static void dr_matcher_uninit_nic(struct mlx5dr_matcher_rx_tx *nic_matcher) +{ + mlx5dr_htbl_put(nic_matcher->s_htbl); + mlx5dr_htbl_put(nic_matcher->e_anchor); +} + +static void dr_matcher_uninit_fdb(struct mlx5dr_matcher *matcher) +{ + dr_matcher_uninit_nic(&matcher->rx); + dr_matcher_uninit_nic(&matcher->tx); +} + +static void dr_matcher_uninit(struct mlx5dr_matcher *matcher) +{ + struct mlx5dr_domain *dmn = matcher->tbl->dmn; + + switch (dmn->type) { + case MLX5DR_DOMAIN_TYPE_NIC_RX: + dr_matcher_uninit_nic(&matcher->rx); + break; + case MLX5DR_DOMAIN_TYPE_NIC_TX: + dr_matcher_uninit_nic(&matcher->tx); + break; + case MLX5DR_DOMAIN_TYPE_FDB: + dr_matcher_uninit_fdb(matcher); + break; + default: + WARN_ON(true); + break; + } +} + +static int dr_matcher_init_nic(struct mlx5dr_matcher *matcher, + struct mlx5dr_matcher_rx_tx *nic_matcher) +{ + struct mlx5dr_domain *dmn = matcher->tbl->dmn; + int ret, ret_v4, ret_v6; + + ret_v4 = dr_matcher_set_ste_builders(matcher, nic_matcher, false); + ret_v6 = dr_matcher_set_ste_builders(matcher, nic_matcher, true); + + if (ret_v4 && ret_v6) { + mlx5dr_dbg(dmn, "Cannot generate IPv4 or IPv6 rules with given mask\n"); + return -EINVAL; + } + + if (!ret_v4) + nic_matcher->ste_builder = nic_matcher->ste_builder4; + else + nic_matcher->ste_builder = nic_matcher->ste_builder6; + + nic_matcher->e_anchor = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool, + DR_CHUNK_SIZE_1, + MLX5DR_STE_LU_TYPE_DONT_CARE, + 0); + if (!nic_matcher->e_anchor) + return -ENOMEM; + + nic_matcher->s_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool, + DR_CHUNK_SIZE_1, + nic_matcher->ste_builder[0].lu_type, + nic_matcher->ste_builder[0].byte_mask); + if (!nic_matcher->s_htbl) { + ret = -ENOMEM; + goto free_e_htbl; + } + + /* make sure the tables exist while empty */ + mlx5dr_htbl_get(nic_matcher->s_htbl); + 
mlx5dr_htbl_get(nic_matcher->e_anchor); + + return 0; + +free_e_htbl: + mlx5dr_ste_htbl_free(nic_matcher->e_anchor); + return ret; +} + +static int dr_matcher_init_fdb(struct mlx5dr_matcher *matcher) +{ + int ret; + + ret = dr_matcher_init_nic(matcher, &matcher->rx); + if (ret) + return ret; + + ret = dr_matcher_init_nic(matcher, &matcher->tx); + if (ret) + goto uninit_nic_rx; + + return 0; + +uninit_nic_rx: + dr_matcher_uninit_nic(&matcher->rx); + return ret; +} + +static int dr_matcher_init(struct mlx5dr_matcher *matcher, + struct mlx5dr_match_parameters *mask) +{ + struct mlx5dr_table *tbl = matcher->tbl; + struct mlx5dr_domain *dmn = tbl->dmn; + int ret; + + if (matcher->match_criteria >= DR_MATCHER_CRITERIA_MAX) { + mlx5dr_info(dmn, "Invalid match criteria attribute\n"); + return -EINVAL; + } + + if (mask) { + if (mask->match_sz > sizeof(struct mlx5dr_match_param)) { + mlx5dr_info(dmn, "Invalid match size attribute\n"); + return -EINVAL; + } + mlx5dr_ste_copy_param(matcher->match_criteria, + &matcher->mask, mask); + } + + switch (dmn->type) { + case MLX5DR_DOMAIN_TYPE_NIC_RX: + matcher->rx.nic_tbl = &tbl->rx; + ret = dr_matcher_init_nic(matcher, &matcher->rx); + break; + case MLX5DR_DOMAIN_TYPE_NIC_TX: + matcher->tx.nic_tbl = &tbl->tx; + ret = dr_matcher_init_nic(matcher, &matcher->tx); + break; + case MLX5DR_DOMAIN_TYPE_FDB: + matcher->rx.nic_tbl = &tbl->rx; + matcher->tx.nic_tbl = &tbl->tx; + ret = dr_matcher_init_fdb(matcher); + break; + default: + WARN_ON(true); + return -EINVAL; + } + + return ret; +} + +struct mlx5dr_matcher * +mlx5dr_matcher_create(struct mlx5dr_table *tbl, + u16 priority, + u8 match_criteria_enable, + struct mlx5dr_match_parameters *mask) +{ + struct mlx5dr_matcher *matcher; + int ret; + + refcount_inc(&tbl->refcount); + + matcher = kzalloc(sizeof(*matcher), GFP_KERNEL); + if (!matcher) + goto dec_ref; + + matcher->tbl = tbl; + matcher->prio = priority; + matcher->match_criteria = match_criteria_enable; + refcount_set(&matcher->refcount, 1); + INIT_LIST_HEAD(&matcher->matcher_list); + + mutex_lock(&tbl->dmn->mutex); + + ret = dr_matcher_init(matcher, mask); + if (ret) + goto free_matcher; + + ret = dr_matcher_add_to_tbl(matcher); + if (ret) + goto matcher_uninit; + + mutex_unlock(&tbl->dmn->mutex); + + return matcher; + +matcher_uninit: + dr_matcher_uninit(matcher); +free_matcher: + mutex_unlock(&tbl->dmn->mutex); + kfree(matcher); +dec_ref: + refcount_dec(&tbl->refcount); + return NULL; +} + +static int dr_matcher_disconnect(struct mlx5dr_domain *dmn, + struct mlx5dr_table_rx_tx *nic_tbl, + struct mlx5dr_matcher_rx_tx *next_nic_matcher, + struct mlx5dr_matcher_rx_tx *prev_nic_matcher) +{ + struct mlx5dr_domain_rx_tx *nic_dmn = nic_tbl->nic_dmn; + struct mlx5dr_htbl_connect_info info; + struct mlx5dr_ste_htbl *prev_anchor; + + if (prev_nic_matcher) + prev_anchor = prev_nic_matcher->e_anchor; + else + prev_anchor = nic_tbl->s_anchor; + + /* Connect previous anchor hash table to next matcher or to the default address */ + if (next_nic_matcher) { + info.type = CONNECT_HIT; + info.hit_next_htbl = next_nic_matcher->s_htbl; + next_nic_matcher->s_htbl->pointing_ste = prev_anchor->ste_arr; + prev_anchor->ste_arr[0].next_htbl = next_nic_matcher->s_htbl; + } else { + info.type = CONNECT_MISS; + info.miss_icm_addr = nic_tbl->default_icm_addr; + prev_anchor->ste_arr[0].next_htbl = NULL; + } + + return mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, prev_anchor, + &info, true); +} + +static int dr_matcher_remove_from_tbl(struct mlx5dr_matcher *matcher) +{ + struct 
mlx5dr_matcher *prev_matcher, *next_matcher; + struct mlx5dr_table *tbl = matcher->tbl; + struct mlx5dr_domain *dmn = tbl->dmn; + int ret = 0; + + if (list_is_last(&matcher->matcher_list, &tbl->matcher_list)) + next_matcher = NULL; + else + next_matcher = list_next_entry(matcher, matcher_list); + + if (matcher->matcher_list.prev == &tbl->matcher_list) + prev_matcher = NULL; + else + prev_matcher = list_prev_entry(matcher, matcher_list); + + if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB || + dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX) { + ret = dr_matcher_disconnect(dmn, &tbl->rx, + next_matcher ? &next_matcher->rx : NULL, + prev_matcher ? &prev_matcher->rx : NULL); + if (ret) + return ret; + } + + if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB || + dmn->type == MLX5DR_DOMAIN_TYPE_NIC_TX) { + ret = dr_matcher_disconnect(dmn, &tbl->tx, + next_matcher ? &next_matcher->tx : NULL, + prev_matcher ? &prev_matcher->tx : NULL); + if (ret) + return ret; + } + + list_del(&matcher->matcher_list); + + return 0; +} + +int mlx5dr_matcher_destroy(struct mlx5dr_matcher *matcher) +{ + struct mlx5dr_table *tbl = matcher->tbl; + + if (refcount_read(&matcher->refcount) > 1) + return -EBUSY; + + mutex_lock(&tbl->dmn->mutex); + + dr_matcher_remove_from_tbl(matcher); + dr_matcher_uninit(matcher); + refcount_dec(&matcher->tbl->refcount); + + mutex_unlock(&tbl->dmn->mutex); + kfree(matcher); + + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c new file mode 100644 index 000000000000..3bc3f66b8fa8 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c @@ -0,0 +1,1243 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2019 Mellanox Technologies. */ + +#include "dr_types.h" + +#define DR_RULE_MAX_STE_CHAIN (DR_RULE_MAX_STES + DR_ACTION_MAX_STES) + +struct mlx5dr_rule_action_member { + struct mlx5dr_action *action; + struct list_head list; +}; + +static int dr_rule_append_to_miss_list(struct mlx5dr_ste *new_last_ste, + struct list_head *miss_list, + struct list_head *send_list) +{ + struct mlx5dr_ste_send_info *ste_info_last; + struct mlx5dr_ste *last_ste; + + /* The new entry will be inserted after the last */ + last_ste = list_entry(miss_list->prev, struct mlx5dr_ste, miss_list_node); + WARN_ON(!last_ste); + + ste_info_last = kzalloc(sizeof(*ste_info_last), GFP_KERNEL); + if (!ste_info_last) + return -ENOMEM; + + mlx5dr_ste_set_miss_addr(last_ste->hw_ste, + mlx5dr_ste_get_icm_addr(new_last_ste)); + list_add_tail(&new_last_ste->miss_list_node, miss_list); + + mlx5dr_send_fill_and_append_ste_send_info(last_ste, DR_STE_SIZE_REDUCED, + 0, last_ste->hw_ste, + ste_info_last, send_list, true); + + return 0; +} + +static struct mlx5dr_ste * +dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher, + struct mlx5dr_matcher_rx_tx *nic_matcher, + u8 *hw_ste) +{ + struct mlx5dr_domain *dmn = matcher->tbl->dmn; + struct mlx5dr_ste_htbl *new_htbl; + struct mlx5dr_ste *ste; + + /* Create new table for miss entry */ + new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool, + DR_CHUNK_SIZE_1, + MLX5DR_STE_LU_TYPE_DONT_CARE, + 0); + if (!new_htbl) { + mlx5dr_dbg(dmn, "Failed allocating collision table\n"); + return NULL; + } + + /* One and only entry, never grows */ + ste = new_htbl->ste_arr; + mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr); + mlx5dr_htbl_get(new_htbl); + + return ste; +} + +static struct mlx5dr_ste * +dr_rule_create_collision_entry(struct mlx5dr_matcher *matcher, + 
struct mlx5dr_matcher_rx_tx *nic_matcher, + u8 *hw_ste, + struct mlx5dr_ste *orig_ste) +{ + struct mlx5dr_ste *ste; + + ste = dr_rule_create_collision_htbl(matcher, nic_matcher, hw_ste); + if (!ste) { + mlx5dr_dbg(matcher->tbl->dmn, "Failed creating collision entry\n"); + return NULL; + } + + ste->ste_chain_location = orig_ste->ste_chain_location; + + /* In collision entry, all members share the same miss_list_head */ + ste->htbl->miss_list = mlx5dr_ste_get_miss_list(orig_ste); + + /* Next table */ + if (mlx5dr_ste_create_next_htbl(matcher, nic_matcher, ste, hw_ste, + DR_CHUNK_SIZE_1)) { + mlx5dr_dbg(matcher->tbl->dmn, "Failed allocating table\n"); + goto free_tbl; + } + + return ste; + +free_tbl: + mlx5dr_ste_free(ste, matcher, nic_matcher); + return NULL; +} + +static int +dr_rule_handle_one_ste_in_update_list(struct mlx5dr_ste_send_info *ste_info, + struct mlx5dr_domain *dmn) +{ + int ret; + + list_del(&ste_info->send_list); + ret = mlx5dr_send_postsend_ste(dmn, ste_info->ste, ste_info->data, + ste_info->size, ste_info->offset); + if (ret) + goto out; + /* Copy data to ste, only reduced size, the last 16B (mask) + * is already written to the hw. + */ + memcpy(ste_info->ste->hw_ste, ste_info->data, DR_STE_SIZE_REDUCED); + +out: + kfree(ste_info); + return ret; +} + +static int dr_rule_send_update_list(struct list_head *send_ste_list, + struct mlx5dr_domain *dmn, + bool is_reverse) +{ + struct mlx5dr_ste_send_info *ste_info, *tmp_ste_info; + int ret; + + if (is_reverse) { + list_for_each_entry_safe_reverse(ste_info, tmp_ste_info, + send_ste_list, send_list) { + ret = dr_rule_handle_one_ste_in_update_list(ste_info, + dmn); + if (ret) + return ret; + } + } else { + list_for_each_entry_safe(ste_info, tmp_ste_info, + send_ste_list, send_list) { + ret = dr_rule_handle_one_ste_in_update_list(ste_info, + dmn); + if (ret) + return ret; + } + } + + return 0; +} + +static struct mlx5dr_ste * +dr_rule_find_ste_in_miss_list(struct list_head *miss_list, u8 *hw_ste) +{ + struct mlx5dr_ste *ste; + + if (list_empty(miss_list)) + return NULL; + + /* Check if hw_ste is present in the list */ + list_for_each_entry(ste, miss_list, miss_list_node) { + if (mlx5dr_ste_equal_tag(ste->hw_ste, hw_ste)) + return ste; + } + + return NULL; +} + +static struct mlx5dr_ste * +dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher, + struct mlx5dr_matcher_rx_tx *nic_matcher, + struct list_head *update_list, + struct mlx5dr_ste *col_ste, + u8 *hw_ste) +{ + struct mlx5dr_ste *new_ste; + int ret; + + new_ste = dr_rule_create_collision_htbl(matcher, nic_matcher, hw_ste); + if (!new_ste) + return NULL; + + /* In collision entry, all members share the same miss_list_head */ + new_ste->htbl->miss_list = mlx5dr_ste_get_miss_list(col_ste); + + /* Update the previous from the list */ + ret = dr_rule_append_to_miss_list(new_ste, + mlx5dr_ste_get_miss_list(col_ste), + update_list); + if (ret) { + mlx5dr_dbg(matcher->tbl->dmn, "Failed update dup entry\n"); + goto err_exit; + } + + return new_ste; + +err_exit: + mlx5dr_ste_free(new_ste, matcher, nic_matcher); + return NULL; +} + +static void dr_rule_rehash_copy_ste_ctrl(struct mlx5dr_matcher *matcher, + struct mlx5dr_matcher_rx_tx *nic_matcher, + struct mlx5dr_ste *cur_ste, + struct mlx5dr_ste *new_ste) +{ + new_ste->next_htbl = cur_ste->next_htbl; + new_ste->ste_chain_location = cur_ste->ste_chain_location; + + if (!mlx5dr_ste_is_last_in_rule(nic_matcher, new_ste->ste_chain_location)) + new_ste->next_htbl->pointing_ste = new_ste; + + /* We need to copy the refcount since this 
ste + * may have been traversed several times + */ + refcount_set(&new_ste->refcount, refcount_read(&cur_ste->refcount)); + + /* Link old STEs rule_mem list to the new ste */ + mlx5dr_rule_update_rule_member(cur_ste, new_ste); + INIT_LIST_HEAD(&new_ste->rule_list); + list_splice_tail_init(&cur_ste->rule_list, &new_ste->rule_list); +} + +static struct mlx5dr_ste * +dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher, + struct mlx5dr_matcher_rx_tx *nic_matcher, + struct mlx5dr_ste *cur_ste, + struct mlx5dr_ste_htbl *new_htbl, + struct list_head *update_list) +{ + struct mlx5dr_ste_send_info *ste_info; + bool use_update_list = false; + u8 hw_ste[DR_STE_SIZE] = {}; + struct mlx5dr_ste *new_ste; + int new_idx; + u8 sb_idx; + + /* Copy STE mask from the matcher */ + sb_idx = cur_ste->ste_chain_location - 1; + mlx5dr_ste_set_bit_mask(hw_ste, nic_matcher->ste_builder[sb_idx].bit_mask); + + /* Copy STE control and tag */ + memcpy(hw_ste, cur_ste->hw_ste, DR_STE_SIZE_REDUCED); + mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr); + + new_idx = mlx5dr_ste_calc_hash_index(hw_ste, new_htbl); + new_ste = &new_htbl->ste_arr[new_idx]; + + if (mlx5dr_ste_not_used_ste(new_ste)) { + mlx5dr_htbl_get(new_htbl); + list_add_tail(&new_ste->miss_list_node, + mlx5dr_ste_get_miss_list(new_ste)); + } else { + new_ste = dr_rule_rehash_handle_collision(matcher, + nic_matcher, + update_list, + new_ste, + hw_ste); + if (!new_ste) { + mlx5dr_dbg(matcher->tbl->dmn, "Failed adding collision entry, index: %d\n", + new_idx); + return NULL; + } + new_htbl->ctrl.num_of_collisions++; + use_update_list = true; + } + + memcpy(new_ste->hw_ste, hw_ste, DR_STE_SIZE_REDUCED); + + new_htbl->ctrl.num_of_valid_entries++; + + if (use_update_list) { + ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL); + if (!ste_info) + goto err_exit; + + mlx5dr_send_fill_and_append_ste_send_info(new_ste, DR_STE_SIZE, 0, + hw_ste, ste_info, + update_list, true); + } + + dr_rule_rehash_copy_ste_ctrl(matcher, nic_matcher, cur_ste, new_ste); + + return new_ste; + +err_exit: + mlx5dr_ste_free(new_ste, matcher, nic_matcher); + return NULL; +} + +static int dr_rule_rehash_copy_miss_list(struct mlx5dr_matcher *matcher, + struct mlx5dr_matcher_rx_tx *nic_matcher, + struct list_head *cur_miss_list, + struct mlx5dr_ste_htbl *new_htbl, + struct list_head *update_list) +{ + struct mlx5dr_ste *tmp_ste, *cur_ste, *new_ste; + + if (list_empty(cur_miss_list)) + return 0; + + list_for_each_entry_safe(cur_ste, tmp_ste, cur_miss_list, miss_list_node) { + new_ste = dr_rule_rehash_copy_ste(matcher, + nic_matcher, + cur_ste, + new_htbl, + update_list); + if (!new_ste) + goto err_insert; + + list_del(&cur_ste->miss_list_node); + mlx5dr_htbl_put(cur_ste->htbl); + } + return 0; + +err_insert: + mlx5dr_err(matcher->tbl->dmn, "Fatal error during resize\n"); + WARN_ON(true); + return -EINVAL; +} + +static int dr_rule_rehash_copy_htbl(struct mlx5dr_matcher *matcher, + struct mlx5dr_matcher_rx_tx *nic_matcher, + struct mlx5dr_ste_htbl *cur_htbl, + struct mlx5dr_ste_htbl *new_htbl, + struct list_head *update_list) +{ + struct mlx5dr_ste *cur_ste; + int cur_entries; + int err = 0; + int i; + + cur_entries = mlx5dr_icm_pool_chunk_size_to_entries(cur_htbl->chunk_size); + + if (cur_entries < 1) { + mlx5dr_dbg(matcher->tbl->dmn, "Invalid number of entries\n"); + return -EINVAL; + } + + for (i = 0; i < cur_entries; i++) { + cur_ste = &cur_htbl->ste_arr[i]; + if (mlx5dr_ste_not_used_ste(cur_ste)) /* Empty, nothing to copy */ + continue; + + err = 
dr_rule_rehash_copy_miss_list(matcher, + nic_matcher, + mlx5dr_ste_get_miss_list(cur_ste), + new_htbl, + update_list); + if (err) + goto clean_copy; + } + +clean_copy: + return err; +} + +static struct mlx5dr_ste_htbl * +dr_rule_rehash_htbl(struct mlx5dr_rule *rule, + struct mlx5dr_rule_rx_tx *nic_rule, + struct mlx5dr_ste_htbl *cur_htbl, + u8 ste_location, + struct list_head *update_list, + enum mlx5dr_icm_chunk_size new_size) +{ + struct mlx5dr_ste_send_info *del_ste_info, *tmp_ste_info; + struct mlx5dr_matcher *matcher = rule->matcher; + struct mlx5dr_domain *dmn = matcher->tbl->dmn; + struct mlx5dr_matcher_rx_tx *nic_matcher; + struct mlx5dr_ste_send_info *ste_info; + struct mlx5dr_htbl_connect_info info; + struct mlx5dr_domain_rx_tx *nic_dmn; + u8 formatted_ste[DR_STE_SIZE] = {}; + LIST_HEAD(rehash_table_send_list); + struct mlx5dr_ste *ste_to_update; + struct mlx5dr_ste_htbl *new_htbl; + int err; + + nic_matcher = nic_rule->nic_matcher; + nic_dmn = nic_matcher->nic_tbl->nic_dmn; + + ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL); + if (!ste_info) + return NULL; + + new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool, + new_size, + cur_htbl->lu_type, + cur_htbl->byte_mask); + if (!new_htbl) { + mlx5dr_err(dmn, "Failed to allocate new hash table\n"); + goto free_ste_info; + } + + /* Write new table to HW */ + info.type = CONNECT_MISS; + info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr; + mlx5dr_ste_set_formatted_ste(dmn->info.caps.gvmi, + nic_dmn, + new_htbl, + formatted_ste, + &info); + + new_htbl->pointing_ste = cur_htbl->pointing_ste; + new_htbl->pointing_ste->next_htbl = new_htbl; + err = dr_rule_rehash_copy_htbl(matcher, + nic_matcher, + cur_htbl, + new_htbl, + &rehash_table_send_list); + if (err) + goto free_new_htbl; + + if (mlx5dr_send_postsend_htbl(dmn, new_htbl, formatted_ste, + nic_matcher->ste_builder[ste_location - 1].bit_mask)) { + mlx5dr_err(dmn, "Failed writing table to HW\n"); + goto free_new_htbl; + } + + /* Writing to the hw is done in regular order of rehash_table_send_list, + * in order to have the origin data written before the miss address of + * collision entries, if exists. 
+ */ + if (dr_rule_send_update_list(&rehash_table_send_list, dmn, false)) { + mlx5dr_err(dmn, "Failed updating table to HW\n"); + goto free_ste_list; + } + + /* Connect previous hash table to current */ + if (ste_location == 1) { + /* The previous table is an anchor; an anchor's size is always one STE */ + struct mlx5dr_ste_htbl *prev_htbl = cur_htbl->pointing_ste->htbl; + + /* On matcher s_anchor we keep an extra refcount */ + mlx5dr_htbl_get(new_htbl); + mlx5dr_htbl_put(cur_htbl); + + nic_matcher->s_htbl = new_htbl; + + /* It is safe to call dr_ste_set_hit_addr on the hw_ste here + * (48B len) since it touches only the first 32B + */ + mlx5dr_ste_set_hit_addr(prev_htbl->ste_arr[0].hw_ste, + new_htbl->chunk->icm_addr, + new_htbl->chunk->num_of_entries); + + ste_to_update = &prev_htbl->ste_arr[0]; + } else { + mlx5dr_ste_set_hit_addr_by_next_htbl(cur_htbl->pointing_ste->hw_ste, + new_htbl); + ste_to_update = cur_htbl->pointing_ste; + } + + mlx5dr_send_fill_and_append_ste_send_info(ste_to_update, DR_STE_SIZE_REDUCED, + 0, ste_to_update->hw_ste, ste_info, + update_list, false); + + return new_htbl; + +free_ste_list: + /* Clean all ste_info's from the new table */ + list_for_each_entry_safe(del_ste_info, tmp_ste_info, + &rehash_table_send_list, send_list) { + list_del(&del_ste_info->send_list); + kfree(del_ste_info); + } + +free_new_htbl: + mlx5dr_ste_htbl_free(new_htbl); +free_ste_info: + kfree(ste_info); + mlx5dr_info(dmn, "Failed creating rehash table\n"); + return NULL; +} + +static struct mlx5dr_ste_htbl *dr_rule_rehash(struct mlx5dr_rule *rule, + struct mlx5dr_rule_rx_tx *nic_rule, + struct mlx5dr_ste_htbl *cur_htbl, + u8 ste_location, + struct list_head *update_list) +{ + struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn; + enum mlx5dr_icm_chunk_size new_size; + + new_size = mlx5dr_icm_next_higher_chunk(cur_htbl->chunk_size); + new_size = min_t(u32, new_size, dmn->info.max_log_sw_icm_sz); + + if (new_size == cur_htbl->chunk_size) + return NULL; /* Skip rehash, we are already at the max size */ + + return dr_rule_rehash_htbl(rule, nic_rule, cur_htbl, ste_location, + update_list, new_size); +} + +static struct mlx5dr_ste * +dr_rule_handle_collision(struct mlx5dr_matcher *matcher, + struct mlx5dr_matcher_rx_tx *nic_matcher, + struct mlx5dr_ste *ste, + u8 *hw_ste, + struct list_head *miss_list, + struct list_head *send_list) +{ + struct mlx5dr_ste_send_info *ste_info; + struct mlx5dr_ste *new_ste; + + ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL); + if (!ste_info) + return NULL; + + new_ste = dr_rule_create_collision_entry(matcher, nic_matcher, hw_ste, ste); + if (!new_ste) + goto free_send_info; + + if (dr_rule_append_to_miss_list(new_ste, miss_list, send_list)) { + mlx5dr_dbg(matcher->tbl->dmn, "Failed to update prev miss_list\n"); + goto err_exit; + } + + mlx5dr_send_fill_and_append_ste_send_info(new_ste, DR_STE_SIZE, 0, hw_ste, + ste_info, send_list, false); + + ste->htbl->ctrl.num_of_collisions++; + ste->htbl->ctrl.num_of_valid_entries++; + + return new_ste; + +err_exit: + mlx5dr_ste_free(new_ste, matcher, nic_matcher); +free_send_info: + kfree(ste_info); + return NULL; +} + +static void dr_rule_remove_action_members(struct mlx5dr_rule *rule) +{ + struct mlx5dr_rule_action_member *action_mem; + struct mlx5dr_rule_action_member *tmp; + + list_for_each_entry_safe(action_mem, tmp, &rule->rule_actions_list, list) { + list_del(&action_mem->list); + refcount_dec(&action_mem->action->refcount); + kvfree(action_mem); + } +} + +static int dr_rule_add_action_members(struct mlx5dr_rule *rule,
size_t num_actions, + struct mlx5dr_action *actions[]) +{ + struct mlx5dr_rule_action_member *action_mem; + int i; + + for (i = 0; i < num_actions; i++) { + action_mem = kvzalloc(sizeof(*action_mem), GFP_KERNEL); + if (!action_mem) + goto free_action_members; + + action_mem->action = actions[i]; + INIT_LIST_HEAD(&action_mem->list); + list_add_tail(&action_mem->list, &rule->rule_actions_list); + refcount_inc(&action_mem->action->refcount); + } + + return 0; + +free_action_members: + dr_rule_remove_action_members(rule); + return -ENOMEM; +} + +/* When the pointer of an ste is no longer valid, e.g. while moving the ste to be + * the first in the miss_list or to be in the origin table, + * all rule members that are attached to this ste should update their ste member + * to the new pointer. + */ +void mlx5dr_rule_update_rule_member(struct mlx5dr_ste *ste, + struct mlx5dr_ste *new_ste) +{ + struct mlx5dr_rule_member *rule_mem; + + if (!list_empty(&ste->rule_list)) + list_for_each_entry(rule_mem, &ste->rule_list, use_ste_list) + rule_mem->ste = new_ste; +} + +static void dr_rule_clean_rule_members(struct mlx5dr_rule *rule, + struct mlx5dr_rule_rx_tx *nic_rule) +{ + struct mlx5dr_rule_member *rule_mem; + struct mlx5dr_rule_member *tmp_mem; + + if (list_empty(&nic_rule->rule_members_list)) + return; + list_for_each_entry_safe(rule_mem, tmp_mem, &nic_rule->rule_members_list, list) { + list_del(&rule_mem->list); + list_del(&rule_mem->use_ste_list); + mlx5dr_ste_put(rule_mem->ste, rule->matcher, nic_rule->nic_matcher); + kvfree(rule_mem); + } +} + +static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl, + struct mlx5dr_domain *dmn, + struct mlx5dr_domain_rx_tx *nic_dmn) +{ + struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl; + + if (dmn->info.max_log_sw_icm_sz <= htbl->chunk_size) + return false; + + if (!ctrl->may_grow) + return false; + + if (ctrl->num_of_collisions >= ctrl->increase_threshold && + (ctrl->num_of_valid_entries - ctrl->num_of_collisions) >= ctrl->increase_threshold) + return true; + + return false; +} + +static int dr_rule_add_member(struct mlx5dr_rule_rx_tx *nic_rule, + struct mlx5dr_ste *ste) +{ + struct mlx5dr_rule_member *rule_mem; + + rule_mem = kvzalloc(sizeof(*rule_mem), GFP_KERNEL); + if (!rule_mem) + return -ENOMEM; + + rule_mem->ste = ste; + list_add_tail(&rule_mem->list, &nic_rule->rule_members_list); + + list_add_tail(&rule_mem->use_ste_list, &ste->rule_list); + + return 0; +} + +static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule, + struct mlx5dr_rule_rx_tx *nic_rule, + struct list_head *send_ste_list, + struct mlx5dr_ste *last_ste, + u8 *hw_ste_arr, + u32 new_hw_ste_arr_sz) +{ + struct mlx5dr_matcher_rx_tx *nic_matcher = nic_rule->nic_matcher; + struct mlx5dr_ste_send_info *ste_info_arr[DR_ACTION_MAX_STES]; + u8 num_of_builders = nic_matcher->num_of_builders; + struct mlx5dr_matcher *matcher = rule->matcher; + u8 *curr_hw_ste, *prev_hw_ste; + struct mlx5dr_ste *action_ste; + int i, k, ret; + + /* Two cases: + * 1. num_of_builders is equal to new_hw_ste_arr_sz, the action is in the ste + * 2. num_of_builders is less than new_hw_ste_arr_sz, a new ste was added + * to support the action. + */ + if (num_of_builders == new_hw_ste_arr_sz) + return 0; + + for (i = num_of_builders, k = 0; i < new_hw_ste_arr_sz; i++, k++) { + curr_hw_ste = hw_ste_arr + i * DR_STE_SIZE; + prev_hw_ste = (i == 0) ?
curr_hw_ste : hw_ste_arr + ((i - 1) * DR_STE_SIZE); + action_ste = dr_rule_create_collision_htbl(matcher, + nic_matcher, + curr_hw_ste); + if (!action_ste) + return -ENOMEM; + + mlx5dr_ste_get(action_ste); + + /* When freeing the ste we go over the miss list, so add this ste to the list */ + list_add_tail(&action_ste->miss_list_node, + mlx5dr_ste_get_miss_list(action_ste)); + + ste_info_arr[k] = kzalloc(sizeof(*ste_info_arr[k]), + GFP_KERNEL); + if (!ste_info_arr[k]) + goto err_exit; + + /* Point current ste to the new action */ + mlx5dr_ste_set_hit_addr_by_next_htbl(prev_hw_ste, action_ste->htbl); + ret = dr_rule_add_member(nic_rule, action_ste); + if (ret) { + mlx5dr_dbg(matcher->tbl->dmn, "Failed adding rule member\n"); + goto free_ste_info; + } + mlx5dr_send_fill_and_append_ste_send_info(action_ste, DR_STE_SIZE, 0, + curr_hw_ste, + ste_info_arr[k], + send_ste_list, false); + } + + return 0; + +free_ste_info: + kfree(ste_info_arr[k]); +err_exit: + mlx5dr_ste_put(action_ste, matcher, nic_matcher); + return -ENOMEM; +} + +static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher, + struct mlx5dr_matcher_rx_tx *nic_matcher, + struct mlx5dr_ste_htbl *cur_htbl, + struct mlx5dr_ste *ste, + u8 ste_location, + u8 *hw_ste, + struct list_head *miss_list, + struct list_head *send_list) +{ + struct mlx5dr_ste_send_info *ste_info; + + /* Take a ref on the table, only on the first time this ste is used */ + mlx5dr_htbl_get(cur_htbl); + + /* new entry -> new branch */ + list_add_tail(&ste->miss_list_node, miss_list); + + mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr); + + ste->ste_chain_location = ste_location; + + ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL); + if (!ste_info) + goto clean_ste_setting; + + if (mlx5dr_ste_create_next_htbl(matcher, + nic_matcher, + ste, + hw_ste, + DR_CHUNK_SIZE_1)) { + mlx5dr_dbg(matcher->tbl->dmn, "Failed allocating table\n"); + goto clean_ste_info; + } + + cur_htbl->ctrl.num_of_valid_entries++; + + mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE, 0, hw_ste, + ste_info, send_list, false); + + return 0; + +clean_ste_info: + kfree(ste_info); +clean_ste_setting: + list_del_init(&ste->miss_list_node); + mlx5dr_htbl_put(cur_htbl); + + return -ENOMEM; +} + +static struct mlx5dr_ste * +dr_rule_handle_ste_branch(struct mlx5dr_rule *rule, + struct mlx5dr_rule_rx_tx *nic_rule, + struct list_head *send_ste_list, + struct mlx5dr_ste_htbl *cur_htbl, + u8 *hw_ste, + u8 ste_location, + struct mlx5dr_ste_htbl **put_htbl) +{ + struct mlx5dr_matcher *matcher = rule->matcher; + struct mlx5dr_domain *dmn = matcher->tbl->dmn; + struct mlx5dr_matcher_rx_tx *nic_matcher; + struct mlx5dr_domain_rx_tx *nic_dmn; + struct mlx5dr_ste_htbl *new_htbl; + struct mlx5dr_ste *matched_ste; + struct list_head *miss_list; + bool skip_rehash = false; + struct mlx5dr_ste *ste; + int index; + + nic_matcher = nic_rule->nic_matcher; + nic_dmn = nic_matcher->nic_tbl->nic_dmn; + +again: + index = mlx5dr_ste_calc_hash_index(hw_ste, cur_htbl); + miss_list = &cur_htbl->chunk->miss_list[index]; + ste = &cur_htbl->ste_arr[index]; + + if (mlx5dr_ste_not_used_ste(ste)) { + if (dr_rule_handle_empty_entry(matcher, nic_matcher, cur_htbl, + ste, ste_location, + hw_ste, miss_list, + send_ste_list)) + return NULL; + } else { + /* Hash table index in use, check if this ste is in the miss list */ + matched_ste = dr_rule_find_ste_in_miss_list(miss_list, hw_ste); + if (matched_ste) { + /* If it is the last STE in the chain and has the same tag, + * it means that all the previous stes are the
same, + * so this rule is duplicated. + */ + if (mlx5dr_ste_is_last_in_rule(nic_matcher, + matched_ste->ste_chain_location)) { + mlx5dr_info(dmn, "Duplicate rule inserted, aborting!!\n"); + return NULL; + } + return matched_ste; + } + + if (!skip_rehash && dr_rule_need_enlarge_hash(cur_htbl, dmn, nic_dmn)) { + /* Hash table index in use, try to resize the hash */ + skip_rehash = true; + + /* Hold the table till we update. + * Release in dr_rule_create_rule() + */ + *put_htbl = cur_htbl; + mlx5dr_htbl_get(cur_htbl); + + new_htbl = dr_rule_rehash(rule, nic_rule, cur_htbl, + ste_location, send_ste_list); + if (!new_htbl) { + mlx5dr_htbl_put(cur_htbl); + mlx5dr_info(dmn, "failed creating rehash table, htbl-log_size: %d\n", + cur_htbl->chunk_size); + } else { + cur_htbl = new_htbl; + } + goto again; + } else { + /* Hash table index in use, add another collision (miss) */ + ste = dr_rule_handle_collision(matcher, + nic_matcher, + ste, + hw_ste, + miss_list, + send_ste_list); + if (!ste) { + mlx5dr_dbg(dmn, "failed adding collision entry, index: %d\n", + index); + return NULL; + } + } + } + return ste; +} + +static bool dr_rule_cmp_value_to_mask(u8 *mask, u8 *value, + u32 s_idx, u32 e_idx) +{ + u32 i; + + for (i = s_idx; i < e_idx; i++) { + if (value[i] & ~mask[i]) { + pr_info("Rule parameters contain a value not specified by mask\n"); + return false; + } + } + return true; +} + +static bool dr_rule_verify(struct mlx5dr_matcher *matcher, + struct mlx5dr_match_parameters *value, + struct mlx5dr_match_param *param) +{ + u8 match_criteria = matcher->match_criteria; + size_t value_size = value->match_sz; + u8 *mask_p = (u8 *)&matcher->mask; + u8 *param_p = (u8 *)param; + u32 s_idx, e_idx; + + if (!value_size || + (value_size > sizeof(struct mlx5dr_match_param) || + (value_size % sizeof(u32)))) { + mlx5dr_dbg(matcher->tbl->dmn, "Rule parameters length is incorrect\n"); + return false; + } + + mlx5dr_ste_copy_param(matcher->match_criteria, param, value); + + if (match_criteria & DR_MATCHER_CRITERIA_OUTER) { + s_idx = offsetof(struct mlx5dr_match_param, outer); + e_idx = min(s_idx + sizeof(param->outer), value_size); + + if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) { + mlx5dr_dbg(matcher->tbl->dmn, "Rule outer parameters contain a value not specified by mask\n"); + return false; + } + } + + if (match_criteria & DR_MATCHER_CRITERIA_MISC) { + s_idx = offsetof(struct mlx5dr_match_param, misc); + e_idx = min(s_idx + sizeof(param->misc), value_size); + + if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) { + mlx5dr_dbg(matcher->tbl->dmn, "Rule misc parameters contain a value not specified by mask\n"); + return false; + } + } + + if (match_criteria & DR_MATCHER_CRITERIA_INNER) { + s_idx = offsetof(struct mlx5dr_match_param, inner); + e_idx = min(s_idx + sizeof(param->inner), value_size); + + if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) { + mlx5dr_dbg(matcher->tbl->dmn, "Rule inner parameters contain a value not specified by mask\n"); + return false; + } + } + + if (match_criteria & DR_MATCHER_CRITERIA_MISC2) { + s_idx = offsetof(struct mlx5dr_match_param, misc2); + e_idx = min(s_idx + sizeof(param->misc2), value_size); + + if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) { + mlx5dr_dbg(matcher->tbl->dmn, "Rule misc2 parameters contain a value not specified by mask\n"); + return false; + } + } + + if (match_criteria & DR_MATCHER_CRITERIA_MISC3) { + s_idx = offsetof(struct mlx5dr_match_param, misc3); + e_idx = min(s_idx +
sizeof(param->misc3), value_size); + + if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) { + mlx5dr_dbg(matcher->tbl->dmn, "Rule misc3 parameters contain a value not specified by mask\n"); + return false; + } + } + return true; +} + +static int dr_rule_destroy_rule_nic(struct mlx5dr_rule *rule, + struct mlx5dr_rule_rx_tx *nic_rule) +{ + dr_rule_clean_rule_members(rule, nic_rule); + return 0; +} + +static int dr_rule_destroy_rule_fdb(struct mlx5dr_rule *rule) +{ + dr_rule_destroy_rule_nic(rule, &rule->rx); + dr_rule_destroy_rule_nic(rule, &rule->tx); + return 0; +} + +static int dr_rule_destroy_rule(struct mlx5dr_rule *rule) +{ + struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn; + + switch (dmn->type) { + case MLX5DR_DOMAIN_TYPE_NIC_RX: + dr_rule_destroy_rule_nic(rule, &rule->rx); + break; + case MLX5DR_DOMAIN_TYPE_NIC_TX: + dr_rule_destroy_rule_nic(rule, &rule->tx); + break; + case MLX5DR_DOMAIN_TYPE_FDB: + dr_rule_destroy_rule_fdb(rule); + break; + default: + return -EINVAL; + } + + dr_rule_remove_action_members(rule); + kfree(rule); + return 0; +} + +static bool dr_rule_is_ipv6(struct mlx5dr_match_param *param) +{ + return (param->outer.ip_version == 6 || + param->inner.ip_version == 6 || + param->outer.ethertype == ETH_P_IPV6 || + param->inner.ethertype == ETH_P_IPV6); +} + +static bool dr_rule_skip(enum mlx5dr_domain_type domain, + enum mlx5dr_ste_entry_type ste_type, + struct mlx5dr_match_param *mask, + struct mlx5dr_match_param *value) +{ + if (domain != MLX5DR_DOMAIN_TYPE_FDB) + return false; + + if (mask->misc.source_port) { + if (ste_type == MLX5DR_STE_TYPE_RX) + if (value->misc.source_port != WIRE_PORT) + return true; + + if (ste_type == MLX5DR_STE_TYPE_TX) + if (value->misc.source_port == WIRE_PORT) + return true; + } + + /* Metadata C can be used to describe the source vport */ + if (mask->misc2.metadata_reg_c_0) { + if (ste_type == MLX5DR_STE_TYPE_RX) + if ((value->misc2.metadata_reg_c_0 & WIRE_PORT) != WIRE_PORT) + return true; + + if (ste_type == MLX5DR_STE_TYPE_TX) + if ((value->misc2.metadata_reg_c_0 & WIRE_PORT) == WIRE_PORT) + return true; + } + return false; +} + +static int +dr_rule_create_rule_nic(struct mlx5dr_rule *rule, + struct mlx5dr_rule_rx_tx *nic_rule, + struct mlx5dr_match_param *param, + size_t num_actions, + struct mlx5dr_action *actions[]) +{ + struct mlx5dr_ste_send_info *ste_info, *tmp_ste_info; + struct mlx5dr_matcher *matcher = rule->matcher; + struct mlx5dr_domain *dmn = matcher->tbl->dmn; + struct mlx5dr_matcher_rx_tx *nic_matcher; + struct mlx5dr_domain_rx_tx *nic_dmn; + struct mlx5dr_ste_htbl *htbl = NULL; + struct mlx5dr_ste_htbl *cur_htbl; + struct mlx5dr_ste *ste = NULL; + LIST_HEAD(send_ste_list); + u8 *hw_ste_arr = NULL; + u32 new_hw_ste_arr_sz; + int ret, i; + + nic_matcher = nic_rule->nic_matcher; + nic_dmn = nic_matcher->nic_tbl->nic_dmn; + + INIT_LIST_HEAD(&nic_rule->rule_members_list); + + if (dr_rule_skip(dmn->type, nic_dmn->ste_type, &matcher->mask, param)) + return 0; + + ret = mlx5dr_matcher_select_builders(matcher, + nic_matcher, + dr_rule_is_ipv6(param)); + if (ret) + goto out_err; + + hw_ste_arr = kzalloc(DR_RULE_MAX_STE_CHAIN * DR_STE_SIZE, GFP_KERNEL); + if (!hw_ste_arr) { + ret = -ENOMEM; + goto out_err; + } + + /* Set the tag values inside the ste array */ + ret = mlx5dr_ste_build_ste_arr(matcher, nic_matcher, param, hw_ste_arr); + if (ret) + goto free_hw_ste; + + /* Set the actions values/addresses inside the ste array */ + ret = mlx5dr_actions_build_ste_arr(matcher, nic_matcher, actions, + num_actions,
hw_ste_arr, + &new_hw_ste_arr_sz); + if (ret) + goto free_hw_ste; + + cur_htbl = nic_matcher->s_htbl; + + /* Go over the array of STEs, and build dr_ste accordingly. + * The loop runs only over the builders, which are equal to or fewer than + * the number of stes, in case we have actions that live in other stes. + */ + for (i = 0; i < nic_matcher->num_of_builders; i++) { + /* Calculate CRC and keep new ste entry */ + u8 *cur_hw_ste_ent = hw_ste_arr + (i * DR_STE_SIZE); + + ste = dr_rule_handle_ste_branch(rule, + nic_rule, + &send_ste_list, + cur_htbl, + cur_hw_ste_ent, + i + 1, + &htbl); + if (!ste) { + mlx5dr_err(dmn, "Failed creating next branch\n"); + ret = -ENOENT; + goto free_rule; + } + + cur_htbl = ste->next_htbl; + + /* Keep all STEs in the rule struct */ + ret = dr_rule_add_member(nic_rule, ste); + if (ret) { + mlx5dr_dbg(dmn, "Failed adding rule member index %d\n", i); + goto free_ste; + } + + mlx5dr_ste_get(ste); + } + + /* Connect actions */ + ret = dr_rule_handle_action_stes(rule, nic_rule, &send_ste_list, + ste, hw_ste_arr, new_hw_ste_arr_sz); + if (ret) { + mlx5dr_dbg(dmn, "Failed applying actions\n"); + goto free_rule; + } + ret = dr_rule_send_update_list(&send_ste_list, dmn, true); + if (ret) { + mlx5dr_err(dmn, "Failed sending ste!\n"); + goto free_rule; + } + + if (htbl) + mlx5dr_htbl_put(htbl); + + return 0; + +free_ste: + mlx5dr_ste_put(ste, matcher, nic_matcher); +free_rule: + dr_rule_clean_rule_members(rule, nic_rule); + /* Clean all ste_info's */ + list_for_each_entry_safe(ste_info, tmp_ste_info, &send_ste_list, send_list) { + list_del(&ste_info->send_list); + kfree(ste_info); + } +free_hw_ste: + kfree(hw_ste_arr); +out_err: + return ret; +} + +static int +dr_rule_create_rule_fdb(struct mlx5dr_rule *rule, + struct mlx5dr_match_param *param, + size_t num_actions, + struct mlx5dr_action *actions[]) +{ + struct mlx5dr_match_param copy_param = {}; + int ret; + + /* Copy match_param since it will be consumed during the first + * nic_rule insertion.
+	 */
+	memcpy(&copy_param, param, sizeof(struct mlx5dr_match_param));
+
+	ret = dr_rule_create_rule_nic(rule, &rule->rx, param,
+				      num_actions, actions);
+	if (ret)
+		return ret;
+
+	ret = dr_rule_create_rule_nic(rule, &rule->tx, &copy_param,
+				      num_actions, actions);
+	if (ret)
+		goto destroy_rule_nic_rx;
+
+	return 0;
+
+destroy_rule_nic_rx:
+	dr_rule_destroy_rule_nic(rule, &rule->rx);
+	return ret;
+}
+
+static struct mlx5dr_rule *
+dr_rule_create_rule(struct mlx5dr_matcher *matcher,
+		    struct mlx5dr_match_parameters *value,
+		    size_t num_actions,
+		    struct mlx5dr_action *actions[])
+{
+	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+	struct mlx5dr_match_param param = {};
+	struct mlx5dr_rule *rule;
+	int ret;
+
+	if (!dr_rule_verify(matcher, value, &param))
+		return NULL;
+
+	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+	if (!rule)
+		return NULL;
+
+	rule->matcher = matcher;
+	INIT_LIST_HEAD(&rule->rule_actions_list);
+
+	ret = dr_rule_add_action_members(rule, num_actions, actions);
+	if (ret)
+		goto free_rule;
+
+	switch (dmn->type) {
+	case MLX5DR_DOMAIN_TYPE_NIC_RX:
+		rule->rx.nic_matcher = &matcher->rx;
+		ret = dr_rule_create_rule_nic(rule, &rule->rx, &param,
+					      num_actions, actions);
+		break;
+	case MLX5DR_DOMAIN_TYPE_NIC_TX:
+		rule->tx.nic_matcher = &matcher->tx;
+		ret = dr_rule_create_rule_nic(rule, &rule->tx, &param,
+					      num_actions, actions);
+		break;
+	case MLX5DR_DOMAIN_TYPE_FDB:
+		rule->rx.nic_matcher = &matcher->rx;
+		rule->tx.nic_matcher = &matcher->tx;
+		ret = dr_rule_create_rule_fdb(rule, &param,
+					      num_actions, actions);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	if (ret)
+		goto remove_action_members;
+
+	return rule;
+
+remove_action_members:
+	dr_rule_remove_action_members(rule);
+free_rule:
+	kfree(rule);
+	mlx5dr_info(dmn, "Failed creating rule\n");
+	return NULL;
+}
+
+struct mlx5dr_rule *mlx5dr_rule_create(struct mlx5dr_matcher *matcher,
+				       struct mlx5dr_match_parameters *value,
+				       size_t num_actions,
+				       struct mlx5dr_action *actions[])
+{
+	struct mlx5dr_rule *rule;
+
+	mutex_lock(&matcher->tbl->dmn->mutex);
+	refcount_inc(&matcher->refcount);
+
+	rule = dr_rule_create_rule(matcher, value, num_actions, actions);
+	if (!rule)
+		refcount_dec(&matcher->refcount);
+
+	mutex_unlock(&matcher->tbl->dmn->mutex);
+
+	return rule;
+}
+
+int mlx5dr_rule_destroy(struct mlx5dr_rule *rule)
+{
+	struct mlx5dr_matcher *matcher = rule->matcher;
+	struct mlx5dr_table *tbl = rule->matcher->tbl;
+	int ret;
+
+	mutex_lock(&tbl->dmn->mutex);
+
+	ret = dr_rule_destroy_rule(rule);
+
+	mutex_unlock(&tbl->dmn->mutex);
+
+	if (!ret)
+		refcount_dec(&matcher->refcount);
+	return ret;
+}
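The match_param copy in the FDB path above is needed because the STE builders consume the match fields as they translate them into hardware tags: DR_STE_SET_VAL in dr_ste.c clears each source field once it has been written, so the second NIC-rule insertion must start from a pristine copy. A minimal, self-contained sketch of this consume-on-build pattern (hypothetical struct and field names, not the driver's types):

#include <stdio.h>
#include <string.h>

struct match { unsigned int vid; unsigned int proto; };

/* Builder-style helper: emit the field into the tag, then clear it so a
 * later pass can verify that every requested field was consumed. */
static void set_tag(unsigned int *tag, unsigned int *field)
{
	*tag = *field;
	*field = 0;
}

int main(void)
{
	struct match m = { .vid = 5, .proto = 0x0800 }, copy;
	unsigned int tag[2];

	memcpy(&copy, &m, sizeof(m));	/* keep a pristine copy, as the FDB path does */
	set_tag(&tag[0], &m.vid);
	set_tag(&tag[1], &m.proto);
	/* m is now zeroed; building the TX side must start from 'copy' */
	printf("consumed: vid=%u proto=0x%x\n", m.vid, m.proto);
	return 0;
}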
*/ + +#include "dr_types.h" + +#define QUEUE_SIZE 128 +#define SIGNAL_PER_DIV_QUEUE 16 +#define TH_NUMS_TO_DRAIN 2 + +enum { CQ_OK = 0, CQ_EMPTY = -1, CQ_POLL_ERR = -2 }; + +struct dr_data_seg { + u64 addr; + u32 length; + u32 lkey; + unsigned int send_flags; +}; + +struct postsend_info { + struct dr_data_seg write; + struct dr_data_seg read; + u64 remote_addr; + u32 rkey; +}; + +struct dr_qp_rtr_attr { + struct mlx5dr_cmd_gid_attr dgid_attr; + enum ib_mtu mtu; + u32 qp_num; + u16 port_num; + u8 min_rnr_timer; + u8 sgid_index; + u16 udp_src_port; +}; + +struct dr_qp_rts_attr { + u8 timeout; + u8 retry_cnt; + u8 rnr_retry; +}; + +struct dr_qp_init_attr { + u32 cqn; + u32 pdn; + u32 max_send_wr; + struct mlx5_uars_page *uar; +}; + +static int dr_parse_cqe(struct mlx5dr_cq *dr_cq, struct mlx5_cqe64 *cqe64) +{ + unsigned int idx; + u8 opcode; + + opcode = get_cqe_opcode(cqe64); + if (opcode == MLX5_CQE_REQ_ERR) { + idx = be16_to_cpu(cqe64->wqe_counter) & + (dr_cq->qp->sq.wqe_cnt - 1); + dr_cq->qp->sq.cc = dr_cq->qp->sq.wqe_head[idx] + 1; + } else if (opcode == MLX5_CQE_RESP_ERR) { + ++dr_cq->qp->sq.cc; + } else { + idx = be16_to_cpu(cqe64->wqe_counter) & + (dr_cq->qp->sq.wqe_cnt - 1); + dr_cq->qp->sq.cc = dr_cq->qp->sq.wqe_head[idx] + 1; + + return CQ_OK; + } + + return CQ_POLL_ERR; +} + +static int dr_cq_poll_one(struct mlx5dr_cq *dr_cq) +{ + struct mlx5_cqe64 *cqe64; + int err; + + cqe64 = mlx5_cqwq_get_cqe(&dr_cq->wq); + if (!cqe64) + return CQ_EMPTY; + + mlx5_cqwq_pop(&dr_cq->wq); + err = dr_parse_cqe(dr_cq, cqe64); + mlx5_cqwq_update_db_record(&dr_cq->wq); + + return err; +} + +static int dr_poll_cq(struct mlx5dr_cq *dr_cq, int ne) +{ + int npolled; + int err = 0; + + for (npolled = 0; npolled < ne; ++npolled) { + err = dr_cq_poll_one(dr_cq); + if (err != CQ_OK) + break; + } + + return err == CQ_POLL_ERR ? 
+
+static void dr_qp_event(struct mlx5_core_qp *mqp, int event)
+{
+	pr_info("DR QP event %u on QP #%u\n", event, mqp->qpn);
+}
+
+static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
+					 struct dr_qp_init_attr *attr)
+{
+	u32 temp_qpc[MLX5_ST_SZ_DW(qpc)] = {};
+	struct mlx5_wq_param wqp;
+	struct mlx5dr_qp *dr_qp;
+	int inlen;
+	void *qpc;
+	void *in;
+	int err;
+
+	dr_qp = kzalloc(sizeof(*dr_qp), GFP_KERNEL);
+	if (!dr_qp)
+		return NULL;
+
+	wqp.buf_numa_node = mdev->priv.numa_node;
+	wqp.db_numa_node = mdev->priv.numa_node;
+
+	dr_qp->rq.pc = 0;
+	dr_qp->rq.cc = 0;
+	dr_qp->rq.wqe_cnt = 4;
+	dr_qp->sq.pc = 0;
+	dr_qp->sq.cc = 0;
+	dr_qp->sq.wqe_cnt = roundup_pow_of_two(attr->max_send_wr);
+
+	MLX5_SET(qpc, temp_qpc, log_rq_stride, ilog2(MLX5_SEND_WQE_DS) - 4);
+	MLX5_SET(qpc, temp_qpc, log_rq_size, ilog2(dr_qp->rq.wqe_cnt));
+	MLX5_SET(qpc, temp_qpc, log_sq_size, ilog2(dr_qp->sq.wqe_cnt));
+	err = mlx5_wq_qp_create(mdev, &wqp, temp_qpc, &dr_qp->wq,
+				&dr_qp->wq_ctrl);
+	if (err) {
+		mlx5_core_info(mdev, "Can't create QP WQ\n");
+		goto err_wq;
+	}
+
+	dr_qp->sq.wqe_head = kcalloc(dr_qp->sq.wqe_cnt,
+				     sizeof(dr_qp->sq.wqe_head[0]),
+				     GFP_KERNEL);
+
+	if (!dr_qp->sq.wqe_head) {
+		mlx5_core_warn(mdev, "Can't allocate wqe head\n");
+		goto err_wqe_head;
+	}
+
+	inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
+		MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) *
+		dr_qp->wq_ctrl.buf.npages;
+	in = kvzalloc(inlen, GFP_KERNEL);
+	if (!in) {
+		err = -ENOMEM;
+		goto err_in;
+	}
+
+	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+	MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
+	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+	MLX5_SET(qpc, qpc, pd, attr->pdn);
+	MLX5_SET(qpc, qpc, uar_page, attr->uar->index);
+	MLX5_SET(qpc, qpc, log_page_size,
+		 dr_qp->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+	MLX5_SET(qpc, qpc, fre, 1);
+	MLX5_SET(qpc, qpc, rlky, 1);
+	MLX5_SET(qpc, qpc, cqn_snd, attr->cqn);
+	MLX5_SET(qpc, qpc, cqn_rcv, attr->cqn);
+	MLX5_SET(qpc, qpc, log_rq_stride, ilog2(MLX5_SEND_WQE_DS) - 4);
+	MLX5_SET(qpc, qpc, log_rq_size, ilog2(dr_qp->rq.wqe_cnt));
+	MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ);
+	MLX5_SET(qpc, qpc, log_sq_size, ilog2(dr_qp->sq.wqe_cnt));
+	MLX5_SET64(qpc, qpc, dbr_addr, dr_qp->wq_ctrl.db.dma);
+	if (MLX5_CAP_GEN(mdev, cqe_version) == 1)
+		MLX5_SET(qpc, qpc, user_index, 0xFFFFFF);
+	mlx5_fill_page_frag_array(&dr_qp->wq_ctrl.buf,
+				  (__be64 *)MLX5_ADDR_OF(create_qp_in,
+							 in, pas));
+
+	err = mlx5_core_create_qp(mdev, &dr_qp->mqp, in, inlen);
+	kfree(in);
+
+	if (err) {
+		mlx5_core_warn(mdev, "Can't create QP\n");
+		goto err_in;
+	}
+	dr_qp->mqp.event = dr_qp_event;
+	dr_qp->uar = attr->uar;
+
+	return dr_qp;
+
+err_in:
+	kfree(dr_qp->sq.wqe_head);
+err_wqe_head:
+	mlx5_wq_destroy(&dr_qp->wq_ctrl);
+err_wq:
+	kfree(dr_qp);
+	return NULL;
+}
+
+static void dr_destroy_qp(struct mlx5_core_dev *mdev,
+			  struct mlx5dr_qp *dr_qp)
+{
+	mlx5_core_destroy_qp(mdev, &dr_qp->mqp);
+	kfree(dr_qp->sq.wqe_head);
+	mlx5_wq_destroy(&dr_qp->wq_ctrl);
+	kfree(dr_qp);
+}
+
+static void dr_cmd_notify_hw(struct mlx5dr_qp *dr_qp, void *ctrl)
+{
+	dma_wmb();
+	*dr_qp->wq.sq.db = cpu_to_be32(dr_qp->sq.pc & 0xfffff);
+
+	/* After wmb() the hw is aware of the new work */
+	wmb();
+
+	mlx5_write64(ctrl, dr_qp->uar->map + MLX5_BF_OFFSET);
+}
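Note that dr_create_rc_qp rounds sq.wqe_cnt up to a power of two; that is what lets dr_parse_cqe above and dr_rdma_segments below reduce a free-running producer counter to a ring slot with a single AND, pc & (wqe_cnt - 1), instead of a modulo. A self-contained sketch of that index arithmetic:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int wqe_cnt = 8;	/* ring size, must be a power of two */
	unsigned int pc;		/* free-running producer counter */

	assert((wqe_cnt & (wqe_cnt - 1)) == 0);
	for (pc = 0; pc < 20; pc++) {
		unsigned int idx = pc & (wqe_cnt - 1);	/* cheap modulo */
		printf("wqe %2u -> slot %u\n", pc, idx);
	}
	return 0;
}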
+
+static void dr_rdma_segments(struct mlx5dr_qp *dr_qp, u64 remote_addr,
+			     u32 rkey, struct dr_data_seg *data_seg,
+			     u32 opcode, int nreq)
+{
+	struct mlx5_wqe_raddr_seg *wq_raddr;
+	struct mlx5_wqe_ctrl_seg *wq_ctrl;
+	struct mlx5_wqe_data_seg *wq_dseg;
+	unsigned int size;
+	unsigned int idx;
+
+	size = sizeof(*wq_ctrl) / 16 + sizeof(*wq_dseg) / 16 +
+		sizeof(*wq_raddr) / 16;
+
+	idx = dr_qp->sq.pc & (dr_qp->sq.wqe_cnt - 1);
+
+	wq_ctrl = mlx5_wq_cyc_get_wqe(&dr_qp->wq.sq, idx);
+	wq_ctrl->imm = 0;
+	wq_ctrl->fm_ce_se = (data_seg->send_flags) ?
+		MLX5_WQE_CTRL_CQ_UPDATE : 0;
+	wq_ctrl->opmod_idx_opcode = cpu_to_be32(((dr_qp->sq.pc & 0xffff) << 8) |
+						opcode);
+	wq_ctrl->qpn_ds = cpu_to_be32(size | dr_qp->mqp.qpn << 8);
+	wq_raddr = (void *)(wq_ctrl + 1);
+	wq_raddr->raddr = cpu_to_be64(remote_addr);
+	wq_raddr->rkey = cpu_to_be32(rkey);
+	wq_raddr->reserved = 0;
+
+	wq_dseg = (void *)(wq_raddr + 1);
+	wq_dseg->byte_count = cpu_to_be32(data_seg->length);
+	wq_dseg->lkey = cpu_to_be32(data_seg->lkey);
+	wq_dseg->addr = cpu_to_be64(data_seg->addr);
+
+	dr_qp->sq.wqe_head[idx] = dr_qp->sq.pc++;
+
+	if (nreq)
+		dr_cmd_notify_hw(dr_qp, wq_ctrl);
+}
+
+static void dr_post_send(struct mlx5dr_qp *dr_qp, struct postsend_info *send_info)
+{
+	dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey,
+			 &send_info->write, MLX5_OPCODE_RDMA_WRITE, 0);
+	dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey,
+			 &send_info->read, MLX5_OPCODE_RDMA_READ, 1);
+}
+
+/**
+ * mlx5dr_send_fill_and_append_ste_send_info: Add data to be sent,
+ * appending it to the send_list:
+ *
+ * @ste: The ste to which the data is attached
+ * @size: Number of bytes of data to write
+ * @offset: Offset of the data from the start of the hw_ste entry
+ * @data: The data
+ * @ste_info: The ste_info to be sent with the send_list
+ * @send_list: The list to append to
+ * @copy_data: If true, the data is copied into ste_info because it is
+ *             not backed up anywhere else (as during re-hash);
+ *             if false, the data may still be updated after it has
+ *             been added to the list.
+ */
+void mlx5dr_send_fill_and_append_ste_send_info(struct mlx5dr_ste *ste, u16 size,
+					       u16 offset, u8 *data,
+					       struct mlx5dr_ste_send_info *ste_info,
+					       struct list_head *send_list,
+					       bool copy_data)
+{
+	ste_info->size = size;
+	ste_info->ste = ste;
+	ste_info->offset = offset;
+
+	if (copy_data) {
+		memcpy(ste_info->data_cont, data, size);
+		ste_info->data = ste_info->data_cont;
+	} else {
+		ste_info->data = data;
+	}
+
+	list_add_tail(&ste_info->send_list, send_list);
+}
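The helper above implements the driver's deferred-update pattern: callers accumulate pending STE writes on a send_list and snapshot the payload only when the source buffer will not survive until the flush. A minimal sketch of the same queue-then-flush idea (plain C, with hypothetical types standing in for mlx5dr_ste_send_info):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define SNAP_MAX 64	/* stand-in for the DR_STE_SIZE snapshot area */

struct pending_write {
	unsigned short size;
	unsigned char snapshot[SNAP_MAX];
	const unsigned char *data;	/* what the flush will actually send */
};

/* Queue one write; copy the payload only when the caller's buffer is
 * transient (mirrors the copy_data flag above). */
static void queue_write(struct pending_write *w, const unsigned char *data,
			unsigned short size, bool copy_data)
{
	w->size = size;
	if (copy_data) {
		memcpy(w->snapshot, data, size);
		w->data = w->snapshot;
	} else {
		w->data = data;	/* may legitimately change before the flush */
	}
}

int main(void)
{
	unsigned char tmp[4] = { 1, 2, 3, 4 };
	struct pending_write w;

	queue_write(&w, tmp, sizeof(tmp), true);
	tmp[0] = 99;	/* too late: the snapshot is already taken */
	printf("flush sends %u\n", w.data[0]);	/* prints 1 */
	return 0;
}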
+
+/* Try to consume one wc each time; if the queue is full, meaning the hw
+ * is behind the sw by a full queue length, drain the cq until it is
+ * empty.
+ */
+static int dr_handle_pending_wc(struct mlx5dr_domain *dmn,
+				struct mlx5dr_send_ring *send_ring)
+{
+	bool is_drain = false;
+	int ne;
+
+	if (send_ring->pending_wqe < send_ring->signal_th)
+		return 0;
+
+	/* Queue is full, start draining it */
+	if (send_ring->pending_wqe >=
+	    dmn->send_ring->signal_th * TH_NUMS_TO_DRAIN)
+		is_drain = true;
+
+	do {
+		ne = dr_poll_cq(send_ring->cq, 1);
+		if (ne < 0)
+			return ne;
+		else if (ne == 1)
+			send_ring->pending_wqe -= send_ring->signal_th;
+	} while (is_drain && send_ring->pending_wqe);
+
+	return 0;
+}
+
+static void dr_fill_data_segs(struct mlx5dr_send_ring *send_ring,
+			      struct postsend_info *send_info)
+{
+	send_ring->pending_wqe++;
+
+	if (send_ring->pending_wqe % send_ring->signal_th == 0)
+		send_info->write.send_flags |= IB_SEND_SIGNALED;
+
+	send_ring->pending_wqe++;
+	send_info->read.length = send_info->write.length;
+	/* Read into the same write area */
+	send_info->read.addr = (uintptr_t)send_info->write.addr;
+	send_info->read.lkey = send_ring->mr->mkey.key;
+
+	if (send_ring->pending_wqe % send_ring->signal_th == 0)
+		send_info->read.send_flags = IB_SEND_SIGNALED;
+	else
+		send_info->read.send_flags = 0;
+}
+
+static int dr_postsend_icm_data(struct mlx5dr_domain *dmn,
+				struct postsend_info *send_info)
+{
+	struct mlx5dr_send_ring *send_ring = dmn->send_ring;
+	u32 buff_offset;
+	int ret;
+
+	ret = dr_handle_pending_wc(dmn, send_ring);
+	if (ret)
+		return ret;
+
+	if (send_info->write.length > dmn->info.max_inline_size) {
+		buff_offset = (send_ring->tx_head &
+			       (dmn->send_ring->signal_th - 1)) *
+			send_ring->max_post_send_size;
+		/* Copy to ring mr */
+		memcpy(send_ring->buf + buff_offset,
+		       (void *)(uintptr_t)send_info->write.addr,
+		       send_info->write.length);
+		send_info->write.addr = (uintptr_t)send_ring->mr->dma_addr + buff_offset;
+		send_info->write.lkey = send_ring->mr->mkey.key;
+	}
+
+	send_ring->tx_head++;
+	dr_fill_data_segs(send_ring, send_info);
+	dr_post_send(send_ring->qp, send_info);
+
+	return 0;
+}
+
+static int dr_get_tbl_copy_details(struct mlx5dr_domain *dmn,
+				   struct mlx5dr_ste_htbl *htbl,
+				   u8 **data,
+				   u32 *byte_size,
+				   int *iterations,
+				   int *num_stes)
+{
+	int alloc_size;
+
+	if (htbl->chunk->byte_size > dmn->send_ring->max_post_send_size) {
+		*iterations = htbl->chunk->byte_size /
+			dmn->send_ring->max_post_send_size;
+		*byte_size = dmn->send_ring->max_post_send_size;
+		alloc_size = *byte_size;
+		*num_stes = *byte_size / DR_STE_SIZE;
+	} else {
+		*iterations = 1;
+		*num_stes = htbl->chunk->num_of_entries;
+		alloc_size = *num_stes * DR_STE_SIZE;
+	}
+
+	*data = kzalloc(alloc_size, GFP_KERNEL);
+	if (!*data)
+		return -ENOMEM;
+
+	return 0;
+}
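dr_get_tbl_copy_details above caps each RDMA post at the ring's max_post_send_size and derives how many iterations the htbl senders below must loop. A small sketch of that chunking arithmetic (illustrative values; the real chunk and post sizes are powers of two, so the division is exact):

#include <stdio.h>

int main(void)
{
	unsigned int byte_size = 16384;	/* htbl chunk byte size */
	unsigned int max_post = 4096;	/* ring's max post size */
	unsigned int iterations, per_iter, i;

	if (byte_size > max_post) {
		iterations = byte_size / max_post;
		per_iter = max_post;
	} else {
		iterations = 1;
		per_iter = byte_size;
	}
	for (i = 0; i < iterations; i++)
		printf("post %u: %u bytes at offset %u\n",
		       i, per_iter, i * per_iter);
	return 0;
}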
+
+/**
+ * mlx5dr_send_postsend_ste: write 'size' bytes at 'offset' into the
+ * hw ICM data of an ste.
+ *
+ * @dmn: Domain
+ * @ste: The ste struct that contains the data (at
+ *       least part of it)
+ * @data: The data to write
+ * @size: Number of bytes to write
+ * @offset: Offset from the start of the ste's ICM-mapped data;
+ *          allows writing only part of the buffer.
+ *
+ * Return: 0 on success.
+ */
+int mlx5dr_send_postsend_ste(struct mlx5dr_domain *dmn, struct mlx5dr_ste *ste,
+			     u8 *data, u16 size, u16 offset)
+{
+	struct postsend_info send_info = {};
+
+	send_info.write.addr = (uintptr_t)data;
+	send_info.write.length = size;
+	send_info.write.lkey = 0;
+	send_info.remote_addr = mlx5dr_ste_get_mr_addr(ste) + offset;
+	send_info.rkey = ste->htbl->chunk->rkey;
+
+	return dr_postsend_icm_data(dmn, &send_info);
+}
+
+int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
+			      struct mlx5dr_ste_htbl *htbl,
+			      u8 *formatted_ste, u8 *mask)
+{
+	u32 byte_size = htbl->chunk->byte_size;
+	int num_stes_per_iter;
+	int iterations;
+	u8 *data;
+	int ret;
+	int i;
+	int j;
+
+	ret = dr_get_tbl_copy_details(dmn, htbl, &data, &byte_size,
+				      &iterations, &num_stes_per_iter);
+	if (ret)
+		return ret;
+
+	/* Send the data 'iterations' times */
+	for (i = 0; i < iterations; i++) {
+		u32 ste_index = i * (byte_size / DR_STE_SIZE);
+		struct postsend_info send_info = {};
+
+		/* Copy all STEs into the data buffer,
+		 * adding back the bit_mask
+		 */
+		for (j = 0; j < num_stes_per_iter; j++) {
+			u8 *hw_ste = htbl->ste_arr[ste_index + j].hw_ste;
+			u32 ste_off = j * DR_STE_SIZE;
+
+			if (mlx5dr_ste_is_not_valid_entry(hw_ste)) {
+				memcpy(data + ste_off,
+				       formatted_ste, DR_STE_SIZE);
+			} else {
+				/* Copy data */
+				memcpy(data + ste_off,
+				       htbl->ste_arr[ste_index + j].hw_ste,
+				       DR_STE_SIZE_REDUCED);
+				/* Copy bit_mask */
+				memcpy(data + ste_off + DR_STE_SIZE_REDUCED,
+				       mask, DR_STE_SIZE_MASK);
+			}
+		}
+
+		send_info.write.addr = (uintptr_t)data;
+		send_info.write.length = byte_size;
+		send_info.write.lkey = 0;
+		send_info.remote_addr =
+			mlx5dr_ste_get_mr_addr(htbl->ste_arr + ste_index);
+		send_info.rkey = htbl->chunk->rkey;
+
+		ret = dr_postsend_icm_data(dmn, &send_info);
+		if (ret)
+			goto out_free;
+	}
+
+out_free:
+	kfree(data);
+	return ret;
+}
+
+/* Initialize htbl with default STEs */
+int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
+					struct mlx5dr_ste_htbl *htbl,
+					u8 *ste_init_data,
+					bool update_hw_ste)
+{
+	u32 byte_size = htbl->chunk->byte_size;
+	int iterations;
+	int num_stes;
+	u8 *data;
+	int ret;
+	int i;
+
+	ret = dr_get_tbl_copy_details(dmn, htbl, &data, &byte_size,
+				      &iterations, &num_stes);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < num_stes; i++) {
+		u8 *copy_dst;
+
+		/* Copy the same ste on the data buffer */
+		copy_dst = data + i * DR_STE_SIZE;
+		memcpy(copy_dst, ste_init_data, DR_STE_SIZE);
+
+		if (update_hw_ste) {
+			/* Copy the reduced ste to hash table ste_arr */
+			copy_dst = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
+			memcpy(copy_dst, ste_init_data, DR_STE_SIZE_REDUCED);
+		}
+	}
+
+	/* Send the data 'iterations' times */
+	for (i = 0; i < iterations; i++) {
+		u8 ste_index = i * (byte_size / DR_STE_SIZE);
+		struct postsend_info send_info = {};
+
+		send_info.write.addr = (uintptr_t)data;
+		send_info.write.length = byte_size;
+		send_info.write.lkey = 0;
+		send_info.remote_addr =
+			mlx5dr_ste_get_mr_addr(htbl->ste_arr + ste_index);
+		send_info.rkey = htbl->chunk->rkey;
+
+		ret = dr_postsend_icm_data(dmn, &send_info);
+		if (ret)
+			goto out_free;
+	}
+
+out_free:
+	kfree(data);
+	return ret;
+}
+
+int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
+				struct mlx5dr_action *action)
+{
+	struct postsend_info send_info = {};
+	int ret;
+
+	send_info.write.addr = (uintptr_t)action->rewrite.data;
+	send_info.write.length = action->rewrite.chunk->byte_size;
+	send_info.write.lkey = 0;
+	send_info.remote_addr = action->rewrite.chunk->mr_addr;
+	send_info.rkey =
action->rewrite.chunk->rkey; + + mutex_lock(&dmn->mutex); + ret = dr_postsend_icm_data(dmn, &send_info); + mutex_unlock(&dmn->mutex); + + return ret; +} + +static int dr_modify_qp_rst2init(struct mlx5_core_dev *mdev, + struct mlx5dr_qp *dr_qp, + int port) +{ + u32 in[MLX5_ST_SZ_DW(rst2init_qp_in)] = {}; + void *qpc; + + qpc = MLX5_ADDR_OF(rst2init_qp_in, in, qpc); + + MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, port); + MLX5_SET(qpc, qpc, pm_state, MLX5_QPC_PM_STATE_MIGRATED); + MLX5_SET(qpc, qpc, rre, 1); + MLX5_SET(qpc, qpc, rwe, 1); + + return mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, qpc, + &dr_qp->mqp); +} + +static int dr_cmd_modify_qp_rtr2rts(struct mlx5_core_dev *mdev, + struct mlx5dr_qp *dr_qp, + struct dr_qp_rts_attr *attr) +{ + u32 in[MLX5_ST_SZ_DW(rtr2rts_qp_in)] = {}; + void *qpc; + + qpc = MLX5_ADDR_OF(rtr2rts_qp_in, in, qpc); + + MLX5_SET(rtr2rts_qp_in, in, qpn, dr_qp->mqp.qpn); + + MLX5_SET(qpc, qpc, log_ack_req_freq, 0); + MLX5_SET(qpc, qpc, retry_count, attr->retry_cnt); + MLX5_SET(qpc, qpc, rnr_retry, attr->rnr_retry); + + return mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, 0, qpc, + &dr_qp->mqp); +} + +static int dr_cmd_modify_qp_init2rtr(struct mlx5_core_dev *mdev, + struct mlx5dr_qp *dr_qp, + struct dr_qp_rtr_attr *attr) +{ + u32 in[MLX5_ST_SZ_DW(init2rtr_qp_in)] = {}; + void *qpc; + + qpc = MLX5_ADDR_OF(init2rtr_qp_in, in, qpc); + + MLX5_SET(init2rtr_qp_in, in, qpn, dr_qp->mqp.qpn); + + MLX5_SET(qpc, qpc, mtu, attr->mtu); + MLX5_SET(qpc, qpc, log_msg_max, DR_CHUNK_SIZE_MAX - 1); + MLX5_SET(qpc, qpc, remote_qpn, attr->qp_num); + memcpy(MLX5_ADDR_OF(qpc, qpc, primary_address_path.rmac_47_32), + attr->dgid_attr.mac, sizeof(attr->dgid_attr.mac)); + memcpy(MLX5_ADDR_OF(qpc, qpc, primary_address_path.rgid_rip), + attr->dgid_attr.gid, sizeof(attr->dgid_attr.gid)); + MLX5_SET(qpc, qpc, primary_address_path.src_addr_index, + attr->sgid_index); + + if (attr->dgid_attr.roce_ver == MLX5_ROCE_VERSION_2) + MLX5_SET(qpc, qpc, primary_address_path.udp_sport, + attr->udp_src_port); + + MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, attr->port_num); + MLX5_SET(qpc, qpc, min_rnr_nak, 1); + + return mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, qpc, + &dr_qp->mqp); +} + +static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn) +{ + struct mlx5dr_qp *dr_qp = dmn->send_ring->qp; + struct dr_qp_rts_attr rts_attr = {}; + struct dr_qp_rtr_attr rtr_attr = {}; + enum ib_mtu mtu = IB_MTU_1024; + u16 gid_index = 0; + int port = 1; + int ret; + + /* Init */ + ret = dr_modify_qp_rst2init(dmn->mdev, dr_qp, port); + if (ret) + return ret; + + /* RTR */ + ret = mlx5dr_cmd_query_gid(dmn->mdev, port, gid_index, &rtr_attr.dgid_attr); + if (ret) + return ret; + + rtr_attr.mtu = mtu; + rtr_attr.qp_num = dr_qp->mqp.qpn; + rtr_attr.min_rnr_timer = 12; + rtr_attr.port_num = port; + rtr_attr.sgid_index = gid_index; + rtr_attr.udp_src_port = dmn->info.caps.roce_min_src_udp; + + ret = dr_cmd_modify_qp_init2rtr(dmn->mdev, dr_qp, &rtr_attr); + if (ret) + return ret; + + /* RTS */ + rts_attr.timeout = 14; + rts_attr.retry_cnt = 7; + rts_attr.rnr_retry = 7; + + ret = dr_cmd_modify_qp_rtr2rts(dmn->mdev, dr_qp, &rts_attr); + if (ret) + return ret; + + return 0; +} + +static void dr_cq_event(struct mlx5_core_cq *mcq, + enum mlx5_event event) +{ + pr_info("CQ event %u on CQ #%u\n", event, mcq->cqn); +} + +static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev, + struct mlx5_uars_page *uar, + size_t ncqe) +{ + u32 temp_cqc[MLX5_ST_SZ_DW(cqc)] = {}; + u32 
out[MLX5_ST_SZ_DW(create_cq_out)]; + struct mlx5_wq_param wqp; + struct mlx5_cqe64 *cqe; + struct mlx5dr_cq *cq; + int inlen, err, eqn; + unsigned int irqn; + void *cqc, *in; + __be64 *pas; + u32 i; + + cq = kzalloc(sizeof(*cq), GFP_KERNEL); + if (!cq) + return NULL; + + ncqe = roundup_pow_of_two(ncqe); + MLX5_SET(cqc, temp_cqc, log_cq_size, ilog2(ncqe)); + + wqp.buf_numa_node = mdev->priv.numa_node; + wqp.db_numa_node = mdev->priv.numa_node; + + err = mlx5_cqwq_create(mdev, &wqp, temp_cqc, &cq->wq, + &cq->wq_ctrl); + if (err) + goto out; + + for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) { + cqe = mlx5_cqwq_get_wqe(&cq->wq, i); + cqe->op_own = MLX5_CQE_INVALID << 4 | MLX5_CQE_OWNER_MASK; + } + + inlen = MLX5_ST_SZ_BYTES(create_cq_in) + + sizeof(u64) * cq->wq_ctrl.buf.npages; + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) + goto err_cqwq; + + err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn, &irqn); + if (err) { + kvfree(in); + goto err_cqwq; + } + + cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context); + MLX5_SET(cqc, cqc, log_cq_size, ilog2(ncqe)); + MLX5_SET(cqc, cqc, c_eqn, eqn); + MLX5_SET(cqc, cqc, uar_page, uar->index); + MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - + MLX5_ADAPTER_PAGE_SHIFT); + MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma); + + pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas); + mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, pas); + + cq->mcq.event = dr_cq_event; + + err = mlx5_core_create_cq(mdev, &cq->mcq, in, inlen, out, sizeof(out)); + kvfree(in); + + if (err) + goto err_cqwq; + + cq->mcq.cqe_sz = 64; + cq->mcq.set_ci_db = cq->wq_ctrl.db.db; + cq->mcq.arm_db = cq->wq_ctrl.db.db + 1; + *cq->mcq.set_ci_db = 0; + *cq->mcq.arm_db = 0; + cq->mcq.vector = 0; + cq->mcq.irqn = irqn; + cq->mcq.uar = uar; + + return cq; + +err_cqwq: + mlx5_wq_destroy(&cq->wq_ctrl); +out: + kfree(cq); + return NULL; +} + +static void dr_destroy_cq(struct mlx5_core_dev *mdev, struct mlx5dr_cq *cq) +{ + mlx5_core_destroy_cq(mdev, &cq->mcq); + mlx5_wq_destroy(&cq->wq_ctrl); + kfree(cq); +} + +static int +dr_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, struct mlx5_core_mkey *mkey) +{ + u32 in[MLX5_ST_SZ_DW(create_mkey_in)] = {}; + void *mkc; + + mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); + MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA); + MLX5_SET(mkc, mkc, a, 1); + MLX5_SET(mkc, mkc, rw, 1); + MLX5_SET(mkc, mkc, rr, 1); + MLX5_SET(mkc, mkc, lw, 1); + MLX5_SET(mkc, mkc, lr, 1); + + MLX5_SET(mkc, mkc, pd, pdn); + MLX5_SET(mkc, mkc, length64, 1); + MLX5_SET(mkc, mkc, qpn, 0xffffff); + + return mlx5_core_create_mkey(mdev, mkey, in, sizeof(in)); +} + +static struct mlx5dr_mr *dr_reg_mr(struct mlx5_core_dev *mdev, + u32 pdn, void *buf, size_t size) +{ + struct mlx5dr_mr *mr = kzalloc(sizeof(*mr), GFP_KERNEL); + struct device *dma_device; + dma_addr_t dma_addr; + int err; + + if (!mr) + return NULL; + + dma_device = &mdev->pdev->dev; + dma_addr = dma_map_single(dma_device, buf, size, + DMA_BIDIRECTIONAL); + err = dma_mapping_error(dma_device, dma_addr); + if (err) { + mlx5_core_warn(mdev, "Can't dma buf\n"); + kfree(mr); + return NULL; + } + + err = dr_create_mkey(mdev, pdn, &mr->mkey); + if (err) { + mlx5_core_warn(mdev, "Can't create mkey\n"); + dma_unmap_single(dma_device, dma_addr, size, + DMA_BIDIRECTIONAL); + kfree(mr); + return NULL; + } + + mr->dma_addr = dma_addr; + mr->size = size; + mr->addr = buf; + + return mr; +} + +static void dr_dereg_mr(struct mlx5_core_dev *mdev, struct mlx5dr_mr *mr) +{ + mlx5_core_destroy_mkey(mdev, 
&mr->mkey); + dma_unmap_single(&mdev->pdev->dev, mr->dma_addr, mr->size, + DMA_BIDIRECTIONAL); + kfree(mr); +} + +int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn) +{ + struct dr_qp_init_attr init_attr = {}; + int cq_size; + int size; + int ret; + + dmn->send_ring = kzalloc(sizeof(*dmn->send_ring), GFP_KERNEL); + if (!dmn->send_ring) + return -ENOMEM; + + cq_size = QUEUE_SIZE + 1; + dmn->send_ring->cq = dr_create_cq(dmn->mdev, dmn->uar, cq_size); + if (!dmn->send_ring->cq) { + ret = -ENOMEM; + goto free_send_ring; + } + + init_attr.cqn = dmn->send_ring->cq->mcq.cqn; + init_attr.pdn = dmn->pdn; + init_attr.uar = dmn->uar; + init_attr.max_send_wr = QUEUE_SIZE; + + dmn->send_ring->qp = dr_create_rc_qp(dmn->mdev, &init_attr); + if (!dmn->send_ring->qp) { + ret = -ENOMEM; + goto clean_cq; + } + + dmn->send_ring->cq->qp = dmn->send_ring->qp; + + dmn->info.max_send_wr = QUEUE_SIZE; + dmn->info.max_inline_size = min(dmn->send_ring->qp->max_inline_data, + DR_STE_SIZE); + + dmn->send_ring->signal_th = dmn->info.max_send_wr / + SIGNAL_PER_DIV_QUEUE; + + /* Prepare qp to be used */ + ret = dr_prepare_qp_to_rts(dmn); + if (ret) + goto clean_qp; + + dmn->send_ring->max_post_send_size = + mlx5dr_icm_pool_chunk_size_to_byte(DR_CHUNK_SIZE_1K, + DR_ICM_TYPE_STE); + + /* Allocating the max size as a buffer for writing */ + size = dmn->send_ring->signal_th * dmn->send_ring->max_post_send_size; + dmn->send_ring->buf = kzalloc(size, GFP_KERNEL); + if (!dmn->send_ring->buf) { + ret = -ENOMEM; + goto clean_qp; + } + + dmn->send_ring->buf_size = size; + + dmn->send_ring->mr = dr_reg_mr(dmn->mdev, + dmn->pdn, dmn->send_ring->buf, size); + if (!dmn->send_ring->mr) { + ret = -ENOMEM; + goto free_mem; + } + + dmn->send_ring->sync_mr = dr_reg_mr(dmn->mdev, + dmn->pdn, dmn->send_ring->sync_buff, + MIN_READ_SYNC); + if (!dmn->send_ring->sync_mr) { + ret = -ENOMEM; + goto clean_mr; + } + + return 0; + +clean_mr: + dr_dereg_mr(dmn->mdev, dmn->send_ring->mr); +free_mem: + kfree(dmn->send_ring->buf); +clean_qp: + dr_destroy_qp(dmn->mdev, dmn->send_ring->qp); +clean_cq: + dr_destroy_cq(dmn->mdev, dmn->send_ring->cq); +free_send_ring: + kfree(dmn->send_ring); + + return ret; +} + +void mlx5dr_send_ring_free(struct mlx5dr_domain *dmn, + struct mlx5dr_send_ring *send_ring) +{ + dr_destroy_qp(dmn->mdev, send_ring->qp); + dr_destroy_cq(dmn->mdev, send_ring->cq); + dr_dereg_mr(dmn->mdev, send_ring->sync_mr); + dr_dereg_mr(dmn->mdev, send_ring->mr); + kfree(send_ring->buf); + kfree(send_ring); +} + +int mlx5dr_send_ring_force_drain(struct mlx5dr_domain *dmn) +{ + struct mlx5dr_send_ring *send_ring = dmn->send_ring; + struct postsend_info send_info = {}; + u8 data[DR_STE_SIZE]; + int num_of_sends_req; + int ret; + int i; + + /* Sending this amount of requests makes sure we will get drain */ + num_of_sends_req = send_ring->signal_th * TH_NUMS_TO_DRAIN / 2; + + /* Send fake requests forcing the last to be signaled */ + send_info.write.addr = (uintptr_t)data; + send_info.write.length = DR_STE_SIZE; + send_info.write.lkey = 0; + /* Using the sync_mr in order to write/read */ + send_info.remote_addr = (uintptr_t)send_ring->sync_mr->addr; + send_info.rkey = send_ring->sync_mr->mkey.key; + + for (i = 0; i < num_of_sends_req; i++) { + ret = dr_postsend_icm_data(dmn, &send_info); + if (ret) + return ret; + } + + ret = dr_handle_pending_wc(dmn, send_ring); + + return ret; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c new file mode 100644 index 
000000000000..6b0af64536d8 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c @@ -0,0 +1,2308 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2019 Mellanox Technologies. */ + +#include <linux/types.h> +#include "dr_types.h" + +#define DR_STE_CRC_POLY 0xEDB88320L +#define STE_IPV4 0x1 +#define STE_IPV6 0x2 +#define STE_TCP 0x1 +#define STE_UDP 0x2 +#define STE_SPI 0x3 +#define IP_VERSION_IPV4 0x4 +#define IP_VERSION_IPV6 0x6 +#define STE_SVLAN 0x1 +#define STE_CVLAN 0x2 + +#define DR_STE_ENABLE_FLOW_TAG BIT(31) + +/* Set to STE a specific value using DR_STE_SET */ +#define DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, value) do { \ + if ((spec)->s_fname) { \ + MLX5_SET(ste_##lookup_type, tag, t_fname, value); \ + (spec)->s_fname = 0; \ + } \ +} while (0) + +/* Set to STE spec->s_fname to tag->t_fname */ +#define DR_STE_SET_TAG(lookup_type, tag, t_fname, spec, s_fname) \ + DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, spec->s_fname) + +/* Set to STE -1 to bit_mask->bm_fname and set spec->s_fname as used */ +#define DR_STE_SET_MASK(lookup_type, bit_mask, bm_fname, spec, s_fname) \ + DR_STE_SET_VAL(lookup_type, bit_mask, bm_fname, spec, s_fname, -1) + +/* Set to STE spec->s_fname to bit_mask->bm_fname and set spec->s_fname as used */ +#define DR_STE_SET_MASK_V(lookup_type, bit_mask, bm_fname, spec, s_fname) \ + DR_STE_SET_VAL(lookup_type, bit_mask, bm_fname, spec, s_fname, (spec)->s_fname) + +#define DR_STE_SET_TCP_FLAGS(lookup_type, tag, spec) do { \ + MLX5_SET(ste_##lookup_type, tag, tcp_ns, !!((spec)->tcp_flags & (1 << 8))); \ + MLX5_SET(ste_##lookup_type, tag, tcp_cwr, !!((spec)->tcp_flags & (1 << 7))); \ + MLX5_SET(ste_##lookup_type, tag, tcp_ece, !!((spec)->tcp_flags & (1 << 6))); \ + MLX5_SET(ste_##lookup_type, tag, tcp_urg, !!((spec)->tcp_flags & (1 << 5))); \ + MLX5_SET(ste_##lookup_type, tag, tcp_ack, !!((spec)->tcp_flags & (1 << 4))); \ + MLX5_SET(ste_##lookup_type, tag, tcp_psh, !!((spec)->tcp_flags & (1 << 3))); \ + MLX5_SET(ste_##lookup_type, tag, tcp_rst, !!((spec)->tcp_flags & (1 << 2))); \ + MLX5_SET(ste_##lookup_type, tag, tcp_syn, !!((spec)->tcp_flags & (1 << 1))); \ + MLX5_SET(ste_##lookup_type, tag, tcp_fin, !!((spec)->tcp_flags & (1 << 0))); \ +} while (0) + +#define DR_STE_SET_MPLS_MASK(lookup_type, mask, in_out, bit_mask) do { \ + DR_STE_SET_MASK_V(lookup_type, mask, mpls0_label, mask, \ + in_out##_first_mpls_label);\ + DR_STE_SET_MASK_V(lookup_type, mask, mpls0_s_bos, mask, \ + in_out##_first_mpls_s_bos); \ + DR_STE_SET_MASK_V(lookup_type, mask, mpls0_exp, mask, \ + in_out##_first_mpls_exp); \ + DR_STE_SET_MASK_V(lookup_type, mask, mpls0_ttl, mask, \ + in_out##_first_mpls_ttl); \ +} while (0) + +#define DR_STE_SET_MPLS_TAG(lookup_type, mask, in_out, tag) do { \ + DR_STE_SET_TAG(lookup_type, tag, mpls0_label, mask, \ + in_out##_first_mpls_label);\ + DR_STE_SET_TAG(lookup_type, tag, mpls0_s_bos, mask, \ + in_out##_first_mpls_s_bos); \ + DR_STE_SET_TAG(lookup_type, tag, mpls0_exp, mask, \ + in_out##_first_mpls_exp); \ + DR_STE_SET_TAG(lookup_type, tag, mpls0_ttl, mask, \ + in_out##_first_mpls_ttl); \ +} while (0) + +#define DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(_misc) (\ + (_misc)->outer_first_mpls_over_gre_label || \ + (_misc)->outer_first_mpls_over_gre_exp || \ + (_misc)->outer_first_mpls_over_gre_s_bos || \ + (_misc)->outer_first_mpls_over_gre_ttl) +#define DR_STE_IS_OUTER_MPLS_OVER_UDP_SET(_misc) (\ + (_misc)->outer_first_mpls_over_udp_label || \ + (_misc)->outer_first_mpls_over_udp_exp || \ + 
(_misc)->outer_first_mpls_over_udp_s_bos || \
+	(_misc)->outer_first_mpls_over_udp_ttl)
+
+#define DR_STE_CALC_LU_TYPE(lookup_type, rx, inner) \
+	((inner) ? MLX5DR_STE_LU_TYPE_##lookup_type##_I : \
+		   (rx) ? MLX5DR_STE_LU_TYPE_##lookup_type##_D : \
+			  MLX5DR_STE_LU_TYPE_##lookup_type##_O)
+
+enum dr_ste_tunl_action {
+	DR_STE_TUNL_ACTION_NONE		= 0,
+	DR_STE_TUNL_ACTION_ENABLE	= 1,
+	DR_STE_TUNL_ACTION_DECAP	= 2,
+	DR_STE_TUNL_ACTION_L3_DECAP	= 3,
+	DR_STE_TUNL_ACTION_POP_VLAN	= 4,
+};
+
+enum dr_ste_action_type {
+	DR_STE_ACTION_TYPE_PUSH_VLAN	= 1,
+	DR_STE_ACTION_TYPE_ENCAP_L3	= 3,
+	DR_STE_ACTION_TYPE_ENCAP	= 4,
+};
+
+struct dr_hw_ste_format {
+	u8 ctrl[DR_STE_SIZE_CTRL];
+	u8 tag[DR_STE_SIZE_TAG];
+	u8 mask[DR_STE_SIZE_MASK];
+};
+
+u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
+{
+	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+	u8 masked[DR_STE_SIZE_TAG] = {};
+	u32 crc32, index;
+	u16 bit;
+	int i;
+
+	/* Don't calculate CRC if the result is predicted */
+	if (htbl->chunk->num_of_entries == 1 || htbl->byte_mask == 0)
+		return 0;
+
+	/* Mask tag using byte mask, bit per byte */
+	bit = 1 << (DR_STE_SIZE_TAG - 1);
+	for (i = 0; i < DR_STE_SIZE_TAG; i++) {
+		if (htbl->byte_mask & bit)
+			masked[i] = hw_ste->tag[i];
+
+		bit = bit >> 1;
+	}
+
+	crc32 = mlx5dr_crc32_slice8_calc(masked, DR_STE_SIZE_TAG);
+	index = crc32 & (htbl->chunk->num_of_entries - 1);
+
+	return index;
+}
+
+static u16 dr_ste_conv_bit_to_byte_mask(u8 *bit_mask)
+{
+	u16 byte_mask = 0;
+	int i;
+
+	for (i = 0; i < DR_STE_SIZE_MASK; i++) {
+		byte_mask = byte_mask << 1;
+		if (bit_mask[i] == 0xff)
+			byte_mask |= 1;
+	}
+	return byte_mask;
+}
+
+void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask)
+{
+	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+
+	memcpy(hw_ste->mask, bit_mask, DR_STE_SIZE_MASK);
+}
+
+void mlx5dr_ste_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag)
+{
+	MLX5_SET(ste_rx_steering_mult, hw_ste_p, qp_list_pointer,
+		 DR_STE_ENABLE_FLOW_TAG | flow_tag);
+}
+
+void mlx5dr_ste_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
+{
+	/* This can be used for both rx_steering_mult and for sx_transmit */
+	MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_15_0, ctr_id);
+	MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_23_16, ctr_id >> 16);
+}
+
+void mlx5dr_ste_set_go_back_bit(u8 *hw_ste_p)
+{
+	MLX5_SET(ste_sx_transmit, hw_ste_p, go_back, 1);
+}
+
+void mlx5dr_ste_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_hdr,
+				 bool go_back)
+{
+	MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
+		 DR_STE_ACTION_TYPE_PUSH_VLAN);
+	MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, vlan_hdr);
+	/* Due to a HW limitation we need to set this bit, otherwise reformat +
+	 * push vlan will not work.
+	 */
+	if (go_back)
+		mlx5dr_ste_set_go_back_bit(hw_ste_p);
+}
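mlx5dr_ste_calc_hash_index above masks the tag with a per-table byte mask (one bit per tag byte, MSB first) before running CRC32, so entries hash only on the bytes the matcher actually uses. A compact sketch of that bit-per-byte masking (tag width reduced for clarity; DR_STE_SIZE_TAG is 16 in the driver, matching the u16 byte_mask):

#include <stdio.h>

#define TAG_SIZE 8	/* reduced from DR_STE_SIZE_TAG for the example */

int main(void)
{
	unsigned char tag[TAG_SIZE] = "ABCDEFGH", masked[TAG_SIZE] = { 0 };
	unsigned short byte_mask = 0xA0;	/* one bit per byte, MSB = tag[0] */
	unsigned short bit = 1 << (TAG_SIZE - 1);
	int i;

	for (i = 0; i < TAG_SIZE; i++, bit >>= 1)
		if (byte_mask & bit)
			masked[i] = tag[i];
	for (i = 0; i < TAG_SIZE; i++)
		printf("%c", masked[i] ? masked[i] : '.');
	printf("\n");	/* "A.C....." -- only masked bytes feed the CRC */
	return 0;
}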
+
+void mlx5dr_ste_set_tx_encap(void *hw_ste_p, u32 reformat_id, int size, bool encap_l3)
+{
+	MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
+		 encap_l3 ? DR_STE_ACTION_TYPE_ENCAP_L3 : DR_STE_ACTION_TYPE_ENCAP);
+	/* The hardware expects the size here in 2-byte words */
+	MLX5_SET(ste_sx_transmit, hw_ste_p, action_description, size / 2);
+	MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, reformat_id);
+}
+
+void mlx5dr_ste_set_rx_decap(u8 *hw_ste_p)
+{
+	MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
+		 DR_STE_TUNL_ACTION_DECAP);
+}
+
+void mlx5dr_ste_set_rx_pop_vlan(u8 *hw_ste_p)
+{
+	MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
+		 DR_STE_TUNL_ACTION_POP_VLAN);
+}
+
+void mlx5dr_ste_set_rx_decap_l3(u8 *hw_ste_p, bool vlan)
+{
+	MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
+		 DR_STE_TUNL_ACTION_L3_DECAP);
+	MLX5_SET(ste_modify_packet, hw_ste_p, action_description, vlan ? 1 : 0);
+}
+
+void mlx5dr_ste_set_entry_type(u8 *hw_ste_p, u8 entry_type)
+{
+	MLX5_SET(ste_general, hw_ste_p, entry_type, entry_type);
+}
+
+u8 mlx5dr_ste_get_entry_type(u8 *hw_ste_p)
+{
+	return MLX5_GET(ste_general, hw_ste_p, entry_type);
+}
+
+void mlx5dr_ste_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
+				    u32 re_write_index)
+{
+	MLX5_SET(ste_modify_packet, hw_ste_p, number_of_re_write_actions,
+		 num_of_actions);
+	MLX5_SET(ste_modify_packet, hw_ste_p, header_re_write_actions_pointer,
+		 re_write_index);
+}
+
+void mlx5dr_ste_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi)
+{
+	MLX5_SET(ste_general, hw_ste_p, next_table_base_63_48, gvmi);
+}
+
+void mlx5dr_ste_init(u8 *hw_ste_p, u8 lu_type, u8 entry_type,
+		     u16 gvmi)
+{
+	MLX5_SET(ste_general, hw_ste_p, entry_type, entry_type);
+	MLX5_SET(ste_general, hw_ste_p, entry_sub_type, lu_type);
+	MLX5_SET(ste_general, hw_ste_p, next_lu_type, MLX5DR_STE_LU_TYPE_DONT_CARE);
+
+	/* Set GVMI once, this is the same for RX/TX;
+	 * bits 63_48 of next table base / miss address encode the next GVMI
+	 */
+	MLX5_SET(ste_rx_steering_mult, hw_ste_p, gvmi, gvmi);
+	MLX5_SET(ste_rx_steering_mult, hw_ste_p, next_table_base_63_48, gvmi);
+	MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_63_48, gvmi);
+}
+
+static void dr_ste_set_always_hit(struct dr_hw_ste_format *hw_ste)
+{
+	memset(&hw_ste->tag, 0, sizeof(hw_ste->tag));
+	memset(&hw_ste->mask, 0, sizeof(hw_ste->mask));
+}
+
+static void dr_ste_set_always_miss(struct dr_hw_ste_format *hw_ste)
+{
+	hw_ste->tag[0] = 0xdc;
+	hw_ste->mask[0] = 0;
+}
+
+u64 mlx5dr_ste_get_miss_addr(u8 *hw_ste)
+{
+	u64 index =
+		(MLX5_GET(ste_rx_steering_mult, hw_ste, miss_address_31_6) |
+		 MLX5_GET(ste_rx_steering_mult, hw_ste, miss_address_39_32) << 26);
+
+	return index << 6;
+}
+
+void mlx5dr_ste_set_hit_addr(u8 *hw_ste, u64 icm_addr, u32 ht_size)
+{
+	u64 index = (icm_addr >> 5) | ht_size;
+
+	MLX5_SET(ste_general, hw_ste, next_table_base_39_32_size, index >> 27);
+	MLX5_SET(ste_general, hw_ste, next_table_base_31_5_size, index);
+}
+
+u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste)
+{
+	u32 index = ste - ste->htbl->ste_arr;
+
+	return ste->htbl->chunk->icm_addr + DR_STE_SIZE * index;
+}
+
+u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste)
+{
+	u32 index = ste - ste->htbl->ste_arr;
+
+	return ste->htbl->chunk->mr_addr + DR_STE_SIZE * index;
+}
+
+struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste)
+{
+	u32 index = ste - ste->htbl->ste_arr;
+
+	return &ste->htbl->miss_list[index];
+}
+
+static void dr_ste_always_hit_htbl(struct mlx5dr_ste *ste,
+				   struct mlx5dr_ste_htbl *next_htbl)
+{
+	struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
+	u8 *hw_ste = ste->hw_ste;
+
+	MLX5_SET(ste_general, hw_ste, byte_mask, next_htbl->byte_mask);
MLX5_SET(ste_general, hw_ste, next_lu_type, next_htbl->lu_type); + mlx5dr_ste_set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries); + + dr_ste_set_always_hit((struct dr_hw_ste_format *)ste->hw_ste); +} + +bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher, + u8 ste_location) +{ + return ste_location == nic_matcher->num_of_builders; +} + +/* Replace relevant fields, except of: + * htbl - keep the origin htbl + * miss_list + list - already took the src from the list. + * icm_addr/mr_addr - depends on the hosting table. + * + * Before: + * | a | -> | b | -> | c | -> + * + * After: + * | a | -> | c | -> + * While the data that was in b copied to a. + */ +static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src) +{ + memcpy(dst->hw_ste, src->hw_ste, DR_STE_SIZE_REDUCED); + dst->next_htbl = src->next_htbl; + if (dst->next_htbl) + dst->next_htbl->pointing_ste = dst; + + refcount_set(&dst->refcount, refcount_read(&src->refcount)); + + INIT_LIST_HEAD(&dst->rule_list); + list_splice_tail_init(&src->rule_list, &dst->rule_list); +} + +/* Free ste which is the head and the only one in miss_list */ +static void +dr_ste_remove_head_ste(struct mlx5dr_ste *ste, + struct mlx5dr_matcher_rx_tx *nic_matcher, + struct mlx5dr_ste_send_info *ste_info_head, + struct list_head *send_ste_list, + struct mlx5dr_ste_htbl *stats_tbl) +{ + u8 tmp_data_ste[DR_STE_SIZE] = {}; + struct mlx5dr_ste tmp_ste = {}; + u64 miss_addr; + + tmp_ste.hw_ste = tmp_data_ste; + + /* Use temp ste because dr_ste_always_miss_addr + * touches bit_mask area which doesn't exist at ste->hw_ste. + */ + memcpy(tmp_ste.hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED); + miss_addr = nic_matcher->e_anchor->chunk->icm_addr; + mlx5dr_ste_always_miss_addr(&tmp_ste, miss_addr); + memcpy(ste->hw_ste, tmp_ste.hw_ste, DR_STE_SIZE_REDUCED); + + list_del_init(&ste->miss_list_node); + + /* Write full STE size in order to have "always_miss" */ + mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE, + 0, tmp_data_ste, + ste_info_head, + send_ste_list, + true /* Copy data */); + + stats_tbl->ctrl.num_of_valid_entries--; +} + +/* Free ste which is the head but NOT the only one in miss_list: + * |_ste_| --> |_next_ste_| -->|__| -->|__| -->/0 + */ +static void +dr_ste_replace_head_ste(struct mlx5dr_ste *ste, struct mlx5dr_ste *next_ste, + struct mlx5dr_ste_send_info *ste_info_head, + struct list_head *send_ste_list, + struct mlx5dr_ste_htbl *stats_tbl) + +{ + struct mlx5dr_ste_htbl *next_miss_htbl; + + next_miss_htbl = next_ste->htbl; + + /* Remove from the miss_list the next_ste before copy */ + list_del_init(&next_ste->miss_list_node); + + /* All rule-members that use next_ste should know about that */ + mlx5dr_rule_update_rule_member(next_ste, ste); + + /* Move data from next into ste */ + dr_ste_replace(ste, next_ste); + + /* Del the htbl that contains the next_ste. + * The origin htbl stay with the same number of entries. 
+ */ + mlx5dr_htbl_put(next_miss_htbl); + + mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE_REDUCED, + 0, ste->hw_ste, + ste_info_head, + send_ste_list, + true /* Copy data */); + + stats_tbl->ctrl.num_of_collisions--; + stats_tbl->ctrl.num_of_valid_entries--; +} + +/* Free ste that is located in the middle of the miss list: + * |__| -->|_prev_ste_|->|_ste_|-->|_next_ste_| + */ +static void dr_ste_remove_middle_ste(struct mlx5dr_ste *ste, + struct mlx5dr_ste_send_info *ste_info, + struct list_head *send_ste_list, + struct mlx5dr_ste_htbl *stats_tbl) +{ + struct mlx5dr_ste *prev_ste; + u64 miss_addr; + + prev_ste = list_entry(mlx5dr_ste_get_miss_list(ste)->prev, struct mlx5dr_ste, + miss_list_node); + if (!prev_ste) { + WARN_ON(true); + return; + } + + miss_addr = mlx5dr_ste_get_miss_addr(ste->hw_ste); + mlx5dr_ste_set_miss_addr(prev_ste->hw_ste, miss_addr); + + mlx5dr_send_fill_and_append_ste_send_info(prev_ste, DR_STE_SIZE_REDUCED, 0, + prev_ste->hw_ste, ste_info, + send_ste_list, true /* Copy data*/); + + list_del_init(&ste->miss_list_node); + + stats_tbl->ctrl.num_of_valid_entries--; + stats_tbl->ctrl.num_of_collisions--; +} + +void mlx5dr_ste_free(struct mlx5dr_ste *ste, + struct mlx5dr_matcher *matcher, + struct mlx5dr_matcher_rx_tx *nic_matcher) +{ + struct mlx5dr_ste_send_info *cur_ste_info, *tmp_ste_info; + struct mlx5dr_domain *dmn = matcher->tbl->dmn; + struct mlx5dr_ste_send_info ste_info_head; + struct mlx5dr_ste *next_ste, *first_ste; + bool put_on_origin_table = true; + struct mlx5dr_ste_htbl *stats_tbl; + LIST_HEAD(send_ste_list); + + first_ste = list_entry(mlx5dr_ste_get_miss_list(ste)->next, + struct mlx5dr_ste, miss_list_node); + stats_tbl = first_ste->htbl; + + /* Two options: + * 1. ste is head: + * a. head ste is the only ste in the miss list + * b. head ste is not the only ste in the miss-list + * 2. 
ste is not head
+	 */
+	if (first_ste == ste) { /* Ste is the head */
+		struct mlx5dr_ste *last_ste;
+
+		last_ste = list_last_entry(mlx5dr_ste_get_miss_list(ste),
+					   struct mlx5dr_ste, miss_list_node);
+		if (last_ste == first_ste)
+			next_ste = NULL;
+		else
+			next_ste = list_entry(ste->miss_list_node.next,
+					      struct mlx5dr_ste, miss_list_node);
+
+		if (!next_ste) {
+			/* One and only entry in the list */
+			dr_ste_remove_head_ste(ste, nic_matcher,
+					       &ste_info_head,
+					       &send_ste_list,
+					       stats_tbl);
+		} else {
+			/* First but not only entry in the list */
+			dr_ste_replace_head_ste(ste, next_ste, &ste_info_head,
+						&send_ste_list, stats_tbl);
+			put_on_origin_table = false;
+		}
+	} else { /* Ste in the middle of the list */
+		dr_ste_remove_middle_ste(ste, &ste_info_head, &send_ste_list, stats_tbl);
+	}
+
+	/* Update HW */
+	list_for_each_entry_safe(cur_ste_info, tmp_ste_info,
+				 &send_ste_list, send_list) {
+		list_del(&cur_ste_info->send_list);
+		mlx5dr_send_postsend_ste(dmn, cur_ste_info->ste,
+					 cur_ste_info->data, cur_ste_info->size,
+					 cur_ste_info->offset);
+	}
+
+	if (put_on_origin_table)
+		mlx5dr_htbl_put(ste->htbl);
+}
+
+bool mlx5dr_ste_equal_tag(void *src, void *dst)
+{
+	struct dr_hw_ste_format *s_hw_ste = (struct dr_hw_ste_format *)src;
+	struct dr_hw_ste_format *d_hw_ste = (struct dr_hw_ste_format *)dst;
+
+	return !memcmp(s_hw_ste->tag, d_hw_ste->tag, DR_STE_SIZE_TAG);
+}
+
+void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste,
+					  struct mlx5dr_ste_htbl *next_htbl)
+{
+	struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
+
+	mlx5dr_ste_set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
+}
+
+void mlx5dr_ste_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
+{
+	u64 index = miss_addr >> 6;
+
+	/* The miss address for TX and RX STEs is located at the same offsets */
+	MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32, index >> 26);
+	MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6, index);
+}
+
+void mlx5dr_ste_always_miss_addr(struct mlx5dr_ste *ste, u64 miss_addr)
+{
+	u8 *hw_ste = ste->hw_ste;
+
+	MLX5_SET(ste_rx_steering_mult, hw_ste, next_lu_type, MLX5DR_STE_LU_TYPE_DONT_CARE);
+	mlx5dr_ste_set_miss_addr(hw_ste, miss_addr);
+	dr_ste_set_always_miss((struct dr_hw_ste_format *)ste->hw_ste);
+}
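mlx5dr_ste_set_miss_addr above packs a 64-byte-aligned ICM address into two STE fields: it drops the low 6 bits, then splits the result into a 26-bit low part (31_6) and a high part (39_32); mlx5dr_ste_get_miss_addr earlier in the file reverses the split. A quick round-trip check of that encoding:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t miss_addr = 0x2ABCDEF1ULL << 6;	/* 64-byte aligned ICM address */
	uint64_t index = miss_addr >> 6;
	uint32_t lo_31_6 = index & 0x3ffffff;		/* 26 bits: miss_address_31_6 */
	uint32_t hi_39_32 = index >> 26;		/* next bits: miss_address_39_32 */

	/* mlx5dr_ste_get_miss_addr reverses the split */
	uint64_t back = ((uint64_t)lo_31_6 | ((uint64_t)hi_39_32 << 26)) << 6;

	assert(back == miss_addr);
	return 0;
}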
+
+/* The assumption here is that we don't update ste->hw_ste while the ste
+ * is unused, so an unused entry is all zeros; checking next_lu_type
+ * detects this.
+ */
+bool mlx5dr_ste_is_not_valid_entry(u8 *p_hw_ste)
+{
+	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)p_hw_ste;
+
+	if (MLX5_GET(ste_general, hw_ste, next_lu_type) ==
+	    MLX5DR_STE_LU_TYPE_NOP)
+		return true;
+
+	return false;
+}
+
+bool mlx5dr_ste_not_used_ste(struct mlx5dr_ste *ste)
+{
+	return !refcount_read(&ste->refcount);
+}
+
+static u16 get_bits_per_mask(u16 byte_mask)
+{
+	u16 bits = 0;
+
+	while (byte_mask) {
+		byte_mask = byte_mask & (byte_mask - 1);
+		bits++;
+	}
+
+	return bits;
+}
+
+/* Init one ste as a pattern for the ste data array */
+void mlx5dr_ste_set_formatted_ste(u16 gvmi,
+				  struct mlx5dr_domain_rx_tx *nic_dmn,
+				  struct mlx5dr_ste_htbl *htbl,
+				  u8 *formatted_ste,
+				  struct mlx5dr_htbl_connect_info *connect_info)
+{
+	struct mlx5dr_ste ste = {};
+
+	mlx5dr_ste_init(formatted_ste, htbl->lu_type, nic_dmn->ste_type, gvmi);
+	ste.hw_ste = formatted_ste;
+
+	if (connect_info->type == CONNECT_HIT)
+		dr_ste_always_hit_htbl(&ste, connect_info->hit_next_htbl);
+	else
+		mlx5dr_ste_always_miss_addr(&ste, connect_info->miss_icm_addr);
+}
+
+int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
+				      struct mlx5dr_domain_rx_tx *nic_dmn,
+				      struct mlx5dr_ste_htbl *htbl,
+				      struct mlx5dr_htbl_connect_info *connect_info,
+				      bool update_hw_ste)
+{
+	u8 formatted_ste[DR_STE_SIZE] = {};
+
+	mlx5dr_ste_set_formatted_ste(dmn->info.caps.gvmi,
+				     nic_dmn,
+				     htbl,
+				     formatted_ste,
+				     connect_info);
+
+	return mlx5dr_send_postsend_formatted_htbl(dmn, htbl, formatted_ste, update_hw_ste);
+}
+
+int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
+				struct mlx5dr_matcher_rx_tx *nic_matcher,
+				struct mlx5dr_ste *ste,
+				u8 *cur_hw_ste,
+				enum mlx5dr_icm_chunk_size log_table_size)
+{
+	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)cur_hw_ste;
+	struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
+	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+	struct mlx5dr_htbl_connect_info info;
+	struct mlx5dr_ste_htbl *next_htbl;
+
+	if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste->ste_chain_location)) {
+		u32 bits_in_mask;
+		u8 next_lu_type;
+		u16 byte_mask;
+
+		next_lu_type = MLX5_GET(ste_general, hw_ste, next_lu_type);
+		byte_mask = MLX5_GET(ste_general, hw_ste, byte_mask);
+
+		/* Don't allocate a table larger than required: the table size
+		 * is determined by the byte_mask, so there is no need
+		 * to allocate more than that.
+ */ + bits_in_mask = get_bits_per_mask(byte_mask) * BITS_PER_BYTE; + log_table_size = min(log_table_size, bits_in_mask); + + next_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool, + log_table_size, + next_lu_type, + byte_mask); + if (!next_htbl) { + mlx5dr_dbg(dmn, "Failed allocating table\n"); + return -ENOMEM; + } + + /* Write new table to HW */ + info.type = CONNECT_MISS; + info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr; + if (mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, next_htbl, + &info, false)) { + mlx5dr_info(dmn, "Failed writing table to HW\n"); + goto free_table; + } + + mlx5dr_ste_set_hit_addr_by_next_htbl(cur_hw_ste, next_htbl); + ste->next_htbl = next_htbl; + next_htbl->pointing_ste = ste; + } + + return 0; + +free_table: + mlx5dr_ste_htbl_free(next_htbl); + return -ENOENT; +} + +static void dr_ste_set_ctrl(struct mlx5dr_ste_htbl *htbl) +{ + struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl; + int num_of_entries; + + htbl->ctrl.may_grow = true; + + if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1) + htbl->ctrl.may_grow = false; + + /* Threshold is 50%, one is added to table of size 1 */ + num_of_entries = mlx5dr_icm_pool_chunk_size_to_entries(htbl->chunk_size); + ctrl->increase_threshold = (num_of_entries + 1) / 2; +} + +struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool, + enum mlx5dr_icm_chunk_size chunk_size, + u8 lu_type, u16 byte_mask) +{ + struct mlx5dr_icm_chunk *chunk; + struct mlx5dr_ste_htbl *htbl; + int i; + + htbl = kzalloc(sizeof(*htbl), GFP_KERNEL); + if (!htbl) + return NULL; + + chunk = mlx5dr_icm_alloc_chunk(pool, chunk_size); + if (!chunk) + goto out_free_htbl; + + htbl->chunk = chunk; + htbl->lu_type = lu_type; + htbl->byte_mask = byte_mask; + htbl->ste_arr = chunk->ste_arr; + htbl->hw_ste_arr = chunk->hw_ste_arr; + htbl->miss_list = chunk->miss_list; + refcount_set(&htbl->refcount, 0); + + for (i = 0; i < chunk->num_of_entries; i++) { + struct mlx5dr_ste *ste = &htbl->ste_arr[i]; + + ste->hw_ste = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED; + ste->htbl = htbl; + refcount_set(&ste->refcount, 0); + INIT_LIST_HEAD(&ste->miss_list_node); + INIT_LIST_HEAD(&htbl->miss_list[i]); + INIT_LIST_HEAD(&ste->rule_list); + } + + htbl->chunk_size = chunk_size; + dr_ste_set_ctrl(htbl); + return htbl; + +out_free_htbl: + kfree(htbl); + return NULL; +} + +int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl) +{ + if (refcount_read(&htbl->refcount)) + return -EBUSY; + + mlx5dr_icm_free_chunk(htbl->chunk); + kfree(htbl); + return 0; +} + +int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn, + u8 match_criteria, + struct mlx5dr_match_param *mask, + struct mlx5dr_match_param *value) +{ + if (!value && (match_criteria & DR_MATCHER_CRITERIA_MISC)) { + if (mask->misc.source_port && mask->misc.source_port != 0xffff) { + mlx5dr_dbg(dmn, "Partial mask source_port is not supported\n"); + return -EINVAL; + } + } + + return 0; +} + +int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher, + struct mlx5dr_matcher_rx_tx *nic_matcher, + struct mlx5dr_match_param *value, + u8 *ste_arr) +{ + struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn; + struct mlx5dr_domain *dmn = matcher->tbl->dmn; + struct mlx5dr_ste_build *sb; + int ret, i; + + ret = mlx5dr_ste_build_pre_check(dmn, matcher->match_criteria, + &matcher->mask, value); + if (ret) + return ret; + + sb = nic_matcher->ste_builder; + for (i = 0; i < nic_matcher->num_of_builders; i++) { + mlx5dr_ste_init(ste_arr, + sb->lu_type, + nic_dmn->ste_type, + dmn->info.caps.gvmi); + + 
+		mlx5dr_ste_set_bit_mask(ste_arr, sb->bit_mask);
+
+		ret = sb->ste_build_tag_func(value, sb, ste_arr);
+		if (ret)
+			return ret;
+
+		/* Connect the STEs */
+		if (i < (nic_matcher->num_of_builders - 1)) {
+			/* Need the next builder for these fields,
+			 * not relevant for the last ste in the chain.
+			 */
+			sb++;
+			MLX5_SET(ste_general, ste_arr, next_lu_type, sb->lu_type);
+			MLX5_SET(ste_general, ste_arr, byte_mask, sb->byte_mask);
+		}
+		ste_arr += DR_STE_SIZE;
+	}
+	return 0;
+}
+
+static int dr_ste_build_eth_l2_src_des_bit_mask(struct mlx5dr_match_param *value,
+						bool inner, u8 *bit_mask)
+{
+	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+	DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
+	DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, dmac_15_0, mask, dmac_15_0);
+
+	if (mask->smac_47_16 || mask->smac_15_0) {
+		MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_47_32,
+			 mask->smac_47_16 >> 16);
+		MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_31_0,
+			 mask->smac_47_16 << 16 | mask->smac_15_0);
+		mask->smac_47_16 = 0;
+		mask->smac_15_0 = 0;
+	}
+
+	DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_vlan_id, mask, first_vid);
+	DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_cfi, mask, first_cfi);
+	DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_priority, mask, first_prio);
+	DR_STE_SET_MASK(eth_l2_src_dst, bit_mask, l3_type, mask, ip_version);
+
+	if (mask->cvlan_tag) {
+		MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
+		mask->cvlan_tag = 0;
+	} else if (mask->svlan_tag) {
+		MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
+		mask->svlan_tag = 0;
+	}
+
+	if (mask->cvlan_tag || mask->svlan_tag) {
+		pr_info("Invalid c/svlan mask configuration\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec)
+{
+	spec->gre_c_present = MLX5_GET(fte_match_set_misc, mask, gre_c_present);
+	spec->gre_k_present = MLX5_GET(fte_match_set_misc, mask, gre_k_present);
+	spec->gre_s_present = MLX5_GET(fte_match_set_misc, mask, gre_s_present);
+	spec->source_vhca_port = MLX5_GET(fte_match_set_misc, mask, source_vhca_port);
+	spec->source_sqn = MLX5_GET(fte_match_set_misc, mask, source_sqn);
+
+	spec->source_port = MLX5_GET(fte_match_set_misc, mask, source_port);
+
+	spec->outer_second_prio = MLX5_GET(fte_match_set_misc, mask, outer_second_prio);
+	spec->outer_second_cfi = MLX5_GET(fte_match_set_misc, mask, outer_second_cfi);
+	spec->outer_second_vid = MLX5_GET(fte_match_set_misc, mask, outer_second_vid);
+	spec->inner_second_prio = MLX5_GET(fte_match_set_misc, mask, inner_second_prio);
+	spec->inner_second_cfi = MLX5_GET(fte_match_set_misc, mask, inner_second_cfi);
+	spec->inner_second_vid = MLX5_GET(fte_match_set_misc, mask, inner_second_vid);
+
+	spec->outer_second_cvlan_tag =
+		MLX5_GET(fte_match_set_misc, mask, outer_second_cvlan_tag);
+	spec->inner_second_cvlan_tag =
+		MLX5_GET(fte_match_set_misc, mask, inner_second_cvlan_tag);
+	spec->outer_second_svlan_tag =
+		MLX5_GET(fte_match_set_misc, mask, outer_second_svlan_tag);
+	spec->inner_second_svlan_tag =
+		MLX5_GET(fte_match_set_misc, mask, inner_second_svlan_tag);
+
+	spec->gre_protocol = MLX5_GET(fte_match_set_misc, mask, gre_protocol);
+
+	spec->gre_key_h = MLX5_GET(fte_match_set_misc, mask, gre_key.nvgre.hi);
+	spec->gre_key_l = MLX5_GET(fte_match_set_misc, mask, gre_key.nvgre.lo);
+
+	spec->vxlan_vni = MLX5_GET(fte_match_set_misc, mask, vxlan_vni);
+
+	spec->geneve_vni = MLX5_GET(fte_match_set_misc, mask, geneve_vni);
+	spec->geneve_oam = MLX5_GET(fte_match_set_misc, mask, geneve_oam);
+
+	spec->outer_ipv6_flow_label =
+		MLX5_GET(fte_match_set_misc, mask, outer_ipv6_flow_label);
+
+	spec->inner_ipv6_flow_label =
+		MLX5_GET(fte_match_set_misc, mask, inner_ipv6_flow_label);
+
+	spec->geneve_opt_len = MLX5_GET(fte_match_set_misc, mask, geneve_opt_len);
+	spec->geneve_protocol_type =
+		MLX5_GET(fte_match_set_misc, mask, geneve_protocol_type);
+
+	spec->bth_dst_qp = MLX5_GET(fte_match_set_misc, mask, bth_dst_qp);
+}
+
+static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec)
+{
+	u32 raw_ip[4];
+
+	spec->smac_47_16 = MLX5_GET(fte_match_set_lyr_2_4, mask, smac_47_16);
+
+	spec->smac_15_0 = MLX5_GET(fte_match_set_lyr_2_4, mask, smac_15_0);
+	spec->ethertype = MLX5_GET(fte_match_set_lyr_2_4, mask, ethertype);
+
+	spec->dmac_47_16 = MLX5_GET(fte_match_set_lyr_2_4, mask, dmac_47_16);
+
+	spec->dmac_15_0 = MLX5_GET(fte_match_set_lyr_2_4, mask, dmac_15_0);
+	spec->first_prio = MLX5_GET(fte_match_set_lyr_2_4, mask, first_prio);
+	spec->first_cfi = MLX5_GET(fte_match_set_lyr_2_4, mask, first_cfi);
+	spec->first_vid = MLX5_GET(fte_match_set_lyr_2_4, mask, first_vid);
+
+	spec->ip_protocol = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_protocol);
+	spec->ip_dscp = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_dscp);
+	spec->ip_ecn = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_ecn);
+	spec->cvlan_tag = MLX5_GET(fte_match_set_lyr_2_4, mask, cvlan_tag);
+	spec->svlan_tag = MLX5_GET(fte_match_set_lyr_2_4, mask, svlan_tag);
+	spec->frag = MLX5_GET(fte_match_set_lyr_2_4, mask, frag);
+	spec->ip_version = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_version);
+	spec->tcp_flags = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_flags);
+	spec->tcp_sport = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_sport);
+	spec->tcp_dport = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_dport);
+
+	spec->ttl_hoplimit = MLX5_GET(fte_match_set_lyr_2_4, mask, ttl_hoplimit);
+
+	spec->udp_sport = MLX5_GET(fte_match_set_lyr_2_4, mask, udp_sport);
+	spec->udp_dport = MLX5_GET(fte_match_set_lyr_2_4, mask, udp_dport);
+
+	memcpy(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
+				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
+	       sizeof(raw_ip));
+
+	spec->src_ip_127_96 = be32_to_cpu(raw_ip[0]);
+	spec->src_ip_95_64 = be32_to_cpu(raw_ip[1]);
+	spec->src_ip_63_32 = be32_to_cpu(raw_ip[2]);
+	spec->src_ip_31_0 = be32_to_cpu(raw_ip[3]);
+
+	memcpy(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
+				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+	       sizeof(raw_ip));
+
+	spec->dst_ip_127_96 = be32_to_cpu(raw_ip[0]);
+	spec->dst_ip_95_64 = be32_to_cpu(raw_ip[1]);
+	spec->dst_ip_63_32 = be32_to_cpu(raw_ip[2]);
+	spec->dst_ip_31_0 = be32_to_cpu(raw_ip[3]);
+}
+
+static void dr_ste_copy_mask_misc2(char *mask, struct mlx5dr_match_misc2 *spec)
+{
+	spec->outer_first_mpls_label =
+		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_label);
+	spec->outer_first_mpls_exp =
+		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_exp);
+	spec->outer_first_mpls_s_bos =
+		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_s_bos);
+	spec->outer_first_mpls_ttl =
+		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_ttl);
+	spec->inner_first_mpls_label =
+		MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_label);
+	spec->inner_first_mpls_exp =
+		MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_exp);
+	spec->inner_first_mpls_s_bos =
+		MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_s_bos);
+	spec->inner_first_mpls_ttl =
+		MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_ttl);
+	spec->outer_first_mpls_over_gre_label =
+		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_label);
+	spec->outer_first_mpls_over_gre_exp =
+		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_exp);
+	spec->outer_first_mpls_over_gre_s_bos =
+		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_s_bos);
+	spec->outer_first_mpls_over_gre_ttl =
+		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_ttl);
+	spec->outer_first_mpls_over_udp_label =
+		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_label);
+	spec->outer_first_mpls_over_udp_exp =
+		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_exp);
+	spec->outer_first_mpls_over_udp_s_bos =
+		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_s_bos);
+	spec->outer_first_mpls_over_udp_ttl =
+		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_ttl);
+	spec->metadata_reg_c_7 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_7);
+	spec->metadata_reg_c_6 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_6);
+	spec->metadata_reg_c_5 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_5);
+	spec->metadata_reg_c_4 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_4);
+	spec->metadata_reg_c_3 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_3);
+	spec->metadata_reg_c_2 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_2);
+	spec->metadata_reg_c_1 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_1);
+	spec->metadata_reg_c_0 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_0);
+	spec->metadata_reg_a = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_a);
+	spec->metadata_reg_b = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_b);
+}
+
+static void dr_ste_copy_mask_misc3(char *mask, struct mlx5dr_match_misc3 *spec)
+{
+	spec->inner_tcp_seq_num = MLX5_GET(fte_match_set_misc3, mask, inner_tcp_seq_num);
+	spec->outer_tcp_seq_num = MLX5_GET(fte_match_set_misc3, mask, outer_tcp_seq_num);
+	spec->inner_tcp_ack_num = MLX5_GET(fte_match_set_misc3, mask, inner_tcp_ack_num);
+	spec->outer_tcp_ack_num = MLX5_GET(fte_match_set_misc3, mask, outer_tcp_ack_num);
+	spec->outer_vxlan_gpe_vni =
+		MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_vni);
+	spec->outer_vxlan_gpe_next_protocol =
+		MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_next_protocol);
+	spec->outer_vxlan_gpe_flags =
+		MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_flags);
+	spec->icmpv4_header_data = MLX5_GET(fte_match_set_misc3, mask, icmp_header_data);
+	spec->icmpv6_header_data =
+		MLX5_GET(fte_match_set_misc3, mask, icmpv6_header_data);
+	spec->icmpv4_type = MLX5_GET(fte_match_set_misc3, mask, icmp_type);
+	spec->icmpv4_code = MLX5_GET(fte_match_set_misc3, mask, icmp_code);
+	spec->icmpv6_type = MLX5_GET(fte_match_set_misc3, mask, icmpv6_type);
+	spec->icmpv6_code = MLX5_GET(fte_match_set_misc3, mask, icmpv6_code);
+}
+
+void mlx5dr_ste_copy_param(u8 match_criteria,
+			   struct mlx5dr_match_param *set_param,
+			   struct mlx5dr_match_parameters *mask)
+{
+	u8 tail_param[MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)] = {};
+	u8 *data = (u8 *)mask->match_buf;
+	size_t param_location;
+	void *buff;
+
+	if (match_criteria & DR_MATCHER_CRITERIA_OUTER) {
+		if (mask->match_sz < sizeof(struct mlx5dr_match_spec)) {
+			memcpy(tail_param, data, mask->match_sz);
+			buff = tail_param;
+		} else {
+			buff = mask->match_buf;
+		}
+		dr_ste_copy_mask_spec(buff, &set_param->outer);
+	}
+	param_location = sizeof(struct mlx5dr_match_spec);
+
+	if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
+		if (mask->match_sz < param_location +
+		    sizeof(struct mlx5dr_match_misc)) {
+			memcpy(tail_param, data + param_location,
+			       mask->match_sz - param_location);
+			buff = tail_param;
+		} else {
+			buff = data + param_location;
+		}
+		dr_ste_copy_mask_misc(buff, &set_param->misc);
+	}
+	param_location += sizeof(struct mlx5dr_match_misc);
+
+	if (match_criteria & DR_MATCHER_CRITERIA_INNER) {
+		if (mask->match_sz < param_location +
+		    sizeof(struct mlx5dr_match_spec)) {
+			memcpy(tail_param, data + param_location,
+			       mask->match_sz - param_location);
+			buff = tail_param;
+		} else {
+			buff = data + param_location;
+		}
+		dr_ste_copy_mask_spec(buff, &set_param->inner);
+	}
+	param_location += sizeof(struct mlx5dr_match_spec);
+
+	if (match_criteria & DR_MATCHER_CRITERIA_MISC2) {
+		if (mask->match_sz < param_location +
+		    sizeof(struct mlx5dr_match_misc2)) {
+			memcpy(tail_param, data + param_location,
+			       mask->match_sz - param_location);
+			buff = tail_param;
+		} else {
+			buff = data + param_location;
+		}
+		dr_ste_copy_mask_misc2(buff, &set_param->misc2);
+	}
+
+	param_location += sizeof(struct mlx5dr_match_misc2);
+
+	if (match_criteria & DR_MATCHER_CRITERIA_MISC3) {
+		if (mask->match_sz < param_location +
+		    sizeof(struct mlx5dr_match_misc3)) {
+			memcpy(tail_param, data + param_location,
+			       mask->match_sz - param_location);
+			buff = tail_param;
+		} else {
+			buff = data + param_location;
+		}
+		dr_ste_copy_mask_misc3(buff, &set_param->misc3);
+	}
+}
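For illustration only (not part of the patch): mlx5dr_ste_copy_param() above treats the caller's flat match buffer as consecutive criteria sections at fixed offsets, accumulated in declaration order, with tail_param absorbing a short final section. A minimal stand-alone sketch of that offset walk, using stand-in sizes rather than the real mlx5 layouts:

/* Stand-in structs; the real ones are mlx5dr_match_spec/misc/misc2/misc3. */
#include <stddef.h>
#include <stdio.h>

struct spec  { unsigned char b[64]; };
struct misc  { unsigned char b[64]; };
struct misc2 { unsigned char b[64]; };
struct misc3 { unsigned char b[64]; };

int main(void)
{
	size_t off = 0;

	printf("outer at %zu\n", off); off += sizeof(struct spec);
	printf("misc  at %zu\n", off); off += sizeof(struct misc);
	printf("inner at %zu\n", off); off += sizeof(struct spec);
	printf("misc2 at %zu\n", off); off += sizeof(struct misc2);
	printf("misc3 at %zu\n", off); /* last section, no further offset */
	return 0;
}

If the caller's match_sz ends inside a section, the kernel code copies the partial tail into a zeroed scratch buffer first, so every copy helper always reads a full-sized, zero-padded section.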
+
+static int dr_ste_build_eth_l2_src_des_tag(struct mlx5dr_match_param *value,
+					   struct mlx5dr_ste_build *sb,
+					   u8 *hw_ste_p)
+{
+	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+	u8 *tag = hw_ste->tag;
+
+	DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_47_16, spec, dmac_47_16);
+	DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_15_0, spec, dmac_15_0);
+
+	if (spec->smac_47_16 || spec->smac_15_0) {
+		MLX5_SET(ste_eth_l2_src_dst, tag, smac_47_32,
+			 spec->smac_47_16 >> 16);
+		MLX5_SET(ste_eth_l2_src_dst, tag, smac_31_0,
+			 spec->smac_47_16 << 16 | spec->smac_15_0);
+		spec->smac_47_16 = 0;
+		spec->smac_15_0 = 0;
+	}
+
+	if (spec->ip_version) {
+		if (spec->ip_version == IP_VERSION_IPV4) {
+			MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV4);
+			spec->ip_version = 0;
+		} else if (spec->ip_version == IP_VERSION_IPV6) {
+			MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV6);
+			spec->ip_version = 0;
+		} else {
+			pr_info("Unsupported ip_version value\n");
+			return -EINVAL;
+		}
+	}
+
+	DR_STE_SET_TAG(eth_l2_src_dst, tag, first_vlan_id, spec, first_vid);
+	DR_STE_SET_TAG(eth_l2_src_dst, tag, first_cfi, spec, first_cfi);
+	DR_STE_SET_TAG(eth_l2_src_dst, tag, first_priority, spec, first_prio);
+
+	if (spec->cvlan_tag) {
+		MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_CVLAN);
+		spec->cvlan_tag = 0;
+	} else if (spec->svlan_tag) {
+		MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_SVLAN);
+		spec->svlan_tag = 0;
+	}
+	return 0;
+}
+
+int mlx5dr_ste_build_eth_l2_src_des(struct mlx5dr_ste_build *sb,
+				    struct mlx5dr_match_param *mask,
+				    bool inner, bool rx)
+{
+	int ret;
+
+	ret = dr_ste_build_eth_l2_src_des_bit_mask(mask, inner, sb->bit_mask);
+	if (ret)
+		return ret;
+
+	sb->rx = rx;
+	sb->inner = inner;
+	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC_DST, rx, inner);
+	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+	sb->ste_build_tag_func = &dr_ste_build_eth_l2_src_des_tag;
+
+	return 0;
+}
+
+static void dr_ste_build_eth_l3_ipv6_dst_bit_mask(struct mlx5dr_match_param *value,
+						   bool inner, u8 *bit_mask)
+{
+	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+	DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_127_96, mask, dst_ip_127_96);
+	DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_95_64, mask, dst_ip_95_64);
+	DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_63_32, mask, dst_ip_63_32);
+	DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_31_0, mask, dst_ip_31_0);
+}
+
+static int dr_ste_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
+					    struct mlx5dr_ste_build *sb,
+					    u8 *hw_ste_p)
+{
+	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+	u8 *tag = hw_ste->tag;
+
+	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96);
+	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64);
+	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_63_32, spec, dst_ip_63_32);
+	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_31_0, spec, dst_ip_31_0);
+
+	return 0;
+}
+
+void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_build *sb,
+				      struct mlx5dr_match_param *mask,
+				      bool inner, bool rx)
+{
+	dr_ste_build_eth_l3_ipv6_dst_bit_mask(mask, inner, sb->bit_mask);
+
+	sb->rx = rx;
+	sb->inner = inner;
+	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_DST, rx, inner);
+	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+	sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv6_dst_tag;
+}
+
+static void dr_ste_build_eth_l3_ipv6_src_bit_mask(struct mlx5dr_match_param *value,
+						   bool inner, u8 *bit_mask)
+{
+	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+	DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_127_96, mask, src_ip_127_96);
+	DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_95_64, mask, src_ip_95_64);
+	DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_63_32, mask, src_ip_63_32);
+	DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_31_0, mask, src_ip_31_0);
+}
+
+static int dr_ste_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
+					    struct mlx5dr_ste_build *sb,
+					    u8 *hw_ste_p)
+{
+	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+	u8 *tag = hw_ste->tag;
+
+	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96);
+	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64);
+	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_63_32, spec, src_ip_63_32);
+	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_31_0, spec, src_ip_31_0);
+
+	return 0;
+}
+
+void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_build *sb,
+				      struct mlx5dr_match_param *mask,
+				      bool inner, bool rx)
+{
+	dr_ste_build_eth_l3_ipv6_src_bit_mask(mask, inner, sb->bit_mask);
+
+	sb->rx = rx;
+	sb->inner = inner;
+	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_SRC, rx, inner);
+	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+	sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv6_src_tag;
+}
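For illustration only (not part of the patch): every mlx5dr_ste_build_* constructor above follows the same contract -- encode the mask once, record a lookup type and byte mask, and store a tag callback that dr_ste_build_ste_arr() invokes per rule. A minimal stand-alone sketch of that callback-array pattern, with simplified stand-in types and names:

#include <stdio.h>

struct build {
	int lu_type;				/* stands in for sb->lu_type */
	int (*tag)(int value, struct build *b);	/* stands in for ste_build_tag_func */
};

static int tag_a(int v, struct build *b) { printf("tag_a lu=%d v=%d\n", b->lu_type, v); return 0; }
static int tag_b(int v, struct build *b) { printf("tag_b lu=%d v=%d\n", b->lu_type, v); return 0; }

int main(void)
{
	struct build arr[] = { { 1, tag_a }, { 2, tag_b } };
	int i;

	/* mirrors the loop that calls sb->ste_build_tag_func(value, sb, ste_arr) */
	for (i = 0; i < 2; i++)
		arr[i].tag(42, &arr[i]);
	return 0;
}

The matcher fills such an array once per mask; each rule insertion then replays only the cheap tag callbacks against the concrete match values.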
+
+static void dr_ste_build_eth_l3_ipv4_5_tuple_bit_mask(struct mlx5dr_match_param *value,
+						       bool inner,
+						       u8 *bit_mask)
+{
+	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+	DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
+			  destination_address, mask, dst_ip_31_0);
+	DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
+			  source_address, mask, src_ip_31_0);
+	DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
+			  destination_port, mask, tcp_dport);
+	DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
+			  destination_port, mask, udp_dport);
+	DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
+			  source_port, mask, tcp_sport);
+	DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
+			  source_port, mask, udp_sport);
+	DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
+			  protocol, mask, ip_protocol);
+	DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
+			  fragmented, mask, frag);
+	DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
+			  dscp, mask, ip_dscp);
+	DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
+			  ecn, mask, ip_ecn);
+
+	if (mask->tcp_flags) {
+		DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple, bit_mask, mask);
+		mask->tcp_flags = 0;
+	}
+}
+
+static int dr_ste_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value,
+						struct mlx5dr_ste_build *sb,
+						u8 *hw_ste_p)
+{
+	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+	u8 *tag = hw_ste->tag;
+
+	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_address, spec, dst_ip_31_0);
+	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_address, spec, src_ip_31_0);
+	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, tcp_dport);
+	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, udp_dport);
+	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, tcp_sport);
+	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, udp_sport);
+	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, protocol, spec, ip_protocol);
+	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, fragmented, spec, frag);
+	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, dscp, spec, ip_dscp);
+	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, ecn, spec, ip_ecn);
+
+	if (spec->tcp_flags) {
+		DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple, tag, spec);
+		spec->tcp_flags = 0;
+	}
+
+	return 0;
+}
+
+void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_build *sb,
+					  struct mlx5dr_match_param *mask,
+					  bool inner, bool rx)
+{
+	dr_ste_build_eth_l3_ipv4_5_tuple_bit_mask(mask, inner, sb->bit_mask);
+
+	sb->rx = rx;
+	sb->inner = inner;
+	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_5_TUPLE, rx, inner);
+	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+	sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv4_5_tuple_tag;
+}
+
+static void
+dr_ste_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value,
+					bool inner, u8 *bit_mask)
+{
+	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+	struct mlx5dr_match_misc *misc_mask = &value->misc;
+
+	DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_vlan_id, mask, first_vid);
+	DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_cfi, mask, first_cfi);
+	DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_priority, mask, first_prio);
+	DR_STE_SET_MASK_V(eth_l2_src, bit_mask, ip_fragmented, mask, frag);
+	DR_STE_SET_MASK_V(eth_l2_src, bit_mask, l3_ethertype, mask, ethertype);
+	DR_STE_SET_MASK(eth_l2_src, bit_mask, l3_type, mask, ip_version);
+
+	if (mask->svlan_tag || mask->cvlan_tag) {
+		MLX5_SET(ste_eth_l2_src, bit_mask, first_vlan_qualifier, -1);
+		mask->cvlan_tag = 0;
+		mask->svlan_tag = 0;
+	}
+
+	if (inner) {
+		if (misc_mask->inner_second_cvlan_tag ||
+		    misc_mask->inner_second_svlan_tag) {
+			MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
+			misc_mask->inner_second_cvlan_tag = 0;
+			misc_mask->inner_second_svlan_tag = 0;
+		}
+
+		DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
+				  second_vlan_id, misc_mask, inner_second_vid);
+		DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
+				  second_cfi, misc_mask, inner_second_cfi);
+		DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
+				  second_priority, misc_mask, inner_second_prio);
+	} else {
+		if (misc_mask->outer_second_cvlan_tag ||
+		    misc_mask->outer_second_svlan_tag) {
+			MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
+			misc_mask->outer_second_cvlan_tag = 0;
+			misc_mask->outer_second_svlan_tag = 0;
+		}
+
+		DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
+				  second_vlan_id, misc_mask, outer_second_vid);
+		DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
+				  second_cfi, misc_mask, outer_second_cfi);
+		DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
+				  second_priority, misc_mask, outer_second_prio);
+	}
+}
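For illustration only (not part of the patch): the builders above follow a "consume and clear" convention -- each mask or spec field is zeroed once it has been encoded into the STE, so a field left nonzero afterwards was not consumed by any builder and can be rejected. A minimal stand-alone sketch of the idea, with hypothetical field names:

#include <stdio.h>

struct match { unsigned int cvlan_tag, svlan_tag; };

static void consume(struct match *m)
{
	if (m->cvlan_tag) {
		/* ...would encode the qualifier into the hardware layout... */
		m->cvlan_tag = 0;	/* mark as consumed */
	}
}

int main(void)
{
	struct match m = { .cvlan_tag = 1, .svlan_tag = 1 };

	consume(&m);
	if (m.cvlan_tag || m.svlan_tag)
		printf("unconsumed field left in mask\n");	/* svlan_tag here */
	return 0;
}

This mirrors the cvlan/svlan check in dr_ste_build_eth_l2_src_des_bit_mask() above: setting both tags is the one combination no builder can consume, so it falls out as -EINVAL.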
+
+static int dr_ste_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value,
+					      bool inner, u8 *hw_ste_p)
+{
+	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+	struct mlx5dr_match_spec *spec = inner ? &value->inner : &value->outer;
+	struct mlx5dr_match_misc *misc_spec = &value->misc;
+	u8 *tag = hw_ste->tag;
+
+	DR_STE_SET_TAG(eth_l2_src, tag, first_vlan_id, spec, first_vid);
+	DR_STE_SET_TAG(eth_l2_src, tag, first_cfi, spec, first_cfi);
+	DR_STE_SET_TAG(eth_l2_src, tag, first_priority, spec, first_prio);
+	DR_STE_SET_TAG(eth_l2_src, tag, ip_fragmented, spec, frag);
+	DR_STE_SET_TAG(eth_l2_src, tag, l3_ethertype, spec, ethertype);
+
+	if (spec->ip_version) {
+		if (spec->ip_version == IP_VERSION_IPV4) {
+			MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV4);
+			spec->ip_version = 0;
+		} else if (spec->ip_version == IP_VERSION_IPV6) {
+			MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV6);
+			spec->ip_version = 0;
+		} else {
+			pr_info("Unsupported ip_version value\n");
+			return -EINVAL;
+		}
+	}
+
+	if (spec->cvlan_tag) {
+		MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_CVLAN);
+		spec->cvlan_tag = 0;
+	} else if (spec->svlan_tag) {
+		MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_SVLAN);
+		spec->svlan_tag = 0;
+	}
+
+	if (inner) {
+		if (misc_spec->inner_second_cvlan_tag) {
+			MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
+			misc_spec->inner_second_cvlan_tag = 0;
+		} else if (misc_spec->inner_second_svlan_tag) {
+			MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
+			misc_spec->inner_second_svlan_tag = 0;
+		}
+
+		DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, inner_second_vid);
+		DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, inner_second_cfi);
+		DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, inner_second_prio);
+	} else {
+		if (misc_spec->outer_second_cvlan_tag) {
+			MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
+			misc_spec->outer_second_cvlan_tag = 0;
+		} else if (misc_spec->outer_second_svlan_tag) {
+			MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
+			misc_spec->outer_second_svlan_tag = 0;
+		}
+		DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, outer_second_vid);
+		DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, outer_second_cfi);
+		DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, outer_second_prio);
+	}
+
+	return 0;
+}
+
+static void dr_ste_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value,
+					     bool inner, u8 *bit_mask)
+{
+	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+	DR_STE_SET_MASK_V(eth_l2_src, bit_mask, smac_47_16, mask, smac_47_16);
+	DR_STE_SET_MASK_V(eth_l2_src, bit_mask, smac_15_0, mask, smac_15_0);
+
+	dr_ste_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
+}
+
+static int dr_ste_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
+				       struct mlx5dr_ste_build *sb,
+				       u8 *hw_ste_p)
+{
+	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+	u8 *tag = hw_ste->tag;
+
+	DR_STE_SET_TAG(eth_l2_src, tag, smac_47_16, spec, smac_47_16);
+	DR_STE_SET_TAG(eth_l2_src, tag, smac_15_0, spec, smac_15_0);
+
+	return dr_ste_build_eth_l2_src_or_dst_tag(value, sb->inner, hw_ste_p);
+}
+
+void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_build *sb,
+				 struct mlx5dr_match_param *mask,
+				 bool inner, bool rx)
+{
+	dr_ste_build_eth_l2_src_bit_mask(mask, inner, sb->bit_mask);
+	sb->rx = rx;
+	sb->inner = inner;
+	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC, rx, inner);
+	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+	sb->ste_build_tag_func = &dr_ste_build_eth_l2_src_tag;
+}
+
+static void dr_ste_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value,
+					     bool inner, u8 *bit_mask)
+{
+	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+	DR_STE_SET_MASK_V(eth_l2_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
+	DR_STE_SET_MASK_V(eth_l2_dst, bit_mask, dmac_15_0, mask, dmac_15_0);
+
+	dr_ste_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
+}
+
+static int dr_ste_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
+				       struct mlx5dr_ste_build *sb,
+				       u8 *hw_ste_p)
+{
+	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+	u8 *tag = hw_ste->tag;
+
+	DR_STE_SET_TAG(eth_l2_dst, tag, dmac_47_16, spec, dmac_47_16);
+	DR_STE_SET_TAG(eth_l2_dst, tag, dmac_15_0, spec, dmac_15_0);
+
+	return dr_ste_build_eth_l2_src_or_dst_tag(value, sb->inner, hw_ste_p);
+}
+
+void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_build *sb,
+				 struct mlx5dr_match_param *mask,
+				 bool inner, bool rx)
+{
+	dr_ste_build_eth_l2_dst_bit_mask(mask, inner, sb->bit_mask);
+
+	sb->rx = rx;
+	sb->inner = inner;
+	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_DST, rx, inner);
+	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+	sb->ste_build_tag_func = &dr_ste_build_eth_l2_dst_tag;
+}
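For illustration only (not part of the patch): the *_bit_mask() helpers above write -1 into fields such as first_vlan_qualifier. In the mask STE, all-ones in a field means "compare every bit of this field", and MLX5_SET truncates -1 to the field width. A stand-alone sketch of the same trick on a plain 2-bit field (not using the real MLX5_SET machinery):

#include <stdio.h>

#define FIELD_WIDTH 2
#define FIELD_MASK  ((1u << FIELD_WIDTH) - 1)
#define FIELD_SHIFT 4

int main(void)
{
	unsigned int mask_word = 0;

	/* -1 truncated to 2 bits is 0b11: match the whole qualifier field */
	mask_word |= ((unsigned int)-1 & FIELD_MASK) << FIELD_SHIFT;
	printf("mask_word = 0x%x\n", mask_word);	/* prints 0x30 */
	return 0;
}

The tag side then writes the concrete qualifier value (DR_STE_CVLAN or DR_STE_SVLAN) into the same field, so the all-ones mask compares it exactly.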
+
+static void dr_ste_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value,
+					     bool inner, u8 *bit_mask)
+{
+	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+	struct mlx5dr_match_misc *misc = &value->misc;
+
+	DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, dmac_47_16, mask, dmac_47_16);
+	DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, dmac_15_0, mask, dmac_15_0);
+	DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_vlan_id, mask, first_vid);
+	DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_cfi, mask, first_cfi);
+	DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_priority, mask, first_prio);
+	DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, ip_fragmented, mask, frag);
+	DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, l3_ethertype, mask, ethertype);
+	DR_STE_SET_MASK(eth_l2_tnl, bit_mask, l3_type, mask, ip_version);
+
+	if (misc->vxlan_vni) {
+		MLX5_SET(ste_eth_l2_tnl, bit_mask,
+			 l2_tunneling_network_id, (misc->vxlan_vni << 8));
+		misc->vxlan_vni = 0;
+	}
+
+	if (mask->svlan_tag || mask->cvlan_tag) {
+		MLX5_SET(ste_eth_l2_tnl, bit_mask, first_vlan_qualifier, -1);
+		mask->cvlan_tag = 0;
+		mask->svlan_tag = 0;
+	}
+}
+
+static int dr_ste_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
+				       struct mlx5dr_ste_build *sb,
+				       u8 *hw_ste_p)
+{
+	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+	struct mlx5dr_match_misc *misc = &value->misc;
+	u8 *tag = hw_ste->tag;
+
+	DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_47_16, spec, dmac_47_16);
+	DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_15_0, spec, dmac_15_0);
+	DR_STE_SET_TAG(eth_l2_tnl, tag, first_vlan_id, spec, first_vid);
+	DR_STE_SET_TAG(eth_l2_tnl, tag, first_cfi, spec, first_cfi);
+	DR_STE_SET_TAG(eth_l2_tnl, tag, ip_fragmented, spec, frag);
+	DR_STE_SET_TAG(eth_l2_tnl, tag, first_priority, spec, first_prio);
+	DR_STE_SET_TAG(eth_l2_tnl, tag, l3_ethertype, spec, ethertype);
+
+	if (misc->vxlan_vni) {
+		MLX5_SET(ste_eth_l2_tnl, tag, l2_tunneling_network_id,
+			 (misc->vxlan_vni << 8));
+		misc->vxlan_vni = 0;
+	}
+
+	if (spec->cvlan_tag) {
+		MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_CVLAN);
+		spec->cvlan_tag = 0;
+	} else if (spec->svlan_tag) {
+		MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_SVLAN);
+		spec->svlan_tag = 0;
+	}
+
+	if (spec->ip_version) {
+		if (spec->ip_version == IP_VERSION_IPV4) {
+			MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV4);
+			spec->ip_version = 0;
+		} else if (spec->ip_version == IP_VERSION_IPV6) {
+			MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV6);
+			spec->ip_version = 0;
+		} else {
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_build *sb,
+				 struct mlx5dr_match_param *mask, bool inner, bool rx)
+{
+	dr_ste_build_eth_l2_tnl_bit_mask(mask, inner, sb->bit_mask);
+
+	sb->rx = rx;
+	sb->inner = inner;
+	sb->lu_type = MLX5DR_STE_LU_TYPE_ETHL2_TUNNELING_I;
+	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+	sb->ste_build_tag_func = &dr_ste_build_eth_l2_tnl_tag;
+}
+
+static void dr_ste_build_eth_l3_ipv4_misc_bit_mask(struct mlx5dr_match_param *value,
+						    bool inner, u8 *bit_mask)
+{
+	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+	DR_STE_SET_MASK_V(eth_l3_ipv4_misc, bit_mask, time_to_live, mask, ttl_hoplimit);
+}
+
+static int dr_ste_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
+					     struct mlx5dr_ste_build *sb,
+					     u8 *hw_ste_p)
+{
+	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+	u8 *tag = hw_ste->tag;
+
+	DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, time_to_live, spec, ttl_hoplimit);
+
+	return 0;
+}
+
+void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_build *sb,
+				       struct mlx5dr_match_param *mask,
+				       bool inner, bool rx)
+{
+	dr_ste_build_eth_l3_ipv4_misc_bit_mask(mask, inner, sb->bit_mask);
+
+	sb->rx = rx;
+	sb->inner = inner;
+	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_MISC, rx, inner);
+	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+	sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv4_misc_tag;
+}
+
+static void dr_ste_build_ipv6_l3_l4_bit_mask(struct mlx5dr_match_param *value,
+					     bool inner, u8 *bit_mask)
+{
+	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+	DR_STE_SET_MASK_V(eth_l4, bit_mask, dst_port, mask, tcp_dport);
+	DR_STE_SET_MASK_V(eth_l4, bit_mask, src_port, mask, tcp_sport);
+	DR_STE_SET_MASK_V(eth_l4, bit_mask, dst_port, mask, udp_dport);
+	DR_STE_SET_MASK_V(eth_l4, bit_mask, src_port, mask, udp_sport);
+	DR_STE_SET_MASK_V(eth_l4, bit_mask, protocol, mask, ip_protocol);
+	DR_STE_SET_MASK_V(eth_l4, bit_mask, fragmented, mask, frag);
+	DR_STE_SET_MASK_V(eth_l4, bit_mask, dscp, mask, ip_dscp);
+	DR_STE_SET_MASK_V(eth_l4, bit_mask, ecn, mask, ip_ecn);
+	DR_STE_SET_MASK_V(eth_l4, bit_mask, ipv6_hop_limit, mask, ttl_hoplimit);
+
+	if (mask->tcp_flags) {
+		DR_STE_SET_TCP_FLAGS(eth_l4, bit_mask, mask);
+		mask->tcp_flags = 0;
+	}
+}
+
+static int dr_ste_build_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
+				       struct mlx5dr_ste_build *sb,
+				       u8 *hw_ste_p)
+{
+	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+	u8 *tag = hw_ste->tag;
+
+	DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, tcp_dport);
+	DR_STE_SET_TAG(eth_l4, tag, src_port, spec, tcp_sport);
+	DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, udp_dport);
+	DR_STE_SET_TAG(eth_l4, tag, src_port, spec, udp_sport);
+	DR_STE_SET_TAG(eth_l4, tag, protocol, spec, ip_protocol);
+	DR_STE_SET_TAG(eth_l4, tag, fragmented, spec, frag);
+	DR_STE_SET_TAG(eth_l4, tag, dscp, spec, ip_dscp);
+	DR_STE_SET_TAG(eth_l4, tag, ecn, spec, ip_ecn);
+	DR_STE_SET_TAG(eth_l4, tag, ipv6_hop_limit, spec, ttl_hoplimit);
+
+	if (spec->tcp_flags) {
+		DR_STE_SET_TCP_FLAGS(eth_l4, tag, spec);
+		spec->tcp_flags = 0;
+	}
+
+	return 0;
+}
+
+void mlx5dr_ste_build_ipv6_l3_l4(struct mlx5dr_ste_build *sb,
+				 struct mlx5dr_match_param *mask,
+				 bool inner, bool rx)
+{
+	dr_ste_build_ipv6_l3_l4_bit_mask(mask, inner, sb->bit_mask);
+
+	sb->rx = rx;
+	sb->inner = inner;
+	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4, rx, inner);
+	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+	sb->ste_build_tag_func = &dr_ste_build_ipv6_l3_l4_tag;
+}
+
+static int dr_ste_build_empty_always_hit_tag(struct mlx5dr_match_param *value,
+					     struct mlx5dr_ste_build *sb,
+					     u8 *hw_ste_p)
+{
+	return 0;
+}
+
+void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx)
+{
+	sb->rx = rx;
+	sb->lu_type = MLX5DR_STE_LU_TYPE_DONT_CARE;
+	sb->byte_mask = 0;
+	sb->ste_build_tag_func = &dr_ste_build_empty_always_hit_tag;
+}
+
+static void dr_ste_build_mpls_bit_mask(struct mlx5dr_match_param *value,
+				       bool inner, u8 *bit_mask)
+{
+	struct mlx5dr_match_misc2 *misc2_mask = &value->misc2;
+
+	if (inner)
+		DR_STE_SET_MPLS_MASK(mpls, misc2_mask, inner, bit_mask);
+	else
+		DR_STE_SET_MPLS_MASK(mpls, misc2_mask, outer, bit_mask);
+}
+
+static int dr_ste_build_mpls_tag(struct mlx5dr_match_param *value,
+				 struct mlx5dr_ste_build *sb,
+				 u8 *hw_ste_p)
+{
+	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+	struct mlx5dr_match_misc2 *misc2_mask = &value->misc2;
+	u8 *tag = hw_ste->tag;
+
+	if (sb->inner)
+		DR_STE_SET_MPLS_TAG(mpls, misc2_mask, inner, tag);
+	else
+		DR_STE_SET_MPLS_TAG(mpls, misc2_mask, outer, tag);
+
+	return 0;
+}
+
+void mlx5dr_ste_build_mpls(struct mlx5dr_ste_build *sb,
+			   struct mlx5dr_match_param *mask,
+			   bool inner, bool rx)
+{
+	dr_ste_build_mpls_bit_mask(mask, inner, sb->bit_mask);
+
+	sb->rx = rx;
+	sb->inner = inner;
+	sb->lu_type = DR_STE_CALC_LU_TYPE(MPLS_FIRST, rx, inner);
+	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+	sb->ste_build_tag_func = &dr_ste_build_mpls_tag;
+}
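For illustration only (not part of the patch, and a guess at the shape): DR_STE_CALC_LU_TYPE() is defined elsewhere in this series; the sketch below only conveys the general idea that one base lookup type expands into RX/TX and inner/outer variants, which is why every builder passes rx and inner into it. The selection scheme here is hypothetical, not the real macro:

#include <stdio.h>

enum { LU_ETHL4_RX_O = 1, LU_ETHL4_RX_I, LU_ETHL4_TX_O, LU_ETHL4_TX_I };

static int calc_lu_type(int rx, int inner)
{
	if (rx)
		return inner ? LU_ETHL4_RX_I : LU_ETHL4_RX_O;
	return inner ? LU_ETHL4_TX_I : LU_ETHL4_TX_O;
}

int main(void)
{
	printf("lu_type = %d\n", calc_lu_type(1, 0));	/* RX, outer */
	return 0;
}

Builders with a single hardware variant (for example the GRE and tunneling builders) skip the macro and assign a fixed MLX5DR_STE_LU_TYPE_* constant instead.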
+
+static void dr_ste_build_gre_bit_mask(struct mlx5dr_match_param *value,
+				      bool inner, u8 *bit_mask)
+{
+	struct mlx5dr_match_misc *misc_mask = &value->misc;
+
+	DR_STE_SET_MASK_V(gre, bit_mask, gre_protocol, misc_mask, gre_protocol);
+	DR_STE_SET_MASK_V(gre, bit_mask, gre_k_present, misc_mask, gre_k_present);
+	DR_STE_SET_MASK_V(gre, bit_mask, gre_key_h, misc_mask, gre_key_h);
+	DR_STE_SET_MASK_V(gre, bit_mask, gre_key_l, misc_mask, gre_key_l);
+
+	DR_STE_SET_MASK_V(gre, bit_mask, gre_c_present, misc_mask, gre_c_present);
+	DR_STE_SET_MASK_V(gre, bit_mask, gre_s_present, misc_mask, gre_s_present);
+}
+
+static int dr_ste_build_gre_tag(struct mlx5dr_match_param *value,
+				struct mlx5dr_ste_build *sb,
+				u8 *hw_ste_p)
+{
+	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+	struct mlx5dr_match_misc *misc = &value->misc;
+	u8 *tag = hw_ste->tag;
+
+	DR_STE_SET_TAG(gre, tag, gre_protocol, misc, gre_protocol);
+
+	DR_STE_SET_TAG(gre, tag, gre_k_present, misc, gre_k_present);
+	DR_STE_SET_TAG(gre, tag, gre_key_h, misc, gre_key_h);
+	DR_STE_SET_TAG(gre, tag, gre_key_l, misc, gre_key_l);
+
+	DR_STE_SET_TAG(gre, tag, gre_c_present, misc, gre_c_present);
+
+	DR_STE_SET_TAG(gre, tag, gre_s_present, misc, gre_s_present);
+
+	return 0;
+}
+
+void mlx5dr_ste_build_gre(struct mlx5dr_ste_build *sb,
+			  struct mlx5dr_match_param *mask, bool inner, bool rx)
+{
+	dr_ste_build_gre_bit_mask(mask, inner, sb->bit_mask);
+
+	sb->rx = rx;
+	sb->inner = inner;
+	sb->lu_type = MLX5DR_STE_LU_TYPE_GRE;
+	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+	sb->ste_build_tag_func = &dr_ste_build_gre_tag;
+}
+
+static void dr_ste_build_flex_parser_0_bit_mask(struct mlx5dr_match_param *value,
+						bool inner, u8 *bit_mask)
+{
+	struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
+
+	if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2_mask)) {
+		DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_label,
+				  misc_2_mask, outer_first_mpls_over_gre_label);
+
+		DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_exp,
+				  misc_2_mask, outer_first_mpls_over_gre_exp);
+
+		DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_s_bos,
+				  misc_2_mask, outer_first_mpls_over_gre_s_bos);
+
+		DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_ttl,
+				  misc_2_mask, outer_first_mpls_over_gre_ttl);
+	} else {
+		DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_label,
+				  misc_2_mask, outer_first_mpls_over_udp_label);
+
+		DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_exp,
+				  misc_2_mask, outer_first_mpls_over_udp_exp);
+
+		DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_s_bos,
+				  misc_2_mask, outer_first_mpls_over_udp_s_bos);
+
+		DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_ttl,
+				  misc_2_mask, outer_first_mpls_over_udp_ttl);
+	}
+}
+
+static int dr_ste_build_flex_parser_0_tag(struct mlx5dr_match_param *value,
+					  struct mlx5dr_ste_build *sb,
+					  u8 *hw_ste_p)
+{
+	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+	struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
+	u8 *tag = hw_ste->tag;
+
+	if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2_mask)) {
+		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
+			       misc_2_mask, outer_first_mpls_over_gre_label);
+
+		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
+			       misc_2_mask, outer_first_mpls_over_gre_exp);
+
+		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
+			       misc_2_mask, outer_first_mpls_over_gre_s_bos);
+
+		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
+			       misc_2_mask, outer_first_mpls_over_gre_ttl);
+	} else {
+		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
+			       misc_2_mask, outer_first_mpls_over_udp_label);
+
+		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
+			       misc_2_mask, outer_first_mpls_over_udp_exp);
+
+		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
+			       misc_2_mask, outer_first_mpls_over_udp_s_bos);
+
+		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
+			       misc_2_mask, outer_first_mpls_over_udp_ttl);
+	}
+	return 0;
+}
+
+void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_build *sb,
+				    struct mlx5dr_match_param *mask,
+				    bool inner, bool rx)
+{
+	dr_ste_build_flex_parser_0_bit_mask(mask, inner, sb->bit_mask);
+
+	sb->rx = rx;
+	sb->inner = inner;
+	sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_0;
+	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+	sb->ste_build_tag_func = &dr_ste_build_flex_parser_0_tag;
+}
+
+#define ICMP_TYPE_OFFSET_FIRST_DW 24
+#define ICMP_CODE_OFFSET_FIRST_DW 16
+#define ICMP_HEADER_DATA_OFFSET_SECOND_DW 0
+
+static int dr_ste_build_flex_parser_1_bit_mask(struct mlx5dr_match_param *mask,
+					       struct mlx5dr_cmd_caps *caps,
+					       u8 *bit_mask)
+{
+	struct mlx5dr_match_misc3 *misc_3_mask = &mask->misc3;
+	bool is_ipv4_mask = DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(misc_3_mask);
+	u32 icmp_header_data_mask;
+	u32 icmp_type_mask;
+	u32 icmp_code_mask;
+	int dw0_location;
+	int dw1_location;
+
+	if (is_ipv4_mask) {
+		icmp_header_data_mask = misc_3_mask->icmpv4_header_data;
+		icmp_type_mask = misc_3_mask->icmpv4_type;
+		icmp_code_mask = misc_3_mask->icmpv4_code;
+		dw0_location = caps->flex_parser_id_icmp_dw0;
+		dw1_location = caps->flex_parser_id_icmp_dw1;
+	} else {
+		icmp_header_data_mask = misc_3_mask->icmpv6_header_data;
+		icmp_type_mask = misc_3_mask->icmpv6_type;
+		icmp_code_mask = misc_3_mask->icmpv6_code;
+		dw0_location = caps->flex_parser_id_icmpv6_dw0;
+		dw1_location = caps->flex_parser_id_icmpv6_dw1;
+	}
+
+	switch (dw0_location) {
+	case 4:
+		if (icmp_type_mask) {
+			MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_4,
+				 (icmp_type_mask << ICMP_TYPE_OFFSET_FIRST_DW));
+			if (is_ipv4_mask)
+				misc_3_mask->icmpv4_type = 0;
+			else
+				misc_3_mask->icmpv6_type = 0;
+		}
+		if (icmp_code_mask) {
+			u32 cur_val = MLX5_GET(ste_flex_parser_1, bit_mask,
+					       flex_parser_4);
+			MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_4,
+				 cur_val | (icmp_code_mask << ICMP_CODE_OFFSET_FIRST_DW));
+			if (is_ipv4_mask)
+				misc_3_mask->icmpv4_code = 0;
+			else
+				misc_3_mask->icmpv6_code = 0;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (dw1_location) {
+	case 5:
+		if (icmp_header_data_mask) {
+			MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_5,
+				 (icmp_header_data_mask << ICMP_HEADER_DATA_OFFSET_SECOND_DW));
+			if (is_ipv4_mask)
+				misc_3_mask->icmpv4_header_data = 0;
+			else
+				misc_3_mask->icmpv6_header_data = 0;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int dr_ste_build_flex_parser_1_tag(struct mlx5dr_match_param *value,
+					  struct mlx5dr_ste_build *sb,
+					  u8 *hw_ste_p)
+{
+	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+	struct mlx5dr_match_misc3 *misc_3 = &value->misc3;
+	u8 *tag = hw_ste->tag;
+	u32 icmp_header_data;
+	int dw0_location;
+	int dw1_location;
+	u32 icmp_type;
+	u32 icmp_code;
+	bool is_ipv4;
+
+	is_ipv4 = DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(misc_3);
+	if (is_ipv4) {
+		icmp_header_data = misc_3->icmpv4_header_data;
+		icmp_type = misc_3->icmpv4_type;
+		icmp_code = misc_3->icmpv4_code;
+		dw0_location = sb->caps->flex_parser_id_icmp_dw0;
+		dw1_location = sb->caps->flex_parser_id_icmp_dw1;
+	} else {
+		icmp_header_data = misc_3->icmpv6_header_data;
+		icmp_type = misc_3->icmpv6_type;
+		icmp_code = misc_3->icmpv6_code;
+		dw0_location = sb->caps->flex_parser_id_icmpv6_dw0;
+		dw1_location = sb->caps->flex_parser_id_icmpv6_dw1;
+	}
+
+	switch (dw0_location) {
+	case 4:
+		if (icmp_type) {
+			MLX5_SET(ste_flex_parser_1, tag, flex_parser_4,
+				 (icmp_type << ICMP_TYPE_OFFSET_FIRST_DW));
+			if (is_ipv4)
+				misc_3->icmpv4_type = 0;
+			else
+				misc_3->icmpv6_type = 0;
+		}
+
+		if (icmp_code) {
+			u32 cur_val = MLX5_GET(ste_flex_parser_1, tag,
+					       flex_parser_4);
+			MLX5_SET(ste_flex_parser_1, tag, flex_parser_4,
+				 cur_val | (icmp_code << ICMP_CODE_OFFSET_FIRST_DW));
+			if (is_ipv4)
+				misc_3->icmpv4_code = 0;
+			else
+				misc_3->icmpv6_code = 0;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (dw1_location) {
+	case 5:
+		if (icmp_header_data) {
+			MLX5_SET(ste_flex_parser_1, tag, flex_parser_5,
+				 (icmp_header_data << ICMP_HEADER_DATA_OFFSET_SECOND_DW));
+			if (is_ipv4)
+				misc_3->icmpv4_header_data = 0;
+			else
+				misc_3->icmpv6_header_data = 0;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_build *sb,
+				   struct mlx5dr_match_param *mask,
+				   struct mlx5dr_cmd_caps *caps,
+				   bool inner, bool rx)
+{
+	int ret;
+
+	ret = dr_ste_build_flex_parser_1_bit_mask(mask, caps, sb->bit_mask);
+	if (ret)
+		return ret;
+
+	sb->rx = rx;
+	sb->inner = inner;
+	sb->caps = caps;
+	sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_1;
+	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+	sb->ste_build_tag_func = &dr_ste_build_flex_parser_1_tag;
+
+	return 0;
+}
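For illustration only (not part of the patch): the flex-parser-1 builder above packs ICMP type and code into one flex-parser dword, type in bits 31:24 and code in bits 23:16, using the offsets #defined in the patch. A stand-alone sketch of that packing:

#include <stdint.h>
#include <stdio.h>

#define ICMP_TYPE_OFFSET_FIRST_DW	24
#define ICMP_CODE_OFFSET_FIRST_DW	16

int main(void)
{
	uint32_t type = 8, code = 0;	/* e.g. ICMPv4 echo request */
	uint32_t dw0 = (type << ICMP_TYPE_OFFSET_FIRST_DW) |
		       (code << ICMP_CODE_OFFSET_FIRST_DW);

	printf("flex_parser_4 dword = 0x%08x\n", dw0);	/* 0x08000000 */
	return 0;
}

The read-modify-write with MLX5_GET/MLX5_SET in the kernel code exists because type and code share flex_parser_4; writing code directly would clobber an already-written type.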
+
+static void dr_ste_build_general_purpose_bit_mask(struct mlx5dr_match_param *value,
+						   bool inner, u8 *bit_mask)
+{
+	struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
+
+	DR_STE_SET_MASK_V(general_purpose, bit_mask,
+			  general_purpose_lookup_field, misc_2_mask,
+			  metadata_reg_a);
+}
+
+static int dr_ste_build_general_purpose_tag(struct mlx5dr_match_param *value,
+					    struct mlx5dr_ste_build *sb,
+					    u8 *hw_ste_p)
+{
+	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+	struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
+	u8 *tag = hw_ste->tag;
+
+	DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field,
+		       misc_2_mask, metadata_reg_a);
+
+	return 0;
+}
+
+void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_build *sb,
+				      struct mlx5dr_match_param *mask,
+				      bool inner, bool rx)
+{
+	dr_ste_build_general_purpose_bit_mask(mask, inner, sb->bit_mask);
+
+	sb->rx = rx;
+	sb->inner = inner;
+	sb->lu_type = MLX5DR_STE_LU_TYPE_GENERAL_PURPOSE;
+	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+	sb->ste_build_tag_func = &dr_ste_build_general_purpose_tag;
+}
+
+static void dr_ste_build_eth_l4_misc_bit_mask(struct mlx5dr_match_param *value,
+					      bool inner, u8 *bit_mask)
+{
+	struct mlx5dr_match_misc3 *misc_3_mask = &value->misc3;
+
+	if (inner) {
+		DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, seq_num, misc_3_mask,
+				  inner_tcp_seq_num);
+		DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, ack_num, misc_3_mask,
+				  inner_tcp_ack_num);
+	} else {
+		DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, seq_num, misc_3_mask,
+				  outer_tcp_seq_num);
+		DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, ack_num, misc_3_mask,
+				  outer_tcp_ack_num);
+	}
+}
+
+static int dr_ste_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
+					struct mlx5dr_ste_build *sb,
+					u8 *hw_ste_p)
+{
+	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+	struct mlx5dr_match_misc3 *misc3 = &value->misc3;
+	u8 *tag = hw_ste->tag;
+
+	if (sb->inner) {
+		DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, inner_tcp_seq_num);
+		DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, inner_tcp_ack_num);
+	} else {
+		DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, outer_tcp_seq_num);
+		DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, outer_tcp_ack_num);
+	}
+
+	return 0;
+}
+
+void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_build *sb,
+				  struct mlx5dr_match_param *mask,
+				  bool inner, bool rx)
+{
+	dr_ste_build_eth_l4_misc_bit_mask(mask, inner, sb->bit_mask);
+
+	sb->rx = rx;
+	sb->inner = inner;
+	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4_MISC, rx, inner);
+	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+	sb->ste_build_tag_func = &dr_ste_build_eth_l4_misc_tag;
+}
+
+static void dr_ste_build_flex_parser_tnl_bit_mask(struct mlx5dr_match_param *value,
+						   bool inner, u8 *bit_mask)
+{
+	struct mlx5dr_match_misc3 *misc_3_mask = &value->misc3;
+
+	if (misc_3_mask->outer_vxlan_gpe_flags ||
+	    misc_3_mask->outer_vxlan_gpe_next_protocol) {
+		MLX5_SET(ste_flex_parser_tnl, bit_mask,
+			 flex_parser_tunneling_header_63_32,
+			 (misc_3_mask->outer_vxlan_gpe_flags << 24) |
+			 (misc_3_mask->outer_vxlan_gpe_next_protocol));
+		misc_3_mask->outer_vxlan_gpe_flags = 0;
+		misc_3_mask->outer_vxlan_gpe_next_protocol = 0;
+	}
+
+	if (misc_3_mask->outer_vxlan_gpe_vni) {
+		MLX5_SET(ste_flex_parser_tnl, bit_mask,
+			 flex_parser_tunneling_header_31_0,
+			 misc_3_mask->outer_vxlan_gpe_vni << 8);
+		misc_3_mask->outer_vxlan_gpe_vni = 0;
+	}
+}
+
+static int dr_ste_build_flex_parser_tnl_tag(struct mlx5dr_match_param *value,
+					    struct mlx5dr_ste_build *sb,
+					    u8 *hw_ste_p)
+{
+	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+	struct mlx5dr_match_misc3 *misc3 = &value->misc3;
+	u8 *tag = hw_ste->tag;
+
+	if (misc3->outer_vxlan_gpe_flags ||
+	    misc3->outer_vxlan_gpe_next_protocol) {
+		MLX5_SET(ste_flex_parser_tnl, tag,
+			 flex_parser_tunneling_header_63_32,
+			 (misc3->outer_vxlan_gpe_flags << 24) |
+			 (misc3->outer_vxlan_gpe_next_protocol));
+		misc3->outer_vxlan_gpe_flags = 0;
+		misc3->outer_vxlan_gpe_next_protocol = 0;
+	}
+
+	if (misc3->outer_vxlan_gpe_vni) {
+		MLX5_SET(ste_flex_parser_tnl, tag,
+			 flex_parser_tunneling_header_31_0,
+			 misc3->outer_vxlan_gpe_vni << 8);
+		misc3->outer_vxlan_gpe_vni = 0;
+	}
+
+	return 0;
+}
+
+void mlx5dr_ste_build_flex_parser_tnl(struct mlx5dr_ste_build *sb,
+				      struct mlx5dr_match_param *mask,
+				      bool inner, bool rx)
+{
+	dr_ste_build_flex_parser_tnl_bit_mask(mask, inner, sb->bit_mask);
+
+	sb->rx = rx;
+	sb->inner = inner;
+	sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER;
+	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+	sb->ste_build_tag_func = &dr_ste_build_flex_parser_tnl_tag;
+}
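For illustration only (not part of the patch): the tunnel builder above packs the VXLAN-GPE header into two dwords -- flags in bits 31:24 and next_protocol in bits 7:0 of the upper dword, and the 24-bit VNI shifted left by 8 in the lower dword. A stand-alone sketch with sample values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t flags = 0x0c, next_proto = 0x03, vni = 0x123456;
	uint32_t hdr_63_32 = (flags << 24) | next_proto;
	uint32_t hdr_31_0 = vni << 8;

	/* prints 0x0c000003 0x12345600 */
	printf("0x%08x 0x%08x\n", hdr_63_32, hdr_31_0);
	return 0;
}

The same vni << 8 placement appears in the eth_l2_tnl builder earlier in the file, matching the VNI's position in the on-the-wire tunnel header dword.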
+
+static void dr_ste_build_register_0_bit_mask(struct mlx5dr_match_param *value,
+					     u8 *bit_mask)
+{
+	struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
+
+	DR_STE_SET_MASK_V(register_0, bit_mask, register_0_h,
+			  misc_2_mask, metadata_reg_c_0);
+	DR_STE_SET_MASK_V(register_0, bit_mask, register_0_l,
+			  misc_2_mask, metadata_reg_c_1);
+	DR_STE_SET_MASK_V(register_0, bit_mask, register_1_h,
+			  misc_2_mask, metadata_reg_c_2);
+	DR_STE_SET_MASK_V(register_0, bit_mask, register_1_l,
+			  misc_2_mask, metadata_reg_c_3);
+}
+
+static int dr_ste_build_register_0_tag(struct mlx5dr_match_param *value,
+				       struct mlx5dr_ste_build *sb,
+				       u8 *hw_ste_p)
+{
+	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+	struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+	u8 *tag = hw_ste->tag;
+
+	DR_STE_SET_TAG(register_0, tag, register_0_h, misc2, metadata_reg_c_0);
+	DR_STE_SET_TAG(register_0, tag, register_0_l, misc2, metadata_reg_c_1);
+	DR_STE_SET_TAG(register_0, tag, register_1_h, misc2, metadata_reg_c_2);
+	DR_STE_SET_TAG(register_0, tag, register_1_l, misc2, metadata_reg_c_3);
+
+	return 0;
+}
+
+void mlx5dr_ste_build_register_0(struct mlx5dr_ste_build *sb,
+				 struct mlx5dr_match_param *mask,
+				 bool inner, bool rx)
+{
+	dr_ste_build_register_0_bit_mask(mask, sb->bit_mask);
+
+	sb->rx = rx;
+	sb->inner = inner;
+	sb->lu_type = MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_0;
+	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+	sb->ste_build_tag_func = &dr_ste_build_register_0_tag;
+}
+
+static void dr_ste_build_register_1_bit_mask(struct mlx5dr_match_param *value,
+					     u8 *bit_mask)
+{
+	struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
+
+	DR_STE_SET_MASK_V(register_1, bit_mask, register_2_h,
+			  misc_2_mask, metadata_reg_c_4);
+	DR_STE_SET_MASK_V(register_1, bit_mask, register_2_l,
+			  misc_2_mask, metadata_reg_c_5);
+	DR_STE_SET_MASK_V(register_1, bit_mask, register_3_h,
+			  misc_2_mask, metadata_reg_c_6);
+	DR_STE_SET_MASK_V(register_1, bit_mask, register_3_l,
+			  misc_2_mask, metadata_reg_c_7);
+}
+
+static int dr_ste_build_register_1_tag(struct mlx5dr_match_param *value,
+				       struct mlx5dr_ste_build *sb,
+				       u8 *hw_ste_p)
+{
+	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+	struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+	u8 *tag = hw_ste->tag;
+
+	DR_STE_SET_TAG(register_1, tag, register_2_h, misc2, metadata_reg_c_4);
+	DR_STE_SET_TAG(register_1, tag, register_2_l, misc2, metadata_reg_c_5);
+	DR_STE_SET_TAG(register_1, tag, register_3_h, misc2, metadata_reg_c_6);
+	DR_STE_SET_TAG(register_1, tag, register_3_l, misc2, metadata_reg_c_7);
+
+	return 0;
+}
+
+void mlx5dr_ste_build_register_1(struct mlx5dr_ste_build *sb,
+				 struct mlx5dr_match_param *mask,
+				 bool inner, bool rx)
+{
+	dr_ste_build_register_1_bit_mask(mask, sb->bit_mask);
+
+	sb->rx = rx;
+	sb->inner = inner;
+	sb->lu_type = MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_1;
+	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+	sb->ste_build_tag_func = &dr_ste_build_register_1_tag;
+}
+
+static int dr_ste_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
+					      u8 *bit_mask)
+{
+	struct mlx5dr_match_misc *misc_mask = &value->misc;
+
+	if (misc_mask->source_port != 0xffff)
+		return -EINVAL;
+
+	DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_gvmi, misc_mask, source_port);
+	DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_qp, misc_mask, source_sqn);
+
+	return 0;
+}
+
+static int dr_ste_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
+					 struct mlx5dr_ste_build *sb,
+					 u8 *hw_ste_p)
+{
+	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+	struct mlx5dr_match_misc *misc = &value->misc;
+	struct mlx5dr_cmd_vport_cap *vport_cap;
+	u8 *tag = hw_ste->tag;
+
+	DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn);
+
+	vport_cap = mlx5dr_get_vport_cap(sb->caps, misc->source_port);
+	if (!vport_cap)
+		return -EINVAL;
+
+	if (vport_cap->vport_gvmi)
+		MLX5_SET(ste_src_gvmi_qp, tag, source_gvmi, vport_cap->vport_gvmi);
+
+	misc->source_port = 0;
+
+	return 0;
+}
+
+int mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
+				  struct mlx5dr_match_param *mask,
+				  struct mlx5dr_cmd_caps *caps,
+				  bool inner, bool rx)
+{
+	int ret;
+
+	ret = dr_ste_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
+	if (ret)
+		return ret;
+
+	sb->rx = rx;
+	sb->caps = caps;
+	sb->inner = inner;
+	sb->lu_type = MLX5DR_STE_LU_TYPE_SRC_GVMI_AND_QP;
+	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+	sb->ste_build_tag_func = &dr_ste_build_src_gvmi_qpn_tag;
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
new file mode 100644
index 000000000000..e178d8d3dbc9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include "dr_types.h"
+
+int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
+				 struct mlx5dr_action *action)
+{
+	struct mlx5dr_matcher *last_matcher = NULL;
+	struct mlx5dr_htbl_connect_info info;
+	struct mlx5dr_ste_htbl *last_htbl;
+	int ret;
+
+	if (action && action->action_type != DR_ACTION_TYP_FT)
+		return -EOPNOTSUPP;
+
+	mutex_lock(&tbl->dmn->mutex);
+
+	if (!list_empty(&tbl->matcher_list))
+		last_matcher = list_last_entry(&tbl->matcher_list,
+					       struct mlx5dr_matcher,
+					       matcher_list);
+
+	if (tbl->dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX ||
+	    tbl->dmn->type == MLX5DR_DOMAIN_TYPE_FDB) {
+		if (last_matcher)
+			last_htbl = last_matcher->rx.e_anchor;
+		else
+			last_htbl = tbl->rx.s_anchor;
+
+		tbl->rx.default_icm_addr = action ?
+			action->dest_tbl.tbl->rx.s_anchor->chunk->icm_addr :
+			tbl->rx.nic_dmn->default_icm_addr;
+
+		info.type = CONNECT_MISS;
+		info.miss_icm_addr = tbl->rx.default_icm_addr;
+
+		ret = mlx5dr_ste_htbl_init_and_postsend(tbl->dmn,
+							tbl->rx.nic_dmn,
+							last_htbl,
+							&info, true);
+		if (ret) {
+			mlx5dr_dbg(tbl->dmn, "Failed to set RX miss action, ret %d\n", ret);
+			goto out;
+		}
+	}
+
+	if (tbl->dmn->type == MLX5DR_DOMAIN_TYPE_NIC_TX ||
+	    tbl->dmn->type == MLX5DR_DOMAIN_TYPE_FDB) {
+		if (last_matcher)
+			last_htbl = last_matcher->tx.e_anchor;
+		else
+			last_htbl = tbl->tx.s_anchor;
+
+		tbl->tx.default_icm_addr = action ?
+			action->dest_tbl.tbl->tx.s_anchor->chunk->icm_addr :
+			tbl->tx.nic_dmn->default_icm_addr;
+
+		info.type = CONNECT_MISS;
+		info.miss_icm_addr = tbl->tx.default_icm_addr;
+
+		ret = mlx5dr_ste_htbl_init_and_postsend(tbl->dmn,
+							tbl->tx.nic_dmn,
+							last_htbl, &info, true);
+		if (ret) {
+			mlx5dr_dbg(tbl->dmn, "Failed to set TX miss action, ret %d\n", ret);
+			goto out;
+		}
+	}
+
+	/* Release old action */
+	if (tbl->miss_action)
+		refcount_dec(&tbl->miss_action->refcount);
+
+	/* Set new miss action */
+	tbl->miss_action = action;
+	if (tbl->miss_action)
+		refcount_inc(&action->refcount);
+
+out:
+	mutex_unlock(&tbl->dmn->mutex);
+	return ret;
+}
+
+static void dr_table_uninit_nic(struct mlx5dr_table_rx_tx *nic_tbl)
+{
+	mlx5dr_htbl_put(nic_tbl->s_anchor);
+}
+
+static void dr_table_uninit_fdb(struct mlx5dr_table *tbl)
+{
+	dr_table_uninit_nic(&tbl->rx);
+	dr_table_uninit_nic(&tbl->tx);
+}
+
+static void dr_table_uninit(struct mlx5dr_table *tbl)
+{
+	mutex_lock(&tbl->dmn->mutex);
+
+	switch (tbl->dmn->type) {
+	case MLX5DR_DOMAIN_TYPE_NIC_RX:
+		dr_table_uninit_nic(&tbl->rx);
+		break;
+	case MLX5DR_DOMAIN_TYPE_NIC_TX:
+		dr_table_uninit_nic(&tbl->tx);
+		break;
+	case MLX5DR_DOMAIN_TYPE_FDB:
+		dr_table_uninit_fdb(tbl);
+		break;
+	default:
+		WARN_ON(true);
+		break;
+	}
+
+	mutex_unlock(&tbl->dmn->mutex);
+}
+
+static int dr_table_init_nic(struct mlx5dr_domain *dmn,
+			     struct mlx5dr_table_rx_tx *nic_tbl)
+{
+	struct mlx5dr_domain_rx_tx *nic_dmn = nic_tbl->nic_dmn;
+	struct mlx5dr_htbl_connect_info info;
+	int ret;
+
+	nic_tbl->default_icm_addr = nic_dmn->default_icm_addr;
+
+	nic_tbl->s_anchor = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
+						  DR_CHUNK_SIZE_1,
+						  MLX5DR_STE_LU_TYPE_DONT_CARE,
+						  0);
+	if (!nic_tbl->s_anchor)
+		return -ENOMEM;
+
+	info.type = CONNECT_MISS;
+	info.miss_icm_addr = nic_dmn->default_icm_addr;
+	ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn,
+						nic_tbl->s_anchor,
+						&info, true);
+	if (ret)
+		goto free_s_anchor;
+
+	mlx5dr_htbl_get(nic_tbl->s_anchor);
+
+	return 0;
+
+free_s_anchor:
+	mlx5dr_ste_htbl_free(nic_tbl->s_anchor);
+	return ret;
+}
+
+static int dr_table_init_fdb(struct mlx5dr_table *tbl)
+{
+	int ret;
+
+	ret = dr_table_init_nic(tbl->dmn, &tbl->rx);
+	if (ret)
+		return ret;
+
+	ret = dr_table_init_nic(tbl->dmn, &tbl->tx);
+	if (ret)
+		goto destroy_rx;
+
+	return 0;
+
+destroy_rx:
+	dr_table_uninit_nic(&tbl->rx);
+	return ret;
+}
+
+static int dr_table_init(struct mlx5dr_table *tbl)
+{
+	int ret = 0;
+
+	INIT_LIST_HEAD(&tbl->matcher_list);
+
+	mutex_lock(&tbl->dmn->mutex);
+
+	switch (tbl->dmn->type) {
+	case MLX5DR_DOMAIN_TYPE_NIC_RX:
+		tbl->table_type = MLX5_FLOW_TABLE_TYPE_NIC_RX;
+		tbl->rx.nic_dmn = &tbl->dmn->info.rx;
+		ret = dr_table_init_nic(tbl->dmn, &tbl->rx);
+		break;
+	case MLX5DR_DOMAIN_TYPE_NIC_TX:
+		tbl->table_type = MLX5_FLOW_TABLE_TYPE_NIC_TX;
+		tbl->tx.nic_dmn = &tbl->dmn->info.tx;
+		ret = dr_table_init_nic(tbl->dmn, &tbl->tx);
+		break;
+	case MLX5DR_DOMAIN_TYPE_FDB:
+		tbl->table_type = MLX5_FLOW_TABLE_TYPE_FDB;
+		tbl->rx.nic_dmn = &tbl->dmn->info.rx;
+		tbl->tx.nic_dmn = &tbl->dmn->info.tx;
+		ret = dr_table_init_fdb(tbl);
+		break;
+	default:
+		WARN_ON(true);
+		break;
+	}
+
+	mutex_unlock(&tbl->dmn->mutex);
+
+	return ret;
+}
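For illustration only (not part of the patch): the intended calling sequence for this table API, sketched as a kernel-style fragment with error handling elided. "dmn" and "ft_action" are assumed to come from the wider mlx5dr API, and the level value is arbitrary:

	struct mlx5dr_table *tbl;

	tbl = mlx5dr_table_create(dmn, 0 /* level */);
	if (!tbl)
		return -ENOMEM;

	/* Optionally chain misses to another table via an FT action;
	 * the table takes a reference on the action until it is
	 * replaced or the table is destroyed.
	 */
	mlx5dr_table_set_miss_action(tbl, ft_action);

	/* ... create matchers and insert rules ... */

	mlx5dr_table_destroy(tbl);	/* drops the miss-action reference */

Note that mlx5dr_table_destroy() refuses (-EBUSY) while anything else still holds a reference on the table, so matchers must be torn down first.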
+
+static int dr_table_destroy_sw_owned_tbl(struct mlx5dr_table *tbl)
+{
+	return mlx5dr_cmd_destroy_flow_table(tbl->dmn->mdev,
+					     tbl->table_id,
+					     tbl->table_type);
+}
+
+static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl)
+{
+	u64 icm_addr_rx = 0;
+	u64 icm_addr_tx = 0;
+	int ret;
+
+	if (tbl->rx.s_anchor)
+		icm_addr_rx = tbl->rx.s_anchor->chunk->icm_addr;
+
+	if (tbl->tx.s_anchor)
+		icm_addr_tx = tbl->tx.s_anchor->chunk->icm_addr;
+
+	ret = mlx5dr_cmd_create_flow_table(tbl->dmn->mdev,
+					   tbl->table_type,
+					   icm_addr_rx,
+					   icm_addr_tx,
+					   tbl->dmn->info.caps.max_ft_level - 1,
+					   true, false, NULL,
+					   &tbl->table_id);
+
+	return ret;
+}
+
+struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_domain *dmn, u32 level)
+{
+	struct mlx5dr_table *tbl;
+	int ret;
+
+	refcount_inc(&dmn->refcount);
+
+	tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
+	if (!tbl)
+		goto dec_ref;
+
+	tbl->dmn = dmn;
+	tbl->level = level;
+	refcount_set(&tbl->refcount, 1);
+
+	ret = dr_table_init(tbl);
+	if (ret)
+		goto free_tbl;
+
+	ret = dr_table_create_sw_owned_tbl(tbl);
+	if (ret)
+		goto uninit_tbl;
+
+	return tbl;
+
+uninit_tbl:
+	dr_table_uninit(tbl);
+free_tbl:
+	kfree(tbl);
+dec_ref:
+	refcount_dec(&dmn->refcount);
+	return NULL;
+}
+
+int mlx5dr_table_destroy(struct mlx5dr_table *tbl)
+{
+	int ret;
+
+	if (refcount_read(&tbl->refcount) > 1)
+		return -EBUSY;
+
+	ret = dr_table_destroy_sw_owned_tbl(tbl);
+	if (ret)
+		return ret;
+
+	dr_table_uninit(tbl);
+
+	if (tbl->miss_action)
+		refcount_dec(&tbl->miss_action->refcount);
+
+	refcount_dec(&tbl->dmn->refcount);
+	kfree(tbl);
+
+	return ret;
+}
+
+u32 mlx5dr_table_get_id(struct mlx5dr_table *tbl)
+{
+	return tbl->table_id;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
new file mode 100644
index 000000000000..a37ee6359be2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -0,0 +1,1060 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019, Mellanox Technologies */
+
+#ifndef _DR_TYPES_
+#define _DR_TYPES_
+
+#include <linux/mlx5/driver.h>
+#include <linux/refcount.h>
+#include "fs_core.h"
+#include "wq.h"
+#include "lib/mlx5.h"
+#include "mlx5_ifc_dr.h"
+#include "mlx5dr.h"
+
+#define DR_RULE_MAX_STES 17
+#define DR_ACTION_MAX_STES 5
+#define WIRE_PORT 0xFFFF
+#define DR_STE_SVLAN 0x1
+#define DR_STE_CVLAN 0x2
+
+#define mlx5dr_err(dmn, arg...) mlx5_core_err((dmn)->mdev, ##arg)
+#define mlx5dr_info(dmn, arg...) mlx5_core_info((dmn)->mdev, ##arg)
+#define mlx5dr_dbg(dmn, arg...) mlx5_core_dbg((dmn)->mdev, ##arg)
+
+enum mlx5dr_icm_chunk_size {
+	DR_CHUNK_SIZE_1,
+	DR_CHUNK_SIZE_MIN = DR_CHUNK_SIZE_1, /* keep updated when changing */
+	DR_CHUNK_SIZE_2,
+	DR_CHUNK_SIZE_4,
+	DR_CHUNK_SIZE_8,
+	DR_CHUNK_SIZE_16,
+	DR_CHUNK_SIZE_32,
+	DR_CHUNK_SIZE_64,
+	DR_CHUNK_SIZE_128,
+	DR_CHUNK_SIZE_256,
+	DR_CHUNK_SIZE_512,
+	DR_CHUNK_SIZE_1K,
+	DR_CHUNK_SIZE_2K,
+	DR_CHUNK_SIZE_4K,
+	DR_CHUNK_SIZE_8K,
+	DR_CHUNK_SIZE_16K,
+	DR_CHUNK_SIZE_32K,
+	DR_CHUNK_SIZE_64K,
+	DR_CHUNK_SIZE_128K,
+	DR_CHUNK_SIZE_256K,
+	DR_CHUNK_SIZE_512K,
+	DR_CHUNK_SIZE_1024K,
+	DR_CHUNK_SIZE_2048K,
+	DR_CHUNK_SIZE_MAX,
+};
+
+enum mlx5dr_icm_type {
+	DR_ICM_TYPE_STE,
+	DR_ICM_TYPE_MODIFY_ACTION,
+};
+
+static inline enum mlx5dr_icm_chunk_size
+mlx5dr_icm_next_higher_chunk(enum mlx5dr_icm_chunk_size chunk)
+{
+	chunk += 2;
+	if (chunk < DR_CHUNK_SIZE_MAX)
+		return chunk;
+
+	return DR_CHUNK_SIZE_MAX;
+}
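For illustration only (not part of the patch): each step of the chunk-size enum doubles the entry count, and mlx5dr_icm_next_higher_chunk() advances by two steps, so each call quadruples the chunk (SIZE_1 -> SIZE_4 -> SIZE_16, and so on). A stand-alone sketch mirroring that ordering with a shortened enum:

#include <stdio.h>

enum chunk { SZ_1, SZ_2, SZ_4, SZ_8, SZ_16, SZ_MAX };

static enum chunk next_higher(enum chunk c)
{
	c += 2;			/* skip one step: grow 4x, not 2x */
	return c < SZ_MAX ? c : SZ_MAX;
}

int main(void)
{
	enum chunk c = SZ_1;

	while (c != SZ_MAX) {
		printf("chunk %d -> %d entries\n", c, 1 << c);
		c = next_higher(c);
	}
	return 0;	/* prints 1, 4, 16 entries */
}

Growing by 4x rather than 2x presumably keeps hash-table resizes infrequent at the cost of some over-allocation.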
This + * includes the non collision and collision entries + */ + unsigned int num_of_valid_entries; + + /* total number of collisions entries attached to this table */ + unsigned int num_of_collisions; + unsigned int increase_threshold; + u8 may_grow:1; +}; + +struct mlx5dr_ste_htbl { + u8 lu_type; + u16 byte_mask; + refcount_t refcount; + struct mlx5dr_icm_chunk *chunk; + struct mlx5dr_ste *ste_arr; + u8 *hw_ste_arr; + + struct list_head *miss_list; + + enum mlx5dr_icm_chunk_size chunk_size; + struct mlx5dr_ste *pointing_ste; + + struct mlx5dr_ste_htbl_ctrl ctrl; +}; + +struct mlx5dr_ste_send_info { + struct mlx5dr_ste *ste; + struct list_head send_list; + u16 size; + u16 offset; + u8 data_cont[DR_STE_SIZE]; + u8 *data; +}; + +void mlx5dr_send_fill_and_append_ste_send_info(struct mlx5dr_ste *ste, u16 size, + u16 offset, u8 *data, + struct mlx5dr_ste_send_info *ste_info, + struct list_head *send_list, + bool copy_data); + +struct mlx5dr_ste_build { + u8 inner:1; + u8 rx:1; + struct mlx5dr_cmd_caps *caps; + u8 lu_type; + u16 byte_mask; + u8 bit_mask[DR_STE_SIZE_MASK]; + int (*ste_build_tag_func)(struct mlx5dr_match_param *spec, + struct mlx5dr_ste_build *sb, + u8 *hw_ste_p); +}; + +struct mlx5dr_ste_htbl * +mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool, + enum mlx5dr_icm_chunk_size chunk_size, + u8 lu_type, u16 byte_mask); + +int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl); + +static inline void mlx5dr_htbl_put(struct mlx5dr_ste_htbl *htbl) +{ + if (refcount_dec_and_test(&htbl->refcount)) + mlx5dr_ste_htbl_free(htbl); +} + +static inline void mlx5dr_htbl_get(struct mlx5dr_ste_htbl *htbl) +{ + refcount_inc(&htbl->refcount); +} + +/* STE utils */ +u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl); +void mlx5dr_ste_init(u8 *hw_ste_p, u8 lu_type, u8 entry_type, u16 gvmi); +void mlx5dr_ste_always_hit_htbl(struct mlx5dr_ste *ste, + struct mlx5dr_ste_htbl *next_htbl); +void mlx5dr_ste_set_miss_addr(u8 *hw_ste, u64 miss_addr); +u64 mlx5dr_ste_get_miss_addr(u8 *hw_ste); +void mlx5dr_ste_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi); +void mlx5dr_ste_set_hit_addr(u8 *hw_ste, u64 icm_addr, u32 ht_size); +void mlx5dr_ste_always_miss_addr(struct mlx5dr_ste *ste, u64 miss_addr); +void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask); +bool mlx5dr_ste_not_used_ste(struct mlx5dr_ste *ste); +bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher, + u8 ste_location); +void mlx5dr_ste_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag); +void mlx5dr_ste_set_counter_id(u8 *hw_ste_p, u32 ctr_id); +void mlx5dr_ste_set_tx_encap(void *hw_ste_p, u32 reformat_id, + int size, bool encap_l3); +void mlx5dr_ste_set_rx_decap(u8 *hw_ste_p); +void mlx5dr_ste_set_rx_decap_l3(u8 *hw_ste_p, bool vlan); +void mlx5dr_ste_set_rx_pop_vlan(u8 *hw_ste_p); +void mlx5dr_ste_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_tpid_pcp_dei_vid, + bool go_back); +void mlx5dr_ste_set_entry_type(u8 *hw_ste_p, u8 entry_type); +u8 mlx5dr_ste_get_entry_type(u8 *hw_ste_p); +void mlx5dr_ste_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions, + u32 re_write_index); +void mlx5dr_ste_set_go_back_bit(u8 *hw_ste_p); +u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste); +u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste); +struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste); + +void mlx5dr_ste_free(struct mlx5dr_ste *ste, + struct mlx5dr_matcher *matcher, + struct mlx5dr_matcher_rx_tx *nic_matcher); +static inline void mlx5dr_ste_put(struct mlx5dr_ste *ste, + struct mlx5dr_matcher *matcher, + struct 
mlx5dr_matcher_rx_tx *nic_matcher) +{ + if (refcount_dec_and_test(&ste->refcount)) + mlx5dr_ste_free(ste, matcher, nic_matcher); +} + +/* initialized to 0; increased only when the ste appears in a new rule */ +static inline void mlx5dr_ste_get(struct mlx5dr_ste *ste) +{ + refcount_inc(&ste->refcount); +} + +void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste, + struct mlx5dr_ste_htbl *next_htbl); +bool mlx5dr_ste_equal_tag(void *src, void *dst); +int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher, + struct mlx5dr_matcher_rx_tx *nic_matcher, + struct mlx5dr_ste *ste, + u8 *cur_hw_ste, + enum mlx5dr_icm_chunk_size log_table_size); + +/* STE build functions */ +int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn, + u8 match_criteria, + struct mlx5dr_match_param *mask, + struct mlx5dr_match_param *value); +int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher, + struct mlx5dr_matcher_rx_tx *nic_matcher, + struct mlx5dr_match_param *value, + u8 *ste_arr); +int mlx5dr_ste_build_eth_l2_src_des(struct mlx5dr_ste_build *builder, + struct mlx5dr_match_param *mask, + bool inner, bool rx); +void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx); +void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx); +void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx); +void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx); +void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx); +void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx); +void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx); +void mlx5dr_ste_build_ipv6_l3_l4(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx); +void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx); +void mlx5dr_ste_build_gre(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx); +void mlx5dr_ste_build_mpls(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx); +void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx); +int mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + struct mlx5dr_cmd_caps *caps, + bool inner, bool rx); +void mlx5dr_ste_build_flex_parser_tnl(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx); +void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx); +void mlx5dr_ste_build_register_0(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx); +void mlx5dr_ste_build_register_1(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx); +int mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + struct mlx5dr_cmd_caps *caps, + bool inner, bool rx); +void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx); + +/* Actions utils */ +int mlx5dr_actions_build_ste_arr(struct 
mlx5dr_matcher *matcher, + struct mlx5dr_matcher_rx_tx *nic_matcher, + struct mlx5dr_action *actions[], + u32 num_actions, + u8 *ste_arr, + u32 *new_hw_ste_arr_sz); + +struct mlx5dr_match_spec { + u32 smac_47_16; /* Source MAC address of incoming packet */ + /* Incoming packet Ethertype - this is the Ethertype + * following the last VLAN tag of the packet + */ + u32 ethertype:16; + u32 smac_15_0:16; /* Source MAC address of incoming packet */ + u32 dmac_47_16; /* Destination MAC address of incoming packet */ + /* VLAN ID of first VLAN tag in the incoming packet. + * Valid only when cvlan_tag==1 or svlan_tag==1 + */ + u32 first_vid:12; + /* CFI bit of first VLAN tag in the incoming packet. + * Valid only when cvlan_tag==1 or svlan_tag==1 + */ + u32 first_cfi:1; + /* Priority of first VLAN tag in the incoming packet. + * Valid only when cvlan_tag==1 or svlan_tag==1 + */ + u32 first_prio:3; + u32 dmac_15_0:16; /* Destination MAC address of incoming packet */ + /* TCP flags. Bit 0: FIN; Bit 1: SYN; Bit 2: RST; Bit 3: PSH; Bit 4: ACK; + * Bit 5: URG; Bit 6: ECE; Bit 7: CWR; Bit 8: NS + */ + u32 tcp_flags:9; + u32 ip_version:4; /* IP version */ + u32 frag:1; /* Packet is an IP fragment */ + /* The first vlan in the packet is s-vlan (0x88a8). + * cvlan_tag and svlan_tag cannot be set together + */ + u32 svlan_tag:1; + /* The first vlan in the packet is c-vlan (0x8100). + * cvlan_tag and svlan_tag cannot be set together + */ + u32 cvlan_tag:1; + /* Explicit Congestion Notification derived from + * Traffic Class/TOS field of IPv6/v4 + */ + u32 ip_ecn:2; + /* Differentiated Services Code Point derived from + * Traffic Class/TOS field of IPv6/v4 + */ + u32 ip_dscp:6; + u32 ip_protocol:8; /* IP protocol */ + /* TCP destination port. + * tcp and udp sport/dport are mutually exclusive + */ + u32 tcp_dport:16; + /* TCP source port; tcp and udp sport/dport are mutually exclusive */ + u32 tcp_sport:16; + u32 ttl_hoplimit:8; + u32 reserved:24; + /* UDP destination port; tcp and udp sport/dport are mutually exclusive */ + u32 udp_dport:16; + /* UDP source port; tcp and udp sport/dport are mutually exclusive */ + u32 udp_sport:16; + /* IPv6 source address of incoming packets + * For IPv4 address use bits 31:0 (rest of the bits are reserved) + * This field should be qualified by an appropriate ethertype + */ + u32 src_ip_127_96; + /* IPv6 source address of incoming packets + * For IPv4 address use bits 31:0 (rest of the bits are reserved) + * This field should be qualified by an appropriate ethertype + */ + u32 src_ip_95_64; + /* IPv6 source address of incoming packets + * For IPv4 address use bits 31:0 (rest of the bits are reserved) + * This field should be qualified by an appropriate ethertype + */ + u32 src_ip_63_32; + /* IPv6 source address of incoming packets + * For IPv4 address use bits 31:0 (rest of the bits are reserved) + * This field should be qualified by an appropriate ethertype + */ + u32 src_ip_31_0; + /* IPv6 destination address of incoming packets + * For IPv4 address use bits 31:0 (rest of the bits are reserved) + * This field should be qualified by an appropriate ethertype + */ + u32 dst_ip_127_96; + /* IPv6 destination address of incoming packets + * For IPv4 address use bits 31:0 (rest of the bits are reserved) + * This field should be qualified by an appropriate ethertype + */ + u32 dst_ip_95_64; + /* IPv6 destination address of incoming packets + * For IPv4 address use bits 31:0 (rest of the bits are reserved) + * This field should be qualified by an appropriate ethertype + */ + u32 
dst_ip_63_32; + /* IPv6 destination address of incoming packets + * For IPv4 address use bits 31:0 (rest of the bits are reserved) + * This field should be qualified by an appropriate ethertype + */ + u32 dst_ip_31_0; +}; + +struct mlx5dr_match_misc { + u32 source_sqn:24; /* Source SQN */ + u32 source_vhca_port:4; + /* used with GRE, sequence number exists when gre_s_present == 1 */ + u32 gre_s_present:1; + /* used with GRE, key exists when gre_k_present == 1 */ + u32 gre_k_present:1; + u32 reserved_auto1:1; + /* used with GRE, checksum exists when gre_c_present == 1 */ + u32 gre_c_present:1; + /* Source port; 0xffff determines wire port */ + u32 source_port:16; + u32 reserved_auto2:16; + /* VLAN ID of the second VLAN tag in the inner header of the incoming packet. + * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1 + */ + u32 inner_second_vid:12; + /* CFI bit of the second VLAN tag in the inner header of the incoming packet. + * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1 + */ + u32 inner_second_cfi:1; + /* Priority of second VLAN tag in the inner header of the incoming packet. + * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1 + */ + u32 inner_second_prio:3; + /* VLAN ID of the second VLAN tag in the outer header of the incoming packet. + * Valid only when outer_second_cvlan_tag ==1 or outer_second_svlan_tag ==1 + */ + u32 outer_second_vid:12; + /* CFI bit of the second VLAN tag in the outer header of the incoming packet. + * Valid only when outer_second_cvlan_tag ==1 or outer_second_svlan_tag ==1 + */ + u32 outer_second_cfi:1; + /* Priority of second VLAN tag in the outer header of the incoming packet. + * Valid only when outer_second_cvlan_tag ==1 or outer_second_svlan_tag ==1 + */ + u32 outer_second_prio:3; + u32 gre_protocol:16; /* GRE Protocol (outer) */ + u32 reserved_auto3:12; + /* The second vlan in the inner header of the packet is s-vlan (0x88a8). + * inner_second_cvlan_tag and inner_second_svlan_tag cannot be set together + */ + u32 inner_second_svlan_tag:1; + /* The second vlan in the outer header of the packet is s-vlan (0x88a8). + * outer_second_cvlan_tag and outer_second_svlan_tag cannot be set together + */ + u32 outer_second_svlan_tag:1; + /* The second vlan in the inner header of the packet is c-vlan (0x8100). + * inner_second_cvlan_tag and inner_second_svlan_tag cannot be set together + */ + u32 inner_second_cvlan_tag:1; + /* The second vlan in the outer header of the packet is c-vlan (0x8100). 
+ * outer_second_cvlan_tag and outer_second_svlan_tag cannot be set together + */ + u32 outer_second_cvlan_tag:1; + u32 gre_key_l:8; /* GRE Key [7:0] (outer) */ + u32 gre_key_h:24; /* GRE Key[31:8] (outer) */ + u32 reserved_auto4:8; + u32 vxlan_vni:24; /* VXLAN VNI (outer) */ + u32 geneve_oam:1; /* GENEVE OAM field (outer) */ + u32 reserved_auto5:7; + u32 geneve_vni:24; /* GENEVE VNI field (outer) */ + u32 outer_ipv6_flow_label:20; /* Flow label of incoming IPv6 packet (outer) */ + u32 reserved_auto6:12; + u32 inner_ipv6_flow_label:20; /* Flow label of incoming IPv6 packet (inner) */ + u32 reserved_auto7:12; + u32 geneve_protocol_type:16; /* GENEVE protocol type (outer) */ + u32 geneve_opt_len:6; /* GENEVE OptLen (outer) */ + u32 reserved_auto8:10; + u32 bth_dst_qp:24; /* Destination QP in BTH header */ + u32 reserved_auto9:8; + u8 reserved_auto10[20]; +}; + +struct mlx5dr_match_misc2 { + u32 outer_first_mpls_ttl:8; /* First MPLS TTL (outer) */ + u32 outer_first_mpls_s_bos:1; /* First MPLS S_BOS (outer) */ + u32 outer_first_mpls_exp:3; /* First MPLS EXP (outer) */ + u32 outer_first_mpls_label:20; /* First MPLS LABEL (outer) */ + u32 inner_first_mpls_ttl:8; /* First MPLS TTL (inner) */ + u32 inner_first_mpls_s_bos:1; /* First MPLS S_BOS (inner) */ + u32 inner_first_mpls_exp:3; /* First MPLS EXP (inner) */ + u32 inner_first_mpls_label:20; /* First MPLS LABEL (inner) */ + u32 outer_first_mpls_over_gre_ttl:8; /* last MPLS TTL (outer) */ + u32 outer_first_mpls_over_gre_s_bos:1; /* last MPLS S_BOS (outer) */ + u32 outer_first_mpls_over_gre_exp:3; /* last MPLS EXP (outer) */ + u32 outer_first_mpls_over_gre_label:20; /* last MPLS LABEL (outer) */ + u32 outer_first_mpls_over_udp_ttl:8; /* last MPLS TTL (outer) */ + u32 outer_first_mpls_over_udp_s_bos:1; /* last MPLS S_BOS (outer) */ + u32 outer_first_mpls_over_udp_exp:3; /* last MPLS EXP (outer) */ + u32 outer_first_mpls_over_udp_label:20; /* last MPLS LABEL (outer) */ + u32 metadata_reg_c_7; /* metadata_reg_c_7 */ + u32 metadata_reg_c_6; /* metadata_reg_c_6 */ + u32 metadata_reg_c_5; /* metadata_reg_c_5 */ + u32 metadata_reg_c_4; /* metadata_reg_c_4 */ + u32 metadata_reg_c_3; /* metadata_reg_c_3 */ + u32 metadata_reg_c_2; /* metadata_reg_c_2 */ + u32 metadata_reg_c_1; /* metadata_reg_c_1 */ + u32 metadata_reg_c_0; /* metadata_reg_c_0 */ + u32 metadata_reg_a; /* metadata_reg_a */ + u32 metadata_reg_b; /* metadata_reg_b */ + u8 reserved_auto2[8]; +}; + +struct mlx5dr_match_misc3 { + u32 inner_tcp_seq_num; + u32 outer_tcp_seq_num; + u32 inner_tcp_ack_num; + u32 outer_tcp_ack_num; + u32 outer_vxlan_gpe_vni:24; + u32 reserved_auto1:8; + u32 reserved_auto2:16; + u32 outer_vxlan_gpe_flags:8; + u32 outer_vxlan_gpe_next_protocol:8; + u32 icmpv4_header_data; + u32 icmpv6_header_data; + u32 icmpv6_code:8; + u32 icmpv6_type:8; + u32 icmpv4_code:8; + u32 icmpv4_type:8; + u8 reserved_auto3[0x1c]; +}; + +struct mlx5dr_match_param { + struct mlx5dr_match_spec outer; + struct mlx5dr_match_misc misc; + struct mlx5dr_match_spec inner; + struct mlx5dr_match_misc2 misc2; + struct mlx5dr_match_misc3 misc3; +}; + +#define DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(_misc3) ((_misc3)->icmpv4_type || \ + (_misc3)->icmpv4_code || \ + (_misc3)->icmpv4_header_data) + +struct mlx5dr_esw_caps { + u64 drop_icm_address_rx; + u64 drop_icm_address_tx; + u64 uplink_icm_address_rx; + u64 uplink_icm_address_tx; + bool sw_owner; +}; + +struct mlx5dr_cmd_vport_cap { + u16 vport_gvmi; + u16 vhca_gvmi; + u64 icm_address_rx; + u64 icm_address_tx; + u32 num; +}; + +struct mlx5dr_cmd_caps { + u16 
gvmi; + u64 nic_rx_drop_address; + u64 nic_tx_drop_address; + u64 nic_tx_allow_address; + u64 esw_rx_drop_address; + u64 esw_tx_drop_address; + u32 log_icm_size; + u64 hdr_modify_icm_addr; + u32 flex_protocols; + u8 flex_parser_id_icmp_dw0; + u8 flex_parser_id_icmp_dw1; + u8 flex_parser_id_icmpv6_dw0; + u8 flex_parser_id_icmpv6_dw1; + u8 max_ft_level; + u16 roce_min_src_udp; + u8 num_esw_ports; + bool eswitch_manager; + bool rx_sw_owner; + bool tx_sw_owner; + bool fdb_sw_owner; + u32 num_vports; + struct mlx5dr_esw_caps esw_caps; + struct mlx5dr_cmd_vport_cap *vports_caps; + bool prio_tag_required; +}; + +struct mlx5dr_domain_rx_tx { + u64 drop_icm_addr; + u64 default_icm_addr; + enum mlx5dr_ste_entry_type ste_type; +}; + +struct mlx5dr_domain_info { + bool supp_sw_steering; + u32 max_inline_size; + u32 max_send_wr; + u32 max_log_sw_icm_sz; + u32 max_log_action_icm_sz; + struct mlx5dr_domain_rx_tx rx; + struct mlx5dr_domain_rx_tx tx; + struct mlx5dr_cmd_caps caps; +}; + +struct mlx5dr_domain_cache { + struct mlx5dr_fw_recalc_cs_ft **recalc_cs_ft; +}; + +struct mlx5dr_domain { + struct mlx5dr_domain *peer_dmn; + struct mlx5_core_dev *mdev; + u32 pdn; + struct mlx5_uars_page *uar; + enum mlx5dr_domain_type type; + refcount_t refcount; + struct mutex mutex; /* protect domain */ + struct mlx5dr_icm_pool *ste_icm_pool; + struct mlx5dr_icm_pool *action_icm_pool; + struct mlx5dr_send_ring *send_ring; + struct mlx5dr_domain_info info; + struct mlx5dr_domain_cache cache; +}; + +struct mlx5dr_table_rx_tx { + struct mlx5dr_ste_htbl *s_anchor; + struct mlx5dr_domain_rx_tx *nic_dmn; + u64 default_icm_addr; +}; + +struct mlx5dr_table { + struct mlx5dr_domain *dmn; + struct mlx5dr_table_rx_tx rx; + struct mlx5dr_table_rx_tx tx; + u32 level; + u32 table_type; + u32 table_id; + struct list_head matcher_list; + struct mlx5dr_action *miss_action; + refcount_t refcount; +}; + +struct mlx5dr_matcher_rx_tx { + struct mlx5dr_ste_htbl *s_htbl; + struct mlx5dr_ste_htbl *e_anchor; + struct mlx5dr_ste_build *ste_builder; + struct mlx5dr_ste_build ste_builder4[DR_RULE_MAX_STES]; + struct mlx5dr_ste_build ste_builder6[DR_RULE_MAX_STES]; + u8 num_of_builders; + u8 num_of_builders4; + u8 num_of_builders6; + u64 default_icm_addr; + struct mlx5dr_table_rx_tx *nic_tbl; +}; + +struct mlx5dr_matcher { + struct mlx5dr_table *tbl; + struct mlx5dr_matcher_rx_tx rx; + struct mlx5dr_matcher_rx_tx tx; + struct list_head matcher_list; + u16 prio; + struct mlx5dr_match_param mask; + u8 match_criteria; + refcount_t refcount; + struct mlx5dv_flow_matcher *dv_matcher; +}; + +struct mlx5dr_rule_member { + struct mlx5dr_ste *ste; + /* attached to mlx5dr_rule via this */ + struct list_head list; + /* attached to mlx5dr_ste via this */ + struct list_head use_ste_list; +}; + +struct mlx5dr_action { + enum mlx5dr_action_type action_type; + refcount_t refcount; + union { + struct { + struct mlx5dr_domain *dmn; + struct mlx5dr_icm_chunk *chunk; + u8 *data; + u32 data_size; + u16 num_of_actions; + u32 index; + u8 allow_rx:1; + u8 allow_tx:1; + u8 modify_ttl:1; + } rewrite; + struct { + struct mlx5dr_domain *dmn; + u32 reformat_id; + u32 reformat_size; + } reformat; + struct { + u8 is_fw_tbl:1; + union { + struct mlx5dr_table *tbl; + struct { + struct mlx5_flow_table *ft; + u64 rx_icm_addr; + u64 tx_icm_addr; + struct mlx5_core_dev *mdev; + } fw_tbl; + }; + } dest_tbl; + struct { + u32 ctr_id; + u32 offeset; + } ctr; + struct { + struct mlx5dr_domain *dmn; + struct mlx5dr_cmd_vport_cap *caps; + u32 num; + } vport; + struct { + u32 vlan_hdr; /* 
tpid_pcp_dei_vid */ + } push_vlan; + u32 flow_tag; + }; +}; + +enum mlx5dr_connect_type { + CONNECT_HIT = 1, + CONNECT_MISS = 2, +}; + +struct mlx5dr_htbl_connect_info { + enum mlx5dr_connect_type type; + union { + struct mlx5dr_ste_htbl *hit_next_htbl; + u64 miss_icm_addr; + }; +}; + +struct mlx5dr_rule_rx_tx { + struct list_head rule_members_list; + struct mlx5dr_matcher_rx_tx *nic_matcher; +}; + +struct mlx5dr_rule { + struct mlx5dr_matcher *matcher; + struct mlx5dr_rule_rx_tx rx; + struct mlx5dr_rule_rx_tx tx; + struct list_head rule_actions_list; +}; + +void mlx5dr_rule_update_rule_member(struct mlx5dr_ste *new_ste, + struct mlx5dr_ste *ste); + +struct mlx5dr_icm_chunk { + struct mlx5dr_icm_bucket *bucket; + struct list_head chunk_list; + u32 rkey; + u32 num_of_entries; + u32 byte_size; + u64 icm_addr; + u64 mr_addr; + + /* Memory optimisation */ + struct mlx5dr_ste *ste_arr; + u8 *hw_ste_arr; + struct list_head *miss_list; +}; + +static inline int +mlx5dr_matcher_supp_flex_parser_icmp_v4(struct mlx5dr_cmd_caps *caps) +{ + return caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED; +} + +static inline int +mlx5dr_matcher_supp_flex_parser_icmp_v6(struct mlx5dr_cmd_caps *caps) +{ + return caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED; +} + +int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher, + struct mlx5dr_matcher_rx_tx *nic_matcher, + bool ipv6); + +static inline u32 +mlx5dr_icm_pool_chunk_size_to_entries(enum mlx5dr_icm_chunk_size chunk_size) +{ + return 1 << chunk_size; +} + +static inline int +mlx5dr_icm_pool_chunk_size_to_byte(enum mlx5dr_icm_chunk_size chunk_size, + enum mlx5dr_icm_type icm_type) +{ + int num_of_entries; + int entry_size; + + if (icm_type == DR_ICM_TYPE_STE) + entry_size = DR_STE_SIZE; + else + entry_size = DR_MODIFY_ACTION_SIZE; + + num_of_entries = mlx5dr_icm_pool_chunk_size_to_entries(chunk_size); + + return entry_size * num_of_entries; +} + +static inline struct mlx5dr_cmd_vport_cap * +mlx5dr_get_vport_cap(struct mlx5dr_cmd_caps *caps, u32 vport) +{ + if (!caps->vports_caps || + (vport >= caps->num_vports && vport != WIRE_PORT)) + return NULL; + + if (vport == WIRE_PORT) + vport = caps->num_vports; + + return &caps->vports_caps[vport]; +} + +struct mlx5dr_cmd_query_flow_table_details { + u8 status; + u8 level; + u64 sw_owner_icm_root_1; + u64 sw_owner_icm_root_0; +}; + +/* internal API functions */ +int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev, + struct mlx5dr_cmd_caps *caps); +int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev, + bool other_vport, u16 vport_number, + u64 *icm_address_rx, + u64 *icm_address_tx); +int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev, + bool other_vport, u16 vport_number, u16 *gvmi); +int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev, + struct mlx5dr_esw_caps *caps); +int mlx5dr_cmd_sync_steering(struct mlx5_core_dev *mdev); +int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev, + u32 table_type, + u32 table_id, + u32 group_id, + u32 modify_header_id, + u32 vport_id); +int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev, + u32 table_type, + u32 table_id); +int mlx5dr_cmd_alloc_modify_header(struct mlx5_core_dev *mdev, + u32 table_type, + u8 num_of_actions, + u64 *actions, + u32 *modify_header_id); +int mlx5dr_cmd_dealloc_modify_header(struct mlx5_core_dev *mdev, + u32 modify_header_id); +int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev, + u32 table_type, + u32 table_id, + u32 *group_id); +int 
mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev, + u32 table_type, + u32 table_id, + u32 group_id); +int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev, + u32 table_type, + u64 icm_addr_rx, + u64 icm_addr_tx, + u8 level, + bool sw_owner, + bool term_tbl, + u64 *fdb_rx_icm_addr, + u32 *table_id); +int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev, + u32 table_id, + u32 table_type); +int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev, + enum fs_flow_table_type type, + u32 table_id, + struct mlx5dr_cmd_query_flow_table_details *output); +int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev, + enum mlx5_reformat_ctx_type rt, + size_t reformat_size, + void *reformat_data, + u32 *reformat_id); +void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev, + u32 reformat_id); + +struct mlx5dr_cmd_gid_attr { + u8 gid[16]; + u8 mac[6]; + u32 roce_ver; +}; + +struct mlx5dr_cmd_qp_create_attr { + u32 page_id; + u32 pdn; + u32 cqn; + u32 pm_state; + u32 service_type; + u32 buff_umem_id; + u32 db_umem_id; + u32 sq_wqe_cnt; + u32 rq_wqe_cnt; + u32 rq_wqe_shift; +}; + +int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num, + u16 index, struct mlx5dr_cmd_gid_attr *attr); + +struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn, + enum mlx5dr_icm_type icm_type); +void mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool *pool); + +struct mlx5dr_icm_chunk * +mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool, + enum mlx5dr_icm_chunk_size chunk_size); +void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk); +bool mlx5dr_ste_is_not_valid_entry(u8 *p_hw_ste); +int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn, + struct mlx5dr_domain_rx_tx *nic_dmn, + struct mlx5dr_ste_htbl *htbl, + struct mlx5dr_htbl_connect_info *connect_info, + bool update_hw_ste); +void mlx5dr_ste_set_formatted_ste(u16 gvmi, + struct mlx5dr_domain_rx_tx *nic_dmn, + struct mlx5dr_ste_htbl *htbl, + u8 *formatted_ste, + struct mlx5dr_htbl_connect_info *connect_info); +void mlx5dr_ste_copy_param(u8 match_criteria, + struct mlx5dr_match_param *set_param, + struct mlx5dr_match_parameters *mask); + +void mlx5dr_crc32_init_table(void); +u32 mlx5dr_crc32_slice8_calc(const void *input_data, size_t length); + +struct mlx5dr_qp { + struct mlx5_core_dev *mdev; + struct mlx5_wq_qp wq; + struct mlx5_uars_page *uar; + struct mlx5_wq_ctrl wq_ctrl; + struct mlx5_core_qp mqp; + struct { + unsigned int pc; + unsigned int cc; + unsigned int size; + unsigned int *wqe_head; + unsigned int wqe_cnt; + } sq; + struct { + unsigned int pc; + unsigned int cc; + unsigned int size; + unsigned int wqe_cnt; + } rq; + int max_inline_data; +}; + +struct mlx5dr_cq { + struct mlx5_core_dev *mdev; + struct mlx5_cqwq wq; + struct mlx5_wq_ctrl wq_ctrl; + struct mlx5_core_cq mcq; + struct mlx5dr_qp *qp; +}; + +struct mlx5dr_mr { + struct mlx5_core_dev *mdev; + struct mlx5_core_mkey mkey; + dma_addr_t dma_addr; + void *addr; + size_t size; +}; + +#define MAX_SEND_CQE 64 +#define MIN_READ_SYNC 64 + +struct mlx5dr_send_ring { + struct mlx5dr_cq *cq; + struct mlx5dr_qp *qp; + struct mlx5dr_mr *mr; + /* How many WQEs are waiting for completion */ + u32 pending_wqe; + /* A completion signal is requested per this threshold value */ + u16 signal_th; + /* Each post_send size must be less than max_post_send_size */ + u32 max_post_send_size; + /* manage the send queue */ + u32 tx_head; + void *buf; + u32 buf_size; + struct ib_wc wc[MAX_SEND_CQE]; + u8 sync_buff[MIN_READ_SYNC]; + struct mlx5dr_mr *sync_mr; +}; + +int 
mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn); +void mlx5dr_send_ring_free(struct mlx5dr_domain *dmn, + struct mlx5dr_send_ring *send_ring); +int mlx5dr_send_ring_force_drain(struct mlx5dr_domain *dmn); +int mlx5dr_send_postsend_ste(struct mlx5dr_domain *dmn, + struct mlx5dr_ste *ste, + u8 *data, + u16 size, + u16 offset); +int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn, + struct mlx5dr_ste_htbl *htbl, + u8 *formatted_ste, u8 *mask); +int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn, + struct mlx5dr_ste_htbl *htbl, + u8 *ste_init_data, + bool update_hw_ste); +int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn, + struct mlx5dr_action *action); + +struct mlx5dr_fw_recalc_cs_ft { + u64 rx_icm_addr; + u32 table_id; + u32 group_id; + u32 modify_hdr_id; +}; + +struct mlx5dr_fw_recalc_cs_ft * +mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num); +void mlx5dr_fw_destroy_recalc_cs_ft(struct mlx5dr_domain *dmn, + struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft); +int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn, + u32 vport_num, + u64 *rx_icm_addr); +#endif /* _DR_TYPES_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c new file mode 100644 index 000000000000..3d587d0bdbbe --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c @@ -0,0 +1,600 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2019 Mellanox Technologies */ + +#include "mlx5_core.h" +#include "fs_core.h" +#include "fs_cmd.h" +#include "mlx5dr.h" +#include "fs_dr.h" + +static bool mlx5_dr_is_fw_table(u32 flags) +{ + if (flags & MLX5_FLOW_TABLE_TERMINATION) + return true; + + return false; +} + +static int mlx5_cmd_dr_update_root_ft(struct mlx5_flow_root_namespace *ns, + struct mlx5_flow_table *ft, + u32 underlay_qpn, + bool disconnect) +{ + return mlx5_fs_cmd_get_fw_cmds()->update_root_ft(ns, ft, underlay_qpn, + disconnect); +} + +static int set_miss_action(struct mlx5_flow_root_namespace *ns, + struct mlx5_flow_table *ft, + struct mlx5_flow_table *next_ft) +{ + struct mlx5dr_action *old_miss_action; + struct mlx5dr_action *action = NULL; + struct mlx5dr_table *next_tbl; + int err; + + next_tbl = next_ft ? 
next_ft->fs_dr_table.dr_table : NULL; + if (next_tbl) { + action = mlx5dr_action_create_dest_table(next_tbl); + if (!action) + return -EINVAL; + } + old_miss_action = ft->fs_dr_table.miss_action; + err = mlx5dr_table_set_miss_action(ft->fs_dr_table.dr_table, action); + if (err && action) { + err = mlx5dr_action_destroy(action); + if (err) { + action = NULL; + mlx5_core_err(ns->dev, "Failed to destroy action (%d)\n", + err); + } + } + ft->fs_dr_table.miss_action = action; + if (old_miss_action) { + err = mlx5dr_action_destroy(old_miss_action); + if (err) + mlx5_core_err(ns->dev, "Failed to destroy action (%d)\n", + err); + } + + return err; +} + +static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns, + struct mlx5_flow_table *ft, + unsigned int log_size, + struct mlx5_flow_table *next_ft) +{ + struct mlx5dr_table *tbl; + int err; + + if (mlx5_dr_is_fw_table(ft->flags)) + return mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft, + log_size, + next_ft); + + tbl = mlx5dr_table_create(ns->fs_dr_domain.dr_domain, + ft->level); + if (!tbl) { + mlx5_core_err(ns->dev, "Failed creating dr flow_table\n"); + return -EINVAL; + } + + ft->fs_dr_table.dr_table = tbl; + ft->id = mlx5dr_table_get_id(tbl); + + if (next_ft) { + err = set_miss_action(ns, ft, next_ft); + if (err) { + mlx5dr_table_destroy(tbl); + ft->fs_dr_table.dr_table = NULL; + return err; + } + } + + return 0; +} + +static int mlx5_cmd_dr_destroy_flow_table(struct mlx5_flow_root_namespace *ns, + struct mlx5_flow_table *ft) +{ + struct mlx5dr_action *action = ft->fs_dr_table.miss_action; + int err; + + if (mlx5_dr_is_fw_table(ft->flags)) + return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft); + + err = mlx5dr_table_destroy(ft->fs_dr_table.dr_table); + if (err) { + mlx5_core_err(ns->dev, "Failed to destroy flow_table (%d)\n", + err); + return err; + } + if (action) { + err = mlx5dr_action_destroy(action); + if (err) { + mlx5_core_err(ns->dev, "Failed to destroy action(%d)\n", + err); + return err; + } + } + + return err; +} + +static int mlx5_cmd_dr_modify_flow_table(struct mlx5_flow_root_namespace *ns, + struct mlx5_flow_table *ft, + struct mlx5_flow_table *next_ft) +{ + return set_miss_action(ns, ft, next_ft); +} + +static int mlx5_cmd_dr_create_flow_group(struct mlx5_flow_root_namespace *ns, + struct mlx5_flow_table *ft, + u32 *in, + struct mlx5_flow_group *fg) +{ + struct mlx5dr_matcher *matcher; + u16 priority = MLX5_GET(create_flow_group_in, in, + start_flow_index); + u8 match_criteria_enable = MLX5_GET(create_flow_group_in, + in, + match_criteria_enable); + struct mlx5dr_match_parameters mask; + + if (mlx5_dr_is_fw_table(ft->flags)) + return mlx5_fs_cmd_get_fw_cmds()->create_flow_group(ns, ft, in, + fg); + + mask.match_buf = MLX5_ADDR_OF(create_flow_group_in, + in, match_criteria); + mask.match_sz = sizeof(fg->mask.match_criteria); + + matcher = mlx5dr_matcher_create(ft->fs_dr_table.dr_table, + priority, + match_criteria_enable, + &mask); + if (!matcher) { + mlx5_core_err(ns->dev, "Failed creating matcher\n"); + return -EINVAL; + } + + fg->fs_dr_matcher.dr_matcher = matcher; + return 0; +} + +static int mlx5_cmd_dr_destroy_flow_group(struct mlx5_flow_root_namespace *ns, + struct mlx5_flow_table *ft, + struct mlx5_flow_group *fg) +{ + if (mlx5_dr_is_fw_table(ft->flags)) + return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_group(ns, ft, fg); + + return mlx5dr_matcher_destroy(fg->fs_dr_matcher.dr_matcher); +} + +static struct mlx5dr_action *create_vport_action(struct mlx5dr_domain *domain, + struct 
mlx5_flow_rule *dst) +{ + struct mlx5_flow_destination *dest_attr = &dst->dest_attr; + + return mlx5dr_action_create_dest_vport(domain, dest_attr->vport.num, + dest_attr->vport.flags & + MLX5_FLOW_DEST_VPORT_VHCA_ID, + dest_attr->vport.vhca_id); +} + +static struct mlx5dr_action *create_ft_action(struct mlx5_core_dev *dev, + struct mlx5_flow_rule *dst) +{ + struct mlx5_flow_table *dest_ft = dst->dest_attr.ft; + + if (mlx5_dr_is_fw_table(dest_ft->flags)) + return mlx5dr_create_action_dest_flow_fw_table(dest_ft, dev); + return mlx5dr_action_create_dest_table(dest_ft->fs_dr_table.dr_table); +} + +static struct mlx5dr_action *create_action_push_vlan(struct mlx5dr_domain *domain, + struct mlx5_fs_vlan *vlan) +{ + u16 n_ethtype = vlan->ethtype; + u8 prio = vlan->prio; + u16 vid = vlan->vid; + u32 vlan_hdr; + + vlan_hdr = (u32)n_ethtype << 16 | (u32)(prio) << 12 | (u32)vid; + return mlx5dr_action_create_push_vlan(domain, htonl(vlan_hdr)); +} + +#define MLX5_FLOW_CONTEXT_ACTION_MAX 20 +static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, + struct mlx5_flow_table *ft, + struct mlx5_flow_group *group, + struct fs_fte *fte) +{ + struct mlx5dr_domain *domain = ns->fs_dr_domain.dr_domain; + struct mlx5dr_action *term_action = NULL; + struct mlx5dr_match_parameters params; + struct mlx5_core_dev *dev = ns->dev; + struct mlx5dr_action **fs_dr_actions; + struct mlx5dr_action *tmp_action; + struct mlx5dr_action **actions; + bool delay_encap_set = false; + struct mlx5dr_rule *rule; + struct mlx5_flow_rule *dst; + int fs_dr_num_actions = 0; + int num_actions = 0; + size_t match_sz; + int err = 0; + int i; + + if (mlx5_dr_is_fw_table(ft->flags)) + return mlx5_fs_cmd_get_fw_cmds()->create_fte(ns, ft, group, fte); + + actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, sizeof(*actions), + GFP_KERNEL); + if (!actions) + return -ENOMEM; + + fs_dr_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, + sizeof(*fs_dr_actions), GFP_KERNEL); + if (!fs_dr_actions) { + kfree(actions); + return -ENOMEM; + } + + match_sz = sizeof(fte->val); + + /* The order of the actions must be kept; only the following + * order is supported by SW steering: + * TX: push vlan -> modify header -> encap + * RX: decap -> pop vlan -> modify header + */ + if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) { + tmp_action = create_action_push_vlan(domain, &fte->action.vlan[0]); + if (!tmp_action) { + err = -ENOMEM; + goto free_actions; + } + fs_dr_actions[fs_dr_num_actions++] = tmp_action; + actions[num_actions++] = tmp_action; + } + + if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) { + tmp_action = create_action_push_vlan(domain, &fte->action.vlan[1]); + if (!tmp_action) { + err = -ENOMEM; + goto free_actions; + } + fs_dr_actions[fs_dr_num_actions++] = tmp_action; + actions[num_actions++] = tmp_action; + } + + if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) { + enum mlx5dr_action_reformat_type decap_type = + DR_ACTION_REFORMAT_TYP_TNL_L2_TO_L2; + + tmp_action = mlx5dr_action_create_packet_reformat(domain, + decap_type, 0, + NULL); + if (!tmp_action) { + err = -ENOMEM; + goto free_actions; + } + fs_dr_actions[fs_dr_num_actions++] = tmp_action; + actions[num_actions++] = tmp_action; + } + + if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) { + bool is_decap = fte->action.pkt_reformat->reformat_type == + MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2; + + if (is_decap) + actions[num_actions++] = + fte->action.pkt_reformat->action.dr_action; + else + delay_encap_set = true; + } + + if 
(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) { + tmp_action = + mlx5dr_action_create_pop_vlan(); + if (!tmp_action) { + err = -ENOMEM; + goto free_actions; + } + fs_dr_actions[fs_dr_num_actions++] = tmp_action; + actions[num_actions++] = tmp_action; + } + + if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2) { + tmp_action = + mlx5dr_action_create_pop_vlan(); + if (!tmp_action) { + err = -ENOMEM; + goto free_actions; + } + fs_dr_actions[fs_dr_num_actions++] = tmp_action; + actions[num_actions++] = tmp_action; + } + + if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) + actions[num_actions++] = + fte->action.modify_hdr->action.dr_action; + + if (delay_encap_set) + actions[num_actions++] = + fte->action.pkt_reformat->action.dr_action; + + /* The order of the actions below is not important */ + + if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_DROP) { + tmp_action = mlx5dr_action_create_drop(); + if (!tmp_action) { + err = -ENOMEM; + goto free_actions; + } + fs_dr_actions[fs_dr_num_actions++] = tmp_action; + term_action = tmp_action; + } + + if (fte->flow_context.flow_tag) { + tmp_action = + mlx5dr_action_create_tag(fte->flow_context.flow_tag); + if (!tmp_action) { + err = -ENOMEM; + goto free_actions; + } + fs_dr_actions[fs_dr_num_actions++] = tmp_action; + actions[num_actions++] = tmp_action; + } + + if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { + list_for_each_entry(dst, &fte->node.children, node.list) { + enum mlx5_flow_destination_type type = dst->dest_attr.type; + u32 id; + + if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { + err = -ENOSPC; + goto free_actions; + } + + switch (type) { + case MLX5_FLOW_DESTINATION_TYPE_COUNTER: + id = dst->dest_attr.counter_id; + + tmp_action = + mlx5dr_action_create_flow_counter(id); + if (!tmp_action) { + err = -ENOMEM; + goto free_actions; + } + fs_dr_actions[fs_dr_num_actions++] = tmp_action; + actions[num_actions++] = tmp_action; + break; + case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE: + tmp_action = create_ft_action(dev, dst); + if (!tmp_action) { + err = -ENOMEM; + goto free_actions; + } + fs_dr_actions[fs_dr_num_actions++] = tmp_action; + term_action = tmp_action; + break; + case MLX5_FLOW_DESTINATION_TYPE_VPORT: + tmp_action = create_vport_action(domain, dst); + if (!tmp_action) { + err = -ENOMEM; + goto free_actions; + } + fs_dr_actions[fs_dr_num_actions++] = tmp_action; + term_action = tmp_action; + break; + default: + err = -EOPNOTSUPP; + goto free_actions; + } + } + } + + params.match_sz = match_sz; + params.match_buf = (u64 *)fte->val; + + if (term_action) + actions[num_actions++] = term_action; + + rule = mlx5dr_rule_create(group->fs_dr_matcher.dr_matcher, + ¶ms, + num_actions, + actions); + if (!rule) { + err = -EINVAL; + goto free_actions; + } + + kfree(actions); + fte->fs_dr_rule.dr_rule = rule; + fte->fs_dr_rule.num_actions = fs_dr_num_actions; + fte->fs_dr_rule.dr_actions = fs_dr_actions; + + return 0; + +free_actions: + for (i = 0; i < fs_dr_num_actions; i++) + if (!IS_ERR_OR_NULL(fs_dr_actions[i])) + mlx5dr_action_destroy(fs_dr_actions[i]); + + mlx5_core_err(dev, "Failed to create dr rule err(%d)\n", err); + kfree(actions); + kfree(fs_dr_actions); + return err; +} + +static int mlx5_cmd_dr_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns, + int reformat_type, + size_t size, + void *reformat_data, + enum mlx5_flow_namespace_type namespace, + struct mlx5_pkt_reformat *pkt_reformat) +{ + struct mlx5dr_domain *dr_domain = ns->fs_dr_domain.dr_domain; + struct mlx5dr_action *action; 
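+ /* dr_reformat receives the SW-steering (dr) equivalent of the fs + * reformat type resolved by the switch below; types with no dr + * equivalent are rejected with -EOPNOTSUPP. + */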
+ int dr_reformat; + + switch (reformat_type) { + case MLX5_REFORMAT_TYPE_L2_TO_VXLAN: + case MLX5_REFORMAT_TYPE_L2_TO_NVGRE: + case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL: + dr_reformat = DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L2; + break; + case MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2: + dr_reformat = DR_ACTION_REFORMAT_TYP_TNL_L3_TO_L2; + break; + case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL: + dr_reformat = DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L3; + break; + default: + mlx5_core_err(ns->dev, "Packet-reformat not supported(%d)\n", + reformat_type); + return -EOPNOTSUPP; + } + + action = mlx5dr_action_create_packet_reformat(dr_domain, + dr_reformat, + size, + reformat_data); + if (!action) { + mlx5_core_err(ns->dev, "Failed allocating packet-reformat action\n"); + return -EINVAL; + } + + pkt_reformat->action.dr_action = action; + + return 0; +} + +static void mlx5_cmd_dr_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns, + struct mlx5_pkt_reformat *pkt_reformat) +{ + mlx5dr_action_destroy(pkt_reformat->action.dr_action); +} + +static int mlx5_cmd_dr_modify_header_alloc(struct mlx5_flow_root_namespace *ns, + u8 namespace, u8 num_actions, + void *modify_actions, + struct mlx5_modify_hdr *modify_hdr) +{ + struct mlx5dr_domain *dr_domain = ns->fs_dr_domain.dr_domain; + struct mlx5dr_action *action; + size_t actions_sz; + + actions_sz = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto) * + num_actions; + action = mlx5dr_action_create_modify_header(dr_domain, 0, + actions_sz, + modify_actions); + if (!action) { + mlx5_core_err(ns->dev, "Failed allocating modify-header action\n"); + return -EINVAL; + } + + modify_hdr->action.dr_action = action; + + return 0; +} + +static void mlx5_cmd_dr_modify_header_dealloc(struct mlx5_flow_root_namespace *ns, + struct mlx5_modify_hdr *modify_hdr) +{ + mlx5dr_action_destroy(modify_hdr->action.dr_action); +} + +static int mlx5_cmd_dr_update_fte(struct mlx5_flow_root_namespace *ns, + struct mlx5_flow_table *ft, + struct mlx5_flow_group *group, + int modify_mask, + struct fs_fte *fte) +{ + return -EOPNOTSUPP; +} + +static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns, + struct mlx5_flow_table *ft, + struct fs_fte *fte) +{ + struct mlx5_fs_dr_rule *rule = &fte->fs_dr_rule; + int err; + int i; + + if (mlx5_dr_is_fw_table(ft->flags)) + return mlx5_fs_cmd_get_fw_cmds()->delete_fte(ns, ft, fte); + + err = mlx5dr_rule_destroy(rule->dr_rule); + if (err) + return err; + + for (i = 0; i < rule->num_actions; i++) + if (!IS_ERR_OR_NULL(rule->dr_actions[i])) + mlx5dr_action_destroy(rule->dr_actions[i]); + + kfree(rule->dr_actions); + return 0; +} + +static int mlx5_cmd_dr_set_peer(struct mlx5_flow_root_namespace *ns, + struct mlx5_flow_root_namespace *peer_ns) +{ + struct mlx5dr_domain *peer_domain = NULL; + + if (peer_ns) + peer_domain = peer_ns->fs_dr_domain.dr_domain; + mlx5dr_domain_set_peer(ns->fs_dr_domain.dr_domain, + peer_domain); + return 0; +} + +static int mlx5_cmd_dr_create_ns(struct mlx5_flow_root_namespace *ns) +{ + ns->fs_dr_domain.dr_domain = + mlx5dr_domain_create(ns->dev, + MLX5DR_DOMAIN_TYPE_FDB); + if (!ns->fs_dr_domain.dr_domain) { + mlx5_core_err(ns->dev, "Failed to create dr flow namespace\n"); + return -EOPNOTSUPP; + } + return 0; +} + +static int mlx5_cmd_dr_destroy_ns(struct mlx5_flow_root_namespace *ns) +{ + return mlx5dr_domain_destroy(ns->fs_dr_domain.dr_domain); +} + +bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev) +{ + return mlx5dr_is_supported(dev); +} + +static const struct mlx5_flow_cmds mlx5_flow_cmds_dr = { + 
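/* SW-steering implementations of the flow commands. Ops on FW-owned + * (e.g. termination) tables are deferred to the FW command set through + * the mlx5_dr_is_fw_table() checks above. + */ +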
.create_flow_table = mlx5_cmd_dr_create_flow_table, + .destroy_flow_table = mlx5_cmd_dr_destroy_flow_table, + .modify_flow_table = mlx5_cmd_dr_modify_flow_table, + .create_flow_group = mlx5_cmd_dr_create_flow_group, + .destroy_flow_group = mlx5_cmd_dr_destroy_flow_group, + .create_fte = mlx5_cmd_dr_create_fte, + .update_fte = mlx5_cmd_dr_update_fte, + .delete_fte = mlx5_cmd_dr_delete_fte, + .update_root_ft = mlx5_cmd_dr_update_root_ft, + .packet_reformat_alloc = mlx5_cmd_dr_packet_reformat_alloc, + .packet_reformat_dealloc = mlx5_cmd_dr_packet_reformat_dealloc, + .modify_header_alloc = mlx5_cmd_dr_modify_header_alloc, + .modify_header_dealloc = mlx5_cmd_dr_modify_header_dealloc, + .set_peer = mlx5_cmd_dr_set_peer, + .create_ns = mlx5_cmd_dr_create_ns, + .destroy_ns = mlx5_cmd_dr_destroy_ns, +}; + +const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void) +{ + return &mlx5_flow_cmds_dr; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h new file mode 100644 index 000000000000..1fb185d6ac7f --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB + * Copyright (c) 2019 Mellanox Technologies + */ + +#ifndef _MLX5_FS_DR_ +#define _MLX5_FS_DR_ + +#include "mlx5dr.h" + +struct mlx5_flow_root_namespace; +struct fs_fte; + +struct mlx5_fs_dr_action { + struct mlx5dr_action *dr_action; +}; + +struct mlx5_fs_dr_ns { + struct mlx5_dr_ns *dr_ns; +}; + +struct mlx5_fs_dr_rule { + struct mlx5dr_rule *dr_rule; + /* Only actions created by fs_dr */ + struct mlx5dr_action **dr_actions; + int num_actions; +}; + +struct mlx5_fs_dr_domain { + struct mlx5dr_domain *dr_domain; +}; + +struct mlx5_fs_dr_matcher { + struct mlx5dr_matcher *dr_matcher; +}; + +struct mlx5_fs_dr_table { + struct mlx5dr_table *dr_table; + struct mlx5dr_action *miss_action; +}; + +#ifdef CONFIG_MLX5_SW_STEERING + +bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev); + +const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void); + +#else + +static inline const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void) +{ + return NULL; +} + +static inline bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev) +{ + return false; +} + +#endif /* CONFIG_MLX5_SW_STEERING */ +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h new file mode 100644 index 000000000000..596c927220d9 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h @@ -0,0 +1,604 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2019, Mellanox Technologies */ + +#ifndef MLX5_IFC_DR_H +#define MLX5_IFC_DR_H + +enum { + MLX5DR_ACTION_MDFY_HW_FLD_L2_0 = 0, + MLX5DR_ACTION_MDFY_HW_FLD_L2_1 = 1, + MLX5DR_ACTION_MDFY_HW_FLD_L2_2 = 2, + MLX5DR_ACTION_MDFY_HW_FLD_L3_0 = 3, + MLX5DR_ACTION_MDFY_HW_FLD_L3_1 = 4, + MLX5DR_ACTION_MDFY_HW_FLD_L3_2 = 5, + MLX5DR_ACTION_MDFY_HW_FLD_L3_3 = 6, + MLX5DR_ACTION_MDFY_HW_FLD_L3_4 = 7, + MLX5DR_ACTION_MDFY_HW_FLD_L4_0 = 8, + MLX5DR_ACTION_MDFY_HW_FLD_L4_1 = 9, + MLX5DR_ACTION_MDFY_HW_FLD_MPLS = 10, + MLX5DR_ACTION_MDFY_HW_FLD_L2_TNL_0 = 11, + MLX5DR_ACTION_MDFY_HW_FLD_REG_0 = 12, + MLX5DR_ACTION_MDFY_HW_FLD_REG_1 = 13, + MLX5DR_ACTION_MDFY_HW_FLD_REG_2 = 14, + MLX5DR_ACTION_MDFY_HW_FLD_REG_3 = 15, + MLX5DR_ACTION_MDFY_HW_FLD_L4_2 = 16, + MLX5DR_ACTION_MDFY_HW_FLD_FLEX_0 = 17, + MLX5DR_ACTION_MDFY_HW_FLD_FLEX_1 = 18, + 
MLX5DR_ACTION_MDFY_HW_FLD_FLEX_2 = 19, + MLX5DR_ACTION_MDFY_HW_FLD_FLEX_3 = 20, + MLX5DR_ACTION_MDFY_HW_FLD_L2_TNL_1 = 21, + MLX5DR_ACTION_MDFY_HW_FLD_METADATA = 22, + MLX5DR_ACTION_MDFY_HW_FLD_RESERVED = 23, +}; + +enum { + MLX5DR_ACTION_MDFY_HW_OP_SET = 0x2, + MLX5DR_ACTION_MDFY_HW_OP_ADD = 0x3, +}; + +enum { + MLX5DR_ACTION_MDFY_HW_HDR_L3_NONE = 0x0, + MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV4 = 0x1, + MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6 = 0x2, +}; + +enum { + MLX5DR_ACTION_MDFY_HW_HDR_L4_NONE = 0x0, + MLX5DR_ACTION_MDFY_HW_HDR_L4_TCP = 0x1, + MLX5DR_ACTION_MDFY_HW_HDR_L4_UDP = 0x2, +}; + +enum { + MLX5DR_STE_LU_TYPE_NOP = 0x00, + MLX5DR_STE_LU_TYPE_SRC_GVMI_AND_QP = 0x05, + MLX5DR_STE_LU_TYPE_ETHL2_TUNNELING_I = 0x0a, + MLX5DR_STE_LU_TYPE_ETHL2_DST_O = 0x06, + MLX5DR_STE_LU_TYPE_ETHL2_DST_I = 0x07, + MLX5DR_STE_LU_TYPE_ETHL2_DST_D = 0x1b, + MLX5DR_STE_LU_TYPE_ETHL2_SRC_O = 0x08, + MLX5DR_STE_LU_TYPE_ETHL2_SRC_I = 0x09, + MLX5DR_STE_LU_TYPE_ETHL2_SRC_D = 0x1c, + MLX5DR_STE_LU_TYPE_ETHL2_SRC_DST_O = 0x36, + MLX5DR_STE_LU_TYPE_ETHL2_SRC_DST_I = 0x37, + MLX5DR_STE_LU_TYPE_ETHL2_SRC_DST_D = 0x38, + MLX5DR_STE_LU_TYPE_ETHL3_IPV6_DST_O = 0x0d, + MLX5DR_STE_LU_TYPE_ETHL3_IPV6_DST_I = 0x0e, + MLX5DR_STE_LU_TYPE_ETHL3_IPV6_DST_D = 0x1e, + MLX5DR_STE_LU_TYPE_ETHL3_IPV6_SRC_O = 0x0f, + MLX5DR_STE_LU_TYPE_ETHL3_IPV6_SRC_I = 0x10, + MLX5DR_STE_LU_TYPE_ETHL3_IPV6_SRC_D = 0x1f, + MLX5DR_STE_LU_TYPE_ETHL3_IPV4_5_TUPLE_O = 0x11, + MLX5DR_STE_LU_TYPE_ETHL3_IPV4_5_TUPLE_I = 0x12, + MLX5DR_STE_LU_TYPE_ETHL3_IPV4_5_TUPLE_D = 0x20, + MLX5DR_STE_LU_TYPE_ETHL3_IPV4_MISC_O = 0x29, + MLX5DR_STE_LU_TYPE_ETHL3_IPV4_MISC_I = 0x2a, + MLX5DR_STE_LU_TYPE_ETHL3_IPV4_MISC_D = 0x2b, + MLX5DR_STE_LU_TYPE_ETHL4_O = 0x13, + MLX5DR_STE_LU_TYPE_ETHL4_I = 0x14, + MLX5DR_STE_LU_TYPE_ETHL4_D = 0x21, + MLX5DR_STE_LU_TYPE_ETHL4_MISC_O = 0x2c, + MLX5DR_STE_LU_TYPE_ETHL4_MISC_I = 0x2d, + MLX5DR_STE_LU_TYPE_ETHL4_MISC_D = 0x2e, + MLX5DR_STE_LU_TYPE_MPLS_FIRST_O = 0x15, + MLX5DR_STE_LU_TYPE_MPLS_FIRST_I = 0x24, + MLX5DR_STE_LU_TYPE_MPLS_FIRST_D = 0x25, + MLX5DR_STE_LU_TYPE_GRE = 0x16, + MLX5DR_STE_LU_TYPE_FLEX_PARSER_0 = 0x22, + MLX5DR_STE_LU_TYPE_FLEX_PARSER_1 = 0x23, + MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER = 0x19, + MLX5DR_STE_LU_TYPE_GENERAL_PURPOSE = 0x18, + MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_0 = 0x2f, + MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_1 = 0x30, + MLX5DR_STE_LU_TYPE_DONT_CARE = 0x0f, +}; + +enum mlx5dr_ste_entry_type { + MLX5DR_STE_TYPE_TX = 1, + MLX5DR_STE_TYPE_RX = 2, + MLX5DR_STE_TYPE_MODIFY_PKT = 6, +}; + +struct mlx5_ifc_ste_general_bits { + u8 entry_type[0x4]; + u8 reserved_at_4[0x4]; + u8 entry_sub_type[0x8]; + u8 byte_mask[0x10]; + + u8 next_table_base_63_48[0x10]; + u8 next_lu_type[0x8]; + u8 next_table_base_39_32_size[0x8]; + + u8 next_table_base_31_5_size[0x1b]; + u8 linear_hash_enable[0x1]; + u8 reserved_at_5c[0x2]; + u8 next_table_rank[0x2]; + + u8 reserved_at_60[0xa0]; + u8 tag_value[0x60]; + u8 bit_mask[0x60]; +}; + +struct mlx5_ifc_ste_sx_transmit_bits { + u8 entry_type[0x4]; + u8 reserved_at_4[0x4]; + u8 entry_sub_type[0x8]; + u8 byte_mask[0x10]; + + u8 next_table_base_63_48[0x10]; + u8 next_lu_type[0x8]; + u8 next_table_base_39_32_size[0x8]; + + u8 next_table_base_31_5_size[0x1b]; + u8 linear_hash_enable[0x1]; + u8 reserved_at_5c[0x2]; + u8 next_table_rank[0x2]; + + u8 sx_wire[0x1]; + u8 sx_func_lb[0x1]; + u8 sx_sniffer[0x1]; + u8 sx_wire_enable[0x1]; + u8 sx_func_lb_enable[0x1]; + u8 sx_sniffer_enable[0x1]; + u8 action_type[0x3]; + u8 reserved_at_69[0x1]; + u8 action_description[0x6]; + u8 gvmi[0x10]; + + u8 
encap_pointer_vlan_data[0x20]; + + u8 loopback_syndome_en[0x8]; + u8 loopback_syndome[0x8]; + u8 counter_trigger[0x10]; + + u8 miss_address_63_48[0x10]; + u8 counter_trigger_23_16[0x8]; + u8 miss_address_39_32[0x8]; + + u8 miss_address_31_6[0x1a]; + u8 learning_point[0x1]; + u8 go_back[0x1]; + u8 match_polarity[0x1]; + u8 mask_mode[0x1]; + u8 miss_rank[0x2]; +}; + +struct mlx5_ifc_ste_rx_steering_mult_bits { + u8 entry_type[0x4]; + u8 reserved_at_4[0x4]; + u8 entry_sub_type[0x8]; + u8 byte_mask[0x10]; + + u8 next_table_base_63_48[0x10]; + u8 next_lu_type[0x8]; + u8 next_table_base_39_32_size[0x8]; + + u8 next_table_base_31_5_size[0x1b]; + u8 linear_hash_enable[0x1]; + u8 reserved_at_[0x2]; + u8 next_table_rank[0x2]; + + u8 member_count[0x10]; + u8 gvmi[0x10]; + + u8 qp_list_pointer[0x20]; + + u8 reserved_at_a0[0x1]; + u8 tunneling_action[0x3]; + u8 action_description[0x4]; + u8 reserved_at_a8[0x8]; + u8 counter_trigger_15_0[0x10]; + + u8 miss_address_63_48[0x10]; + u8 counter_trigger_23_16[0x08]; + u8 miss_address_39_32[0x8]; + + u8 miss_address_31_6[0x1a]; + u8 learning_point[0x1]; + u8 fail_on_error[0x1]; + u8 match_polarity[0x1]; + u8 mask_mode[0x1]; + u8 miss_rank[0x2]; +}; + +struct mlx5_ifc_ste_modify_packet_bits { + u8 entry_type[0x4]; + u8 reserved_at_4[0x4]; + u8 entry_sub_type[0x8]; + u8 byte_mask[0x10]; + + u8 next_table_base_63_48[0x10]; + u8 next_lu_type[0x8]; + u8 next_table_base_39_32_size[0x8]; + + u8 next_table_base_31_5_size[0x1b]; + u8 linear_hash_enable[0x1]; + u8 reserved_at_[0x2]; + u8 next_table_rank[0x2]; + + u8 number_of_re_write_actions[0x10]; + u8 gvmi[0x10]; + + u8 header_re_write_actions_pointer[0x20]; + + u8 reserved_at_a0[0x1]; + u8 tunneling_action[0x3]; + u8 action_description[0x4]; + u8 reserved_at_a8[0x8]; + u8 counter_trigger_15_0[0x10]; + + u8 miss_address_63_48[0x10]; + u8 counter_trigger_23_16[0x08]; + u8 miss_address_39_32[0x8]; + + u8 miss_address_31_6[0x1a]; + u8 learning_point[0x1]; + u8 fail_on_error[0x1]; + u8 match_polarity[0x1]; + u8 mask_mode[0x1]; + u8 miss_rank[0x2]; +}; + +struct mlx5_ifc_ste_eth_l2_src_bits { + u8 smac_47_16[0x20]; + + u8 smac_15_0[0x10]; + u8 l3_ethertype[0x10]; + + u8 qp_type[0x2]; + u8 ethertype_filter[0x1]; + u8 reserved_at_43[0x1]; + u8 sx_sniffer[0x1]; + u8 force_lb[0x1]; + u8 functional_lb[0x1]; + u8 port[0x1]; + u8 reserved_at_48[0x4]; + u8 first_priority[0x3]; + u8 first_cfi[0x1]; + u8 first_vlan_qualifier[0x2]; + u8 reserved_at_52[0x2]; + u8 first_vlan_id[0xc]; + + u8 ip_fragmented[0x1]; + u8 tcp_syn[0x1]; + u8 encp_type[0x2]; + u8 l3_type[0x2]; + u8 l4_type[0x2]; + u8 reserved_at_68[0x4]; + u8 second_priority[0x3]; + u8 second_cfi[0x1]; + u8 second_vlan_qualifier[0x2]; + u8 reserved_at_72[0x2]; + u8 second_vlan_id[0xc]; +}; + +struct mlx5_ifc_ste_eth_l2_dst_bits { + u8 dmac_47_16[0x20]; + + u8 dmac_15_0[0x10]; + u8 l3_ethertype[0x10]; + + u8 qp_type[0x2]; + u8 ethertype_filter[0x1]; + u8 reserved_at_43[0x1]; + u8 sx_sniffer[0x1]; + u8 force_lb[0x1]; + u8 functional_lb[0x1]; + u8 port[0x1]; + u8 reserved_at_48[0x4]; + u8 first_priority[0x3]; + u8 first_cfi[0x1]; + u8 first_vlan_qualifier[0x2]; + u8 reserved_at_52[0x2]; + u8 first_vlan_id[0xc]; + + u8 ip_fragmented[0x1]; + u8 tcp_syn[0x1]; + u8 encp_type[0x2]; + u8 l3_type[0x2]; + u8 l4_type[0x2]; + u8 reserved_at_68[0x4]; + u8 second_priority[0x3]; + u8 second_cfi[0x1]; + u8 second_vlan_qualifier[0x2]; + u8 reserved_at_72[0x2]; + u8 second_vlan_id[0xc]; +}; + +struct mlx5_ifc_ste_eth_l2_src_dst_bits { + u8 dmac_47_16[0x20]; + + u8 dmac_15_0[0x10]; + u8 
smac_47_32[0x10]; + + u8 smac_31_0[0x20]; + + u8 sx_sniffer[0x1]; + u8 force_lb[0x1]; + u8 functional_lb[0x1]; + u8 port[0x1]; + u8 l3_type[0x2]; + u8 reserved_at_66[0x6]; + u8 first_priority[0x3]; + u8 first_cfi[0x1]; + u8 first_vlan_qualifier[0x2]; + u8 reserved_at_72[0x2]; + u8 first_vlan_id[0xc]; +}; + +struct mlx5_ifc_ste_eth_l3_ipv4_5_tuple_bits { + u8 destination_address[0x20]; + + u8 source_address[0x20]; + + u8 source_port[0x10]; + u8 destination_port[0x10]; + + u8 fragmented[0x1]; + u8 first_fragment[0x1]; + u8 reserved_at_62[0x2]; + u8 reserved_at_64[0x1]; + u8 ecn[0x2]; + u8 tcp_ns[0x1]; + u8 tcp_cwr[0x1]; + u8 tcp_ece[0x1]; + u8 tcp_urg[0x1]; + u8 tcp_ack[0x1]; + u8 tcp_psh[0x1]; + u8 tcp_rst[0x1]; + u8 tcp_syn[0x1]; + u8 tcp_fin[0x1]; + u8 dscp[0x6]; + u8 reserved_at_76[0x2]; + u8 protocol[0x8]; +}; + +struct mlx5_ifc_ste_eth_l3_ipv6_dst_bits { + u8 dst_ip_127_96[0x20]; + + u8 dst_ip_95_64[0x20]; + + u8 dst_ip_63_32[0x20]; + + u8 dst_ip_31_0[0x20]; +}; + +struct mlx5_ifc_ste_eth_l2_tnl_bits { + u8 dmac_47_16[0x20]; + + u8 dmac_15_0[0x10]; + u8 l3_ethertype[0x10]; + + u8 l2_tunneling_network_id[0x20]; + + u8 ip_fragmented[0x1]; + u8 tcp_syn[0x1]; + u8 encp_type[0x2]; + u8 l3_type[0x2]; + u8 l4_type[0x2]; + u8 first_priority[0x3]; + u8 first_cfi[0x1]; + u8 reserved_at_6c[0x3]; + u8 gre_key_flag[0x1]; + u8 first_vlan_qualifier[0x2]; + u8 reserved_at_72[0x2]; + u8 first_vlan_id[0xc]; +}; + +struct mlx5_ifc_ste_eth_l3_ipv6_src_bits { + u8 src_ip_127_96[0x20]; + + u8 src_ip_95_64[0x20]; + + u8 src_ip_63_32[0x20]; + + u8 src_ip_31_0[0x20]; +}; + +struct mlx5_ifc_ste_eth_l3_ipv4_misc_bits { + u8 version[0x4]; + u8 ihl[0x4]; + u8 reserved_at_8[0x8]; + u8 total_length[0x10]; + + u8 identification[0x10]; + u8 flags[0x3]; + u8 fragment_offset[0xd]; + + u8 time_to_live[0x8]; + u8 reserved_at_48[0x8]; + u8 checksum[0x10]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_ste_eth_l4_bits { + u8 fragmented[0x1]; + u8 first_fragment[0x1]; + u8 reserved_at_2[0x6]; + u8 protocol[0x8]; + u8 dst_port[0x10]; + + u8 ipv6_version[0x4]; + u8 reserved_at_24[0x1]; + u8 ecn[0x2]; + u8 tcp_ns[0x1]; + u8 tcp_cwr[0x1]; + u8 tcp_ece[0x1]; + u8 tcp_urg[0x1]; + u8 tcp_ack[0x1]; + u8 tcp_psh[0x1]; + u8 tcp_rst[0x1]; + u8 tcp_syn[0x1]; + u8 tcp_fin[0x1]; + u8 src_port[0x10]; + + u8 ipv6_payload_length[0x10]; + u8 ipv6_hop_limit[0x8]; + u8 dscp[0x6]; + u8 reserved_at_5e[0x2]; + + u8 tcp_data_offset[0x4]; + u8 reserved_at_64[0x8]; + u8 flow_label[0x14]; +}; + +struct mlx5_ifc_ste_eth_l4_misc_bits { + u8 checksum[0x10]; + u8 length[0x10]; + + u8 seq_num[0x20]; + + u8 ack_num[0x20]; + + u8 urgent_pointer[0x10]; + u8 window_size[0x10]; +}; + +struct mlx5_ifc_ste_mpls_bits { + u8 mpls0_label[0x14]; + u8 mpls0_exp[0x3]; + u8 mpls0_s_bos[0x1]; + u8 mpls0_ttl[0x8]; + + u8 mpls1_label[0x20]; + + u8 mpls2_label[0x20]; + + u8 reserved_at_60[0x16]; + u8 mpls4_s_bit[0x1]; + u8 mpls4_qualifier[0x1]; + u8 mpls3_s_bit[0x1]; + u8 mpls3_qualifier[0x1]; + u8 mpls2_s_bit[0x1]; + u8 mpls2_qualifier[0x1]; + u8 mpls1_s_bit[0x1]; + u8 mpls1_qualifier[0x1]; + u8 mpls0_s_bit[0x1]; + u8 mpls0_qualifier[0x1]; +}; + +struct mlx5_ifc_ste_register_0_bits { + u8 register_0_h[0x20]; + + u8 register_0_l[0x20]; + + u8 register_1_h[0x20]; + + u8 register_1_l[0x20]; +}; + +struct mlx5_ifc_ste_register_1_bits { + u8 register_2_h[0x20]; + + u8 register_2_l[0x20]; + + u8 register_3_h[0x20]; + + u8 register_3_l[0x20]; +}; + +struct mlx5_ifc_ste_gre_bits { + u8 gre_c_present[0x1]; + u8 reserved_at_30[0x1]; + u8 gre_k_present[0x1]; + u8 
gre_s_present[0x1]; + u8 strict_src_route[0x1]; + u8 recur[0x3]; + u8 flags[0x5]; + u8 version[0x3]; + u8 gre_protocol[0x10]; + + u8 checksum[0x10]; + u8 offset[0x10]; + + u8 gre_key_h[0x18]; + u8 gre_key_l[0x8]; + + u8 seq_num[0x20]; +}; + +struct mlx5_ifc_ste_flex_parser_0_bits { + u8 parser_3_label[0x14]; + u8 parser_3_exp[0x3]; + u8 parser_3_s_bos[0x1]; + u8 parser_3_ttl[0x8]; + + u8 flex_parser_2[0x20]; + + u8 flex_parser_1[0x20]; + + u8 flex_parser_0[0x20]; +}; + +struct mlx5_ifc_ste_flex_parser_1_bits { + u8 flex_parser_7[0x20]; + + u8 flex_parser_6[0x20]; + + u8 flex_parser_5[0x20]; + + u8 flex_parser_4[0x20]; +}; + +struct mlx5_ifc_ste_flex_parser_tnl_bits { + u8 flex_parser_tunneling_header_63_32[0x20]; + + u8 flex_parser_tunneling_header_31_0[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_ste_general_purpose_bits { + u8 general_purpose_lookup_field[0x20]; + + u8 reserved_at_20[0x20]; + + u8 reserved_at_40[0x20]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_ste_src_gvmi_qp_bits { + u8 loopback_syndrome[0x8]; + u8 reserved_at_8[0x8]; + u8 source_gvmi[0x10]; + + u8 reserved_at_20[0x5]; + u8 force_lb[0x1]; + u8 functional_lb[0x1]; + u8 source_is_requestor[0x1]; + u8 source_qp[0x18]; + + u8 reserved_at_40[0x20]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_l2_hdr_bits { + u8 dmac_47_16[0x20]; + + u8 dmac_15_0[0x10]; + u8 smac_47_32[0x10]; + + u8 smac_31_0[0x20]; + + u8 ethertype[0x10]; + u8 vlan_type[0x10]; + + u8 vlan[0x10]; + u8 reserved_at_90[0x10]; +}; + +/* Both HW set and HW add share the same HW format with different opcodes */ +struct mlx5_ifc_dr_action_hw_set_bits { + u8 opcode[0x8]; + u8 destination_field_code[0x8]; + u8 reserved_at_10[0x2]; + u8 destination_left_shifter[0x6]; + u8 reserved_at_18[0x3]; + u8 destination_length[0x5]; + + u8 inline_data[0x20]; +}; + +#endif /* MLX5_IFC_DR_H */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h new file mode 100644 index 000000000000..adda9cbfba45 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h @@ -0,0 +1,212 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2019, Mellanox Technologies */ + +#ifndef _MLX5DR_H_ +#define _MLX5DR_H_ + +struct mlx5dr_domain; +struct mlx5dr_table; +struct mlx5dr_matcher; +struct mlx5dr_rule; +struct mlx5dr_action; + +enum mlx5dr_domain_type { + MLX5DR_DOMAIN_TYPE_NIC_RX, + MLX5DR_DOMAIN_TYPE_NIC_TX, + MLX5DR_DOMAIN_TYPE_FDB, +}; + +enum mlx5dr_domain_sync_flags { + MLX5DR_DOMAIN_SYNC_FLAGS_SW = 1 << 0, + MLX5DR_DOMAIN_SYNC_FLAGS_HW = 1 << 1, +}; + +enum mlx5dr_action_reformat_type { + DR_ACTION_REFORMAT_TYP_TNL_L2_TO_L2, + DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L2, + DR_ACTION_REFORMAT_TYP_TNL_L3_TO_L2, + DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L3, +}; + +struct mlx5dr_match_parameters { + size_t match_sz; + u64 *match_buf; /* Device spec format */ +}; + +#ifdef CONFIG_MLX5_SW_STEERING + +struct mlx5dr_domain * +mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type); + +int mlx5dr_domain_destroy(struct mlx5dr_domain *domain); + +int mlx5dr_domain_sync(struct mlx5dr_domain *domain, u32 flags); + +void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn, + struct mlx5dr_domain *peer_dmn); + +struct mlx5dr_table * +mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level); + +int mlx5dr_table_destroy(struct mlx5dr_table *table); + +u32 mlx5dr_table_get_id(struct mlx5dr_table *table); + +struct mlx5dr_matcher * 
+mlx5dr_matcher_create(struct mlx5dr_table *table, + u16 priority, + u8 match_criteria_enable, + struct mlx5dr_match_parameters *mask); + +int mlx5dr_matcher_destroy(struct mlx5dr_matcher *matcher); + +struct mlx5dr_rule * +mlx5dr_rule_create(struct mlx5dr_matcher *matcher, + struct mlx5dr_match_parameters *value, + size_t num_actions, + struct mlx5dr_action *actions[]); + +int mlx5dr_rule_destroy(struct mlx5dr_rule *rule); + +int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl, + struct mlx5dr_action *action); + +struct mlx5dr_action * +mlx5dr_action_create_dest_table(struct mlx5dr_table *table); + +struct mlx5dr_action * +mlx5dr_create_action_dest_flow_fw_table(struct mlx5_flow_table *ft, + struct mlx5_core_dev *mdev); + +struct mlx5dr_action * +mlx5dr_action_create_dest_vport(struct mlx5dr_domain *domain, + u32 vport, u8 vhca_id_valid, + u16 vhca_id); + +struct mlx5dr_action *mlx5dr_action_create_drop(void); + +struct mlx5dr_action *mlx5dr_action_create_tag(u32 tag_value); + +struct mlx5dr_action * +mlx5dr_action_create_flow_counter(u32 counter_id); + +struct mlx5dr_action * +mlx5dr_action_create_packet_reformat(struct mlx5dr_domain *dmn, + enum mlx5dr_action_reformat_type reformat_type, + size_t data_sz, + void *data); + +struct mlx5dr_action * +mlx5dr_action_create_modify_header(struct mlx5dr_domain *domain, + u32 flags, + size_t actions_sz, + __be64 actions[]); + +struct mlx5dr_action *mlx5dr_action_create_pop_vlan(void); + +struct mlx5dr_action * +mlx5dr_action_create_push_vlan(struct mlx5dr_domain *domain, __be32 vlan_hdr); + +int mlx5dr_action_destroy(struct mlx5dr_action *action); + +static inline bool +mlx5dr_is_supported(struct mlx5_core_dev *dev) +{ + return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner); +} + +#else /* CONFIG_MLX5_SW_STEERING */ + +static inline struct mlx5dr_domain * +mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type) { return NULL; } + +static inline int +mlx5dr_domain_destroy(struct mlx5dr_domain *domain) { return 0; } + +static inline int +mlx5dr_domain_sync(struct mlx5dr_domain *domain, u32 flags) { return 0; } + +static inline void +mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn, + struct mlx5dr_domain *peer_dmn) { } + +static inline struct mlx5dr_table * +mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level) { return NULL; } + +static inline int +mlx5dr_table_destroy(struct mlx5dr_table *table) { return 0; } + +static inline u32 +mlx5dr_table_get_id(struct mlx5dr_table *table) { return 0; } + +static inline struct mlx5dr_matcher * +mlx5dr_matcher_create(struct mlx5dr_table *table, + u16 priority, + u8 match_criteria_enable, + struct mlx5dr_match_parameters *mask) { return NULL; } + +static inline int +mlx5dr_matcher_destroy(struct mlx5dr_matcher *matcher) { return 0; } + +static inline struct mlx5dr_rule * +mlx5dr_rule_create(struct mlx5dr_matcher *matcher, + struct mlx5dr_match_parameters *value, + size_t num_actions, + struct mlx5dr_action *actions[]) { return NULL; } + +static inline int +mlx5dr_rule_destroy(struct mlx5dr_rule *rule) { return 0; } + +static inline int +mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl, + struct mlx5dr_action *action) { return 0; } + +static inline struct mlx5dr_action * +mlx5dr_action_create_dest_table(struct mlx5dr_table *table) { return NULL; } + +static inline struct mlx5dr_action * +mlx5dr_create_action_dest_flow_fw_table(struct mlx5_flow_table *ft, + struct mlx5_core_dev *mdev) { return NULL; } + +static inline struct mlx5dr_action * 
+mlx5dr_action_create_dest_vport(struct mlx5dr_domain *domain, + u32 vport, u8 vhca_id_valid, + u16 vhca_id) { return NULL; } + +static inline struct mlx5dr_action * +mlx5dr_action_create_drop(void) { return NULL; } + +static inline struct mlx5dr_action * +mlx5dr_action_create_tag(u32 tag_value) { return NULL; } + +static inline struct mlx5dr_action * +mlx5dr_action_create_flow_counter(u32 counter_id) { return NULL; } + +static inline struct mlx5dr_action * +mlx5dr_action_create_packet_reformat(struct mlx5dr_domain *dmn, + enum mlx5dr_action_reformat_type reformat_type, + size_t data_sz, + void *data) { return NULL; } + +static inline struct mlx5dr_action * +mlx5dr_action_create_modify_header(struct mlx5dr_domain *domain, + u32 flags, + size_t actions_sz, + __be64 actions[]) { return NULL; } + +static inline struct mlx5dr_action * +mlx5dr_action_create_pop_vlan(void) { return NULL; } + +static inline struct mlx5dr_action * +mlx5dr_action_create_push_vlan(struct mlx5dr_domain *domain, + __be32 vlan_hdr) { return NULL; } + +static inline int +mlx5dr_action_destroy(struct mlx5dr_action *action) { return 0; } + +static inline bool +mlx5dr_is_supported(struct mlx5_core_dev *dev) { return false; } + +#endif /* CONFIG_MLX5_SW_STEERING */ + +#endif /* _MLX5DR_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index c912d82ca64b..30f7848a6f88 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -122,12 +122,13 @@ void mlx5_query_min_inline(struct mlx5_core_dev *mdev, u8 *min_inline_mode) { switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) { + case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: + if (!mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode)) + break; + /* fall through */ case MLX5_CAP_INLINE_MODE_L2: *min_inline_mode = MLX5_INLINE_MODE_L2; break; - case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: - mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode); - break; case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: *min_inline_mode = MLX5_INLINE_MODE_NONE; break; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c index 953cc8efba69..dd2315ce4441 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c @@ -44,6 +44,11 @@ u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq) return wq->fbc.sz_m1 + 1; } +u8 mlx5_cqwq_get_log_stride_size(struct mlx5_cqwq *wq) +{ + return wq->fbc.log_stride; +} + u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq) { return (u32)wq->fbc.sz_m1 + 1; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h index f1ec58c9e9e3..55791f71a778 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h @@ -89,6 +89,7 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *cqc, struct mlx5_cqwq *wq, struct mlx5_wq_ctrl *wq_ctrl); u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq); +u8 mlx5_cqwq_get_log_stride_size(struct mlx5_cqwq *wq); int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *wqc, struct mlx5_wq_ll *wq, diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig index 06c80343d9ed..f458fd1ce9f8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig +++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig @@ -71,7 +71,7 @@ config MLXSW_SWITCHX2 module will be called 
mlxsw_switchx2. config MLXSW_SPECTRUM - tristate "Mellanox Technologies Spectrum support" + tristate "Mellanox Technologies Spectrum family support" depends on MLXSW_CORE && MLXSW_PCI && NET_SWITCHDEV && VLAN_8021Q depends on PSAMPLE || PSAMPLE=n depends on BRIDGE || BRIDGE=n @@ -87,8 +87,8 @@ config MLXSW_SPECTRUM select NET_PTP_CLASSIFY if PTP_1588_CLOCK default m ---help--- - This driver supports Mellanox Technologies Spectrum Ethernet - Switch ASICs. + This driver supports Mellanox Technologies + Spectrum/Spectrum-2/Spectrum-3 Ethernet Switch ASICs. To compile this driver as a module, choose M here: the module will be called mlxsw_spectrum. diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index 171b36bd8a4e..0e86a581d45b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -29,7 +29,7 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ spectrum_mr_tcam.o spectrum_mr.o \ spectrum_qdisc.o spectrum_span.o \ spectrum_nve.o spectrum_nve_vxlan.o \ - spectrum_dpipe.o + spectrum_dpipe.o spectrum_trap.o mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o mlxsw_spectrum-$(CONFIG_PTP_1588_CLOCK) += spectrum_ptp.o obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 17ceac7505e5..14dcc786926d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -80,7 +80,6 @@ struct mlxsw_core { struct mlxsw_thermal *thermal; struct mlxsw_core_port *ports; unsigned int max_ports; - bool reload_fail; bool fw_flash_in_progress; unsigned long driver_priv[0]; /* driver_priv has to be always the last item */ @@ -984,23 +983,29 @@ mlxsw_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req, return 0; } -static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink, - struct netlink_ext_ack *extack) +static int +mlxsw_devlink_core_bus_device_reload_down(struct devlink *devlink, + struct netlink_ext_ack *extack) { struct mlxsw_core *mlxsw_core = devlink_priv(devlink); - int err; if (!(mlxsw_core->bus->features & MLXSW_BUS_F_RESET)) return -EOPNOTSUPP; mlxsw_core_bus_device_unregister(mlxsw_core, true); - err = mlxsw_core_bus_device_register(mlxsw_core->bus_info, - mlxsw_core->bus, - mlxsw_core->bus_priv, true, - devlink); - mlxsw_core->reload_fail = !!err; + return 0; +} - return err; +static int +mlxsw_devlink_core_bus_device_reload_up(struct devlink *devlink, + struct netlink_ext_ack *extack) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink); + + return mlxsw_core_bus_device_register(mlxsw_core->bus_info, + mlxsw_core->bus, + mlxsw_core->bus_priv, true, + devlink); } static int mlxsw_devlink_flash_update(struct devlink *devlink, @@ -1017,8 +1022,57 @@ static int mlxsw_devlink_flash_update(struct devlink *devlink, component, extack); } +static int mlxsw_devlink_trap_init(struct devlink *devlink, + const struct devlink_trap *trap, + void *trap_ctx) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink); + struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; + + if (!mlxsw_driver->trap_init) + return -EOPNOTSUPP; + return mlxsw_driver->trap_init(mlxsw_core, trap, trap_ctx); +} + +static void mlxsw_devlink_trap_fini(struct devlink *devlink, + const struct devlink_trap *trap, + void *trap_ctx) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink); + struct mlxsw_driver *mlxsw_driver = 
mlxsw_core->driver; + + if (!mlxsw_driver->trap_fini) + return; + mlxsw_driver->trap_fini(mlxsw_core, trap, trap_ctx); +} + +static int mlxsw_devlink_trap_action_set(struct devlink *devlink, + const struct devlink_trap *trap, + enum devlink_trap_action action) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink); + struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; + + if (!mlxsw_driver->trap_action_set) + return -EOPNOTSUPP; + return mlxsw_driver->trap_action_set(mlxsw_core, trap, action); +} + +static int +mlxsw_devlink_trap_group_init(struct devlink *devlink, + const struct devlink_trap_group *group) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink); + struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; + + if (!mlxsw_driver->trap_group_init) + return -EOPNOTSUPP; + return mlxsw_driver->trap_group_init(mlxsw_core, group); +} + static const struct devlink_ops mlxsw_devlink_ops = { - .reload = mlxsw_devlink_core_bus_device_reload, + .reload_down = mlxsw_devlink_core_bus_device_reload_down, + .reload_up = mlxsw_devlink_core_bus_device_reload_up, .port_type_set = mlxsw_devlink_port_type_set, .port_split = mlxsw_devlink_port_split, .port_unsplit = mlxsw_devlink_port_unsplit, @@ -1034,6 +1088,10 @@ static const struct devlink_ops mlxsw_devlink_ops = { .sb_occ_tc_port_bind_get = mlxsw_devlink_sb_occ_tc_port_bind_get, .info_get = mlxsw_devlink_info_get, .flash_update = mlxsw_devlink_flash_update, + .trap_init = mlxsw_devlink_trap_init, + .trap_fini = mlxsw_devlink_trap_fini, + .trap_action_set = mlxsw_devlink_trap_action_set, + .trap_group_init = mlxsw_devlink_trap_group_init, }; static int @@ -1191,7 +1249,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, { struct devlink *devlink = priv_to_devlink(mlxsw_core); - if (mlxsw_core->reload_fail) { + if (devlink_is_reload_failed(devlink)) { if (!reload) /* Only the parts that were not de-initialized in the * failed reload attempt need to be de-initialized. 
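/* [Annotation, not part of the patch] The two hunks above move mlxsw from a
 * single devlink .reload callback plus a driver-private 'reload_fail' flag
 * to the split .reload_down/.reload_up contract, with the failure state
 * tracked by devlink core and queried via devlink_is_reload_failed(). A
 * minimal sketch of that contract follows; the foo_* names are hypothetical
 * driver internals, only the devlink interfaces are real at this revision.
 */
struct foo_dev;					/* hypothetical driver state */
static void foo_dev_stop(struct foo_dev *fdev);	/* hypothetical teardown */
static int foo_dev_start(struct foo_dev *fdev);	/* hypothetical bring-up */

static int foo_devlink_reload_down(struct devlink *devlink,
				   struct netlink_ext_ack *extack)
{
	struct foo_dev *fdev = devlink_priv(devlink);

	/* Quiesce and tear down the device; returning success here arms
	 * devlink core to run the matching reload_up.
	 */
	foo_dev_stop(fdev);
	return 0;
}

static int foo_devlink_reload_up(struct devlink *devlink,
				 struct netlink_ext_ack *extack)
{
	struct foo_dev *fdev = devlink_priv(devlink);

	/* If re-initialization fails, devlink core records the failed
	 * reload, so later teardown paths can branch on
	 * devlink_is_reload_failed() instead of a private flag.
	 */
	return foo_dev_start(fdev);
}

static const struct devlink_ops foo_devlink_ops = {
	.reload_down	= foo_devlink_reload_down,
	.reload_up	= foo_devlink_reload_up,
};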
@@ -1477,6 +1535,18 @@ void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core, } EXPORT_SYMBOL(mlxsw_core_trap_unregister); +int mlxsw_core_trap_action_set(struct mlxsw_core *mlxsw_core, + const struct mlxsw_listener *listener, + enum mlxsw_reg_hpkt_action action) +{ + char hpkt_pl[MLXSW_REG_HPKT_LEN]; + + mlxsw_reg_hpkt_pack(hpkt_pl, action, listener->trap_id, + listener->trap_group, listener->is_ctrl); + return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl); +} +EXPORT_SYMBOL(mlxsw_core_trap_action_set); + static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core) { return atomic64_inc_return(&mlxsw_core->emad.tid); @@ -1794,11 +1864,12 @@ u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core, } EXPORT_SYMBOL(mlxsw_core_res_get); -int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port, - u32 port_number, bool split, - u32 split_port_subnumber, - const unsigned char *switch_id, - unsigned char switch_id_len) +static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port, + enum devlink_port_flavour flavour, + u32 port_number, bool split, + u32 split_port_subnumber, + const unsigned char *switch_id, + unsigned char switch_id_len) { struct devlink *devlink = priv_to_devlink(mlxsw_core); struct mlxsw_core_port *mlxsw_core_port = @@ -1807,17 +1878,16 @@ int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port, int err; mlxsw_core_port->local_port = local_port; - devlink_port_attrs_set(devlink_port, DEVLINK_PORT_FLAVOUR_PHYSICAL, - port_number, split, split_port_subnumber, + devlink_port_attrs_set(devlink_port, flavour, port_number, + split, split_port_subnumber, switch_id, switch_id_len); err = devlink_port_register(devlink, devlink_port, local_port); if (err) memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port)); return err; } -EXPORT_SYMBOL(mlxsw_core_port_init); -void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port) +static void __mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port) { struct mlxsw_core_port *mlxsw_core_port = &mlxsw_core->ports[local_port]; @@ -1826,8 +1896,53 @@ void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port) devlink_port_unregister(devlink_port); memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port)); } + +int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port, + u32 port_number, bool split, + u32 split_port_subnumber, + const unsigned char *switch_id, + unsigned char switch_id_len) +{ + return __mlxsw_core_port_init(mlxsw_core, local_port, + DEVLINK_PORT_FLAVOUR_PHYSICAL, + port_number, split, split_port_subnumber, + switch_id, switch_id_len); +} +EXPORT_SYMBOL(mlxsw_core_port_init); + +void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port) +{ + __mlxsw_core_port_fini(mlxsw_core, local_port); +} EXPORT_SYMBOL(mlxsw_core_port_fini); +int mlxsw_core_cpu_port_init(struct mlxsw_core *mlxsw_core, + void *port_driver_priv, + const unsigned char *switch_id, + unsigned char switch_id_len) +{ + struct mlxsw_core_port *mlxsw_core_port = + &mlxsw_core->ports[MLXSW_PORT_CPU_PORT]; + int err; + + err = __mlxsw_core_port_init(mlxsw_core, MLXSW_PORT_CPU_PORT, + DEVLINK_PORT_FLAVOUR_CPU, + 0, false, 0, + switch_id, switch_id_len); + if (err) + return err; + + mlxsw_core_port->port_driver_priv = port_driver_priv; + return 0; +} +EXPORT_SYMBOL(mlxsw_core_cpu_port_init); + +void mlxsw_core_cpu_port_fini(struct mlxsw_core *mlxsw_core) +{ + __mlxsw_core_port_fini(mlxsw_core, MLXSW_PORT_CPU_PORT); +} +EXPORT_SYMBOL(mlxsw_core_cpu_port_fini); + void 
mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port, void *port_driver_priv, struct net_device *dev) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 8efcff4b59cb..5d7d2ab6d155 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -128,6 +128,9 @@ int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core, void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core, const struct mlxsw_listener *listener, void *priv); +int mlxsw_core_trap_action_set(struct mlxsw_core *mlxsw_core, + const struct mlxsw_listener *listener, + enum mlxsw_reg_hpkt_action action); typedef void mlxsw_reg_trans_cb_t(struct mlxsw_core *mlxsw_core, char *payload, size_t payload_len, unsigned long cb_priv); @@ -174,6 +177,11 @@ int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port, const unsigned char *switch_id, unsigned char switch_id_len); void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port); +int mlxsw_core_cpu_port_init(struct mlxsw_core *mlxsw_core, + void *port_driver_priv, + const unsigned char *switch_id, + unsigned char switch_id_len); +void mlxsw_core_cpu_port_fini(struct mlxsw_core *mlxsw_core); void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port, void *port_driver_priv, struct net_device *dev); void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port, @@ -289,6 +297,15 @@ struct mlxsw_driver { int (*flash_update)(struct mlxsw_core *mlxsw_core, const char *file_name, const char *component, struct netlink_ext_ack *extack); + int (*trap_init)(struct mlxsw_core *mlxsw_core, + const struct devlink_trap *trap, void *trap_ctx); + void (*trap_fini)(struct mlxsw_core *mlxsw_core, + const struct devlink_trap *trap, void *trap_ctx); + int (*trap_action_set)(struct mlxsw_core *mlxsw_core, + const struct devlink_trap *trap, + enum devlink_trap_action action); + int (*trap_group_init)(struct mlxsw_core *mlxsw_core, + const struct devlink_trap_group *group); void (*txhdr_construct)(struct sk_buff *skb, const struct mlxsw_tx_info *tx_info); int (*resources_register)(struct mlxsw_core *mlxsw_core); diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h index 946339e13eb9..5b1323645a5d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h @@ -9,6 +9,7 @@ #define PCI_DEVICE_ID_MELLANOX_SWITCHX2 0xc738 #define PCI_DEVICE_ID_MELLANOX_SPECTRUM 0xcb84 #define PCI_DEVICE_ID_MELLANOX_SPECTRUM2 0xcf6c +#define PCI_DEVICE_ID_MELLANOX_SPECTRUM3 0xcf70 #define PCI_DEVICE_ID_MELLANOX_SWITCHIB 0xcb20 #define PCI_DEVICE_ID_MELLANOX_SWITCHIB2 0xcf08 diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index ead36702549a..5494cf93f34c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -4126,7 +4126,6 @@ MLXSW_ITEM32(reg, ptys, ext_eth_proto_cap, 0x08, 0, 32); #define MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2 BIT(5) #define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 BIT(6) #define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 BIT(7) -#define MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4 BIT(8) #define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR BIT(12) #define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR BIT(13) #define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR BIT(14) @@ -5422,6 +5421,14 @@ enum mlxsw_reg_htgt_trap_group { MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR, MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0, 
MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1, + + __MLXSW_REG_HTGT_TRAP_GROUP_MAX, + MLXSW_REG_HTGT_TRAP_GROUP_MAX = __MLXSW_REG_HTGT_TRAP_GROUP_MAX - 1 +}; + +enum mlxsw_reg_htgt_discard_trap_group { + MLXSW_REG_HTGT_DISCARD_TRAP_GROUP_BASE = MLXSW_REG_HTGT_TRAP_GROUP_MAX, + MLXSW_REG_HTGT_TRAP_GROUP_SP_L2_DISCARDS, }; /* reg_htgt_trap_group @@ -5559,6 +5566,8 @@ enum mlxsw_reg_hpkt_action { MLXSW_REG_HPKT_ACTION_DISCARD, MLXSW_REG_HPKT_ACTION_SOFT_DISCARD, MLXSW_REG_HPKT_ACTION_TRAP_AND_SOFT_DISCARD, + MLXSW_REG_HPKT_ACTION_TRAP_EXCEPTION_TO_CPU, + MLXSW_REG_HPKT_ACTION_SET_FW_DEFAULT = 15, }; /* reg_hpkt_action @@ -5569,6 +5578,8 @@ enum mlxsw_reg_hpkt_action { * 3 - Discard. * 4 - Soft discard (allow other traps to act on the packet). * 5 - Trap and soft discard (allow other traps to overwrite this trap). + * 6 - Trap to CPU (CPU receives sole copy) and count it as error. + * 15 - Restore the firmware's default action. * Access: RW * * Note: Must be set to 0 (forward) for event trap IDs, as they are already diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index eda9c23e87b2..dd234cf7b39d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -48,7 +48,7 @@ #define MLXSW_SP1_FWREV_MAJOR 13 #define MLXSW_SP1_FWREV_MINOR 2000 -#define MLXSW_SP1_FWREV_SUBMINOR 1122 +#define MLXSW_SP1_FWREV_SUBMINOR 1886 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { @@ -65,6 +65,7 @@ static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum"; static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2"; +static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3"; static const char mlxsw_sp_driver_version[] = "1.0"; static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = { @@ -174,6 +175,10 @@ struct mlxsw_sp_ptp_ops { void (*shaper_work)(struct work_struct *work); int (*get_ts_info)(struct mlxsw_sp *mlxsw_sp, struct ethtool_ts_info *info); + int (*get_stats_count)(void); + void (*get_stats_strings)(u8 **p); + void (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port, + u64 *data, int data_index); }; static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev, @@ -1625,7 +1630,7 @@ mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port, } flow_block_cb_incref(block_cb); err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block, - mlxsw_sp_port, ingress); + mlxsw_sp_port, ingress, f->extack); if (err) goto err_block_bind; @@ -2328,6 +2333,7 @@ static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc) static void mlxsw_sp_port_get_strings(struct net_device *dev, u32 stringset, u8 *data) { + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); u8 *p = data; int i; @@ -2369,6 +2375,7 @@ static void mlxsw_sp_port_get_strings(struct net_device *dev, for (i = 0; i < TC_MAX_QUEUE; i++) mlxsw_sp_port_get_tc_strings(&p, i); + mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_strings(&p); break; } } @@ -2463,6 +2470,7 @@ static void __mlxsw_sp_port_get_stats(struct net_device *dev, static void mlxsw_sp_port_get_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); int i, data_index = 0; /* IEEE 802.3 Counters */ @@ -2503,13 +2511,21 @@ static void mlxsw_sp_port_get_stats(struct net_device *dev, data, data_index); data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN; } + + /* PTP counters */ + 
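/* [Annotation] The get_stats_count/get_stats_strings/get_stats hooks added
 * to mlxsw_sp_ptp_ops keep this common ethtool code generation-agnostic:
 * Spectrum-1 fills in real garbage-collector (GCD) PTP counters further
 * down in spectrum_ptp.c, while the Spectrum-2 variants are inline stubs
 * whose count is zero, so the extra strings and data simply vanish on
 * hardware without this instrumentation.
 */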
mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats(mlxsw_sp_port, + data, data_index); + data_index += mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_count(); } static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset) { + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + switch (sset) { case ETH_SS_STATS: - return MLXSW_SP_PORT_ETHTOOL_STATS_LEN; + return MLXSW_SP_PORT_ETHTOOL_STATS_LEN + + mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_count(); default: return -EOPNOTSUPP; } @@ -2608,26 +2624,6 @@ static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = { .speed = SPEED_50000, }, { - .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, - .mask_ethtool = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT, - .speed = SPEED_56000, - }, - { - .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, - .mask_ethtool = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT, - .speed = SPEED_56000, - }, - { - .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, - .mask_ethtool = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT, - .speed = SPEED_56000, - }, - { - .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, - .mask_ethtool = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT, - .speed = SPEED_56000, - }, - { .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4, .mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, .speed = SPEED_100000, @@ -2674,7 +2670,7 @@ mlxsw_sp1_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp, static void mlxsw_sp1_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto, - unsigned long *mode) + u8 width, unsigned long *mode) { int i; @@ -2715,7 +2711,7 @@ mlxsw_sp1_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok, } static u32 -mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, +mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width, const struct ethtool_link_ksettings *cmd) { u32 ptys_proto = 0; @@ -2729,7 +2725,8 @@ mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, return ptys_proto; } -static u32 mlxsw_sp1_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 speed) +static u32 mlxsw_sp1_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u8 width, + u32 speed) { u32 ptys_proto = 0; int i; @@ -2917,11 +2914,31 @@ mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = { #define MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN \ ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4) +#define MLXSW_SP_PORT_MASK_WIDTH_1X BIT(0) +#define MLXSW_SP_PORT_MASK_WIDTH_2X BIT(1) +#define MLXSW_SP_PORT_MASK_WIDTH_4X BIT(2) + +static u8 mlxsw_sp_port_mask_width_get(u8 width) +{ + switch (width) { + case 1: + return MLXSW_SP_PORT_MASK_WIDTH_1X; + case 2: + return MLXSW_SP_PORT_MASK_WIDTH_2X; + case 4: + return MLXSW_SP_PORT_MASK_WIDTH_4X; + default: + WARN_ON_ONCE(1); + return 0; + } +} + struct mlxsw_sp2_port_link_mode { const enum ethtool_link_mode_bit_indices *mask_ethtool; int m_ethtool_len; u32 mask; u32 speed; + u8 mask_width; }; static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = { @@ -2929,72 +2946,97 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = { .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_SGMII_100M, .mask_ethtool = mlxsw_sp2_mask_ethtool_sgmii_100m, .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN, + .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | + MLXSW_SP_PORT_MASK_WIDTH_2X | + MLXSW_SP_PORT_MASK_WIDTH_4X, .speed = SPEED_100, }, { .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_1000BASE_X_SGMII, .mask_ethtool = mlxsw_sp2_mask_ethtool_1000base_x_sgmii, .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN, + .mask_width = 
MLXSW_SP_PORT_MASK_WIDTH_1X | + MLXSW_SP_PORT_MASK_WIDTH_2X | + MLXSW_SP_PORT_MASK_WIDTH_4X, .speed = SPEED_1000, }, { .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_2_5GBASE_X_2_5GMII, .mask_ethtool = mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii, .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN, + .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | + MLXSW_SP_PORT_MASK_WIDTH_2X | + MLXSW_SP_PORT_MASK_WIDTH_4X, .speed = SPEED_2500, }, { .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R, .mask_ethtool = mlxsw_sp2_mask_ethtool_5gbase_r, .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN, + .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | + MLXSW_SP_PORT_MASK_WIDTH_2X | + MLXSW_SP_PORT_MASK_WIDTH_4X, .speed = SPEED_5000, }, { .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XFI_XAUI_1_10G, .mask_ethtool = mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g, .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN, + .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | + MLXSW_SP_PORT_MASK_WIDTH_2X | + MLXSW_SP_PORT_MASK_WIDTH_4X, .speed = SPEED_10000, }, { .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G, .mask_ethtool = mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g, .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN, + .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X, .speed = SPEED_40000, }, { .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR, .mask_ethtool = mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr, .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN, + .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | + MLXSW_SP_PORT_MASK_WIDTH_2X | + MLXSW_SP_PORT_MASK_WIDTH_4X, .speed = SPEED_25000, }, { .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_2_LAUI_2_50GBASE_CR2_KR2, .mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2, .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN, + .mask_width = MLXSW_SP_PORT_MASK_WIDTH_2X | + MLXSW_SP_PORT_MASK_WIDTH_4X, .speed = SPEED_50000, }, { .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR, .mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr, .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN, + .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X, .speed = SPEED_50000, }, { .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4, .mask_ethtool = mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4, .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN, + .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X, .speed = SPEED_100000, }, { .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2, .mask_ethtool = mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2, .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN, + .mask_width = MLXSW_SP_PORT_MASK_WIDTH_2X, .speed = SPEED_100000, }, { .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4, .mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4, .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN, + .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X, .speed = SPEED_200000, }, }; @@ -3022,12 +3064,14 @@ mlxsw_sp2_set_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode, static void mlxsw_sp2_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto, - unsigned long *mode) + u8 width, unsigned long *mode) { + u8 mask_width = mlxsw_sp_port_mask_width_get(width); int i; for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { - if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) + if ((ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) && + (mask_width & 
mlxsw_sp2_port_link_mode[i].mask_width)) mlxsw_sp2_set_bit_ethtool(&mlxsw_sp2_port_link_mode[i], mode); } @@ -3078,27 +3122,32 @@ mlxsw_sp2_test_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode, } static u32 -mlxsw_sp2_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, +mlxsw_sp2_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width, const struct ethtool_link_ksettings *cmd) { + u8 mask_width = mlxsw_sp_port_mask_width_get(width); u32 ptys_proto = 0; int i; for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { - if (mlxsw_sp2_test_bit_ethtool(&mlxsw_sp2_port_link_mode[i], + if ((mask_width & mlxsw_sp2_port_link_mode[i].mask_width) && + mlxsw_sp2_test_bit_ethtool(&mlxsw_sp2_port_link_mode[i], cmd->link_modes.advertising)) ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; } return ptys_proto; } -static u32 mlxsw_sp2_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 speed) +static u32 mlxsw_sp2_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, + u8 width, u32 speed) { + u8 mask_width = mlxsw_sp_port_mask_width_get(width); u32 ptys_proto = 0; int i; for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { - if (speed == mlxsw_sp2_port_link_mode[i].speed) + if ((speed == mlxsw_sp2_port_link_mode[i].speed) && + (mask_width & mlxsw_sp2_port_link_mode[i].mask_width)) ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; } return ptys_proto; @@ -3182,7 +3231,7 @@ mlxsw_sp2_port_type_speed_ops = { static void mlxsw_sp_port_get_link_supported(struct mlxsw_sp *mlxsw_sp, u32 eth_proto_cap, - struct ethtool_link_ksettings *cmd) + u8 width, struct ethtool_link_ksettings *cmd) { const struct mlxsw_sp_port_type_speed_ops *ops; @@ -3193,12 +3242,13 @@ mlxsw_sp_port_get_link_supported(struct mlxsw_sp *mlxsw_sp, u32 eth_proto_cap, ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); ops->from_ptys_supported_port(mlxsw_sp, eth_proto_cap, cmd); - ops->from_ptys_link(mlxsw_sp, eth_proto_cap, cmd->link_modes.supported); + ops->from_ptys_link(mlxsw_sp, eth_proto_cap, width, + cmd->link_modes.supported); } static void mlxsw_sp_port_get_link_advertise(struct mlxsw_sp *mlxsw_sp, - u32 eth_proto_admin, bool autoneg, + u32 eth_proto_admin, bool autoneg, u8 width, struct ethtool_link_ksettings *cmd) { const struct mlxsw_sp_port_type_speed_ops *ops; @@ -3209,7 +3259,7 @@ mlxsw_sp_port_get_link_advertise(struct mlxsw_sp *mlxsw_sp, return; ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); - ops->from_ptys_link(mlxsw_sp, eth_proto_admin, + ops->from_ptys_link(mlxsw_sp, eth_proto_admin, width, cmd->link_modes.advertising); } @@ -3264,10 +3314,11 @@ static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev, ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap, &eth_proto_admin, &eth_proto_oper); - mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap, cmd); + mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap, + mlxsw_sp_port->mapping.width, cmd); mlxsw_sp_port_get_link_advertise(mlxsw_sp, eth_proto_admin, autoneg, - cmd); + mlxsw_sp_port->mapping.width, cmd); cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; connector_type = mlxsw_reg_ptys_connector_type_get(ptys_pl); @@ -3300,13 +3351,11 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev, ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap, NULL, NULL); autoneg = cmd->base.autoneg == AUTONEG_ENABLE; - if (!autoneg && cmd->base.speed == SPEED_56000) { - netdev_err(dev, "56G not supported with autoneg off\n"); - return -EINVAL; - } eth_proto_new = autoneg ?
- ops->to_ptys_advert_link(mlxsw_sp, cmd) : - ops->to_ptys_speed(mlxsw_sp, cmd->base.speed); + ops->to_ptys_advert_link(mlxsw_sp, mlxsw_sp_port->mapping.width, + cmd) : + ops->to_ptys_speed(mlxsw_sp, mlxsw_sp_port->mapping.width, + cmd->base.speed); eth_proto_new = eth_proto_new & eth_proto_cap; if (!eth_proto_new) { @@ -3823,6 +3872,45 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) mlxsw_core_port_fini(mlxsw_sp->core, local_port); } +static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp) +{ + struct mlxsw_sp_port *mlxsw_sp_port; + int err; + + mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL); + if (!mlxsw_sp_port) + return -ENOMEM; + + mlxsw_sp_port->mlxsw_sp = mlxsw_sp; + mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT; + + err = mlxsw_core_cpu_port_init(mlxsw_sp->core, + mlxsw_sp_port, + mlxsw_sp->base_mac, + sizeof(mlxsw_sp->base_mac)); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n"); + goto err_core_cpu_port_init; + } + + mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port; + return 0; + +err_core_cpu_port_init: + kfree(mlxsw_sp_port); + return err; +} + +static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp) +{ + struct mlxsw_sp_port *mlxsw_sp_port = + mlxsw_sp->ports[MLXSW_PORT_CPU_PORT]; + + mlxsw_core_cpu_port_fini(mlxsw_sp->core); + mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL; + kfree(mlxsw_sp_port); +} + static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port) { return mlxsw_sp->ports[local_port] != NULL; @@ -3835,6 +3923,7 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) if (mlxsw_sp_port_created(mlxsw_sp, i)) mlxsw_sp_port_remove(mlxsw_sp, i); + mlxsw_sp_cpu_port_remove(mlxsw_sp); kfree(mlxsw_sp->port_to_module); kfree(mlxsw_sp->ports); } @@ -3859,6 +3948,10 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) goto err_port_to_module_alloc; } + err = mlxsw_sp_cpu_port_create(mlxsw_sp); + if (err) + goto err_cpu_port_create; + for (i = 1; i < max_ports; i++) { /* Mark as invalid */ mlxsw_sp->port_to_module[i] = -1; @@ -3882,6 +3975,8 @@ err_port_module_info_get: for (i--; i >= 1; i--) if (mlxsw_sp_port_created(mlxsw_sp, i)) mlxsw_sp_port_remove(mlxsw_sp, i); + mlxsw_sp_cpu_port_remove(mlxsw_sp); +err_cpu_port_create: kfree(mlxsw_sp->port_to_module); err_port_to_module_alloc: kfree(mlxsw_sp->ports); @@ -4609,6 +4704,9 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = { .hwtstamp_set = mlxsw_sp1_ptp_hwtstamp_set, .shaper_work = mlxsw_sp1_ptp_shaper_work, .get_ts_info = mlxsw_sp1_ptp_get_ts_info, + .get_stats_count = mlxsw_sp1_get_stats_count, + .get_stats_strings = mlxsw_sp1_get_stats_strings, + .get_stats = mlxsw_sp1_get_stats, }; static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = { @@ -4622,6 +4720,9 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = { .hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set, .shaper_work = mlxsw_sp2_ptp_shaper_work, .get_ts_info = mlxsw_sp2_ptp_get_ts_info, + .get_stats_count = mlxsw_sp2_get_stats_count, + .get_stats_strings = mlxsw_sp2_get_stats_strings, + .get_stats = mlxsw_sp2_get_stats, }; static int mlxsw_sp_netdevice_event(struct notifier_block *unused, @@ -4664,6 +4765,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, goto err_traps_init; } + err = mlxsw_sp_devlink_traps_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n"); + goto err_devlink_traps_init; + } + err 
= mlxsw_sp_buffers_init(mlxsw_sp); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n"); @@ -4797,6 +4904,8 @@ err_span_init: err_lag_init: mlxsw_sp_buffers_fini(mlxsw_sp); err_buffers_init: + mlxsw_sp_devlink_traps_fini(mlxsw_sp); +err_devlink_traps_init: mlxsw_sp_traps_fini(mlxsw_sp); err_traps_init: mlxsw_sp_fids_fini(mlxsw_sp); @@ -4869,6 +4978,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) mlxsw_sp_span_fini(mlxsw_sp); mlxsw_sp_lag_fini(mlxsw_sp); mlxsw_sp_buffers_fini(mlxsw_sp); + mlxsw_sp_devlink_traps_fini(mlxsw_sp); mlxsw_sp_traps_fini(mlxsw_sp); mlxsw_sp_fids_fini(mlxsw_sp); mlxsw_sp_kvdl_fini(mlxsw_sp); @@ -5026,6 +5136,26 @@ static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core) return 0; } +static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core) +{ + struct devlink *devlink = priv_to_devlink(mlxsw_core); + struct devlink_resource_size_params kvd_size_params; + u32 kvd_size; + + if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) + return -EIO; + + kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); + devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size, + MLXSW_SP_KVD_GRANULARITY, + DEVLINK_RESOURCE_UNIT_ENTRY); + + return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, + kvd_size, MLXSW_SP_RESOURCE_KVD, + DEVLINK_RESOURCE_ID_PARENT_TOP, + &kvd_size_params); +} + static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core) { return mlxsw_sp1_resources_kvd_register(mlxsw_core); @@ -5033,7 +5163,7 @@ static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core) static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core) { - return 0; + return mlxsw_sp2_resources_kvd_register(mlxsw_core); } static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core, @@ -5230,6 +5360,10 @@ static struct mlxsw_driver mlxsw_sp1_driver = { .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, .flash_update = mlxsw_sp_flash_update, + .trap_init = mlxsw_sp_trap_init, + .trap_fini = mlxsw_sp_trap_fini, + .trap_action_set = mlxsw_sp_trap_action_set, + .trap_group_init = mlxsw_sp_trap_group_init, .txhdr_construct = mlxsw_sp_txhdr_construct, .resources_register = mlxsw_sp1_resources_register, .kvd_sizes_get = mlxsw_sp_kvd_sizes_get, @@ -5260,6 +5394,43 @@ static struct mlxsw_driver mlxsw_sp2_driver = { .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, .flash_update = mlxsw_sp_flash_update, + .trap_init = mlxsw_sp_trap_init, + .trap_fini = mlxsw_sp_trap_fini, + .trap_action_set = mlxsw_sp_trap_action_set, + .trap_group_init = mlxsw_sp_trap_group_init, + .txhdr_construct = mlxsw_sp_txhdr_construct, + .resources_register = mlxsw_sp2_resources_register, + .params_register = mlxsw_sp2_params_register, + .params_unregister = mlxsw_sp2_params_unregister, + .ptp_transmitted = mlxsw_sp_ptp_transmitted, + .txhdr_len = MLXSW_TXHDR_LEN, + .profile = &mlxsw_sp2_config_profile, + .res_query_enabled = true, +}; + +static struct mlxsw_driver mlxsw_sp3_driver = { + .kind = mlxsw_sp3_driver_name, + .priv_size = sizeof(struct mlxsw_sp), + .init = mlxsw_sp2_init, + .fini = mlxsw_sp_fini, + .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, + .port_split = mlxsw_sp_port_split, + .port_unsplit = mlxsw_sp_port_unsplit, + .sb_pool_get = mlxsw_sp_sb_pool_get, + .sb_pool_set = mlxsw_sp_sb_pool_set, + .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, + 
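/* [Annotation] mlxsw_sp3_driver below differs from mlxsw_sp2_driver only
 * in its .kind string: the init, fini, resources, params and trap
 * callbacks are all shared with Spectrum-2 (including the sp2 config
 * profile), so Spectrum-3 support amounts to registering the new PCI ID
 * against the existing Spectrum-2 code paths.
 */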
.sb_port_pool_set = mlxsw_sp_sb_port_pool_set, + .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, + .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, + .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, + .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, + .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, + .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, + .flash_update = mlxsw_sp_flash_update, + .trap_init = mlxsw_sp_trap_init, + .trap_fini = mlxsw_sp_trap_fini, + .trap_action_set = mlxsw_sp_trap_action_set, + .trap_group_init = mlxsw_sp_trap_group_init, .txhdr_construct = mlxsw_sp_txhdr_construct, .resources_register = mlxsw_sp2_resources_register, .params_register = mlxsw_sp2_params_register, @@ -6304,6 +6475,16 @@ static struct pci_driver mlxsw_sp2_pci_driver = { .id_table = mlxsw_sp2_pci_id_table, }; +static const struct pci_device_id mlxsw_sp3_pci_id_table[] = { + {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0}, + {0, }, +}; + +static struct pci_driver mlxsw_sp3_pci_driver = { + .name = mlxsw_sp3_driver_name, + .id_table = mlxsw_sp3_pci_id_table, +}; + static int __init mlxsw_sp_module_init(void) { int err; @@ -6319,6 +6500,10 @@ static int __init mlxsw_sp_module_init(void) if (err) goto err_sp2_core_driver_register; + err = mlxsw_core_driver_register(&mlxsw_sp3_driver); + if (err) + goto err_sp3_core_driver_register; + err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver); if (err) goto err_sp1_pci_driver_register; @@ -6327,11 +6512,19 @@ static int __init mlxsw_sp_module_init(void) if (err) goto err_sp2_pci_driver_register; + err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver); + if (err) + goto err_sp3_pci_driver_register; + return 0; +err_sp3_pci_driver_register: + mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); err_sp2_pci_driver_register: mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); err_sp1_pci_driver_register: + mlxsw_core_driver_unregister(&mlxsw_sp3_driver); +err_sp3_core_driver_register: mlxsw_core_driver_unregister(&mlxsw_sp2_driver); err_sp2_core_driver_register: mlxsw_core_driver_unregister(&mlxsw_sp1_driver); @@ -6343,8 +6536,10 @@ err_sp1_core_driver_register: static void __exit mlxsw_sp_module_exit(void) { + mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver); mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); + mlxsw_core_driver_unregister(&mlxsw_sp3_driver); mlxsw_core_driver_unregister(&mlxsw_sp2_driver); mlxsw_core_driver_unregister(&mlxsw_sp1_driver); unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); @@ -6359,4 +6554,5 @@ MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); MODULE_DESCRIPTION("Mellanox Spectrum driver"); MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table); MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table); +MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table); MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 6664119fb0c8..b2a0028b1694 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -225,6 +225,16 @@ struct mlxsw_sp_port_xstats { u64 tx_packets[IEEE_8021QAZ_MAX_TCS]; }; +struct mlxsw_sp_ptp_port_dir_stats { + u64 packets; + u64 timestamps; +}; + +struct mlxsw_sp_ptp_port_stats { + struct mlxsw_sp_ptp_port_dir_stats rx_gcd; + struct mlxsw_sp_ptp_port_dir_stats tx_gcd; +}; + struct mlxsw_sp_port { struct net_device *dev; struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats; 
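/* [Annotation, not part of the patch] The PTP counters added to this
 * structure above are later exposed through ethtool -S via an
 * offsetof()-based descriptor table (see the spectrum_ptp.c hunk further
 * down). A self-contained sketch of that table-driven pattern, using
 * hypothetical foo_* names:
 */
struct foo_stats {
	u64 rx_pkts;
	u64 tx_pkts;
};

struct foo_stat_desc {
	char str[ETH_GSTRING_LEN];	/* ethtool string for this counter */
	ptrdiff_t offset;		/* byte offset inside foo_stats */
};

#define FOO_STAT(NAME, FIELD)						\
	{								\
		.str = NAME,						\
		.offset = offsetof(struct foo_stats, FIELD),		\
	}

static const struct foo_stat_desc foo_stat_descs[] = {
	FOO_STAT("foo_rx_pkts", rx_pkts),
	FOO_STAT("foo_tx_pkts", tx_pkts),
};

/* Walk the table and pull each u64 out by its recorded offset, the same
 * scheme mlxsw_sp1_get_stats() uses for the rx/tx GCD counters.
 */
static void foo_get_stats(const struct foo_stats *stats, u64 *data)
{
	const void *base = stats;
	int i;

	for (i = 0; i < ARRAY_SIZE(foo_stat_descs); i++)
		*data++ = *(const u64 *)(base + foo_stat_descs[i].offset);
}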
@@ -271,6 +281,7 @@ struct mlxsw_sp_port { struct hwtstamp_config hwtstamp_config; u16 ing_types; u16 egr_types; + struct mlxsw_sp_ptp_port_stats stats; } ptp; }; @@ -279,14 +290,14 @@ struct mlxsw_sp_port_type_speed_ops { u32 ptys_eth_proto, struct ethtool_link_ksettings *cmd); void (*from_ptys_link)(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto, - unsigned long *mode); + u8 width, unsigned long *mode); u32 (*from_ptys_speed)(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto); void (*from_ptys_speed_duplex)(struct mlxsw_sp *mlxsw_sp, bool carrier_ok, u32 ptys_eth_proto, struct ethtool_link_ksettings *cmd); - u32 (*to_ptys_advert_link)(struct mlxsw_sp *mlxsw_sp, + u32 (*to_ptys_advert_link)(struct mlxsw_sp *mlxsw_sp, u8 width, const struct ethtool_link_ksettings *cmd); - u32 (*to_ptys_speed)(struct mlxsw_sp *mlxsw_sp, u32 speed); + u32 (*to_ptys_speed)(struct mlxsw_sp *mlxsw_sp, u8 width, u32 speed); u32 (*to_ptys_upper_speed)(struct mlxsw_sp *mlxsw_sp, u32 upper_speed); int (*port_speed_base)(struct mlxsw_sp *mlxsw_sp, u8 local_port, u32 *base_speed); @@ -623,7 +634,8 @@ struct mlxsw_sp_acl_rule_info { unsigned int priority; struct mlxsw_afk_element_values values; struct mlxsw_afa_block *act_block; - u8 action_created:1; + u8 action_created:1, + egress_bind_blocker:1; unsigned int counter_index; }; @@ -642,6 +654,7 @@ struct mlxsw_sp_acl_block { struct mlxsw_sp *mlxsw_sp; unsigned int rule_count; unsigned int disable_count; + unsigned int egress_blocker_rule_count; struct net *net; }; @@ -657,7 +670,8 @@ void mlxsw_sp_acl_block_destroy(struct mlxsw_sp_acl_block *block); int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_block *block, struct mlxsw_sp_port *mlxsw_sp_port, - bool ingress); + bool ingress, + struct netlink_ext_ack *extack); int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_block *block, struct mlxsw_sp_port *mlxsw_sp_port, @@ -955,4 +969,17 @@ void mlxsw_sp_nve_fini(struct mlxsw_sp *mlxsw_sp); int mlxsw_sp_nve_inc_parsing_depth_get(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_nve_inc_parsing_depth_put(struct mlxsw_sp *mlxsw_sp); +/* spectrum_trap.c */ +int mlxsw_sp_devlink_traps_init(struct mlxsw_sp *mlxsw_sp); +void mlxsw_sp_devlink_traps_fini(struct mlxsw_sp *mlxsw_sp); +int mlxsw_sp_trap_init(struct mlxsw_core *mlxsw_core, + const struct devlink_trap *trap, void *trap_ctx); +void mlxsw_sp_trap_fini(struct mlxsw_core *mlxsw_core, + const struct devlink_trap *trap, void *trap_ctx); +int mlxsw_sp_trap_action_set(struct mlxsw_core *mlxsw_core, + const struct devlink_trap *trap, + enum devlink_trap_action action); +int mlxsw_sp_trap_group_init(struct mlxsw_core *mlxsw_core, + const struct devlink_trap_group *group); + #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index 84a87d059333..150b3a144b83 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -239,7 +239,8 @@ mlxsw_sp_acl_block_lookup(struct mlxsw_sp_acl_block *block, int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_block *block, struct mlxsw_sp_port *mlxsw_sp_port, - bool ingress) + bool ingress, + struct netlink_ext_ack *extack) { struct mlxsw_sp_acl_block_binding *binding; int err; @@ -247,6 +248,11 @@ int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp, if (WARN_ON(mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress))) return -EEXIST; + if (!ingress && block->egress_blocker_rule_count) { + 
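/* [Annotation] 'egress_blocker_rule_count' is maintained by
 * mlxsw_sp_acl_rule_add()/del() below from the per-rule
 * 'egress_bind_blocker' flag, which the flower parser sets for actions
 * (FID redirect) and matches (vlan_id) the hardware cannot honour on
 * egress; a block containing any such rule therefore refuses egress
 * binding up front, with the extack message emitted here.
 */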
NL_SET_ERR_MSG_MOD(extack, "Block cannot be bound to egress because it contains unsupported rules"); + return -EOPNOTSUPP; + } + binding = kzalloc(sizeof(*binding), GFP_KERNEL); if (!binding) return -ENOMEM; @@ -672,6 +678,7 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset; const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; + struct mlxsw_sp_acl_block *block = ruleset->ht_key.block; int err; err = ops->rule_add(mlxsw_sp, ruleset->priv, rule->priv, rule->rulei); @@ -689,14 +696,14 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp, * one, to be directly bound to device. The rest of the * rulesets are bound by "Goto action set". */ - err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset, - ruleset->ht_key.block); + err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset, block); if (err) goto err_ruleset_block_bind; } list_add_tail(&rule->list, &mlxsw_sp->acl->rules); - ruleset->ht_key.block->rule_count++; + block->rule_count++; + block->egress_blocker_rule_count += rule->rulei->egress_bind_blocker; return 0; err_ruleset_block_bind: @@ -712,7 +719,9 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset; const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; + struct mlxsw_sp_acl_block *block = ruleset->ht_key.block; + block->egress_blocker_rule_count -= rule->rulei->egress_bind_blocker; ruleset->ht_key.block->rule_count--; list_del(&rule->list); if (!ruleset->ht_key.chain_index && diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index 888ba4300bcc..b9eeae37a4dc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -250,6 +250,10 @@ static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port, &mlxsw_sp->sb_vals->pool_dess[pool_index]; char sbpm_pl[MLXSW_REG_SBPM_LEN]; + if (local_port == MLXSW_PORT_CPU_PORT && + des->dir == MLXSW_REG_SBXX_DIR_INGRESS) + return 0; + mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir, true, 0, 0); return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl, @@ -273,6 +277,10 @@ static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port, char sbpm_pl[MLXSW_REG_SBPM_LEN]; struct mlxsw_sp_sb_pm *pm; + if (local_port == MLXSW_PORT_CPU_PORT && + des->dir == MLXSW_REG_SBXX_DIR_INGRESS) + return 0; + pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index); mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir, false, 0, 0); @@ -1085,6 +1093,11 @@ int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port, u32 max_buff; int err; + if (local_port == MLXSW_PORT_CPU_PORT) { + NL_SET_ERR_MSG_MOD(extack, "Changing CPU port's threshold is forbidden"); + return -EINVAL; + } + err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index, threshold, &max_buff, extack); if (err) @@ -1130,6 +1143,11 @@ int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port, u32 max_buff; int err; + if (local_port == MLXSW_PORT_CPU_PORT) { + NL_SET_ERR_MSG_MOD(extack, "Changing CPU port's binding is forbidden"); + return -EINVAL; + } + if (dir != mlxsw_sp->sb_vals->pool_dess[pool_index].dir) { NL_SET_ERR_MSG_MOD(extack, "Binding egress TC to ingress pool and vice versa is forbidden"); return -EINVAL; @@ -1187,6 +1205,11 @@ static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core, local_port < 
mlxsw_core_max_ports(mlxsw_core); local_port++) { if (!mlxsw_sp->ports[local_port]) continue; + if (local_port == MLXSW_PORT_CPU_PORT) { + /* Ingress quotas are not supported for the CPU port */ + masked_count++; + continue; + } for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++) { cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i, MLXSW_REG_SBXX_DIR_INGRESS); @@ -1222,7 +1245,7 @@ int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core, char *sbsr_pl; u8 masked_count; u8 local_port_1; - u8 local_port = 0; + u8 local_port; int i; int err; int err2; @@ -1231,8 +1254,8 @@ int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core, if (!sbsr_pl) return -ENOMEM; + local_port = MLXSW_PORT_CPU_PORT; next_batch: - local_port++; local_port_1 = local_port; masked_count = 0; mlxsw_reg_sbsr_pack(sbsr_pl, false); @@ -1243,7 +1266,11 @@ next_batch: for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) { if (!mlxsw_sp->ports[local_port]) continue; - mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1); + if (local_port != MLXSW_PORT_CPU_PORT) { + /* Ingress quotas are not supported for the CPU port */ + mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, + local_port, 1); + } mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1); for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) { err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i, @@ -1264,8 +1291,10 @@ do_query: cb_priv); if (err) goto out; - if (local_port < mlxsw_core_max_ports(mlxsw_core)) + if (local_port < mlxsw_core_max_ports(mlxsw_core)) { + local_port++; goto next_batch; + } out: err2 = mlxsw_reg_trans_bulk_wait(&bulk_list); @@ -1282,7 +1311,7 @@ int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core, LIST_HEAD(bulk_list); char *sbsr_pl; unsigned int masked_count; - u8 local_port = 0; + u8 local_port; int i; int err; int err2; @@ -1291,8 +1320,8 @@ int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core, if (!sbsr_pl) return -ENOMEM; + local_port = MLXSW_PORT_CPU_PORT; next_batch: - local_port++; masked_count = 0; mlxsw_reg_sbsr_pack(sbsr_pl, true); for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++) @@ -1302,7 +1331,11 @@ next_batch: for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) { if (!mlxsw_sp->ports[local_port]) continue; - mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1); + if (local_port != MLXSW_PORT_CPU_PORT) { + /* Ingress quotas are not supported for the CPU port */ + mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, + local_port, 1); + } mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1); for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) { err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i, @@ -1319,8 +1352,10 @@ do_query: &bulk_list, NULL, 0); if (err) goto out; - if (local_port < mlxsw_core_max_ports(mlxsw_core)) + if (local_port < mlxsw_core_max_ports(mlxsw_core)) { + local_port++; goto next_batch; + } out: err2 = mlxsw_reg_trans_bulk_wait(&bulk_list); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index 202e9a246019..0ad1a24abfc6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -78,6 +78,16 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid; u16 fid_index; + if (mlxsw_sp_acl_block_is_egress_bound(block)) { + NL_SET_ERR_MSG_MOD(extack, "Redirect action is not supported on egress"); + return -EOPNOTSUPP; + } + + /* Forbid block with this rulei to be bound + * to 
egress in future. + */ + rulei->egress_bind_blocker = 1; + fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp); fid_index = mlxsw_sp_fid_index(fid); err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei, @@ -257,6 +267,12 @@ static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp, flow_rule_match_tcp(rule, &match); + if (match.mask->flags & htons(0x0E00)) { + NL_SET_ERR_MSG_MOD(f->common.extack, "TCP flags match not supported on reserved bits"); + dev_err(mlxsw_sp->bus_info->dev, "TCP flags match not supported on reserved bits\n"); + return -EINVAL; + } + mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS, ntohs(match.key->flags), ntohs(match.mask->flags)); @@ -390,6 +406,12 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress"); return -EOPNOTSUPP; } + + /* Forbid block with this rulei to be bound + * to egress in future. + */ + rulei->egress_bind_blocker = 1; + if (match.mask->vlan_id != 0) mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_VID, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c index 38bb1cfe4e8c..ec2ff3d7f41c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c @@ -630,6 +630,8 @@ static void mlxsw_sp1_ptp_ht_gc_collect(struct mlxsw_sp_ptp_state *ptp_state, struct mlxsw_sp1_ptp_unmatched *unmatched) { + struct mlxsw_sp_ptp_port_dir_stats *stats; + struct mlxsw_sp_port *mlxsw_sp_port; int err; /* If an unmatched entry has an SKB, it has to be handed over to the @@ -650,6 +652,17 @@ mlxsw_sp1_ptp_ht_gc_collect(struct mlxsw_sp_ptp_state *ptp_state, /* The packet was matched with timestamp during the walk. */ goto out; + mlxsw_sp_port = ptp_state->mlxsw_sp->ports[unmatched->key.local_port]; + if (mlxsw_sp_port) { + stats = unmatched->key.ingress ? + &mlxsw_sp_port->ptp.stats.rx_gcd : + &mlxsw_sp_port->ptp.stats.tx_gcd; + if (unmatched->skb) + stats->packets++; + else + stats->timestamps++; + } + /* mlxsw_sp1_ptp_unmatched_finish() invokes netif_receive_skb(). 
While * the comment at that function states that it can only be called in * soft IRQ context, this pattern of local_bh_disable() + @@ -1098,3 +1111,57 @@ int mlxsw_sp1_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp, return 0; } + +struct mlxsw_sp_ptp_port_stat { + char str[ETH_GSTRING_LEN]; + ptrdiff_t offset; +}; + +#define MLXSW_SP_PTP_PORT_STAT(NAME, FIELD) \ + { \ + .str = NAME, \ + .offset = offsetof(struct mlxsw_sp_ptp_port_stats, \ + FIELD), \ + } + +static const struct mlxsw_sp_ptp_port_stat mlxsw_sp_ptp_port_stats[] = { + MLXSW_SP_PTP_PORT_STAT("ptp_rx_gcd_packets", rx_gcd.packets), + MLXSW_SP_PTP_PORT_STAT("ptp_rx_gcd_timestamps", rx_gcd.timestamps), + MLXSW_SP_PTP_PORT_STAT("ptp_tx_gcd_packets", tx_gcd.packets), + MLXSW_SP_PTP_PORT_STAT("ptp_tx_gcd_timestamps", tx_gcd.timestamps), +}; + +#undef MLXSW_SP_PTP_PORT_STAT + +#define MLXSW_SP_PTP_PORT_STATS_LEN \ + ARRAY_SIZE(mlxsw_sp_ptp_port_stats) + +int mlxsw_sp1_get_stats_count(void) +{ + return MLXSW_SP_PTP_PORT_STATS_LEN; +} + +void mlxsw_sp1_get_stats_strings(u8 **p) +{ + int i; + + for (i = 0; i < MLXSW_SP_PTP_PORT_STATS_LEN; i++) { + memcpy(*p, mlxsw_sp_ptp_port_stats[i].str, + ETH_GSTRING_LEN); + *p += ETH_GSTRING_LEN; + } +} + +void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port, + u64 *data, int data_index) +{ + void *stats = &mlxsw_sp_port->ptp.stats; + ptrdiff_t offset; + int i; + + data += data_index; + for (i = 0; i < MLXSW_SP_PTP_PORT_STATS_LEN; i++) { + offset = mlxsw_sp_ptp_port_stats[i].offset; + *data++ = *(u64 *)(stats + offset); + } +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h index 72e55f6926b9..8c386571afce 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h @@ -59,6 +59,11 @@ void mlxsw_sp1_ptp_shaper_work(struct work_struct *work); int mlxsw_sp1_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp, struct ethtool_ts_info *info); +int mlxsw_sp1_get_stats_count(void); +void mlxsw_sp1_get_stats_strings(u8 **p); +void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port, + u64 *data, int data_index); + #else static inline struct mlxsw_sp_ptp_clock * @@ -125,6 +130,19 @@ static inline int mlxsw_sp1_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp, return mlxsw_sp_ptp_get_ts_info_noptp(info); } +static inline int mlxsw_sp1_get_stats_count(void) +{ + return 0; +} + +static inline void mlxsw_sp1_get_stats_strings(u8 **p) +{ +} + +static inline void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port, + u64 *data, int data_index) +{ +} #endif static inline struct mlxsw_sp_ptp_clock * @@ -183,4 +201,18 @@ static inline int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp, return mlxsw_sp_ptp_get_ts_info_noptp(info); } +static inline int mlxsw_sp2_get_stats_count(void) +{ + return 0; +} + +static inline void mlxsw_sp2_get_stats_strings(u8 **p) +{ +} + +static inline void mlxsw_sp2_get_stats(struct mlxsw_sp_port *mlxsw_sp_port, + u64 *data, int data_index) +{ +} + #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index e618be7ce6c6..a330b369e899 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -2943,7 +2943,7 @@ static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed) val = nh_grp->count; for (i = 0; i < nh_grp->count; i++) { nh = &nh_grp->nexthops[i]; - val ^= nh->ifindex; + val ^= 
jhash(&nh->ifindex, sizeof(nh->ifindex), seed); } return jhash(&val, sizeof(val), seed); default: @@ -2961,7 +2961,7 @@ mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed) list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) { dev = mlxsw_sp_rt6->rt->fib6_nh->fib_nh_dev; - val ^= dev->ifindex; + val ^= jhash(&dev->ifindex, sizeof(dev->ifindex), seed); } return jhash(&val, sizeof(val), seed); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c new file mode 100644 index 000000000000..899450b28621 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c @@ -0,0 +1,267 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2019 Mellanox Technologies. All rights reserved */ + +#include <linux/kernel.h> +#include <net/devlink.h> +#include <uapi/linux/devlink.h> + +#include "core.h" +#include "reg.h" +#include "spectrum.h" + +#define MLXSW_SP_TRAP_METADATA DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT + +static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port, + void *priv); + +#define MLXSW_SP_TRAP_DROP(_id, _group_id) \ + DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \ + DEVLINK_TRAP_GROUP_GENERIC(_group_id), \ + MLXSW_SP_TRAP_METADATA) + +#define MLXSW_SP_RXL_DISCARD(_id, _group_id) \ + MLXSW_RXL(mlxsw_sp_rx_drop_listener, DISCARD_##_id, SET_FW_DEFAULT, \ + false, SP_##_group_id, DISCARD) + +static struct devlink_trap mlxsw_sp_traps_arr[] = { + MLXSW_SP_TRAP_DROP(SMAC_MC, L2_DROPS), + MLXSW_SP_TRAP_DROP(VLAN_TAG_MISMATCH, L2_DROPS), + MLXSW_SP_TRAP_DROP(INGRESS_VLAN_FILTER, L2_DROPS), + MLXSW_SP_TRAP_DROP(INGRESS_STP_FILTER, L2_DROPS), + MLXSW_SP_TRAP_DROP(EMPTY_TX_LIST, L2_DROPS), + MLXSW_SP_TRAP_DROP(PORT_LOOPBACK_FILTER, L2_DROPS), +}; + +static struct mlxsw_listener mlxsw_sp_listeners_arr[] = { + MLXSW_SP_RXL_DISCARD(ING_PACKET_SMAC_MC, L2_DISCARDS), + MLXSW_SP_RXL_DISCARD(ING_SWITCH_VTAG_ALLOW, L2_DISCARDS), + MLXSW_SP_RXL_DISCARD(ING_SWITCH_VLAN, L2_DISCARDS), + MLXSW_SP_RXL_DISCARD(ING_SWITCH_STP, L2_DISCARDS), + MLXSW_SP_RXL_DISCARD(LOOKUP_SWITCH_UC, L2_DISCARDS), + MLXSW_SP_RXL_DISCARD(LOOKUP_SWITCH_MC_NULL, L2_DISCARDS), + MLXSW_SP_RXL_DISCARD(LOOKUP_SWITCH_LB, L2_DISCARDS), +}; + +/* Mapping between hardware trap and devlink trap. Multiple hardware traps can + * be mapped to the same devlink trap. Order is according to + * 'mlxsw_sp_listeners_arr'. 
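+ * For example, both LOOKUP_SWITCH_UC and LOOKUP_SWITCH_MC_NULL are
+ * reported via the single generic EMPTY_TX_LIST trap.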
+ */ +static u16 mlxsw_sp_listener_devlink_map[] = { + DEVLINK_TRAP_GENERIC_ID_SMAC_MC, + DEVLINK_TRAP_GENERIC_ID_VLAN_TAG_MISMATCH, + DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER, + DEVLINK_TRAP_GENERIC_ID_INGRESS_STP_FILTER, + DEVLINK_TRAP_GENERIC_ID_EMPTY_TX_LIST, + DEVLINK_TRAP_GENERIC_ID_EMPTY_TX_LIST, + DEVLINK_TRAP_GENERIC_ID_PORT_LOOPBACK_FILTER, +}; + +static int mlxsw_sp_rx_listener(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, + u8 local_port, + struct mlxsw_sp_port *mlxsw_sp_port) +{ + struct mlxsw_sp_port_pcpu_stats *pcpu_stats; + + if (unlikely(!mlxsw_sp_port)) { + dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n", + local_port); + kfree_skb(skb); + return -EINVAL; + } + + skb->dev = mlxsw_sp_port->dev; + + pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); + u64_stats_update_begin(&pcpu_stats->syncp); + pcpu_stats->rx_packets++; + pcpu_stats->rx_bytes += skb->len; + u64_stats_update_end(&pcpu_stats->syncp); + + skb->protocol = eth_type_trans(skb, skb->dev); + + return 0; +} + +static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port, + void *trap_ctx) +{ + struct devlink_port *in_devlink_port; + struct mlxsw_sp_port *mlxsw_sp_port; + struct mlxsw_sp *mlxsw_sp; + struct devlink *devlink; + + mlxsw_sp = devlink_trap_ctx_priv(trap_ctx); + mlxsw_sp_port = mlxsw_sp->ports[local_port]; + + if (mlxsw_sp_rx_listener(mlxsw_sp, skb, local_port, mlxsw_sp_port)) + return; + + devlink = priv_to_devlink(mlxsw_sp->core); + in_devlink_port = mlxsw_core_port_devlink_port_get(mlxsw_sp->core, + local_port); + devlink_trap_report(devlink, skb, trap_ctx, in_devlink_port); + consume_skb(skb); +} + +int mlxsw_sp_devlink_traps_init(struct mlxsw_sp *mlxsw_sp) +{ + struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); + + if (WARN_ON(ARRAY_SIZE(mlxsw_sp_listener_devlink_map) != + ARRAY_SIZE(mlxsw_sp_listeners_arr))) + return -EINVAL; + + return devlink_traps_register(devlink, mlxsw_sp_traps_arr, + ARRAY_SIZE(mlxsw_sp_traps_arr), + mlxsw_sp); +} + +void mlxsw_sp_devlink_traps_fini(struct mlxsw_sp *mlxsw_sp) +{ + struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); + + devlink_traps_unregister(devlink, mlxsw_sp_traps_arr, + ARRAY_SIZE(mlxsw_sp_traps_arr)); +} + +int mlxsw_sp_trap_init(struct mlxsw_core *mlxsw_core, + const struct devlink_trap *trap, void *trap_ctx) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener_devlink_map); i++) { + struct mlxsw_listener *listener; + int err; + + if (mlxsw_sp_listener_devlink_map[i] != trap->id) + continue; + listener = &mlxsw_sp_listeners_arr[i]; + + err = mlxsw_core_trap_register(mlxsw_core, listener, trap_ctx); + if (err) + return err; + } + + return 0; +} + +void mlxsw_sp_trap_fini(struct mlxsw_core *mlxsw_core, + const struct devlink_trap *trap, void *trap_ctx) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener_devlink_map); i++) { + struct mlxsw_listener *listener; + + if (mlxsw_sp_listener_devlink_map[i] != trap->id) + continue; + listener = &mlxsw_sp_listeners_arr[i]; + + mlxsw_core_trap_unregister(mlxsw_core, listener, trap_ctx); + } +} + +int mlxsw_sp_trap_action_set(struct mlxsw_core *mlxsw_core, + const struct devlink_trap *trap, + enum devlink_trap_action action) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener_devlink_map); i++) { + enum mlxsw_reg_hpkt_action hw_action; + struct mlxsw_listener *listener; + int err; + + if (mlxsw_sp_listener_devlink_map[i] != trap->id) + continue; + listener = &mlxsw_sp_listeners_arr[i]; + + switch (action) { + 
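/* Translate the generic devlink trap action into the device-specific
+ * HPKT action.
+ */
+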
case DEVLINK_TRAP_ACTION_DROP: + hw_action = MLXSW_REG_HPKT_ACTION_SET_FW_DEFAULT; + break; + case DEVLINK_TRAP_ACTION_TRAP: + hw_action = MLXSW_REG_HPKT_ACTION_TRAP_EXCEPTION_TO_CPU; + break; + default: + return -EINVAL; + } + + err = mlxsw_core_trap_action_set(mlxsw_core, listener, + hw_action); + if (err) + return err; + } + + return 0; +} + +#define MLXSW_SP_DISCARD_POLICER_ID (MLXSW_REG_HTGT_TRAP_GROUP_MAX + 1) + +static int +mlxsw_sp_trap_group_policer_init(struct mlxsw_sp *mlxsw_sp, + const struct devlink_trap_group *group) +{ + enum mlxsw_reg_qpcr_ir_units ir_units; + char qpcr_pl[MLXSW_REG_QPCR_LEN]; + u16 policer_id; + u8 burst_size; + bool is_bytes; + u32 rate; + + switch (group->id) { + case DEVLINK_TRAP_GROUP_GENERIC_ID_L2_DROPS: + policer_id = MLXSW_SP_DISCARD_POLICER_ID; + ir_units = MLXSW_REG_QPCR_IR_UNITS_M; + is_bytes = false; + rate = 10 * 1024; /* 10Kpps */ + burst_size = 7; + break; + default: + return -EINVAL; + } + + mlxsw_reg_qpcr_pack(qpcr_pl, policer_id, ir_units, is_bytes, rate, + burst_size); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpcr), qpcr_pl); +} + +static int +__mlxsw_sp_trap_group_init(struct mlxsw_sp *mlxsw_sp, + const struct devlink_trap_group *group) +{ + char htgt_pl[MLXSW_REG_HTGT_LEN]; + u8 priority, tc, group_id; + u16 policer_id; + + switch (group->id) { + case DEVLINK_TRAP_GROUP_GENERIC_ID_L2_DROPS: + group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_L2_DISCARDS; + policer_id = MLXSW_SP_DISCARD_POLICER_ID; + priority = 0; + tc = 1; + break; + default: + return -EINVAL; + } + + mlxsw_reg_htgt_pack(htgt_pl, group_id, policer_id, priority, tc); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl); +} + +int mlxsw_sp_trap_group_init(struct mlxsw_core *mlxsw_core, + const struct devlink_trap_group *group) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + int err; + + err = mlxsw_sp_trap_group_policer_init(mlxsw_sp, group); + if (err) + return err; + + err = __mlxsw_sp_trap_group_init(mlxsw_sp, group); + if (err) + return err; + + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index bdab96f5bc70..1c14c051ee52 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -637,12 +637,6 @@ static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = { .speed = 50000, }, { - .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, - .supported = SUPPORTED_56000baseKR4_Full, - .advertised = ADVERTISED_56000baseKR4_Full, - .speed = 56000, - }, - { .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 | MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 | diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h index 19202bdb5105..7618f084cae9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h @@ -66,6 +66,13 @@ enum { MLXSW_TRAP_ID_NVE_ENCAP_ARP = 0xBD, MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6, MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7, + MLXSW_TRAP_ID_DISCARD_ING_PACKET_SMAC_MC = 0x140, + MLXSW_TRAP_ID_DISCARD_ING_SWITCH_VTAG_ALLOW = 0x148, + MLXSW_TRAP_ID_DISCARD_ING_SWITCH_VLAN = 0x149, + MLXSW_TRAP_ID_DISCARD_ING_SWITCH_STP = 0x14A, + MLXSW_TRAP_ID_DISCARD_LOOKUP_SWITCH_UC = 0x150, + MLXSW_TRAP_ID_DISCARD_LOOKUP_SWITCH_MC_NULL = 0x151, + MLXSW_TRAP_ID_DISCARD_LOOKUP_SWITCH_LB = 0x152, MLXSW_TRAP_ID_ACL0 = 0x1C0, /* Multicast trap used for routes with trap action */ 
MLXSW_TRAP_ID_ACL1 = 0x1C1, diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c index ccd06702cc56..da329ca115cc 100644 --- a/drivers/net/ethernet/micrel/ks8842.c +++ b/drivers/net/ethernet/micrel/ks8842.c @@ -580,9 +580,7 @@ out: dma_unmap_single(adapter->dev, sg_dma_address(sg), DMA_BUFFER_SIZE, DMA_FROM_DEVICE); sg_dma_address(sg) = 0; - if (ctl->skb) - dev_kfree_skb(ctl->skb); - + dev_kfree_skb(ctl->skb); ctl->skb = NULL; printk(KERN_ERR DRV_NAME": Failed to start RX DMA: %d\n", err); diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c index e52b015e31a9..a41a90c589db 100644 --- a/drivers/net/ethernet/micrel/ks8851_mll.c +++ b/drivers/net/ethernet/micrel/ks8851_mll.c @@ -1225,7 +1225,6 @@ MODULE_DEVICE_TABLE(of, ks8851_ml_dt_ids); static int ks8851_probe(struct platform_device *pdev) { int err; - struct resource *io_d, *io_c; struct net_device *netdev; struct ks_net *ks; u16 id, data; @@ -1240,15 +1239,13 @@ static int ks8851_probe(struct platform_device *pdev) ks = netdev_priv(netdev); ks->netdev = netdev; - io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ks->hw_addr = devm_ioremap_resource(&pdev->dev, io_d); + ks->hw_addr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ks->hw_addr)) { err = PTR_ERR(ks->hw_addr); goto err_free; } - io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1); - ks->hw_addr_cmd = devm_ioremap_resource(&pdev->dev, io_c); + ks->hw_addr_cmd = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(ks->hw_addr_cmd)) { err = PTR_ERR(ks->hw_addr_cmd); goto err_free; diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index 3103446f018c..e102e1560ac7 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c @@ -2166,7 +2166,7 @@ static void sw_get_broad_storm(struct ksz_hw *hw, u8 *percent) num = (data & BROADCAST_STORM_RATE_HI); num <<= 8; num |= (data & BROADCAST_STORM_RATE_LO) >> 8; - num = (num * 100 + BROADCAST_STORM_VALUE / 2) / BROADCAST_STORM_VALUE; + num = DIV_ROUND_CLOSEST(num * 100, BROADCAST_STORM_VALUE); *percent = (u8) num; } diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index 13e6bf13ac4d..a43140f7b5eb 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -1434,7 +1434,7 @@ static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx, } static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx, - const struct skb_frag_struct *fragment, + const skb_frag_t *fragment, unsigned int frame_length) { /* called only from within lan743x_tx_xmit_frame @@ -1607,9 +1607,8 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx, goto finish; for (j = 0; j < nr_frags; j++) { - const struct skb_frag_struct *frag; + const skb_frag_t *frag = &(skb_shinfo(skb)->frags[j]); - frag = &(skb_shinfo(skb)->frags[j]); if (lan743x_tx_frame_add_fragment(tx, frag, frame_length)) { /* upon error no need to call * lan743x_tx_frame_end @@ -2173,9 +2172,8 @@ static int lan743x_rx_napi_poll(struct napi_struct *napi, int weight) } count = 0; while (count < weight) { - int rx_process_result = -1; + int rx_process_result = lan743x_rx_process_packet(rx); - rx_process_result = lan743x_rx_process_packet(rx); if (rx_process_result == RX_PROCESS_RESULT_PACKET_RECEIVED) { count++; } else if (rx_process_result == diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c 
b/drivers/net/ethernet/microchip/lan743x_ptp.c index b2109eca81fd..57b26c2acf87 100644 --- a/drivers/net/ethernet/microchip/lan743x_ptp.c +++ b/drivers/net/ethernet/microchip/lan743x_ptp.c @@ -963,8 +963,7 @@ void lan743x_ptp_close(struct lan743x_adapter *adapter) index++) { struct sk_buff *skb = ptp->tx_ts_skb_queue[index]; - if (skb) - dev_kfree_skb(skb); + dev_kfree_skb(skb); ptp->tx_ts_skb_queue[index] = NULL; ptp->tx_ts_seconds_queue[index] = 0; ptp->tx_ts_nseconds_queue[index] = 0; diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 6932e615d4b0..4d1bce4389c7 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -14,6 +14,7 @@ #include <linux/module.h> #include <linux/netdevice.h> #include <linux/phy.h> +#include <linux/ptp_clock_kernel.h> #include <linux/skbuff.h> #include <linux/iopoll.h> #include <net/arp.h> @@ -538,7 +539,7 @@ static int ocelot_port_stop(struct net_device *dev) */ static int ocelot_gen_ifh(u32 *ifh, struct frame_info *info) { - ifh[0] = IFH_INJ_BYPASS; + ifh[0] = IFH_INJ_BYPASS | ((0x1ff & info->rew_op) << 21); ifh[1] = (0xf00 & info->port) >> 8; ifh[2] = (0xff & info->port) << 24; ifh[3] = (info->tag_type << 16) | info->vid; @@ -548,6 +549,7 @@ static int ocelot_gen_ifh(u32 *ifh, struct frame_info *info) static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev) { + struct skb_shared_info *shinfo = skb_shinfo(skb); struct ocelot_port *port = netdev_priv(dev); struct ocelot *ocelot = port->ocelot; u32 val, ifh[IFH_LEN]; @@ -566,6 +568,14 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev) info.port = BIT(port->chip_port); info.tag_type = IFH_TAG_TYPE_C; info.vid = skb_vlan_tag_get(skb); + + /* Check if timestamping is needed */ + if (ocelot->ptp && shinfo->tx_flags & SKBTX_HW_TSTAMP) { + info.rew_op = port->ptp_cmd; + if (port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) + info.rew_op |= (port->ts_id % 4) << 3; + } + ocelot_gen_ifh(ifh, &info); for (i = 0; i < IFH_LEN; i++) @@ -596,11 +606,58 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev) dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; - dev_kfree_skb_any(skb); + if (ocelot->ptp && shinfo->tx_flags & SKBTX_HW_TSTAMP && + port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) { + struct ocelot_skb *oskb = + kzalloc(sizeof(struct ocelot_skb), GFP_ATOMIC); + + if (unlikely(!oskb)) + goto out; + + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + + oskb->skb = skb; + oskb->id = port->ts_id % 4; + port->ts_id++; + + list_add_tail(&oskb->head, &port->skbs); + + return NETDEV_TX_OK; + } + +out: + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } +void ocelot_get_hwtimestamp(struct ocelot *ocelot, struct timespec64 *ts) +{ + unsigned long flags; + u32 val; + + spin_lock_irqsave(&ocelot->ptp_clock_lock, flags); + + /* Read current PTP time to get seconds */ + val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN); + + val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM); + val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_SAVE); + ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN); + ts->tv_sec = ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN); + + /* Read packet HW timestamp from FIFO */ + val = ocelot_read(ocelot, SYS_PTP_TXSTAMP); + ts->tv_nsec = SYS_PTP_TXSTAMP_PTP_TXSTAMP(val); + + /* Sec has incremented since the ts was registered */ + if ((ts->tv_sec & 0x1) != !!(val & SYS_PTP_TXSTAMP_PTP_TXSTAMP_SEC)) + ts->tv_sec--; + + 
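/* The FIFO entry carries the nanoseconds and only the LSB of the
+ * seconds counter, so the check above rewinds tv_sec when the seconds
+ * counter advanced between stamping and readout.
+ */
+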
spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+}
+EXPORT_SYMBOL(ocelot_get_hwtimestamp);
+
 static int ocelot_mc_unsync(struct net_device *dev, const unsigned char *addr)
 {
 struct ocelot_port *port = netdev_priv(dev);
@@ -917,6 +974,97 @@ static int ocelot_get_port_parent_id(struct net_device *dev,
 return 0;
 }
 
+static int ocelot_hwstamp_get(struct ocelot_port *port, struct ifreq *ifr)
+{
+ struct ocelot *ocelot = port->ocelot;
+
+ return copy_to_user(ifr->ifr_data, &ocelot->hwtstamp_config,
+ sizeof(ocelot->hwtstamp_config)) ? -EFAULT : 0;
+}
+
+static int ocelot_hwstamp_set(struct ocelot_port *port, struct ifreq *ifr)
+{
+ struct ocelot *ocelot = port->ocelot;
+ struct hwtstamp_config cfg;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (cfg.flags)
+ return -EINVAL;
+
+ /* Tx type sanity check */
+ switch (cfg.tx_type) {
+ case HWTSTAMP_TX_ON:
+ port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ /* IFH_REW_OP_ONE_STEP_PTP updates the correction field, we
+ * need to update the origin time.
+ */
+ port->ptp_cmd = IFH_REW_OP_ORIGIN_PTP;
+ break;
+ case HWTSTAMP_TX_OFF:
+ port->ptp_cmd = 0;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ mutex_lock(&ocelot->ptp_lock);
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+ default:
+ mutex_unlock(&ocelot->ptp_lock);
+ return -ERANGE;
+ }
+
+ /* Commit back the result & save it */
+ memcpy(&ocelot->hwtstamp_config, &cfg, sizeof(cfg));
+ mutex_unlock(&ocelot->ptp_lock);
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ?
-EFAULT : 0; +} + +static int ocelot_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct ocelot_port *port = netdev_priv(dev); + struct ocelot *ocelot = port->ocelot; + + /* The function is only used for PTP operations for now */ + if (!ocelot->ptp) + return -EOPNOTSUPP; + + switch (cmd) { + case SIOCSHWTSTAMP: + return ocelot_hwstamp_set(port, ifr); + case SIOCGHWTSTAMP: + return ocelot_hwstamp_get(port, ifr); + default: + return -EOPNOTSUPP; + } +} + static const struct net_device_ops ocelot_port_netdev_ops = { .ndo_open = ocelot_port_open, .ndo_stop = ocelot_port_stop, @@ -933,6 +1081,7 @@ static const struct net_device_ops ocelot_port_netdev_ops = { .ndo_set_features = ocelot_set_features, .ndo_get_port_parent_id = ocelot_get_port_parent_id, .ndo_setup_tc = ocelot_setup_tc, + .ndo_do_ioctl = ocelot_ioctl, }; static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data) @@ -1014,12 +1163,37 @@ static int ocelot_get_sset_count(struct net_device *dev, int sset) return ocelot->num_stats; } +static int ocelot_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct ocelot_port *ocelot_port = netdev_priv(dev); + struct ocelot *ocelot = ocelot_port->ocelot; + + if (!ocelot->ptp) + return ethtool_op_get_ts_info(dev, info); + + info->phc_index = ocelot->ptp_clock ? + ptp_clock_index(ocelot->ptp_clock) : -1; + info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) | + BIT(HWTSTAMP_TX_ONESTEP_SYNC); + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL); + + return 0; +} + static const struct ethtool_ops ocelot_ethtool_ops = { .get_strings = ocelot_get_strings, .get_ethtool_stats = ocelot_get_ethtool_stats, .get_sset_count = ocelot_get_sset_count, .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_ts_info = ocelot_get_ts_info, }; static int ocelot_port_attr_stp_state_set(struct ocelot_port *ocelot_port, @@ -1629,6 +1803,196 @@ struct notifier_block ocelot_switchdev_blocking_nb __read_mostly = { }; EXPORT_SYMBOL(ocelot_switchdev_blocking_nb); +int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts) +{ + struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info); + unsigned long flags; + time64_t s; + u32 val; + s64 ns; + + spin_lock_irqsave(&ocelot->ptp_clock_lock, flags); + + val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN); + val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM); + val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_SAVE); + ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN); + + s = ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_MSB, TOD_ACC_PIN) & 0xffff; + s <<= 32; + s += ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN); + ns = ocelot_read_rix(ocelot, PTP_PIN_TOD_NSEC, TOD_ACC_PIN); + + spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags); + + /* Deal with negative values */ + if (ns >= 0x3ffffff0 && ns <= 0x3fffffff) { + s--; + ns &= 0xf; + ns += 999999984; + } + + set_normalized_timespec64(ts, s, ns); + return 0; +} +EXPORT_SYMBOL(ocelot_ptp_gettime64); + +static int ocelot_ptp_settime64(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info); + unsigned long flags; + u32 val; + + 
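/* Load the new time: idle the TOD pin, latch the seconds and
+ * nanoseconds registers, then trigger a LOAD action.
+ */
+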
spin_lock_irqsave(&ocelot->ptp_clock_lock, flags); + + val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN); + val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM); + val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_IDLE); + + ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN); + + ocelot_write_rix(ocelot, lower_32_bits(ts->tv_sec), PTP_PIN_TOD_SEC_LSB, + TOD_ACC_PIN); + ocelot_write_rix(ocelot, upper_32_bits(ts->tv_sec), PTP_PIN_TOD_SEC_MSB, + TOD_ACC_PIN); + ocelot_write_rix(ocelot, ts->tv_nsec, PTP_PIN_TOD_NSEC, TOD_ACC_PIN); + + val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN); + val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM); + val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_LOAD); + + ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN); + + spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags); + return 0; +} + +static int ocelot_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + if (delta > -(NSEC_PER_SEC / 2) && delta < (NSEC_PER_SEC / 2)) { + struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info); + unsigned long flags; + u32 val; + + spin_lock_irqsave(&ocelot->ptp_clock_lock, flags); + + val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN); + val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM); + val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_IDLE); + + ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN); + + ocelot_write_rix(ocelot, 0, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN); + ocelot_write_rix(ocelot, 0, PTP_PIN_TOD_SEC_MSB, TOD_ACC_PIN); + ocelot_write_rix(ocelot, delta, PTP_PIN_TOD_NSEC, TOD_ACC_PIN); + + val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN); + val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM); + val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_DELTA); + + ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN); + + spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags); + } else { + /* Fall back using ocelot_ptp_settime64 which is not exact. 
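Time elapsed
+ * between the gettime and settime calls below goes uncompensated.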
*/ + struct timespec64 ts; + u64 now; + + ocelot_ptp_gettime64(ptp, &ts); + + now = ktime_to_ns(timespec64_to_ktime(ts)); + ts = ns_to_timespec64(now + delta); + + ocelot_ptp_settime64(ptp, &ts); + } + return 0; +} + +static int ocelot_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info); + u32 unit = 0, direction = 0; + unsigned long flags; + u64 adj = 0; + + spin_lock_irqsave(&ocelot->ptp_clock_lock, flags); + + if (!scaled_ppm) + goto disable_adj; + + if (scaled_ppm < 0) { + direction = PTP_CFG_CLK_ADJ_CFG_DIR; + scaled_ppm = -scaled_ppm; + } + + adj = PSEC_PER_SEC << 16; + do_div(adj, scaled_ppm); + do_div(adj, 1000); + + /* If the adjustment value is too large, use ns instead */ + if (adj >= (1L << 30)) { + unit = PTP_CFG_CLK_ADJ_FREQ_NS; + do_div(adj, 1000); + } + + /* Still too big */ + if (adj >= (1L << 30)) + goto disable_adj; + + ocelot_write(ocelot, unit | adj, PTP_CLK_CFG_ADJ_FREQ); + ocelot_write(ocelot, PTP_CFG_CLK_ADJ_CFG_ENA | direction, + PTP_CLK_CFG_ADJ_CFG); + + spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags); + return 0; + +disable_adj: + ocelot_write(ocelot, 0, PTP_CLK_CFG_ADJ_CFG); + + spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags); + return 0; +} + +static struct ptp_clock_info ocelot_ptp_clock_info = { + .owner = THIS_MODULE, + .name = "ocelot ptp", + .max_adj = 0x7fffffff, + .n_alarm = 0, + .n_ext_ts = 0, + .n_per_out = 0, + .n_pins = 0, + .pps = 0, + .gettime64 = ocelot_ptp_gettime64, + .settime64 = ocelot_ptp_settime64, + .adjtime = ocelot_ptp_adjtime, + .adjfine = ocelot_ptp_adjfine, +}; + +static int ocelot_init_timestamp(struct ocelot *ocelot) +{ + ocelot->ptp_info = ocelot_ptp_clock_info; + ocelot->ptp_clock = ptp_clock_register(&ocelot->ptp_info, ocelot->dev); + if (IS_ERR(ocelot->ptp_clock)) + return PTR_ERR(ocelot->ptp_clock); + /* Check if PHC support is missing at the configuration level */ + if (!ocelot->ptp_clock) + return 0; + + ocelot_write(ocelot, SYS_PTP_CFG_PTP_STAMP_WID(30), SYS_PTP_CFG); + ocelot_write(ocelot, 0xffffffff, ANA_TABLES_PTP_ID_LOW); + ocelot_write(ocelot, 0xffffffff, ANA_TABLES_PTP_ID_HIGH); + + ocelot_write(ocelot, PTP_CFG_MISC_PTP_EN, PTP_CFG_MISC); + + /* There is no device reconfiguration, PTP Rx stamping is always + * enabled. 
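+ * Requests for a specific PTP filter are therefore reported back as
+ * PTP_V2_EVENT.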
+ */ + ocelot->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + + return 0; +} + int ocelot_probe_port(struct ocelot *ocelot, u8 port, void __iomem *regs, struct phy_device *phy) @@ -1661,6 +2025,8 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port, ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, ocelot_port->pvid, ENTRYTYPE_LOCKED); + INIT_LIST_HEAD(&ocelot_port->skbs); + err = register_netdev(dev); if (err) { dev_err(ocelot->dev, "register_netdev failed\n"); @@ -1684,7 +2050,7 @@ EXPORT_SYMBOL(ocelot_probe_port); int ocelot_init(struct ocelot *ocelot) { u32 port; - int i, cpu = ocelot->num_phys_ports; + int i, ret, cpu = ocelot->num_phys_ports; char queue_name[32]; ocelot->lags = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports, @@ -1699,6 +2065,8 @@ int ocelot_init(struct ocelot *ocelot) return -ENOMEM; mutex_init(&ocelot->stats_lock); + mutex_init(&ocelot->ptp_lock); + spin_lock_init(&ocelot->ptp_clock_lock); snprintf(queue_name, sizeof(queue_name), "%s-stats", dev_name(ocelot->dev)); ocelot->stats_queue = create_singlethread_workqueue(queue_name); @@ -1812,16 +2180,43 @@ int ocelot_init(struct ocelot *ocelot) INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats_work); queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work, OCELOT_STATS_CHECK_DELAY); + + if (ocelot->ptp) { + ret = ocelot_init_timestamp(ocelot); + if (ret) { + dev_err(ocelot->dev, + "Timestamp initialization failed\n"); + return ret; + } + } + return 0; } EXPORT_SYMBOL(ocelot_init); void ocelot_deinit(struct ocelot *ocelot) { + struct list_head *pos, *tmp; + struct ocelot_port *port; + struct ocelot_skb *entry; + int i; + cancel_delayed_work(&ocelot->stats_work); destroy_workqueue(ocelot->stats_queue); mutex_destroy(&ocelot->stats_lock); ocelot_ace_deinit(); + + for (i = 0; i < ocelot->num_phys_ports; i++) { + port = ocelot->ports[i]; + + list_for_each_safe(pos, tmp, &port->skbs) { + entry = list_entry(pos, struct ocelot_skb, head); + + list_del(pos); + dev_kfree_skb_any(entry->skb); + kfree(entry); + } + } } EXPORT_SYMBOL(ocelot_deinit); diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h index f7eeb4806897..e40773c01a44 100644 --- a/drivers/net/ethernet/mscc/ocelot.h +++ b/drivers/net/ethernet/mscc/ocelot.h @@ -11,9 +11,11 @@ #include <linux/bitops.h> #include <linux/etherdevice.h> #include <linux/if_vlan.h> +#include <linux/net_tstamp.h> #include <linux/phy.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> +#include <linux/ptp_clock_kernel.h> #include <linux/regmap.h> #include "ocelot_ana.h" @@ -23,6 +25,7 @@ #include "ocelot_sys.h" #include "ocelot_qs.h" #include "ocelot_tc.h" +#include "ocelot_ptp.h" #define PGID_AGGR 64 #define PGID_SRC 80 @@ -38,14 +41,17 @@ #define OCELOT_STATS_CHECK_DELAY (2 * HZ) +#define OCELOT_PTP_QUEUE_SZ 128 + #define IFH_LEN 4 struct frame_info { u32 len; u16 port; u16 vid; - u8 cpuq; u8 tag_type; + u16 rew_op; + u32 timestamp; /* rew_val */ }; #define IFH_INJ_BYPASS BIT(31) @@ -54,6 +60,12 @@ struct frame_info { #define IFH_TAG_TYPE_C 0 #define IFH_TAG_TYPE_S 1 +#define IFH_REW_OP_NOOP 0x0 +#define IFH_REW_OP_DSCP 0x1 +#define IFH_REW_OP_ONE_STEP_PTP 0x2 +#define IFH_REW_OP_TWO_STEP_PTP 0x3 +#define IFH_REW_OP_ORIGIN_PTP 0x5 + #define OCELOT_SPEED_2500 0 #define OCELOT_SPEED_1000 1 #define OCELOT_SPEED_100 2 @@ -71,6 +83,7 @@ enum ocelot_target { SYS, S2, HSIO, + PTP, TARGET_MAX, }; @@ -343,6 +356,13 @@ enum ocelot_reg { S2_CACHE_ACTION_DAT, S2_CACHE_CNT_DAT, S2_CACHE_TG_DAT, + PTP_PIN_CFG = PTP << 
TARGET_OFFSET, + PTP_PIN_TOD_SEC_MSB, + PTP_PIN_TOD_SEC_LSB, + PTP_PIN_TOD_NSEC, + PTP_CFG_MISC, + PTP_CLK_CFG_ADJ_CFG, + PTP_CLK_CFG_ADJ_FREQ, }; enum ocelot_regfield { @@ -393,6 +413,13 @@ enum ocelot_regfield { REGFIELD_MAX }; +enum ocelot_clk_pins { + ALT_PPS_PIN = 1, + EXT_CLK_PIN, + ALT_LDST_PIN, + TOD_ACC_PIN +}; + struct ocelot_multicast { struct list_head list; unsigned char addr[ETH_ALEN]; @@ -442,6 +469,13 @@ struct ocelot { u64 *stats; struct delayed_work stats_work; struct workqueue_struct *stats_queue; + + u8 ptp:1; + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_info; + struct hwtstamp_config hwtstamp_config; + struct mutex ptp_lock; /* Protects the PTP interface state */ + spinlock_t ptp_clock_lock; /* Protects the PTP clock */ }; struct ocelot_port { @@ -465,6 +499,16 @@ struct ocelot_port { struct phy *serdes; struct ocelot_port_tc tc; + + u8 ptp_cmd; + struct list_head skbs; + u8 ts_id; +}; + +struct ocelot_skb { + struct list_head head; + struct sk_buff *skb; + u8 id; }; u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset); @@ -509,4 +553,7 @@ extern struct notifier_block ocelot_netdevice_nb; extern struct notifier_block ocelot_switchdev_nb; extern struct notifier_block ocelot_switchdev_blocking_nb; +int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts); +void ocelot_get_hwtimestamp(struct ocelot *ocelot, struct timespec64 *ts); + #endif diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c index 2451d4a96490..b063eb78fa0c 100644 --- a/drivers/net/ethernet/mscc/ocelot_board.c +++ b/drivers/net/ethernet/mscc/ocelot_board.c @@ -16,24 +16,27 @@ #include "ocelot.h" -static int ocelot_parse_ifh(u32 *ifh, struct frame_info *info) +#define IFH_EXTRACT_BITFIELD64(x, o, w) (((x) >> (o)) & GENMASK_ULL((w) - 1, 0)) + +static int ocelot_parse_ifh(u32 *_ifh, struct frame_info *info) { - int i; u8 llen, wlen; + u64 ifh[2]; + + ifh[0] = be64_to_cpu(((__force __be64 *)_ifh)[0]); + ifh[1] = be64_to_cpu(((__force __be64 *)_ifh)[1]); - /* The IFH is in network order, switch to CPU order */ - for (i = 0; i < IFH_LEN; i++) - ifh[i] = ntohl((__force __be32)ifh[i]); + wlen = IFH_EXTRACT_BITFIELD64(ifh[0], 7, 8); + llen = IFH_EXTRACT_BITFIELD64(ifh[0], 15, 6); - wlen = (ifh[1] >> 7) & 0xff; - llen = (ifh[1] >> 15) & 0x3f; info->len = OCELOT_BUFFER_CELL_SZ * wlen + llen - 80; - info->port = (ifh[2] & GENMASK(14, 11)) >> 11; + info->timestamp = IFH_EXTRACT_BITFIELD64(ifh[0], 21, 32); + + info->port = IFH_EXTRACT_BITFIELD64(ifh[1], 43, 4); - info->cpuq = (ifh[3] & GENMASK(27, 20)) >> 20; - info->tag_type = (ifh[3] & BIT(16)) >> 16; - info->vid = ifh[3] & GENMASK(11, 0); + info->tag_type = IFH_EXTRACT_BITFIELD64(ifh[1], 16, 1); + info->vid = IFH_EXTRACT_BITFIELD64(ifh[1], 0, 12); return 0; } @@ -91,13 +94,14 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg) return IRQ_NONE; do { - struct sk_buff *skb; + struct skb_shared_hwtstamps *shhwtstamps; + u64 tod_in_ns, full_ts_in_ns; + struct frame_info info = {}; struct net_device *dev; - u32 *buf; + u32 ifh[4], val, *buf; + struct timespec64 ts; int sz, len, buf_len; - u32 ifh[4]; - u32 val; - struct frame_info info; + struct sk_buff *skb; for (i = 0; i < IFH_LEN; i++) { err = ocelot_rx_frame_word(ocelot, grp, true, &ifh[i]); @@ -144,6 +148,22 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg) break; } + if (ocelot->ptp) { + ocelot_ptp_gettime64(&ocelot->ptp_info, &ts); + + tod_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec); + if ((tod_in_ns & 
0xffffffff) < info.timestamp) + full_ts_in_ns = (((tod_in_ns >> 32) - 1) << 32) | + info.timestamp; + else + full_ts_in_ns = (tod_in_ns & GENMASK_ULL(63, 32)) | + info.timestamp; + + shhwtstamps = skb_hwtstamps(skb); + memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps)); + shhwtstamps->hwtstamp = full_ts_in_ns; + } + /* Everything we see on an interface that is in the HW bridge * has already been forwarded. */ @@ -163,6 +183,66 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg) return IRQ_HANDLED; } +static irqreturn_t ocelot_ptp_rdy_irq_handler(int irq, void *arg) +{ + int budget = OCELOT_PTP_QUEUE_SZ; + struct ocelot *ocelot = arg; + + while (budget--) { + struct skb_shared_hwtstamps shhwtstamps; + struct list_head *pos, *tmp; + struct sk_buff *skb = NULL; + struct ocelot_skb *entry; + struct ocelot_port *port; + struct timespec64 ts; + u32 val, id, txport; + + val = ocelot_read(ocelot, SYS_PTP_STATUS); + + /* Check if a timestamp can be retrieved */ + if (!(val & SYS_PTP_STATUS_PTP_MESS_VLD)) + break; + + WARN_ON(val & SYS_PTP_STATUS_PTP_OVFL); + + /* Retrieve the ts ID and Tx port */ + id = SYS_PTP_STATUS_PTP_MESS_ID_X(val); + txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val); + + /* Retrieve its associated skb */ + port = ocelot->ports[txport]; + + list_for_each_safe(pos, tmp, &port->skbs) { + entry = list_entry(pos, struct ocelot_skb, head); + if (entry->id != id) + continue; + + skb = entry->skb; + + list_del(pos); + kfree(entry); + } + + /* Next ts */ + ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT); + + if (unlikely(!skb)) + continue; + + /* Get the h/w timestamp */ + ocelot_get_hwtimestamp(ocelot, &ts); + + /* Set the timestamp into the skb */ + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec); + skb_tstamp_tx(skb, &shhwtstamps); + + dev_kfree_skb_any(skb); + } + + return IRQ_HANDLED; +} + static const struct of_device_id mscc_ocelot_match[] = { { .compatible = "mscc,vsc7514-switch" }, { } @@ -171,17 +251,18 @@ MODULE_DEVICE_TABLE(of, mscc_ocelot_match); static int mscc_ocelot_probe(struct platform_device *pdev) { - int err, irq; - unsigned int i; struct device_node *np = pdev->dev.of_node; struct device_node *ports, *portnp; + int err, irq_xtr, irq_ptp_rdy; struct ocelot *ocelot; struct regmap *hsio; + unsigned int i; u32 val; struct { enum ocelot_target id; char *name; + u8 optional:1; } res[] = { { SYS, "sys" }, { REW, "rew" }, @@ -189,6 +270,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev) { ANA, "ana" }, { QS, "qs" }, { S2, "s2" }, + { PTP, "ptp", 1 }, }; if (!np && !pdev->dev.platform_data) @@ -205,8 +287,14 @@ static int mscc_ocelot_probe(struct platform_device *pdev) struct regmap *target; target = ocelot_io_platform_init(ocelot, pdev, res[i].name); - if (IS_ERR(target)) + if (IS_ERR(target)) { + if (res[i].optional) { + ocelot->targets[res[i].id] = NULL; + continue; + } + return PTR_ERR(target); + } ocelot->targets[res[i].id] = target; } @@ -223,16 +311,29 @@ static int mscc_ocelot_probe(struct platform_device *pdev) if (err) return err; - irq = platform_get_irq_byname(pdev, "xtr"); - if (irq < 0) + irq_xtr = platform_get_irq_byname(pdev, "xtr"); + if (irq_xtr < 0) return -ENODEV; - err = devm_request_threaded_irq(&pdev->dev, irq, NULL, + err = devm_request_threaded_irq(&pdev->dev, irq_xtr, NULL, ocelot_xtr_irq_handler, IRQF_ONESHOT, "frame extraction", ocelot); if (err) return err; + irq_ptp_rdy = platform_get_irq_byname(pdev, "ptp_rdy"); + if (irq_ptp_rdy > 0 && 
ocelot->targets[PTP]) { + err = devm_request_threaded_irq(&pdev->dev, irq_ptp_rdy, NULL, + ocelot_ptp_rdy_irq_handler, + IRQF_ONESHOT, "ptp ready", + ocelot); + if (err) + return err; + + /* Both the PTP interrupt and the PTP bank are available */ + ocelot->ptp = 1; + } + regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], 1); regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1); diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.h b/drivers/net/ethernet/mscc/ocelot_ptp.h new file mode 100644 index 000000000000..9ede14a12573 --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_ptp.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Microsemi Ocelot Switch driver + * + * License: Dual MIT/GPL + * Copyright (c) 2017 Microsemi Corporation + */ + +#ifndef _MSCC_OCELOT_PTP_H_ +#define _MSCC_OCELOT_PTP_H_ + +#define PTP_PIN_CFG_RSZ 0x20 +#define PTP_PIN_TOD_SEC_MSB_RSZ PTP_PIN_CFG_RSZ +#define PTP_PIN_TOD_SEC_LSB_RSZ PTP_PIN_CFG_RSZ +#define PTP_PIN_TOD_NSEC_RSZ PTP_PIN_CFG_RSZ + +#define PTP_PIN_CFG_DOM BIT(0) +#define PTP_PIN_CFG_SYNC BIT(2) +#define PTP_PIN_CFG_ACTION(x) ((x) << 3) +#define PTP_PIN_CFG_ACTION_MASK PTP_PIN_CFG_ACTION(0x7) + +enum { + PTP_PIN_ACTION_IDLE = 0, + PTP_PIN_ACTION_LOAD, + PTP_PIN_ACTION_SAVE, + PTP_PIN_ACTION_CLOCK, + PTP_PIN_ACTION_DELTA, + PTP_PIN_ACTION_NOSYNC, + PTP_PIN_ACTION_SYNC, +}; + +#define PTP_CFG_MISC_PTP_EN BIT(2) + +#define PSEC_PER_SEC 1000000000000LL + +#define PTP_CFG_CLK_ADJ_CFG_ENA BIT(0) +#define PTP_CFG_CLK_ADJ_CFG_DIR BIT(1) + +#define PTP_CFG_CLK_ADJ_FREQ_NS BIT(30) + +#endif diff --git a/drivers/net/ethernet/mscc/ocelot_regs.c b/drivers/net/ethernet/mscc/ocelot_regs.c index 6c387f994ec5..e59977d20400 100644 --- a/drivers/net/ethernet/mscc/ocelot_regs.c +++ b/drivers/net/ethernet/mscc/ocelot_regs.c @@ -234,6 +234,16 @@ static const u32 ocelot_s2_regmap[] = { REG(S2_CACHE_TG_DAT, 0x000388), }; +static const u32 ocelot_ptp_regmap[] = { + REG(PTP_PIN_CFG, 0x000000), + REG(PTP_PIN_TOD_SEC_MSB, 0x000004), + REG(PTP_PIN_TOD_SEC_LSB, 0x000008), + REG(PTP_PIN_TOD_NSEC, 0x00000c), + REG(PTP_CFG_MISC, 0x0000a0), + REG(PTP_CLK_CFG_ADJ_CFG, 0x0000a4), + REG(PTP_CLK_CFG_ADJ_FREQ, 0x0000a8), +}; + static const u32 *ocelot_regmap[] = { [ANA] = ocelot_ana_regmap, [QS] = ocelot_qs_regmap, @@ -241,6 +251,7 @@ static const u32 *ocelot_regmap[] = { [REW] = ocelot_rew_regmap, [SYS] = ocelot_sys_regmap, [S2] = ocelot_s2_regmap, + [PTP] = ocelot_ptp_regmap, }; static const struct reg_field ocelot_regfields[] = { diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 337b0cbfd153..c979f38a2e0c 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -1286,7 +1286,7 @@ myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb) { u8 *va; struct vlan_ethhdr *veh; - struct skb_frag_struct *frag; + skb_frag_t *frag; __wsum vsum; va = addr; @@ -1306,8 +1306,8 @@ myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb) skb->len -= VLAN_HLEN; skb->data_len -= VLAN_HLEN; frag = skb_shinfo(skb)->frags; - frag->page_offset += VLAN_HLEN; - skb_frag_size_set(frag, skb_frag_size(frag) - VLAN_HLEN); + skb_frag_off_add(frag, VLAN_HLEN); + skb_frag_size_sub(frag, VLAN_HLEN); } } @@ -1318,7 +1318,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum) { struct myri10ge_priv *mgp = ss->mgp; struct sk_buff *skb; - struct skb_frag_struct *rx_frags; + skb_frag_t 
*rx_frags; struct myri10ge_rx_buf *rx; int i, idx, remainder, bytes; struct pci_dev *pdev = mgp->pdev; @@ -1351,7 +1351,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum) return 0; } rx_frags = skb_shinfo(skb)->frags; - /* Fill skb_frag_struct(s) with data from our receive */ + /* Fill skb_frag_t(s) with data from our receive */ for (i = 0, remainder = len; remainder > 0; i++) { myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes); skb_fill_page_desc(skb, i, rx->info[idx].page, @@ -1364,8 +1364,8 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum) } /* remove padding */ - rx_frags[0].page_offset += MXGEFW_PAD; - rx_frags[0].size -= MXGEFW_PAD; + skb_frag_off_add(&rx_frags[0], MXGEFW_PAD); + skb_frag_size_sub(&rx_frags[0], MXGEFW_PAD); len -= MXGEFW_PAD; skb->len = len; @@ -2628,7 +2628,7 @@ static netdev_tx_t myri10ge_xmit(struct sk_buff *skb, struct myri10ge_slice_state *ss; struct mcp_kreq_ether_send *req; struct myri10ge_tx_buf *tx; - struct skb_frag_struct *frag; + skb_frag_t *frag; struct netdev_queue *netdev_queue; dma_addr_t bus; u32 low; @@ -3037,7 +3037,6 @@ static int myri10ge_set_mac_address(struct net_device *dev, void *addr) static int myri10ge_change_mtu(struct net_device *dev, int new_mtu) { struct myri10ge_priv *mgp = netdev_priv(dev); - int error = 0; netdev_info(dev, "changing mtu from %d to %d\n", dev->mtu, new_mtu); if (mgp->running) { @@ -3049,7 +3048,7 @@ static int myri10ge_change_mtu(struct net_device *dev, int new_mtu) } else dev->mtu = new_mtu; - return error; + return 0; } /* diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile index 2805641965f3..d31772ae511d 100644 --- a/drivers/net/ethernet/netronome/nfp/Makefile +++ b/drivers/net/ethernet/netronome/nfp/Makefile @@ -17,6 +17,7 @@ nfp-objs := \ nfpcore/nfp_target.o \ ccm.o \ ccm_mbox.o \ + devlink_param.o \ nfp_asm.o \ nfp_app.o \ nfp_app_nic.o \ diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c index bc9850e4ec5e..0e2db6ea79e9 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c @@ -6,6 +6,7 @@ #include <linux/bug.h> #include <linux/jiffies.h> #include <linux/skbuff.h> +#include <linux/timekeeping.h> #include "../ccm.h" #include "../nfp_app.h" @@ -175,29 +176,151 @@ nfp_bpf_ctrl_reply_val(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply, return &reply->data[bpf->cmsg_key_sz * (n + 1) + bpf->cmsg_val_sz * n]; } +static bool nfp_bpf_ctrl_op_cache_invalidate(enum nfp_ccm_type op) +{ + return op == NFP_CCM_TYPE_BPF_MAP_UPDATE || + op == NFP_CCM_TYPE_BPF_MAP_DELETE; +} + +static bool nfp_bpf_ctrl_op_cache_capable(enum nfp_ccm_type op) +{ + return op == NFP_CCM_TYPE_BPF_MAP_LOOKUP || + op == NFP_CCM_TYPE_BPF_MAP_GETNEXT; +} + +static bool nfp_bpf_ctrl_op_cache_fill(enum nfp_ccm_type op) +{ + return op == NFP_CCM_TYPE_BPF_MAP_GETFIRST || + op == NFP_CCM_TYPE_BPF_MAP_GETNEXT; +} + +static unsigned int +nfp_bpf_ctrl_op_cache_get(struct nfp_bpf_map *nfp_map, enum nfp_ccm_type op, + const u8 *key, u8 *out_key, u8 *out_value, + u32 *cache_gen) +{ + struct bpf_map *map = &nfp_map->offmap->map; + struct nfp_app_bpf *bpf = nfp_map->bpf; + unsigned int i, count, n_entries; + struct cmsg_reply_map_op *reply; + + n_entries = nfp_bpf_ctrl_op_cache_fill(op) ? 
bpf->cmsg_cache_cnt : 1; + + spin_lock(&nfp_map->cache_lock); + *cache_gen = nfp_map->cache_gen; + if (nfp_map->cache_blockers) + n_entries = 1; + + if (nfp_bpf_ctrl_op_cache_invalidate(op)) + goto exit_block; + if (!nfp_bpf_ctrl_op_cache_capable(op)) + goto exit_unlock; + + if (!nfp_map->cache) + goto exit_unlock; + if (nfp_map->cache_to < ktime_get_ns()) + goto exit_invalidate; + + reply = (void *)nfp_map->cache->data; + count = be32_to_cpu(reply->count); + + for (i = 0; i < count; i++) { + void *cached_key; + + cached_key = nfp_bpf_ctrl_reply_key(bpf, reply, i); + if (memcmp(cached_key, key, map->key_size)) + continue; + + if (op == NFP_CCM_TYPE_BPF_MAP_LOOKUP) + memcpy(out_value, nfp_bpf_ctrl_reply_val(bpf, reply, i), + map->value_size); + if (op == NFP_CCM_TYPE_BPF_MAP_GETNEXT) { + if (i + 1 == count) + break; + + memcpy(out_key, + nfp_bpf_ctrl_reply_key(bpf, reply, i + 1), + map->key_size); + } + + n_entries = 0; + goto exit_unlock; + } + goto exit_unlock; + +exit_block: + nfp_map->cache_blockers++; +exit_invalidate: + dev_consume_skb_any(nfp_map->cache); + nfp_map->cache = NULL; +exit_unlock: + spin_unlock(&nfp_map->cache_lock); + return n_entries; +} + +static void +nfp_bpf_ctrl_op_cache_put(struct nfp_bpf_map *nfp_map, enum nfp_ccm_type op, + struct sk_buff *skb, u32 cache_gen) +{ + bool blocker, filler; + + blocker = nfp_bpf_ctrl_op_cache_invalidate(op); + filler = nfp_bpf_ctrl_op_cache_fill(op); + if (blocker || filler) { + u64 to = 0; + + if (filler) + to = ktime_get_ns() + NFP_BPF_MAP_CACHE_TIME_NS; + + spin_lock(&nfp_map->cache_lock); + if (blocker) { + nfp_map->cache_blockers--; + nfp_map->cache_gen++; + } + if (filler && !nfp_map->cache_blockers && + nfp_map->cache_gen == cache_gen) { + nfp_map->cache_to = to; + swap(nfp_map->cache, skb); + } + spin_unlock(&nfp_map->cache_lock); + } + + dev_consume_skb_any(skb); +} + static int nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap, enum nfp_ccm_type op, u8 *key, u8 *value, u64 flags, u8 *out_key, u8 *out_value) { struct nfp_bpf_map *nfp_map = offmap->dev_priv; + unsigned int n_entries, reply_entries, count; struct nfp_app_bpf *bpf = nfp_map->bpf; struct bpf_map *map = &offmap->map; struct cmsg_reply_map_op *reply; struct cmsg_req_map_op *req; struct sk_buff *skb; + u32 cache_gen; int err; /* FW messages have no space for more than 32 bits of flags */ if (flags >> 32) return -EOPNOTSUPP; + /* Handle op cache */ + n_entries = nfp_bpf_ctrl_op_cache_get(nfp_map, op, key, out_key, + out_value, &cache_gen); + if (!n_entries) + return 0; + skb = nfp_bpf_cmsg_map_req_alloc(bpf, 1); - if (!skb) - return -ENOMEM; + if (!skb) { + err = -ENOMEM; + goto err_cache_put; + } req = (void *)skb->data; req->tid = cpu_to_be32(nfp_map->tid); - req->count = cpu_to_be32(1); + req->count = cpu_to_be32(n_entries); req->flags = cpu_to_be32(flags); /* Copy inputs */ @@ -207,16 +330,38 @@ nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap, enum nfp_ccm_type op, memcpy(nfp_bpf_ctrl_req_val(bpf, req, 0), value, map->value_size); - skb = nfp_ccm_communicate(&bpf->ccm, skb, op, - nfp_bpf_cmsg_map_reply_size(bpf, 1)); - if (IS_ERR(skb)) - return PTR_ERR(skb); + skb = nfp_ccm_communicate(&bpf->ccm, skb, op, 0); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + goto err_cache_put; + } + + if (skb->len < sizeof(*reply)) { + cmsg_warn(bpf, "cmsg drop - type 0x%02x too short %d!\n", + op, skb->len); + err = -EIO; + goto err_free; + } reply = (void *)skb->data; + count = be32_to_cpu(reply->count); err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr); + /* FW 
responds with message sized to hold the good entries, + * plus one extra entry if there was an error. + */ + reply_entries = count + !!err; + if (n_entries > 1 && count) + err = 0; if (err) goto err_free; + if (skb->len != nfp_bpf_cmsg_map_reply_size(bpf, reply_entries)) { + cmsg_warn(bpf, "cmsg drop - type 0x%02x too short %d for %d entries!\n", + op, skb->len, reply_entries); + err = -EIO; + goto err_free; + } + /* Copy outputs */ if (out_key) memcpy(out_key, nfp_bpf_ctrl_reply_key(bpf, reply, 0), @@ -225,11 +370,13 @@ nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap, enum nfp_ccm_type op, memcpy(out_value, nfp_bpf_ctrl_reply_val(bpf, reply, 0), map->value_size); - dev_consume_skb_any(skb); + nfp_bpf_ctrl_op_cache_put(nfp_map, op, skb, cache_gen); return 0; err_free: dev_kfree_skb_any(skb); +err_cache_put: + nfp_bpf_ctrl_op_cache_put(nfp_map, op, NULL, cache_gen); return err; } @@ -267,11 +414,29 @@ int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap, key, NULL, 0, next_key, NULL); } +unsigned int nfp_bpf_ctrl_cmsg_min_mtu(struct nfp_app_bpf *bpf) +{ + return max(nfp_bpf_cmsg_map_req_size(bpf, 1), + nfp_bpf_cmsg_map_reply_size(bpf, 1)); +} + unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf) { - return max3((unsigned int)NFP_NET_DEFAULT_MTU, - nfp_bpf_cmsg_map_req_size(bpf, 1), - nfp_bpf_cmsg_map_reply_size(bpf, 1)); + return max3(NFP_NET_DEFAULT_MTU, + nfp_bpf_cmsg_map_req_size(bpf, NFP_BPF_MAP_CACHE_CNT), + nfp_bpf_cmsg_map_reply_size(bpf, NFP_BPF_MAP_CACHE_CNT)); +} + +unsigned int nfp_bpf_ctrl_cmsg_cache_cnt(struct nfp_app_bpf *bpf) +{ + unsigned int mtu, req_max, reply_max, entry_sz; + + mtu = bpf->app->ctrl->dp.mtu; + entry_sz = bpf->cmsg_key_sz + bpf->cmsg_val_sz; + req_max = (mtu - sizeof(struct cmsg_req_map_op)) / entry_sz; + reply_max = (mtu - sizeof(struct cmsg_reply_map_op)) / entry_sz; + + return min3(req_max, reply_max, NFP_BPF_MAP_CACHE_CNT); } void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb) diff --git a/drivers/net/ethernet/netronome/nfp/bpf/fw.h b/drivers/net/ethernet/netronome/nfp/bpf/fw.h index 06c4286bd79e..a83a0ad5e27d 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/fw.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/fw.h @@ -24,6 +24,7 @@ enum bpf_cap_tlv_type { NFP_BPF_CAP_TYPE_QUEUE_SELECT = 5, NFP_BPF_CAP_TYPE_ADJUST_TAIL = 6, NFP_BPF_CAP_TYPE_ABI_VERSION = 7, + NFP_BPF_CAP_TYPE_CMSG_MULTI_ENT = 8, }; struct nfp_bpf_cap_tlv_func { diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index 1c9fb11470df..8f732771d3fa 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -300,6 +300,14 @@ nfp_bpf_parse_cap_adjust_tail(struct nfp_app_bpf *bpf, void __iomem *value, } static int +nfp_bpf_parse_cap_cmsg_multi_ent(struct nfp_app_bpf *bpf, void __iomem *value, + u32 length) +{ + bpf->cmsg_multi_ent = true; + return 0; +} + +static int nfp_bpf_parse_cap_abi_version(struct nfp_app_bpf *bpf, void __iomem *value, u32 length) { @@ -375,6 +383,11 @@ static int nfp_bpf_parse_capabilities(struct nfp_app *app) length)) goto err_release_free; break; + case NFP_BPF_CAP_TYPE_CMSG_MULTI_ENT: + if (nfp_bpf_parse_cap_cmsg_multi_ent(app->priv, value, + length)) + goto err_release_free; + break; default: nfp_dbg(cpp, "unknown BPF capability: %d\n", type); break; @@ -415,6 +428,25 @@ static void nfp_bpf_ndo_uninit(struct nfp_app *app, struct net_device *netdev) bpf_offload_dev_netdev_unregister(bpf->bpf_dev, netdev); } +static int 
nfp_bpf_start(struct nfp_app *app) +{ + struct nfp_app_bpf *bpf = app->priv; + + if (app->ctrl->dp.mtu < nfp_bpf_ctrl_cmsg_min_mtu(bpf)) { + nfp_err(bpf->app->cpp, + "ctrl channel MTU below min required %u < %u\n", + app->ctrl->dp.mtu, nfp_bpf_ctrl_cmsg_min_mtu(bpf)); + return -EINVAL; + } + + if (bpf->cmsg_multi_ent) + bpf->cmsg_cache_cnt = nfp_bpf_ctrl_cmsg_cache_cnt(bpf); + else + bpf->cmsg_cache_cnt = 1; + + return 0; +} + static int nfp_bpf_init(struct nfp_app *app) { struct nfp_app_bpf *bpf; @@ -488,6 +520,7 @@ const struct nfp_app_type app_bpf = { .init = nfp_bpf_init, .clean = nfp_bpf_clean, + .start = nfp_bpf_start, .check_mtu = nfp_bpf_check_mtu, diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index 57d6ff51e980..fac9c6f9e197 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -99,6 +99,7 @@ enum pkt_vec { * @maps_neutral: hash table of offload-neutral maps (on pointer) * * @abi_version: global BPF ABI version + * @cmsg_cache_cnt: number of entries to read for caching * * @adjust_head: adjust head capability * @adjust_head.flags: extra flags for adjust head @@ -124,6 +125,7 @@ enum pkt_vec { * @pseudo_random: FW initialized the pseudo-random machinery (CSRs) * @queue_select: BPF can set the RX queue ID in packet vector * @adjust_tail: BPF can simply trunc packet size for adjust tail + * @cmsg_multi_ent: FW can pack multiple map entries in a single cmsg */ struct nfp_app_bpf { struct nfp_app *app; @@ -134,6 +136,8 @@ struct nfp_app_bpf { unsigned int cmsg_key_sz; unsigned int cmsg_val_sz; + unsigned int cmsg_cache_cnt; + struct list_head map_list; unsigned int maps_in_use; unsigned int map_elems_in_use; @@ -169,6 +173,7 @@ struct nfp_app_bpf { bool pseudo_random; bool queue_select; bool adjust_tail; + bool cmsg_multi_ent; }; enum nfp_bpf_map_use { @@ -183,11 +188,21 @@ struct nfp_bpf_map_word { unsigned char non_zero_update :1; }; +#define NFP_BPF_MAP_CACHE_CNT 4U +#define NFP_BPF_MAP_CACHE_TIME_NS (250 * 1000) + /** * struct nfp_bpf_map - private per-map data attached to BPF maps for offload * @offmap: pointer to the offloaded BPF map * @bpf: back pointer to bpf app private structure * @tid: table id identifying map on datapath + * + * @cache_lock: protects @cache_blockers, @cache_to, @cache + * @cache_blockers: number of ops in flight which block caching + * @cache_gen: counter incremented by every blocker on exit + * @cache_to: time when cache will no longer be valid (ns) + * @cache: skb with cached response + * * @l: link on the nfp_app_bpf->map_list list * @use_map: map of how the value is used (in 4B chunks) */ @@ -195,6 +210,13 @@ struct nfp_bpf_map { struct bpf_offloaded_map *offmap; struct nfp_app_bpf *bpf; u32 tid; + + spinlock_t cache_lock; + u32 cache_blockers; + u32 cache_gen; + u64 cache_to; + struct sk_buff *cache; + struct list_head l; struct nfp_bpf_map_word use_map[]; }; @@ -564,7 +586,9 @@ nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv); +unsigned int nfp_bpf_ctrl_cmsg_min_mtu(struct nfp_app_bpf *bpf); unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf); +unsigned int nfp_bpf_ctrl_cmsg_cache_cnt(struct nfp_app_bpf *bpf); long long int nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map); void diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index 
39c9fec222b4..88fab6a82acf 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -385,6 +385,7 @@ nfp_bpf_map_alloc(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap) offmap->dev_priv = nfp_map; nfp_map->offmap = offmap; nfp_map->bpf = bpf; + spin_lock_init(&nfp_map->cache_lock); res = nfp_bpf_ctrl_alloc_map(bpf, &offmap->map); if (res < 0) { @@ -407,6 +408,8 @@ nfp_bpf_map_free(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap) struct nfp_bpf_map *nfp_map = offmap->dev_priv; nfp_bpf_ctrl_free_map(bpf, nfp_map); + dev_consume_skb_any(nfp_map->cache); + WARN_ON_ONCE(nfp_map->cache_blockers); list_del_init(&nfp_map->l); bpf->map_elems_in_use -= offmap->map.max_entries; bpf->maps_in_use--; diff --git a/drivers/net/ethernet/netronome/nfp/devlink_param.c b/drivers/net/ethernet/netronome/nfp/devlink_param.c new file mode 100644 index 000000000000..36491835ac65 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/devlink_param.c @@ -0,0 +1,255 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2019 Netronome Systems, Inc. */ + +#include <net/devlink.h> + +#include "nfpcore/nfp.h" +#include "nfpcore/nfp_nsp.h" +#include "nfp_main.h" + +/** + * struct nfp_devlink_param_u8_arg - Devlink u8 parameter get/set arguments + * @hwinfo_name: HWinfo key name + * @default_hi_val: Default HWinfo value if HWinfo doesn't exist + * @invalid_dl_val: Devlink value to use if HWinfo is unknown/invalid. + * -errno if there is no unknown/invalid value available + * @hi_to_dl: HWinfo to devlink value mapping + * @dl_to_hi: Devlink to hwinfo value mapping + * @max_dl_val: Maximum devlink value supported, for validation only + * @max_hi_val: Maximum HWinfo value supported, for validation only + */ +struct nfp_devlink_param_u8_arg { + const char *hwinfo_name; + const char *default_hi_val; + int invalid_dl_val; + u8 hi_to_dl[4]; + u8 dl_to_hi[4]; + u8 max_dl_val; + u8 max_hi_val; +}; + +static const struct nfp_devlink_param_u8_arg nfp_devlink_u8_args[] = { + [DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY] = { + .hwinfo_name = "app_fw_from_flash", + .default_hi_val = NFP_NSP_APP_FW_LOAD_DEFAULT, + .invalid_dl_val = + DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_UNKNOWN, + .hi_to_dl = { + [NFP_NSP_APP_FW_LOAD_DISK] = + DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DISK, + [NFP_NSP_APP_FW_LOAD_FLASH] = + DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH, + [NFP_NSP_APP_FW_LOAD_PREF] = + DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER, + }, + .dl_to_hi = { + [DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER] = + NFP_NSP_APP_FW_LOAD_PREF, + [DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH] = + NFP_NSP_APP_FW_LOAD_FLASH, + [DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DISK] = + NFP_NSP_APP_FW_LOAD_DISK, + }, + .max_dl_val = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DISK, + .max_hi_val = NFP_NSP_APP_FW_LOAD_PREF, + }, + [DEVLINK_PARAM_GENERIC_ID_RESET_DEV_ON_DRV_PROBE] = { + .hwinfo_name = "abi_drv_reset", + .default_hi_val = NFP_NSP_DRV_RESET_DEFAULT, + .invalid_dl_val = + DEVLINK_PARAM_RESET_DEV_ON_DRV_PROBE_VALUE_UNKNOWN, + .hi_to_dl = { + [NFP_NSP_DRV_RESET_ALWAYS] = + DEVLINK_PARAM_RESET_DEV_ON_DRV_PROBE_VALUE_ALWAYS, + [NFP_NSP_DRV_RESET_NEVER] = + DEVLINK_PARAM_RESET_DEV_ON_DRV_PROBE_VALUE_NEVER, + [NFP_NSP_DRV_RESET_DISK] = + DEVLINK_PARAM_RESET_DEV_ON_DRV_PROBE_VALUE_DISK, + }, + .dl_to_hi = { + [DEVLINK_PARAM_RESET_DEV_ON_DRV_PROBE_VALUE_ALWAYS] = + NFP_NSP_DRV_RESET_ALWAYS, + [DEVLINK_PARAM_RESET_DEV_ON_DRV_PROBE_VALUE_NEVER] = + NFP_NSP_DRV_RESET_NEVER, + 
[DEVLINK_PARAM_RESET_DEV_ON_DRV_PROBE_VALUE_DISK] = + NFP_NSP_DRV_RESET_DISK, + }, + .max_dl_val = DEVLINK_PARAM_RESET_DEV_ON_DRV_PROBE_VALUE_DISK, + .max_hi_val = NFP_NSP_DRV_RESET_NEVER, + } +}; + +static int +nfp_devlink_param_u8_get(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + const struct nfp_devlink_param_u8_arg *arg; + struct nfp_pf *pf = devlink_priv(devlink); + struct nfp_nsp *nsp; + char hwinfo[32]; + long value; + int err; + + if (id >= ARRAY_SIZE(nfp_devlink_u8_args)) + return -EOPNOTSUPP; + + arg = &nfp_devlink_u8_args[id]; + + nsp = nfp_nsp_open(pf->cpp); + if (IS_ERR(nsp)) { + err = PTR_ERR(nsp); + nfp_warn(pf->cpp, "can't access NSP: %d\n", err); + return err; + } + + snprintf(hwinfo, sizeof(hwinfo), "%s", arg->hwinfo_name); + err = nfp_nsp_hwinfo_lookup_optional(nsp, hwinfo, sizeof(hwinfo), + arg->default_hi_val); + if (err) { + nfp_warn(pf->cpp, "HWinfo lookup failed: %d\n", err); + goto exit_close_nsp; + } + + err = kstrtol(hwinfo, 0, &value); + if (err || value < 0 || value > arg->max_hi_val) { + nfp_warn(pf->cpp, "HWinfo '%s' value %li invalid\n", + arg->hwinfo_name, value); + + if (arg->invalid_dl_val >= 0) + ctx->val.vu8 = arg->invalid_dl_val; + else + err = arg->invalid_dl_val; + + goto exit_close_nsp; + } + + ctx->val.vu8 = arg->hi_to_dl[value]; + +exit_close_nsp: + nfp_nsp_close(nsp); + return err; +} + +static int +nfp_devlink_param_u8_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + const struct nfp_devlink_param_u8_arg *arg; + struct nfp_pf *pf = devlink_priv(devlink); + struct nfp_nsp *nsp; + char hwinfo[32]; + int err; + + if (id >= ARRAY_SIZE(nfp_devlink_u8_args)) + return -EOPNOTSUPP; + + arg = &nfp_devlink_u8_args[id]; + + nsp = nfp_nsp_open(pf->cpp); + if (IS_ERR(nsp)) { + err = PTR_ERR(nsp); + nfp_warn(pf->cpp, "can't access NSP: %d\n", err); + return err; + } + + /* Note the value has already been validated.
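The getter above funnels a HWinfo string through parse, range check, and table lookup. A compact sketch of that step with a hypothetical three-value policy (the table contents here are invented; the real ones live in nfp_devlink_u8_args):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical HWinfo -> devlink mapping, standing in for hi_to_dl */
static const unsigned char hi_to_dl[4] = { 1, 2, 0 };
#define MAX_HI_VAL 2

/* Parse the HWinfo string and map it to a devlink value, as
 * nfp_devlink_param_u8_get() does after the NSP lookup.
 */
static int hwinfo_to_devlink(const char *hwinfo, unsigned char *dl_val)
{
    char *end;
    long v = strtol(hwinfo, &end, 0);

    if (end == hwinfo || *end || v < 0 || v > MAX_HI_VAL)
        return -EINVAL; /* driver then falls back to invalid_dl_val */

    *dl_val = hi_to_dl[v];
    return 0;
}

int main(void)
{
    unsigned char dl;

    if (!hwinfo_to_devlink("2", &dl))
        printf("devlink value %u\n", dl);
    return 0;
}

The set path applies the inverse dl_to_hi table before writing the key back through the NSP.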
*/ + snprintf(hwinfo, sizeof(hwinfo), "%s=%u", + arg->hwinfo_name, arg->dl_to_hi[ctx->val.vu8]); + err = nfp_nsp_hwinfo_set(nsp, hwinfo, sizeof(hwinfo)); + if (err) { + nfp_warn(pf->cpp, "HWinfo set failed: %d\n", err); + goto exit_close_nsp; + } + +exit_close_nsp: + nfp_nsp_close(nsp); + return err; +} + +static int +nfp_devlink_param_u8_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + const struct nfp_devlink_param_u8_arg *arg; + + if (id >= ARRAY_SIZE(nfp_devlink_u8_args)) + return -EOPNOTSUPP; + + arg = &nfp_devlink_u8_args[id]; + + if (val.vu8 > arg->max_dl_val) { + NL_SET_ERR_MSG_MOD(extack, "parameter out of range"); + return -EINVAL; + } + + if (val.vu8 == arg->invalid_dl_val) { + NL_SET_ERR_MSG_MOD(extack, "unknown/invalid value specified"); + return -EINVAL; + } + + return 0; +} + +static const struct devlink_param nfp_devlink_params[] = { + DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY, + BIT(DEVLINK_PARAM_CMODE_PERMANENT), + nfp_devlink_param_u8_get, + nfp_devlink_param_u8_set, + nfp_devlink_param_u8_validate), + DEVLINK_PARAM_GENERIC(RESET_DEV_ON_DRV_PROBE, + BIT(DEVLINK_PARAM_CMODE_PERMANENT), + nfp_devlink_param_u8_get, + nfp_devlink_param_u8_set, + nfp_devlink_param_u8_validate), +}; + +static int nfp_devlink_supports_params(struct nfp_pf *pf) +{ + struct nfp_nsp *nsp; + bool supported; + int err; + + nsp = nfp_nsp_open(pf->cpp); + if (IS_ERR(nsp)) { + err = PTR_ERR(nsp); + dev_err(&pf->pdev->dev, "Failed to access the NSP: %d\n", err); + return err; + } + + supported = nfp_nsp_has_hwinfo_lookup(nsp) && + nfp_nsp_has_hwinfo_set(nsp); + + nfp_nsp_close(nsp); + return supported; +} + +int nfp_devlink_params_register(struct nfp_pf *pf) +{ + struct devlink *devlink = priv_to_devlink(pf); + int err; + + err = nfp_devlink_supports_params(pf); + if (err <= 0) + return err; + + err = devlink_params_register(devlink, nfp_devlink_params, + ARRAY_SIZE(nfp_devlink_params)); + if (err) + return err; + + devlink_params_publish(devlink); + return 0; +} + +void nfp_devlink_params_unregister(struct nfp_pf *pf) +{ + int err; + + err = nfp_devlink_supports_params(pf); + if (err <= 0) + return; + + devlink_params_unregister(priv_to_devlink(pf), nfp_devlink_params, + ARRAY_SIZE(nfp_devlink_params)); +} diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index 5a54fe848de4..1b019fdfcd97 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -2,10 +2,12 @@ /* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #include <linux/bitfield.h> +#include <linux/mpls.h> #include <net/pkt_cls.h> #include <net/tc_act/tc_csum.h> #include <net/tc_act/tc_gact.h> #include <net/tc_act/tc_mirred.h> +#include <net/tc_act/tc_mpls.h> #include <net/tc_act/tc_pedit.h> #include <net/tc_act/tc_vlan.h> #include <net/tc_act/tc_tunnel_key.h> @@ -25,6 +27,80 @@ NFP_FL_TUNNEL_KEY | \ NFP_FL_TUNNEL_GENEVE_OPT) +static int +nfp_fl_push_mpls(struct nfp_fl_push_mpls *push_mpls, + const struct flow_action_entry *act, + struct netlink_ext_ack *extack) +{ + size_t act_size = sizeof(struct nfp_fl_push_mpls); + u32 mpls_lse = 0; + + push_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_MPLS; + push_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ; + + /* BOS is optional in the TC action but required for offload. 
*/ + if (act->mpls_push.bos != ACT_MPLS_BOS_NOT_SET) { + mpls_lse |= act->mpls_push.bos << MPLS_LS_S_SHIFT; + } else { + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: BOS field must explicitly be set for MPLS push"); + return -EOPNOTSUPP; + } + + /* Leave MPLS TC as a default value of 0 if not explicitly set. */ + if (act->mpls_push.tc != ACT_MPLS_TC_NOT_SET) + mpls_lse |= act->mpls_push.tc << MPLS_LS_TC_SHIFT; + + /* Proto, label and TTL are enforced and verified for MPLS push. */ + mpls_lse |= act->mpls_push.label << MPLS_LS_LABEL_SHIFT; + mpls_lse |= act->mpls_push.ttl << MPLS_LS_TTL_SHIFT; + push_mpls->ethtype = act->mpls_push.proto; + push_mpls->lse = cpu_to_be32(mpls_lse); + + return 0; +} + +static void +nfp_fl_pop_mpls(struct nfp_fl_pop_mpls *pop_mpls, + const struct flow_action_entry *act) +{ + size_t act_size = sizeof(struct nfp_fl_pop_mpls); + + pop_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_POP_MPLS; + pop_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ; + pop_mpls->ethtype = act->mpls_pop.proto; +} + +static void +nfp_fl_set_mpls(struct nfp_fl_set_mpls *set_mpls, + const struct flow_action_entry *act) +{ + size_t act_size = sizeof(struct nfp_fl_set_mpls); + u32 mpls_lse = 0, mpls_mask = 0; + + set_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_SET_MPLS; + set_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ; + + if (act->mpls_mangle.label != ACT_MPLS_LABEL_NOT_SET) { + mpls_lse |= act->mpls_mangle.label << MPLS_LS_LABEL_SHIFT; + mpls_mask |= MPLS_LS_LABEL_MASK; + } + if (act->mpls_mangle.tc != ACT_MPLS_TC_NOT_SET) { + mpls_lse |= act->mpls_mangle.tc << MPLS_LS_TC_SHIFT; + mpls_mask |= MPLS_LS_TC_MASK; + } + if (act->mpls_mangle.bos != ACT_MPLS_BOS_NOT_SET) { + mpls_lse |= act->mpls_mangle.bos << MPLS_LS_S_SHIFT; + mpls_mask |= MPLS_LS_S_MASK; + } + if (act->mpls_mangle.ttl) { + mpls_lse |= act->mpls_mangle.ttl << MPLS_LS_TTL_SHIFT; + mpls_mask |= MPLS_LS_TTL_MASK; + } + + set_mpls->lse = cpu_to_be32(mpls_lse); + set_mpls->lse_mask = cpu_to_be32(mpls_mask); +} + static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan) { size_t act_size = sizeof(struct nfp_fl_pop_vlan); @@ -97,7 +173,7 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output, struct nfp_fl_payload *nfp_flow, bool last, struct net_device *in_dev, enum nfp_flower_tun_type tun_type, int *tun_out_cnt, - struct netlink_ext_ack *extack) + bool pkt_host, struct netlink_ext_ack *extack) { size_t act_size = sizeof(struct nfp_fl_output); struct nfp_flower_priv *priv = app->priv; @@ -142,6 +218,20 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output, return gid; } output->port = cpu_to_be32(NFP_FL_LAG_OUT | gid); + } else if (nfp_flower_internal_port_can_offload(app, out_dev)) { + if (!(priv->flower_ext_feats & NFP_FL_FEATS_PRE_TUN_RULES)) { + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pre-tunnel rules not supported in loaded firmware"); + return -EOPNOTSUPP; + } + + if (nfp_flow->pre_tun_rule.dev || !pkt_host) { + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pre-tunnel rules require single egress dev and ptype HOST action"); + return -EOPNOTSUPP; + } + + nfp_flow->pre_tun_rule.dev = out_dev; + + return 0; } else { /* Set action output parameters. 
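nfp_fl_push_mpls() and nfp_fl_set_mpls() above pack the TC action fields into one 32-bit MPLS label stack entry. A minimal sketch of that packing, using the shift values from the uapi <linux/mpls.h> layout:

#include <stdint.h>
#include <stdio.h>

/* Shifts from the uapi <linux/mpls.h> label stack entry layout:
 * | label:20 | TC:3 | BOS:1 | TTL:8 |
 */
#define LS_LABEL_SHIFT 12
#define LS_TC_SHIFT 9
#define LS_S_SHIFT 8
#define LS_TTL_SHIFT 0

/* Assemble a label stack entry the way nfp_fl_push_mpls() does, before
 * the driver's cpu_to_be32() conversion.
 */
static uint32_t mpls_lse(uint32_t label, uint8_t tc, uint8_t bos, uint8_t ttl)
{
    return label << LS_LABEL_SHIFT | (uint32_t)tc << LS_TC_SHIFT |
           (uint32_t)bos << LS_S_SHIFT | (uint32_t)ttl << LS_TTL_SHIFT;
}

int main(void)
{
    /* label 100, TC 0, bottom of stack, TTL 64 -> 0x00064140 */
    printf("0x%08x\n", mpls_lse(100, 0, 1, 64));
    return 0;
}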
*/ output->flags = cpu_to_be16(tmp_flags); @@ -809,7 +899,7 @@ nfp_flower_output_action(struct nfp_app *app, struct nfp_fl_payload *nfp_fl, int *a_len, struct net_device *netdev, bool last, enum nfp_flower_tun_type *tun_type, int *tun_out_cnt, - int *out_cnt, u32 *csum_updated, + int *out_cnt, u32 *csum_updated, bool pkt_host, struct netlink_ext_ack *extack) { struct nfp_flower_priv *priv = app->priv; @@ -831,7 +921,7 @@ nfp_flower_output_action(struct nfp_app *app, output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len]; err = nfp_fl_output(app, output, act, nfp_fl, last, netdev, *tun_type, - tun_out_cnt, extack); + tun_out_cnt, pkt_host, extack); if (err) return err; @@ -863,30 +953,37 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act, struct net_device *netdev, enum nfp_flower_tun_type *tun_type, int *tun_out_cnt, int *out_cnt, u32 *csum_updated, - struct nfp_flower_pedit_acts *set_act, + struct nfp_flower_pedit_acts *set_act, bool *pkt_host, struct netlink_ext_ack *extack, int act_idx) { struct nfp_fl_set_ipv4_tun *set_tun; struct nfp_fl_pre_tunnel *pre_tun; struct nfp_fl_push_vlan *psh_v; + struct nfp_fl_push_mpls *psh_m; struct nfp_fl_pop_vlan *pop_v; + struct nfp_fl_pop_mpls *pop_m; + struct nfp_fl_set_mpls *set_m; int err; switch (act->id) { case FLOW_ACTION_DROP: nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP); break; + case FLOW_ACTION_REDIRECT_INGRESS: case FLOW_ACTION_REDIRECT: err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev, true, tun_type, tun_out_cnt, - out_cnt, csum_updated, extack); + out_cnt, csum_updated, *pkt_host, + extack); if (err) return err; break; + case FLOW_ACTION_MIRRED_INGRESS: case FLOW_ACTION_MIRRED: err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev, false, tun_type, tun_out_cnt, - out_cnt, csum_updated, extack); + out_cnt, csum_updated, *pkt_host, + extack); if (err) return err; break; @@ -975,6 +1072,54 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act, */ *csum_updated &= ~act->csum_flags; break; + case FLOW_ACTION_MPLS_PUSH: + if (*a_len + + sizeof(struct nfp_fl_push_mpls) > NFP_FL_MAX_A_SIZ) { + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push MPLS"); + return -EOPNOTSUPP; + } + + psh_m = (struct nfp_fl_push_mpls *)&nfp_fl->action_data[*a_len]; + nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL); + + err = nfp_fl_push_mpls(psh_m, act, extack); + if (err) + return err; + *a_len += sizeof(struct nfp_fl_push_mpls); + break; + case FLOW_ACTION_MPLS_POP: + if (*a_len + + sizeof(struct nfp_fl_pop_mpls) > NFP_FL_MAX_A_SIZ) { + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at pop MPLS"); + return -EOPNOTSUPP; + } + + pop_m = (struct nfp_fl_pop_mpls *)&nfp_fl->action_data[*a_len]; + nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL); + + nfp_fl_pop_mpls(pop_m, act); + *a_len += sizeof(struct nfp_fl_pop_mpls); + break; + case FLOW_ACTION_MPLS_MANGLE: + if (*a_len + + sizeof(struct nfp_fl_set_mpls) > NFP_FL_MAX_A_SIZ) { + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at set MPLS"); + return -EOPNOTSUPP; + } + + set_m = (struct nfp_fl_set_mpls *)&nfp_fl->action_data[*a_len]; + nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL); + + nfp_fl_set_mpls(set_m, act); + *a_len += sizeof(struct nfp_fl_set_mpls); + break; + case FLOW_ACTION_PTYPE: + /* TC ptype skbedit sets PACKET_HOST for ingress redirect. 
*/ + if (act->ptype != PACKET_HOST) + return -EOPNOTSUPP; + + *pkt_host = true; + break; default: /* Currently we do not handle any other actions. */ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported action in action list"); @@ -1030,6 +1175,7 @@ int nfp_flower_compile_action(struct nfp_app *app, struct nfp_flower_pedit_acts set_act; enum nfp_flower_tun_type tun_type; struct flow_action_entry *act; + bool pkt_host = false; u32 csum_updated = 0; memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ); @@ -1046,7 +1192,7 @@ int nfp_flower_compile_action(struct nfp_app *app, err = nfp_flower_loop_action(app, act, flow, nfp_flow, &act_len, netdev, &tun_type, &tun_out_cnt, &out_cnt, &csum_updated, - &set_act, extack, i); + &set_act, &pkt_host, extack, i); if (err) return err; act_cnt++; diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index 0f1706ae5bfc..7eb2ec8969c3 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -68,8 +68,11 @@ #define NFP_FL_ACTION_OPCODE_OUTPUT 0 #define NFP_FL_ACTION_OPCODE_PUSH_VLAN 1 #define NFP_FL_ACTION_OPCODE_POP_VLAN 2 +#define NFP_FL_ACTION_OPCODE_PUSH_MPLS 3 +#define NFP_FL_ACTION_OPCODE_POP_MPLS 4 #define NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL 6 #define NFP_FL_ACTION_OPCODE_SET_ETHERNET 7 +#define NFP_FL_ACTION_OPCODE_SET_MPLS 8 #define NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS 9 #define NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS 10 #define NFP_FL_ACTION_OPCODE_SET_IPV6_SRC 11 @@ -217,7 +220,8 @@ struct nfp_fl_set_ipv4_tun { __be16 tun_flags; u8 ttl; u8 tos; - __be32 extra; + __be16 outer_vlan_tpid; + __be16 outer_vlan_tci; u8 tun_len; u8 res2; __be16 tun_proto; @@ -232,6 +236,24 @@ struct nfp_fl_push_geneve { u8 opt_data[]; }; +struct nfp_fl_push_mpls { + struct nfp_fl_act_head head; + __be16 ethtype; + __be32 lse; +}; + +struct nfp_fl_pop_mpls { + struct nfp_fl_act_head head; + __be16 ethtype; +}; + +struct nfp_fl_set_mpls { + struct nfp_fl_act_head head; + __be16 reserved; + __be32 lse_mask; + __be32 lse; +}; + /* Metadata with L2 (1W/4B) * ---------------------------------------------------------------- * 3 2 1 @@ -462,6 +484,7 @@ enum nfp_flower_cmsg_type_port { NFP_FLOWER_CMSG_TYPE_QOS_MOD = 18, NFP_FLOWER_CMSG_TYPE_QOS_DEL = 19, NFP_FLOWER_CMSG_TYPE_QOS_STATS = 20, + NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE = 21, NFP_FLOWER_CMSG_TYPE_MAX = 32, }; diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index eb846133943b..7a20447cca19 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -781,6 +781,7 @@ static int nfp_flower_init(struct nfp_app *app) INIT_LIST_HEAD(&app_priv->indr_block_cb_priv); INIT_LIST_HEAD(&app_priv->non_repr_priv); + app_priv->pre_tun_rule_cnt = 0; return 0; diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index af9441d5787f..31d94592a7c0 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -42,6 +42,7 @@ struct nfp_app; #define NFP_FL_FEATS_VLAN_PCP BIT(3) #define NFP_FL_FEATS_VF_RLIM BIT(4) #define NFP_FL_FEATS_FLOW_MOD BIT(5) +#define NFP_FL_FEATS_PRE_TUN_RULES BIT(6) #define NFP_FL_FEATS_FLOW_MERGE BIT(30) #define NFP_FL_FEATS_LAG BIT(31) @@ -162,6 +163,7 @@ struct nfp_fl_internal_ports { * @qos_stats_work: Workqueue for qos stats processing * 
@qos_rate_limiters: Current active qos rate limiters * @qos_stats_lock: Lock on qos stats updates + * @pre_tun_rule_cnt: Number of pre-tunnel rules offloaded */ struct nfp_flower_priv { struct nfp_app *app; @@ -193,6 +195,7 @@ struct nfp_flower_priv { struct delayed_work qos_stats_work; unsigned int qos_rate_limiters; spinlock_t qos_stats_lock; /* Protect the qos stats */ + int pre_tun_rule_cnt; }; /** @@ -218,6 +221,7 @@ struct nfp_fl_qos { * @block_shared: Flag indicating if offload applies to shared blocks * @mac_list: List entry of reprs that share the same offloaded MAC * @qos_table: Stored info on filters implementing qos + * @on_bridge: Indicates if the repr is attached to a bridge */ struct nfp_flower_repr_priv { struct nfp_repr *nfp_repr; @@ -227,6 +231,7 @@ struct nfp_flower_repr_priv { bool block_shared; struct list_head mac_list; struct nfp_fl_qos qos_table; + bool on_bridge; }; /** @@ -280,6 +285,11 @@ struct nfp_fl_payload { char *action_data; struct list_head linked_flows; bool in_hw; + struct { + struct net_device *dev; + __be16 vlan_tci; + __be16 port_idx; + } pre_tun_rule; }; struct nfp_fl_payload_link { @@ -333,6 +343,11 @@ static inline bool nfp_flower_is_merge_flow(struct nfp_fl_payload *flow_pay) return flow_pay->tc_flower_cookie == (unsigned long)flow_pay; } +static inline bool nfp_flower_is_supported_bridge(struct net_device *netdev) +{ + return netif_is_ovs_master(netdev); +} + int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count, unsigned int host_ctx_split); void nfp_flower_metadata_cleanup(struct nfp_app *app); @@ -415,4 +430,8 @@ void nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev); u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app, struct net_device *netdev); +int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app, + struct nfp_fl_payload *flow); +int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app, + struct nfp_fl_payload *flow); #endif diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 457bdc60f3ee..987ae221f6be 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -61,6 +61,11 @@ NFP_FLOWER_LAYER_IPV4 | \ NFP_FLOWER_LAYER_IPV6) +#define NFP_FLOWER_PRE_TUN_RULE_FIELDS \ + (NFP_FLOWER_LAYER_PORT | \ + NFP_FLOWER_LAYER_MAC | \ + NFP_FLOWER_LAYER_IPV4) + struct nfp_flower_merge_check { union { struct { @@ -489,6 +494,7 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer) flow_pay->meta.flags = 0; INIT_LIST_HEAD(&flow_pay->linked_flows); flow_pay->in_hw = false; + flow_pay->pre_tun_rule.dev = NULL; return flow_pay; @@ -732,28 +738,62 @@ nfp_flower_copy_pre_actions(char *act_dst, char *act_src, int len, return act_off; } -static int nfp_fl_verify_post_tun_acts(char *acts, int len) +static int +nfp_fl_verify_post_tun_acts(char *acts, int len, struct nfp_fl_push_vlan **vlan) { struct nfp_fl_act_head *a; unsigned int act_off = 0; while (act_off < len) { a = (struct nfp_fl_act_head *)&acts[act_off]; - if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT) + + if (a->jump_id == NFP_FL_ACTION_OPCODE_PUSH_VLAN && !act_off) + *vlan = (struct nfp_fl_push_vlan *)a; + else if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT) return -EOPNOTSUPP; act_off += a->len_lw << NFP_FL_LW_SIZ; } + /* Ensure any VLAN push also has an egress action. 
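Both the post-tunnel action check above and the VLAN fix-up that follows it walk the compiled action list header by header, advancing by len_lw longwords. A reduced sketch of that traversal, with the structures trimmed to what the walk needs:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define LW_SIZ 2 /* len_lw counts 4-byte longwords: len = len_lw << 2 */

struct act_head {
    uint8_t jump_id;
    uint8_t len_lw;
};

/* Return the first header with @jump_id in a packed action list -- the
 * same stride nfp_fl_verify_post_tun_acts() and
 * nfp_fl_push_vlan_after_tun() use.
 */
static struct act_head *find_action(char *acts, size_t len, uint8_t jump_id)
{
    size_t off = 0;

    while (off < len) {
        struct act_head *a = (struct act_head *)&acts[off];

        if (a->jump_id == jump_id)
            return a;
        off += (size_t)a->len_lw << LW_SIZ;
    }
    return NULL;
}

int main(void)
{
    /* two 8-byte actions: opcode 1 (push_vlan), then opcode 0 (output) */
    char acts[16] = { 1, 2 };

    acts[8] = 0;
    acts[9] = 2;
    printf("found at %td\n", (char *)find_action(acts, 16, 0) - acts);
    return 0;
}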
*/ + if (*vlan && act_off <= sizeof(struct nfp_fl_push_vlan)) + return -EOPNOTSUPP; + return 0; } static int +nfp_fl_push_vlan_after_tun(char *acts, int len, struct nfp_fl_push_vlan *vlan) +{ + struct nfp_fl_set_ipv4_tun *tun; + struct nfp_fl_act_head *a; + unsigned int act_off = 0; + + while (act_off < len) { + a = (struct nfp_fl_act_head *)&acts[act_off]; + + if (a->jump_id == NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL) { + tun = (struct nfp_fl_set_ipv4_tun *)a; + tun->outer_vlan_tpid = vlan->vlan_tpid; + tun->outer_vlan_tci = vlan->vlan_tci; + + return 0; + } + + act_off += a->len_lw << NFP_FL_LW_SIZ; + } + + /* Return error if no tunnel action is found. */ + return -EOPNOTSUPP; +} + +static int nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1, struct nfp_fl_payload *sub_flow2, struct nfp_fl_payload *merge_flow) { unsigned int sub1_act_len, sub2_act_len, pre_off1, pre_off2; + struct nfp_fl_push_vlan *post_tun_push_vlan = NULL; bool tunnel_act = false; char *merge_act; int err; @@ -790,18 +830,36 @@ nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1, sub2_act_len -= pre_off2; /* FW does a tunnel push when egressing, therefore, if sub_flow 1 pushes - * a tunnel, sub_flow 2 can only have output actions for a valid merge. + * a tunnel, there are restrictions on what sub_flow 2 actions lead to a + * valid merge. */ if (tunnel_act) { char *post_tun_acts = &sub_flow2->action_data[pre_off2]; - err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len); + err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len, + &post_tun_push_vlan); if (err) return err; + + if (post_tun_push_vlan) { + pre_off2 += sizeof(*post_tun_push_vlan); + sub2_act_len -= sizeof(*post_tun_push_vlan); + } } /* Copy remaining actions from sub_flows 1 and 2. */ memcpy(merge_act, sub_flow1->action_data + pre_off1, sub1_act_len); + + if (post_tun_push_vlan) { + /* Update tunnel action in merge to include VLAN push. */ + err = nfp_fl_push_vlan_after_tun(merge_act, sub1_act_len, + post_tun_push_vlan); + if (err) + return err; + + merge_flow->meta.act_len -= sizeof(*post_tun_push_vlan); + } + merge_act += sub1_act_len; memcpy(merge_act, sub_flow2->action_data + pre_off2, sub2_act_len); @@ -945,6 +1003,106 @@ err_destroy_merge_flow: } /** + * nfp_flower_validate_pre_tun_rule() + * @app: Pointer to the APP handle + * @flow: Pointer to NFP flow representation of rule + * @extack: Netlink extended ACK report + * + * Verifies the flow as a pre-tunnel rule. + * + * Return: negative value on error, 0 if verified. 
+ */ +static int +nfp_flower_validate_pre_tun_rule(struct nfp_app *app, + struct nfp_fl_payload *flow, + struct netlink_ext_ack *extack) +{ + struct nfp_flower_meta_tci *meta_tci; + struct nfp_flower_mac_mpls *mac; + struct nfp_fl_act_head *act; + u8 *mask = flow->mask_data; + bool vlan = false; + int act_offset; + u8 key_layer; + + meta_tci = (struct nfp_flower_meta_tci *)flow->unmasked_data; + if (meta_tci->tci & cpu_to_be16(NFP_FLOWER_MASK_VLAN_PRESENT)) { + u16 vlan_tci = be16_to_cpu(meta_tci->tci); + + vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT; + flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci); + vlan = true; + } else { + flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff); + } + + key_layer = meta_tci->nfp_flow_key_layer; + if (key_layer & ~NFP_FLOWER_PRE_TUN_RULE_FIELDS) { + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: too many match fields"); + return -EOPNOTSUPP; + } + + if (!(key_layer & NFP_FLOWER_LAYER_MAC)) { + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MAC fields match required"); + return -EOPNOTSUPP; + } + + /* Skip fields known to exist. */ + mask += sizeof(struct nfp_flower_meta_tci); + mask += sizeof(struct nfp_flower_in_port); + + /* Ensure destination MAC address is fully matched. */ + mac = (struct nfp_flower_mac_mpls *)mask; + if (!is_broadcast_ether_addr(&mac->mac_dst[0])) { + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC field must not be masked"); + return -EOPNOTSUPP; + } + + if (key_layer & NFP_FLOWER_LAYER_IPV4) { + int ip_flags = offsetof(struct nfp_flower_ipv4, ip_ext.flags); + int ip_proto = offsetof(struct nfp_flower_ipv4, ip_ext.proto); + int i; + + mask += sizeof(struct nfp_flower_mac_mpls); + + /* Ensure proto and flags are the only IP layer fields. */ + for (i = 0; i < sizeof(struct nfp_flower_ipv4); i++) + if (mask[i] && i != ip_flags && i != ip_proto) { + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: only flags and proto can be matched in ip header"); + return -EOPNOTSUPP; + } + } + + /* Action must be a single egress or pop_vlan and egress. */ + act_offset = 0; + act = (struct nfp_fl_act_head *)&flow->action_data[act_offset]; + if (vlan) { + if (act->jump_id != NFP_FL_ACTION_OPCODE_POP_VLAN) { + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on VLAN must have VLAN pop as first action"); + return -EOPNOTSUPP; + } + + act_offset += act->len_lw << NFP_FL_LW_SIZ; + act = (struct nfp_fl_act_head *)&flow->action_data[act_offset]; + } + + if (act->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT) { + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non egress action detected where egress was expected"); + return -EOPNOTSUPP; + } + + act_offset += act->len_lw << NFP_FL_LW_SIZ; + + /* Ensure there are no more actions after egress. */ + if (act_offset != flow->meta.act_len) { + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: egress is not the last action"); + return -EOPNOTSUPP; + } + + return 0; +} + +/** * nfp_flower_add_offload() - Adds a new flow to hardware. * @app: Pointer to the APP handle * @netdev: netdev structure. 
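The TCI handling at the top of nfp_flower_validate_pre_tun_rule() above strips the VLAN-present flag for the offloaded rule and uses all-ones as the no-VLAN sentinel. The same logic in isolation; 0x1000 is an illustrative stand-in for the driver's NFP_FLOWER_MASK_VLAN_PRESENT bit:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MASK_VLAN_PRESENT 0x1000 /* stand-in for the driver's flag bit */

/* Strip the "VLAN present" flag for the offloaded rule, or use all-ones
 * as the no-VLAN sentinel, as at the top of
 * nfp_flower_validate_pre_tun_rule().
 */
static uint16_t pre_tun_vlan_tci(uint16_t match_tci, bool *vlan)
{
    *vlan = match_tci & MASK_VLAN_PRESENT;
    return *vlan ? match_tci & ~MASK_VLAN_PRESENT : 0xffff;
}

int main(void)
{
    bool vlan;

    printf("0x%04x\n", pre_tun_vlan_tci(0x1064, &vlan)); /* 0x0064 */
    return 0;
}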
@@ -994,6 +1152,12 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, if (err) goto err_destroy_flow; + if (flow_pay->pre_tun_rule.dev) { + err = nfp_flower_validate_pre_tun_rule(app, flow_pay, extack); + if (err) + goto err_destroy_flow; + } + err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev, extack); if (err) goto err_destroy_flow; @@ -1006,8 +1170,11 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, goto err_release_metadata; } - err = nfp_flower_xmit_flow(app, flow_pay, - NFP_FLOWER_CMSG_TYPE_FLOW_ADD); + if (flow_pay->pre_tun_rule.dev) + err = nfp_flower_xmit_pre_tun_flow(app, flow_pay); + else + err = nfp_flower_xmit_flow(app, flow_pay, + NFP_FLOWER_CMSG_TYPE_FLOW_ADD); if (err) goto err_remove_rhash; @@ -1149,8 +1316,11 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev, goto err_free_merge_flow; } - err = nfp_flower_xmit_flow(app, nfp_flow, - NFP_FLOWER_CMSG_TYPE_FLOW_DEL); + if (nfp_flow->pre_tun_rule.dev) + err = nfp_flower_xmit_pre_tun_del_flow(app, nfp_flow); + else + err = nfp_flower_xmit_flow(app, nfp_flow, + NFP_FLOWER_CMSG_TYPE_FLOW_DEL); /* Fall through on error. */ err_free_merge_flow: @@ -1487,16 +1657,17 @@ int nfp_flower_reg_indir_block_handler(struct nfp_app *app, return NOTIFY_OK; if (event == NETDEV_REGISTER) { - err = __tc_indr_block_cb_register(netdev, app, - nfp_flower_indr_setup_tc_cb, - app); + err = __flow_indr_block_cb_register(netdev, app, + nfp_flower_indr_setup_tc_cb, + app); if (err) nfp_flower_cmsg_warn(app, "Indirect block reg failed - %s\n", netdev->name); } else if (event == NETDEV_UNREGISTER) { - __tc_indr_block_cb_unregister(netdev, - nfp_flower_indr_setup_tc_cb, app); + __flow_indr_block_cb_unregister(netdev, + nfp_flower_indr_setup_tc_cb, + app); } return NOTIFY_OK; diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index f0ee982eb1b5..2600ce476d6b 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -15,6 +15,24 @@ #define NFP_FL_MAX_ROUTES 32 +#define NFP_TUN_PRE_TUN_RULE_LIMIT 32 +#define NFP_TUN_PRE_TUN_RULE_DEL 0x1 +#define NFP_TUN_PRE_TUN_IDX_BIT 0x8 + +/** + * struct nfp_tun_pre_tun_rule - rule matched before decap + * @flags: options for the rule offset + * @port_idx: index of destination MAC address for the rule + * @vlan_tci: VLAN info associated with MAC + * @host_ctx_id: stats context of rule to update + */ +struct nfp_tun_pre_tun_rule { + __be32 flags; + __be16 port_idx; + __be16 vlan_tci; + __be32 host_ctx_id; +}; + /** * struct nfp_tun_active_tuns - periodic message of active tunnels * @seq: sequence number of the message @@ -124,11 +142,12 @@ enum nfp_flower_mac_offload_cmd { /** * struct nfp_tun_offloaded_mac - hashtable entry for an offloaded MAC - * @ht_node: Hashtable entry - * @addr: Offloaded MAC address - * @index: Offloaded index for given MAC address - * @ref_count: Number of devs using this MAC address - * @repr_list: List of reprs sharing this MAC address + * @ht_node: Hashtable entry + * @addr: Offloaded MAC address + * @index: Offloaded index for given MAC address + * @ref_count: Number of devs using this MAC address + * @repr_list: List of reprs sharing this MAC address + * @bridge_count: Number of bridge/internal devs with MAC */ struct nfp_tun_offloaded_mac { struct rhash_head ht_node; u16 index; int ref_count; struct list_head
repr_list; + int bridge_count; }; static const struct rhashtable_params offloaded_macs_params = { @@ -556,6 +576,8 @@ nfp_tunnel_offloaded_macs_inc_ref_and_link(struct nfp_tun_offloaded_mac *entry, list_del(&repr_priv->mac_list); list_add_tail(&repr_priv->mac_list, &entry->repr_list); + } else if (nfp_flower_is_supported_bridge(netdev)) { + entry->bridge_count++; } entry->ref_count++; @@ -572,20 +594,35 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev, entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr); if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) { - nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, netdev, mod); - return 0; + if (entry->bridge_count || + !nfp_flower_is_supported_bridge(netdev)) { + nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, + netdev, mod); + return 0; + } + + /* MAC is global but matches need to go to pre_tun table. */ + nfp_mac_idx = entry->index | NFP_TUN_PRE_TUN_IDX_BIT; } - /* Assign a global index if non-repr or MAC address is now shared. */ - if (entry || !port) { - ida_idx = ida_simple_get(&priv->tun.mac_off_ids, 0, - NFP_MAX_MAC_INDEX, GFP_KERNEL); - if (ida_idx < 0) - return ida_idx; + if (!nfp_mac_idx) { + /* Assign a global index if non-repr or MAC is now shared. */ + if (entry || !port) { + ida_idx = ida_simple_get(&priv->tun.mac_off_ids, 0, + NFP_MAX_MAC_INDEX, GFP_KERNEL); + if (ida_idx < 0) + return ida_idx; - nfp_mac_idx = nfp_tunnel_get_global_mac_idx_from_ida(ida_idx); - } else { - nfp_mac_idx = nfp_tunnel_get_mac_idx_from_phy_port_id(port); + nfp_mac_idx = + nfp_tunnel_get_global_mac_idx_from_ida(ida_idx); + + if (nfp_flower_is_supported_bridge(netdev)) + nfp_mac_idx |= NFP_TUN_PRE_TUN_IDX_BIT; + + } else { + nfp_mac_idx = + nfp_tunnel_get_mac_idx_from_phy_port_id(port); + } } if (!entry) { @@ -654,6 +691,25 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev, list_del(&repr_priv->mac_list); } + if (nfp_flower_is_supported_bridge(netdev)) { + entry->bridge_count--; + + if (!entry->bridge_count && entry->ref_count) { + u16 nfp_mac_idx; + + nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT; + if (__nfp_tunnel_offload_mac(app, mac, nfp_mac_idx, + false)) { + nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n", + netdev_name(netdev)); + return 0; + } + + entry->index = nfp_mac_idx; + return 0; + } + } + /* If MAC is now used by 1 repr set the offloaded MAC index to port. */ if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) { u16 nfp_mac_idx; @@ -713,6 +769,9 @@ nfp_tunnel_offload_mac(struct nfp_app *app, struct net_device *netdev, return 0; repr_priv = repr->app_priv; + if (repr_priv->on_bridge) + return 0; + mac_offloaded = &repr_priv->mac_offloaded; off_mac = &repr_priv->offloaded_mac_addr[0]; port = nfp_repr_get_port_id(netdev); @@ -828,10 +887,119 @@ int nfp_tunnel_mac_event_handler(struct nfp_app *app, if (err) nfp_flower_cmsg_warn(app, "Failed to offload MAC change on %s.\n", netdev_name(netdev)); + } else if (event == NETDEV_CHANGEUPPER) { + /* If a repr is attached to a bridge then tunnel packets + * entering the physical port are directed through the bridge + * datapath and cannot be directly detunneled. Therefore, + * associated offloaded MACs and indexes should not be used + * by fw for detunneling. 
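The MAC index juggling above steers matches for bridge-shared MACs into the pre-tunnel table by folding one flag bit into the global index. Setting and reverting that bit is plain bit arithmetic, sketched here with the same 0x8 flag and a hypothetical index:

#include <stdint.h>
#include <stdio.h>

#define PRE_TUN_IDX_BIT 0x8 /* NFP_TUN_PRE_TUN_IDX_BIT */

/* Route matches for a bridge-shared MAC to the pre-tunnel table, or
 * revert to the plain global index once the last bridge user is gone,
 * as in nfp_tunnel_{add,del}_shared_mac().
 */
static uint16_t idx_set_pre_tun(uint16_t idx) { return idx | PRE_TUN_IDX_BIT; }
static uint16_t idx_clr_pre_tun(uint16_t idx) { return idx & ~PRE_TUN_IDX_BIT; }

int main(void)
{
    uint16_t idx = 0x3f2; /* hypothetical global MAC index */

    idx = idx_set_pre_tun(idx);
    printf("pre-tun 0x%03x, reverted 0x%03x\n", idx, idx_clr_pre_tun(idx));
    return 0;
}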
+ */ + struct netdev_notifier_changeupper_info *info = ptr; + struct net_device *upper = info->upper_dev; + struct nfp_flower_repr_priv *repr_priv; + struct nfp_repr *repr; + + if (!nfp_netdev_is_nfp_repr(netdev) || + !nfp_flower_is_supported_bridge(upper)) + return NOTIFY_OK; + + repr = netdev_priv(netdev); + if (repr->app != app) + return NOTIFY_OK; + + repr_priv = repr->app_priv; + + if (info->linking) { + if (nfp_tunnel_offload_mac(app, netdev, + NFP_TUNNEL_MAC_OFFLOAD_DEL)) + nfp_flower_cmsg_warn(app, "Failed to delete offloaded MAC on %s.\n", + netdev_name(netdev)); + repr_priv->on_bridge = true; + } else { + repr_priv->on_bridge = false; + + if (!(netdev->flags & IFF_UP)) + return NOTIFY_OK; + + if (nfp_tunnel_offload_mac(app, netdev, + NFP_TUNNEL_MAC_OFFLOAD_ADD)) + nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n", + netdev_name(netdev)); + } } return NOTIFY_OK; } +int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app, + struct nfp_fl_payload *flow) +{ + struct nfp_flower_priv *app_priv = app->priv; + struct nfp_tun_offloaded_mac *mac_entry; + struct nfp_tun_pre_tun_rule payload; + struct net_device *internal_dev; + int err; + + if (app_priv->pre_tun_rule_cnt == NFP_TUN_PRE_TUN_RULE_LIMIT) + return -ENOSPC; + + memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule)); + + internal_dev = flow->pre_tun_rule.dev; + payload.vlan_tci = flow->pre_tun_rule.vlan_tci; + payload.host_ctx_id = flow->meta.host_ctx_id; + + /* Lookup MAC index for the pre-tunnel rule egress device. + * Note that because the device is always an internal port, it will + * have a constant global index so does not need to be tracked. + */ + mac_entry = nfp_tunnel_lookup_offloaded_macs(app, + internal_dev->dev_addr); + if (!mac_entry) + return -ENOENT; + + payload.port_idx = cpu_to_be16(mac_entry->index); + + /* Copy mac id and vlan to flow - dev may not exist at delete time. */ + flow->pre_tun_rule.vlan_tci = payload.vlan_tci; + flow->pre_tun_rule.port_idx = payload.port_idx; + + err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE, + sizeof(struct nfp_tun_pre_tun_rule), + (unsigned char *)&payload, GFP_KERNEL); + if (err) + return err; + + app_priv->pre_tun_rule_cnt++; + + return 0; +} + +int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app, + struct nfp_fl_payload *flow) +{ + struct nfp_flower_priv *app_priv = app->priv; + struct nfp_tun_pre_tun_rule payload; + u32 tmp_flags = 0; + int err; + + memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule)); + + tmp_flags |= NFP_TUN_PRE_TUN_RULE_DEL; + payload.flags = cpu_to_be32(tmp_flags); + payload.vlan_tci = flow->pre_tun_rule.vlan_tci; + payload.port_idx = flow->pre_tun_rule.port_idx; + + err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE, + sizeof(struct nfp_tun_pre_tun_rule), + (unsigned char *)&payload, GFP_KERNEL); + if (err) + return err; + + app_priv->pre_tun_rule_cnt--; + + return 0; +} + int nfp_tunnel_config_start(struct nfp_app *app) { struct nfp_flower_priv *priv = app->priv; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 60e57f08de80..4d282fc56009 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -352,7 +352,7 @@ nfp_net_fw_request(struct pci_dev *pdev, struct nfp_pf *pf, const char *name) err = request_firmware_direct(&fw, name, &pdev->dev); nfp_info(pf->cpp, " %s: %s\n", - name, err ? "not found" : "found, loading..."); + name, err ? 
"not found" : "found"); if (err) return NULL; @@ -430,8 +430,35 @@ nfp_net_fw_find(struct pci_dev *pdev, struct nfp_pf *pf) return nfp_net_fw_request(pdev, pf, fw_name); } +static int +nfp_get_fw_policy_value(struct pci_dev *pdev, struct nfp_nsp *nsp, + const char *key, const char *default_val, int max_val, + int *value) +{ + char hwinfo[64]; + long hi_val; + int err; + + snprintf(hwinfo, sizeof(hwinfo), key); + err = nfp_nsp_hwinfo_lookup_optional(nsp, hwinfo, sizeof(hwinfo), + default_val); + if (err) + return err; + + err = kstrtol(hwinfo, 0, &hi_val); + if (err || hi_val < 0 || hi_val > max_val) { + dev_warn(&pdev->dev, + "Invalid value '%s' from '%s', ignoring\n", + hwinfo, key); + err = kstrtol(default_val, 0, &hi_val); + } + + *value = hi_val; + return err; +} + /** - * nfp_net_fw_load() - Load the firmware image + * nfp_fw_load() - Load the firmware image * @pdev: PCI Device structure * @pf: NFP PF Device structure * @nsp: NFP SP handle @@ -441,44 +468,107 @@ nfp_net_fw_find(struct pci_dev *pdev, struct nfp_pf *pf) static int nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp) { - const struct firmware *fw; + bool do_reset, fw_loaded = false; + const struct firmware *fw = NULL; + int err, reset, policy, ifcs = 0; + char *token, *ptr; + char hwinfo[64]; u16 interface; - int err; + + snprintf(hwinfo, sizeof(hwinfo), "abi_drv_load_ifc"); + err = nfp_nsp_hwinfo_lookup_optional(nsp, hwinfo, sizeof(hwinfo), + NFP_NSP_DRV_LOAD_IFC_DEFAULT); + if (err) + return err; interface = nfp_cpp_interface(pf->cpp); - if (NFP_CPP_INTERFACE_UNIT_of(interface) != 0) { - /* Only Unit 0 should reset or load firmware */ + ptr = hwinfo; + while ((token = strsep(&ptr, ","))) { + unsigned long interface_hi; + + err = kstrtoul(token, 0, &interface_hi); + if (err) { + dev_err(&pdev->dev, + "Failed to parse interface '%s': %d\n", + token, err); + return err; + } + + ifcs++; + if (interface == interface_hi) + break; + } + + if (!token) { dev_info(&pdev->dev, "Firmware will be loaded by partner\n"); return 0; } + err = nfp_get_fw_policy_value(pdev, nsp, "abi_drv_reset", + NFP_NSP_DRV_RESET_DEFAULT, + NFP_NSP_DRV_RESET_NEVER, &reset); + if (err) + return err; + + err = nfp_get_fw_policy_value(pdev, nsp, "app_fw_from_flash", + NFP_NSP_APP_FW_LOAD_DEFAULT, + NFP_NSP_APP_FW_LOAD_PREF, &policy); + if (err) + return err; + fw = nfp_net_fw_find(pdev, pf); - if (!fw) { - if (nfp_nsp_has_stored_fw_load(nsp)) - nfp_nsp_load_stored_fw(nsp); - return 0; + do_reset = reset == NFP_NSP_DRV_RESET_ALWAYS || + (fw && reset == NFP_NSP_DRV_RESET_DISK); + + if (do_reset) { + dev_info(&pdev->dev, "Soft-resetting the NFP\n"); + err = nfp_nsp_device_soft_reset(nsp); + if (err < 0) { + dev_err(&pdev->dev, + "Failed to soft reset the NFP: %d\n", err); + goto exit_release_fw; + } } - dev_info(&pdev->dev, "Soft-reset, loading FW image\n"); - err = nfp_nsp_device_soft_reset(nsp); - if (err < 0) { - dev_err(&pdev->dev, "Failed to soft reset the NFP: %d\n", - err); - goto exit_release_fw; - } + if (fw && policy != NFP_NSP_APP_FW_LOAD_FLASH) { + if (nfp_nsp_has_fw_loaded(nsp) && nfp_nsp_fw_loaded(nsp)) + goto exit_release_fw; - err = nfp_nsp_load_fw(nsp, fw); - if (err < 0) { - dev_err(&pdev->dev, "FW loading failed: %d\n", err); - goto exit_release_fw; + err = nfp_nsp_load_fw(nsp, fw); + if (err < 0) { + dev_err(&pdev->dev, "FW loading failed: %d\n", + err); + goto exit_release_fw; + } + dev_info(&pdev->dev, "Finished loading FW image\n"); + fw_loaded = true; + } else if (policy != NFP_NSP_APP_FW_LOAD_DISK && + 
nfp_nsp_has_stored_fw_load(nsp)) { + + /* Don't propagate this error to stick with legacy driver + * behavior, failure will be detected later during init. + */ + if (!nfp_nsp_load_stored_fw(nsp)) + dev_info(&pdev->dev, "Finished loading stored FW image\n"); + + /* Don't flag the fw_loaded in this case since other devices + * may reuse the firmware when configured this way + */ + } else { + dev_warn(&pdev->dev, "Didn't load firmware, please update flash or reconfigure card\n"); } - dev_info(&pdev->dev, "Finished loading FW image\n"); - exit_release_fw: release_firmware(fw); - return err < 0 ? err : 1; + /* We don't want to unload firmware when other devices may still be + * dependent on it, which could be the case if there are multiple + * devices that could load firmware. + */ + if (fw_loaded && ifcs == 1) + pf->unload_fw_on_remove = true; + + return err < 0 ? err : fw_loaded; } static void @@ -704,7 +794,7 @@ err_net_remove: err_fw_unload: kfree(pf->rtbl); nfp_mip_close(pf->mip); - if (pf->fw_loaded) + if (pf->unload_fw_on_remove) nfp_fw_unload(pf); kfree(pf->eth_tbl); kfree(pf->nspi); @@ -744,7 +834,7 @@ static void __nfp_pci_shutdown(struct pci_dev *pdev, bool unload_fw) vfree(pf->dumpspec); kfree(pf->rtbl); nfp_mip_close(pf->mip); - if (unload_fw && pf->fw_loaded) + if (unload_fw && pf->unload_fw_on_remove) nfp_fw_unload(pf); destroy_workqueue(pf->wq); @@ -815,6 +905,8 @@ static void __exit nfp_main_exit(void) module_init(nfp_main_init); module_exit(nfp_main_exit); +MODULE_FIRMWARE("netronome/nic_AMDA0058-0011_2x40.nffw"); +MODULE_FIRMWARE("netronome/nic_AMDA0058-0012_2x40.nffw"); MODULE_FIRMWARE("netronome/nic_AMDA0081-0001_1x40.nffw"); MODULE_FIRMWARE("netronome/nic_AMDA0081-0001_4x10.nffw"); MODULE_FIRMWARE("netronome/nic_AMDA0096-0001_2x10.nffw"); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h index b7211f200d22..5d5812fd9317 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h @@ -64,6 +64,7 @@ struct nfp_dumpspec { * @limit_vfs: Number of VFs supported by firmware (~0 for PCI limit) * @num_vfs: Number of SR-IOV VFs enabled * @fw_loaded: Is the firmware loaded? + * @unload_fw_on_remove:Do we need to unload firmware on driver removal? 
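The reworked nfp_fw_load() above reduces the two HWinfo policies to a reset decision and a load decision. A condensed model of those predicates; the enum values are ordered to match the NFP_NSP_* defines added to nfp_nsp.h later in this diff:

#include <stdbool.h>
#include <stdio.h>

/* Values of the 'abi_drv_reset' and 'app_fw_from_flash' HWinfo keys */
enum drv_reset { RESET_DISK, RESET_ALWAYS, RESET_NEVER };
enum fw_policy { LOAD_DISK, LOAD_FLASH, LOAD_PREF };

/* Mirror the do_reset decision in the reworked nfp_fw_load() */
static bool should_reset(enum drv_reset reset, bool have_disk_fw)
{
    return reset == RESET_ALWAYS ||
           (have_disk_fw && reset == RESET_DISK);
}

/* The driver only pushes its own image when one was found on disk and
 * policy does not insist on the flash image.
 */
static bool driver_loads_fw(enum fw_policy policy, bool have_disk_fw)
{
    return have_disk_fw && policy != LOAD_FLASH;
}

int main(void)
{
    /* defaults: reset=DISK ("0"), policy=PREF ("2"), FW found on disk */
    printf("reset=%d load=%d\n",
           should_reset(RESET_DISK, true),
           driver_loads_fw(LOAD_PREF, true));
    return 0;
}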
* @ctrl_vnic: Pointer to the control vNIC if available * @mip: MIP handle * @rtbl: RTsym table @@ -110,6 +111,7 @@ struct nfp_pf { unsigned int num_vfs; bool fw_loaded; + bool unload_fw_on_remove; struct nfp_net *ctrl_vnic; @@ -185,4 +187,7 @@ int nfp_shared_buf_pool_get(struct nfp_pf *pf, unsigned int sb, u16 pool_index, int nfp_shared_buf_pool_set(struct nfp_pf *pf, unsigned int sb, u16 pool_index, u32 size, enum devlink_sb_threshold_type threshold_type); + +int nfp_devlink_params_register(struct nfp_pf *pf); +void nfp_devlink_params_unregister(struct nfp_pf *pf); #endif /* NFP_MAIN_H */ diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index 5d6c3738b494..250f510b1d21 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -66,7 +66,7 @@ #define NFP_NET_MAX_DMA_BITS 40 /* Default size for MTU and freelist buffer sizes */ -#define NFP_NET_DEFAULT_MTU 1500 +#define NFP_NET_DEFAULT_MTU 1500U /* Maximum number of bytes prepended to a packet */ #define NFP_NET_MAX_PREPEND 64 diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 9903805717da..61aabffc8888 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -975,7 +975,7 @@ static int nfp_net_prep_tx_meta(struct sk_buff *skb, u64 tls_handle) static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) { struct nfp_net *nn = netdev_priv(netdev); - const struct skb_frag_struct *frag; + const skb_frag_t *frag; int f, nr_frags, wr_idx, md_bytes; struct nfp_net_tx_ring *tx_ring; struct nfp_net_r_vector *r_vec; @@ -1155,7 +1155,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget) todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p); while (todo--) { - const struct skb_frag_struct *frag; + const skb_frag_t *frag; struct nfp_net_tx_buf *tx_buf; struct sk_buff *skb; int fidx, nr_frags; @@ -1270,7 +1270,7 @@ static bool nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring) static void nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring) { - const struct skb_frag_struct *frag; + const skb_frag_t *frag; struct netdev_queue *nd_q; while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) { @@ -4116,14 +4116,7 @@ int nfp_net_init(struct nfp_net *nn) /* Set default MTU and Freelist buffer size */ if (!nfp_net_is_data_vnic(nn) && nn->app->ctrl_mtu) { - if (nn->app->ctrl_mtu <= nn->max_mtu) { - nn->dp.mtu = nn->app->ctrl_mtu; - } else { - if (nn->app->ctrl_mtu != NFP_APP_CTRL_MTU_MAX) - nn_warn(nn, "app requested MTU above max supported %u > %u\n", - nn->app->ctrl_mtu, nn->max_mtu); - nn->dp.mtu = nn->max_mtu; - } + nn->dp.mtu = min(nn->app->ctrl_mtu, nn->max_mtu); } else if (nn->max_mtu < NFP_NET_DEFAULT_MTU) { nn->dp.mtu = nn->max_mtu; } else { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c index ab7f2498e1c4..553c708694e8 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c @@ -159,19 +159,13 @@ void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir) else strcpy(name, "ctrl-vnic"); nn->debugfs_dir = debugfs_create_dir(name, ddir); - if (IS_ERR_OR_NULL(nn->debugfs_dir)) - return; /* Create queue debugging sub-tree */ queues = debugfs_create_dir("queue", nn->debugfs_dir); - if 
(IS_ERR_OR_NULL(queues)) - return; rx = debugfs_create_dir("rx", queues); tx = debugfs_create_dir("tx", queues); xdp = debugfs_create_dir("xdp", queues); - if (IS_ERR_OR_NULL(rx) || IS_ERR_OR_NULL(tx) || IS_ERR_OR_NULL(xdp)) - return; for (i = 0; i < min(nn->max_rx_rings, nn->max_r_vecs); i++) { sprintf(name, "%d", i); @@ -190,16 +184,7 @@ void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir) struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev) { - struct dentry *dev_dir; - - if (IS_ERR_OR_NULL(nfp_dir)) - return NULL; - - dev_dir = debugfs_create_dir(pci_name(pdev), nfp_dir); - if (IS_ERR_OR_NULL(dev_dir)) - return NULL; - - return dev_dir; + return debugfs_create_dir(pci_name(pdev), nfp_dir); } void nfp_net_debugfs_dir_clean(struct dentry **dir) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index 986464d4a206..921db40047d7 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -205,10 +205,8 @@ nfp_net_pf_alloc_vnics(struct nfp_pf *pf, void __iomem *ctrl_bar, ctrl_bar += NFP_PF_CSR_SLICE_SIZE; /* Kill the vNIC if app init marked it as invalid */ - if (nn->port && nn->port->type == NFP_PORT_INVALID) { + if (nn->port && nn->port->type == NFP_PORT_INVALID) nfp_net_pf_free_vnic(pf, nn); - continue; - } } if (list_empty(&pf->vnics)) @@ -711,6 +709,10 @@ int nfp_net_pci_probe(struct nfp_pf *pf) if (err) goto err_devlink_unreg; + err = nfp_devlink_params_register(pf); + if (err) + goto err_shared_buf_unreg; + mutex_lock(&pf->lock); pf->ddir = nfp_net_debugfs_device_add(pf->pdev); @@ -744,6 +746,8 @@ err_free_vnics: err_clean_ddir: nfp_net_debugfs_dir_clean(&pf->ddir); mutex_unlock(&pf->lock); + nfp_devlink_params_unregister(pf); +err_shared_buf_unreg: nfp_shared_buf_unregister(pf); err_devlink_unreg: cancel_work_sync(&pf->port_refresh_work); @@ -773,6 +777,7 @@ void nfp_net_pci_remove(struct nfp_pf *pf) mutex_unlock(&pf->lock); + nfp_devlink_params_unregister(pf); nfp_shared_buf_unregister(pf); devlink_unregister(priv_to_devlink(pf)); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c index 3cfecf105bde..85734c6badf5 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c @@ -24,8 +24,9 @@ /* NFP6000 PL */ #define NFP_PL_DEVICE_ID 0x00000004 #define NFP_PL_DEVICE_ID_MASK GENMASK(7, 0) - -#define NFP6000_ARM_GCSR_SOFTMODEL0 0x00400144 +#define NFP_PL_DEVICE_PART_MASK GENMASK(31, 16) +#define NFP_PL_DEVICE_MODEL_MASK (NFP_PL_DEVICE_PART_MASK | \ + NFP_PL_DEVICE_ID_MASK) /** * nfp_cpp_readl() - Read a u32 word from a CPP location @@ -120,22 +121,17 @@ int nfp_cpp_writeq(struct nfp_cpp *cpp, u32 cpp_id, */ int nfp_cpp_model_autodetect(struct nfp_cpp *cpp, u32 *model) { - const u32 arm_id = NFP_CPP_ID(NFP_CPP_TARGET_ARM, 0, 0); u32 reg; int err; - err = nfp_cpp_readl(cpp, arm_id, NFP6000_ARM_GCSR_SOFTMODEL0, model); - if (err < 0) - return err; - - /* The PL's PluDeviceID revision code is authoratative */ - *model &= ~0xff; err = nfp_xpb_readl(cpp, NFP_XPB_DEVICE(1, 1, 16) + NFP_PL_DEVICE_ID, ®); if (err < 0) return err; - *model |= (NFP_PL_DEVICE_ID_MASK & reg) - 0x10; + *model = reg & NFP_PL_DEVICE_MODEL_MASK; + if (*model & NFP_PL_DEVICE_ID_MASK) + *model -= 0x10; return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c 
b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c index 9a08623c325d..f18e787fa9ad 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c @@ -96,6 +96,8 @@ enum nfp_nsp_cmd { SPCODE_NSP_IDENTIFY = 13, /* Read NSP version */ SPCODE_FW_STORED = 16, /* If no FW loaded, load flash app FW */ SPCODE_HWINFO_LOOKUP = 17, /* Lookup HWinfo with overwrites etc. */ + SPCODE_HWINFO_SET = 18, /* Set HWinfo entry */ + SPCODE_FW_LOADED = 19, /* Is application firmware loaded */ SPCODE_VERSIONS = 21, /* Report FW versions */ SPCODE_READ_SFF_EEPROM = 22, /* Read module EEPROM */ }; @@ -143,6 +145,8 @@ struct nfp_nsp { * @option: NFP SP Command Argument * @buf: NFP SP Buffer Address * @error_cb: Callback for interpreting option if error occurred + * @error_quiet:Don't print command error/warning. Protocol errors are still + * logged. */ struct nfp_nsp_command_arg { u16 code; @@ -151,6 +155,7 @@ struct nfp_nsp_command_arg { u32 option; u64 buf; void (*error_cb)(struct nfp_nsp *state, u32 ret_val); + bool error_quiet; }; /** @@ -405,8 +410,10 @@ __nfp_nsp_command(struct nfp_nsp *state, const struct nfp_nsp_command_arg *arg) err = FIELD_GET(NSP_STATUS_RESULT, reg); if (err) { - nfp_warn(cpp, "Result (error) code set: %d (%d) command: %d\n", - -err, (int)ret_val, arg->code); + if (!arg->error_quiet) + nfp_warn(cpp, "Result (error) code set: %d (%d) command: %d\n", + -err, (int)ret_val, arg->code); + if (arg->error_cb) arg->error_cb(state, ret_val); else @@ -891,12 +898,14 @@ int nfp_nsp_load_stored_fw(struct nfp_nsp *state) } static int -__nfp_nsp_hwinfo_lookup(struct nfp_nsp *state, void *buf, unsigned int size) +__nfp_nsp_hwinfo_lookup(struct nfp_nsp *state, void *buf, unsigned int size, + bool optional) { struct nfp_nsp_command_buf_arg hwinfo_lookup = { { .code = SPCODE_HWINFO_LOOKUP, .option = size, + .error_quiet = optional, }, .in_buf = buf, .in_size = size, @@ -913,7 +922,7 @@ int nfp_nsp_hwinfo_lookup(struct nfp_nsp *state, void *buf, unsigned int size) size = min_t(u32, size, NFP_HWINFO_LOOKUP_SIZE); - err = __nfp_nsp_hwinfo_lookup(state, buf, size); + err = __nfp_nsp_hwinfo_lookup(state, buf, size, false); if (err) return err; @@ -925,6 +934,66 @@ int nfp_nsp_hwinfo_lookup(struct nfp_nsp *state, void *buf, unsigned int size) return 0; } +int nfp_nsp_hwinfo_lookup_optional(struct nfp_nsp *state, void *buf, + unsigned int size, const char *default_val) +{ + int err; + + /* Ensure that the default value is usable irrespective of whether + * it is actually going to be used. 
+ */ + if (strnlen(default_val, size) == size) + return -EINVAL; + + if (!nfp_nsp_has_hwinfo_lookup(state)) { + strcpy(buf, default_val); + return 0; + } + + size = min_t(u32, size, NFP_HWINFO_LOOKUP_SIZE); + + err = __nfp_nsp_hwinfo_lookup(state, buf, size, true); + if (err) { + if (err == -ENOENT) { + strcpy(buf, default_val); + return 0; + } + + nfp_err(state->cpp, "NSP HWinfo lookup failed: %d\n", err); + return err; + } + + if (strnlen(buf, size) == size) { + nfp_err(state->cpp, "NSP HWinfo value not NULL-terminated\n"); + return -EINVAL; + } + + return 0; +} + +int nfp_nsp_hwinfo_set(struct nfp_nsp *state, void *buf, unsigned int size) +{ + struct nfp_nsp_command_buf_arg hwinfo_set = { + { + .code = SPCODE_HWINFO_SET, + .option = size, + }, + .in_buf = buf, + .in_size = size, + }; + + return nfp_nsp_command_buf(state, &hwinfo_set); +} + +int nfp_nsp_fw_loaded(struct nfp_nsp *state) +{ + const struct nfp_nsp_command_arg arg = { + .code = SPCODE_FW_LOADED, + }; + + return __nfp_nsp_command(state, &arg); +} + int nfp_nsp_versions(struct nfp_nsp *state, void *buf, unsigned int size) { struct nfp_nsp_command_buf_arg versions = { diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h index 22ee6985ee1c..1531c1870020 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h @@ -22,6 +22,10 @@ int nfp_nsp_write_flash(struct nfp_nsp *state, const struct firmware *fw); int nfp_nsp_mac_reinit(struct nfp_nsp *state); int nfp_nsp_load_stored_fw(struct nfp_nsp *state); int nfp_nsp_hwinfo_lookup(struct nfp_nsp *state, void *buf, unsigned int size); +int nfp_nsp_hwinfo_lookup_optional(struct nfp_nsp *state, void *buf, + unsigned int size, const char *default_val); +int nfp_nsp_hwinfo_set(struct nfp_nsp *state, void *buf, unsigned int size); +int nfp_nsp_fw_loaded(struct nfp_nsp *state); int nfp_nsp_read_module_eeprom(struct nfp_nsp *state, int eth_index, unsigned int offset, void *data, unsigned int len, unsigned int *read_len); @@ -41,6 +45,16 @@ static inline bool nfp_nsp_has_hwinfo_lookup(struct nfp_nsp *state) return nfp_nsp_get_abi_ver_minor(state) > 24; } +static inline bool nfp_nsp_has_hwinfo_set(struct nfp_nsp *state) +{ + return nfp_nsp_get_abi_ver_minor(state) > 25; +} + +static inline bool nfp_nsp_has_fw_loaded(struct nfp_nsp *state) +{ + return nfp_nsp_get_abi_ver_minor(state) > 25; +} + static inline bool nfp_nsp_has_versions(struct nfp_nsp *state) { return nfp_nsp_get_abi_ver_minor(state) > 27; @@ -88,6 +102,21 @@ enum nfp_eth_fec { #define NFP_FEC_REED_SOLOMON BIT(NFP_FEC_REED_SOLOMON_BIT) #define NFP_FEC_DISABLED BIT(NFP_FEC_DISABLED_BIT) +/* Defines the valid values of the 'abi_drv_reset' hwinfo key */ +#define NFP_NSP_DRV_RESET_DISK 0 +#define NFP_NSP_DRV_RESET_ALWAYS 1 +#define NFP_NSP_DRV_RESET_NEVER 2 +#define NFP_NSP_DRV_RESET_DEFAULT "0" + +/* Defines the valid values of the 'app_fw_from_flash' hwinfo key */ +#define NFP_NSP_APP_FW_LOAD_DISK 0 +#define NFP_NSP_APP_FW_LOAD_FLASH 1 +#define NFP_NSP_APP_FW_LOAD_PREF 2 +#define NFP_NSP_APP_FW_LOAD_DEFAULT "2" + +/* Define the default value for the 'abi_drv_load_ifc' key */ +#define NFP_NSP_DRV_LOAD_IFC_DEFAULT "0x10ff" + /** * struct nfp_eth_table - ETH table information * @count: number of table entries diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index a6b4bfae4684..05d2b478c99b 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ 
b/drivers/net/ethernet/nvidia/forcedeth.c @@ -6181,8 +6181,7 @@ static void nv_remove(struct pci_dev *pci_dev) #ifdef CONFIG_PM_SLEEP static int nv_suspend(struct device *device) { - struct pci_dev *pdev = to_pci_dev(device); - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(device); struct fe_priv *np = netdev_priv(dev); u8 __iomem *base = get_hwbase(dev); int i; diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c index 6f8d6584f809..5113ee647090 100644 --- a/drivers/net/ethernet/packetengines/yellowfin.c +++ b/drivers/net/ethernet/packetengines/yellowfin.c @@ -1258,8 +1258,7 @@ static int yellowfin_close(struct net_device *dev) yp->rx_skbuff[i] = NULL; } for (i = 0; i < TX_RING_SIZE; i++) { - if (yp->tx_skbuff[i]) - dev_kfree_skb(yp->tx_skbuff[i]); + dev_kfree_skb(yp->tx_skbuff[i]); yp->tx_skbuff[i] = NULL; } diff --git a/drivers/net/ethernet/pensando/Kconfig b/drivers/net/ethernet/pensando/Kconfig new file mode 100644 index 000000000000..5ea570be8379 --- /dev/null +++ b/drivers/net/ethernet/pensando/Kconfig @@ -0,0 +1,32 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2019 Pensando Systems, Inc +# +# Pensando device configuration +# + +config NET_VENDOR_PENSANDO + bool "Pensando devices" + default y + help + If you have a network (Ethernet) card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Pensando cards. If you say Y, you will be asked + for your specific card in the following questions. + +if NET_VENDOR_PENSANDO + +config IONIC + tristate "Pensando Ethernet IONIC Support" + depends on 64BIT && PCI + help + This enables the support for the Pensando family of Ethernet + adapters. More specific information on this driver can be + found in + <file:Documentation/networking/device_drivers/pensando/ionic.rst>. + + To compile this driver as a module, choose M here. The module + will be called ionic. + +endif # NET_VENDOR_PENSANDO diff --git a/drivers/net/ethernet/pensando/Makefile b/drivers/net/ethernet/pensando/Makefile new file mode 100644 index 000000000000..21ce7499c122 --- /dev/null +++ b/drivers/net/ethernet/pensando/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Pensando network device drivers. 
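+# The ionic module itself is built in the ionic/ subdirectory referenced below.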
+# + +obj-$(CONFIG_IONIC) += ionic/ diff --git a/drivers/net/ethernet/pensando/ionic/Makefile b/drivers/net/ethernet/pensando/ionic/Makefile new file mode 100644 index 000000000000..29f304d75261 --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright(c) 2017 - 2019 Pensando Systems, Inc + +obj-$(CONFIG_IONIC) := ionic.o + +ionic-y := ionic_main.o ionic_bus_pci.o ionic_devlink.o ionic_dev.o \ + ionic_debugfs.o ionic_lif.o ionic_rx_filter.o ionic_ethtool.o \ + ionic_txrx.o ionic_stats.o diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h new file mode 100644 index 000000000000..7a7060677f15 --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#ifndef _IONIC_H_ +#define _IONIC_H_ + +struct ionic_lif; + +#include "ionic_if.h" +#include "ionic_dev.h" +#include "ionic_devlink.h" + +#define IONIC_DRV_NAME "ionic" +#define IONIC_DRV_DESCRIPTION "Pensando Ethernet NIC Driver" +#define IONIC_DRV_VERSION "0.15.0-k" + +#define PCI_VENDOR_ID_PENSANDO 0x1dd8 + +#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_PF 0x1002 +#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF 0x1003 + +#define IONIC_SUBDEV_ID_NAPLES_25 0x4000 +#define IONIC_SUBDEV_ID_NAPLES_100_4 0x4001 +#define IONIC_SUBDEV_ID_NAPLES_100_8 0x4002 + +#define DEVCMD_TIMEOUT 10 + +struct ionic { + struct pci_dev *pdev; + struct device *dev; + struct devlink_port dl_port; + struct ionic_dev idev; + struct mutex dev_cmd_lock; /* lock for dev_cmd operations */ + struct dentry *dentry; + struct ionic_dev_bar bars[IONIC_BARS_MAX]; + unsigned int num_bars; + struct ionic_identity ident; + struct list_head lifs; + struct ionic_lif *master_lif; + unsigned int nnqs_per_lif; + unsigned int neqs_per_lif; + unsigned int ntxqs_per_lif; + unsigned int nrxqs_per_lif; + DECLARE_BITMAP(lifbits, IONIC_LIFS_MAX); + unsigned int nintrs; + DECLARE_BITMAP(intrs, IONIC_INTR_CTRL_REGS_MAX); + struct work_struct nb_work; + struct notifier_block nb; +}; + +struct ionic_admin_ctx { + struct completion work; + union ionic_adminq_cmd cmd; + union ionic_adminq_comp comp; +}; + +int ionic_napi(struct napi_struct *napi, int budget, ionic_cq_cb cb, + ionic_cq_done_cb done_cb, void *done_arg); + +int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx); +int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_wait); +int ionic_set_dma_mask(struct ionic *ionic); +int ionic_setup(struct ionic *ionic); + +int ionic_identify(struct ionic *ionic); +int ionic_init(struct ionic *ionic); +int ionic_reset(struct ionic *ionic); + +int ionic_port_identify(struct ionic *ionic); +int ionic_port_init(struct ionic *ionic); +int ionic_port_reset(struct ionic *ionic); + +#endif /* _IONIC_H_ */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus.h b/drivers/net/ethernet/pensando/ionic/ionic_bus.h new file mode 100644 index 000000000000..2f4d08c64910 --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_bus.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#ifndef _IONIC_BUS_H_ +#define _IONIC_BUS_H_ + +int ionic_bus_get_irq(struct ionic *ionic, unsigned int num); +const char *ionic_bus_info(struct ionic *ionic); +int ionic_bus_alloc_irq_vectors(struct ionic *ionic, unsigned int nintrs); +void ionic_bus_free_irq_vectors(struct ionic *ionic); +int 
ionic_bus_register_driver(void); +void ionic_bus_unregister_driver(void); +void __iomem *ionic_bus_map_dbpage(struct ionic *ionic, int page_num); +void ionic_bus_unmap_dbpage(struct ionic *ionic, void __iomem *page); + +#endif /* _IONIC_BUS_H_ */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c new file mode 100644 index 000000000000..9a9ab8cb2cb3 --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c @@ -0,0 +1,292 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/pci.h> + +#include "ionic.h" +#include "ionic_bus.h" +#include "ionic_lif.h" +#include "ionic_debugfs.h" + +/* Supported devices */ +static const struct pci_device_id ionic_id_table[] = { + { PCI_VDEVICE(PENSANDO, PCI_DEVICE_ID_PENSANDO_IONIC_ETH_PF) }, + { PCI_VDEVICE(PENSANDO, PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF) }, + { 0, } /* end of table */ +}; +MODULE_DEVICE_TABLE(pci, ionic_id_table); + +int ionic_bus_get_irq(struct ionic *ionic, unsigned int num) +{ + return pci_irq_vector(ionic->pdev, num); +} + +const char *ionic_bus_info(struct ionic *ionic) +{ + return pci_name(ionic->pdev); +} + +int ionic_bus_alloc_irq_vectors(struct ionic *ionic, unsigned int nintrs) +{ + return pci_alloc_irq_vectors(ionic->pdev, nintrs, nintrs, + PCI_IRQ_MSIX); +} + +void ionic_bus_free_irq_vectors(struct ionic *ionic) +{ + pci_free_irq_vectors(ionic->pdev); +} + +static int ionic_map_bars(struct ionic *ionic) +{ + struct pci_dev *pdev = ionic->pdev; + struct device *dev = ionic->dev; + struct ionic_dev_bar *bars; + unsigned int i, j; + + bars = ionic->bars; + ionic->num_bars = 0; + + for (i = 0, j = 0; i < IONIC_BARS_MAX; i++) { + if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM)) + continue; + bars[j].len = pci_resource_len(pdev, i); + + /* only map the whole bar 0 */ + if (j > 0) { + bars[j].vaddr = NULL; + } else { + bars[j].vaddr = pci_iomap(pdev, i, bars[j].len); + if (!bars[j].vaddr) { + dev_err(dev, + "Cannot memory-map BAR %d, aborting\n", + i); + return -ENODEV; + } + } + + bars[j].bus_addr = pci_resource_start(pdev, i); + bars[j].res_index = i; + ionic->num_bars++; + j++; + } + + return 0; +} + +static void ionic_unmap_bars(struct ionic *ionic) +{ + struct ionic_dev_bar *bars = ionic->bars; + unsigned int i; + + for (i = 0; i < IONIC_BARS_MAX; i++) { + if (bars[i].vaddr) { + iounmap(bars[i].vaddr); + bars[i].bus_addr = 0; + bars[i].vaddr = NULL; + bars[i].len = 0; + } + } +} + +void __iomem *ionic_bus_map_dbpage(struct ionic *ionic, int page_num) +{ + return pci_iomap_range(ionic->pdev, + ionic->bars[IONIC_PCI_BAR_DBELL].res_index, + (u64)page_num << PAGE_SHIFT, PAGE_SIZE); +} + +void ionic_bus_unmap_dbpage(struct ionic *ionic, void __iomem *page) +{ + iounmap(page); +} + +static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct device *dev = &pdev->dev; + struct ionic *ionic; + int err; + + ionic = ionic_devlink_alloc(dev); + if (!ionic) + return -ENOMEM; + + ionic->pdev = pdev; + ionic->dev = dev; + pci_set_drvdata(pdev, ionic); + mutex_init(&ionic->dev_cmd_lock); + + /* Query system for DMA addressing limitation for the device. */ + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(IONIC_ADDR_LEN)); + if (err) { + dev_err(dev, "Unable to obtain 64-bit DMA for consistent allocations, aborting. 
err=%d\n", + err); + goto err_out_clear_drvdata; + } + + ionic_debugfs_add_dev(ionic); + + /* Setup PCI device */ + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(dev, "Cannot enable PCI device: %d, aborting\n", err); + goto err_out_debugfs_del_dev; + } + + err = pci_request_regions(pdev, IONIC_DRV_NAME); + if (err) { + dev_err(dev, "Cannot request PCI regions: %d, aborting\n", err); + goto err_out_pci_disable_device; + } + + pci_set_master(pdev); + + err = ionic_map_bars(ionic); + if (err) + goto err_out_pci_clear_master; + + /* Configure the device */ + err = ionic_setup(ionic); + if (err) { + dev_err(dev, "Cannot setup device: %d, aborting\n", err); + goto err_out_unmap_bars; + } + + err = ionic_identify(ionic); + if (err) { + dev_err(dev, "Cannot identify device: %d, aborting\n", err); + goto err_out_teardown; + } + + err = ionic_init(ionic); + if (err) { + dev_err(dev, "Cannot init device: %d, aborting\n", err); + goto err_out_teardown; + } + + /* Configure the ports */ + err = ionic_port_identify(ionic); + if (err) { + dev_err(dev, "Cannot identify port: %d, aborting\n", err); + goto err_out_reset; + } + + err = ionic_port_init(ionic); + if (err) { + dev_err(dev, "Cannot init port: %d, aborting\n", err); + goto err_out_reset; + } + + /* Configure LIFs */ + err = ionic_lif_identify(ionic, IONIC_LIF_TYPE_CLASSIC, + &ionic->ident.lif); + if (err) { + dev_err(dev, "Cannot identify LIFs: %d, aborting\n", err); + goto err_out_port_reset; + } + + err = ionic_lifs_size(ionic); + if (err) { + dev_err(dev, "Cannot size LIFs: %d, aborting\n", err); + goto err_out_port_reset; + } + + err = ionic_lifs_alloc(ionic); + if (err) { + dev_err(dev, "Cannot allocate LIFs: %d, aborting\n", err); + goto err_out_free_irqs; + } + + err = ionic_lifs_init(ionic); + if (err) { + dev_err(dev, "Cannot init LIFs: %d, aborting\n", err); + goto err_out_free_lifs; + } + + err = ionic_lifs_register(ionic); + if (err) { + dev_err(dev, "Cannot register LIFs: %d, aborting\n", err); + goto err_out_deinit_lifs; + } + + err = ionic_devlink_register(ionic); + if (err) { + dev_err(dev, "Cannot register devlink: %d\n", err); + goto err_out_deregister_lifs; + } + + return 0; + +err_out_deregister_lifs: + ionic_lifs_unregister(ionic); +err_out_deinit_lifs: + ionic_lifs_deinit(ionic); +err_out_free_lifs: + ionic_lifs_free(ionic); +err_out_free_irqs: + ionic_bus_free_irq_vectors(ionic); +err_out_port_reset: + ionic_port_reset(ionic); +err_out_reset: + ionic_reset(ionic); +err_out_teardown: + ionic_dev_teardown(ionic); +err_out_unmap_bars: + ionic_unmap_bars(ionic); + pci_release_regions(pdev); +err_out_pci_clear_master: + pci_clear_master(pdev); +err_out_pci_disable_device: + pci_disable_device(pdev); +err_out_debugfs_del_dev: + ionic_debugfs_del_dev(ionic); +err_out_clear_drvdata: + mutex_destroy(&ionic->dev_cmd_lock); + ionic_devlink_free(ionic); + + return err; +} + +static void ionic_remove(struct pci_dev *pdev) +{ + struct ionic *ionic = pci_get_drvdata(pdev); + + if (!ionic) + return; + + ionic_devlink_unregister(ionic); + ionic_lifs_unregister(ionic); + ionic_lifs_deinit(ionic); + ionic_lifs_free(ionic); + ionic_bus_free_irq_vectors(ionic); + ionic_port_reset(ionic); + ionic_reset(ionic); + ionic_dev_teardown(ionic); + ionic_unmap_bars(ionic); + pci_release_regions(pdev); + pci_clear_master(pdev); + pci_disable_device(pdev); + ionic_debugfs_del_dev(ionic); + mutex_destroy(&ionic->dev_cmd_lock); + ionic_devlink_free(ionic); +} + +static struct pci_driver ionic_driver = { + .name = IONIC_DRV_NAME, + .id_table = 
ionic_id_table, + .probe = ionic_probe, + .remove = ionic_remove, +}; + +int ionic_bus_register_driver(void) +{ + return pci_register_driver(&ionic_driver); +} + +void ionic_bus_unregister_driver(void) +{ + pci_unregister_driver(&ionic_driver); +} diff --git a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c new file mode 100644 index 000000000000..7afc4a365b75 --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c @@ -0,0 +1,248 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#include <linux/pci.h> +#include <linux/netdevice.h> + +#include "ionic.h" +#include "ionic_bus.h" +#include "ionic_lif.h" +#include "ionic_debugfs.h" + +#ifdef CONFIG_DEBUG_FS + +static struct dentry *ionic_dir; + +void ionic_debugfs_create(void) +{ + ionic_dir = debugfs_create_dir(IONIC_DRV_NAME, NULL); +} + +void ionic_debugfs_destroy(void) +{ + debugfs_remove_recursive(ionic_dir); +} + +void ionic_debugfs_add_dev(struct ionic *ionic) +{ + ionic->dentry = debugfs_create_dir(ionic_bus_info(ionic), ionic_dir); +} + +void ionic_debugfs_del_dev(struct ionic *ionic) +{ + debugfs_remove_recursive(ionic->dentry); + ionic->dentry = NULL; +} + +static int identity_show(struct seq_file *seq, void *v) +{ + struct ionic *ionic = seq->private; + struct ionic_identity *ident; + + ident = &ionic->ident; + + seq_printf(seq, "nlifs: %d\n", ident->dev.nlifs); + seq_printf(seq, "nintrs: %d\n", ident->dev.nintrs); + seq_printf(seq, "ndbpgs_per_lif: %d\n", ident->dev.ndbpgs_per_lif); + seq_printf(seq, "intr_coal_mult: %d\n", ident->dev.intr_coal_mult); + seq_printf(seq, "intr_coal_div: %d\n", ident->dev.intr_coal_div); + + seq_printf(seq, "max_ucast_filters: %d\n", ident->lif.eth.max_ucast_filters); + seq_printf(seq, "max_mcast_filters: %d\n", ident->lif.eth.max_mcast_filters); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(identity); + +void ionic_debugfs_add_ident(struct ionic *ionic) +{ + debugfs_create_file("identity", 0400, ionic->dentry, + ionic, &identity_fops) ? 
0 : -EOPNOTSUPP; +} + +void ionic_debugfs_add_sizes(struct ionic *ionic) +{ + debugfs_create_u32("nlifs", 0400, ionic->dentry, + (u32 *)&ionic->ident.dev.nlifs); + debugfs_create_u32("nintrs", 0400, ionic->dentry, &ionic->nintrs); + + debugfs_create_u32("ntxqs_per_lif", 0400, ionic->dentry, + (u32 *)&ionic->ident.lif.eth.config.queue_count[IONIC_QTYPE_TXQ]); + debugfs_create_u32("nrxqs_per_lif", 0400, ionic->dentry, + (u32 *)&ionic->ident.lif.eth.config.queue_count[IONIC_QTYPE_RXQ]); +} + +static int q_tail_show(struct seq_file *seq, void *v) +{ + struct ionic_queue *q = seq->private; + + seq_printf(seq, "%d\n", q->tail->index); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(q_tail); + +static int q_head_show(struct seq_file *seq, void *v) +{ + struct ionic_queue *q = seq->private; + + seq_printf(seq, "%d\n", q->head->index); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(q_head); + +static int cq_tail_show(struct seq_file *seq, void *v) +{ + struct ionic_cq *cq = seq->private; + + seq_printf(seq, "%d\n", cq->tail->index); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(cq_tail); + +static const struct debugfs_reg32 intr_ctrl_regs[] = { + { .name = "coal_init", .offset = 0, }, + { .name = "mask", .offset = 4, }, + { .name = "credits", .offset = 8, }, + { .name = "mask_on_assert", .offset = 12, }, + { .name = "coal_timer", .offset = 16, }, +}; + +void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq) +{ + struct dentry *q_dentry, *cq_dentry, *intr_dentry, *stats_dentry; + struct ionic_dev *idev = &lif->ionic->idev; + struct debugfs_regset32 *intr_ctrl_regset; + struct ionic_intr_info *intr = &qcq->intr; + struct debugfs_blob_wrapper *desc_blob; + struct device *dev = lif->ionic->dev; + struct ionic_queue *q = &qcq->q; + struct ionic_cq *cq = &qcq->cq; + + qcq->dentry = debugfs_create_dir(q->name, lif->dentry); + + debugfs_create_x32("total_size", 0400, qcq->dentry, &qcq->total_size); + debugfs_create_x64("base_pa", 0400, qcq->dentry, &qcq->base_pa); + + q_dentry = debugfs_create_dir("q", qcq->dentry); + + debugfs_create_u32("index", 0400, q_dentry, &q->index); + debugfs_create_x64("base_pa", 0400, q_dentry, &q->base_pa); + if (qcq->flags & IONIC_QCQ_F_SG) { + debugfs_create_x64("sg_base_pa", 0400, q_dentry, + &q->sg_base_pa); + debugfs_create_u32("sg_desc_size", 0400, q_dentry, + &q->sg_desc_size); + } + debugfs_create_u32("num_descs", 0400, q_dentry, &q->num_descs); + debugfs_create_u32("desc_size", 0400, q_dentry, &q->desc_size); + debugfs_create_u32("pid", 0400, q_dentry, &q->pid); + debugfs_create_u32("qid", 0400, q_dentry, &q->hw_index); + debugfs_create_u32("qtype", 0400, q_dentry, &q->hw_type); + debugfs_create_u64("drop", 0400, q_dentry, &q->drop); + debugfs_create_u64("stop", 0400, q_dentry, &q->stop); + debugfs_create_u64("wake", 0400, q_dentry, &q->wake); + + debugfs_create_file("tail", 0400, q_dentry, q, &q_tail_fops); + debugfs_create_file("head", 0400, q_dentry, q, &q_head_fops); + + desc_blob = devm_kzalloc(dev, sizeof(*desc_blob), GFP_KERNEL); + if (!desc_blob) + return; + desc_blob->data = q->base; + desc_blob->size = (unsigned long)q->num_descs * q->desc_size; + debugfs_create_blob("desc_blob", 0400, q_dentry, desc_blob); + + if (qcq->flags & IONIC_QCQ_F_SG) { + desc_blob = devm_kzalloc(dev, sizeof(*desc_blob), GFP_KERNEL); + if (!desc_blob) + return; + desc_blob->data = q->sg_base; + desc_blob->size = (unsigned long)q->num_descs * q->sg_desc_size; + debugfs_create_blob("sg_desc_blob", 0400, q_dentry, + desc_blob); + } + + cq_dentry = debugfs_create_dir("cq", qcq->dentry); + + 
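/* the cq/ directory exposes the completion ring alongside the q/ entries above */ +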
debugfs_create_x64("base_pa", 0400, cq_dentry, &cq->base_pa); + debugfs_create_u32("num_descs", 0400, cq_dentry, &cq->num_descs); + debugfs_create_u32("desc_size", 0400, cq_dentry, &cq->desc_size); + debugfs_create_u8("done_color", 0400, cq_dentry, + (u8 *)&cq->done_color); + + debugfs_create_file("tail", 0400, cq_dentry, cq, &cq_tail_fops); + + desc_blob = devm_kzalloc(dev, sizeof(*desc_blob), GFP_KERNEL); + if (!desc_blob) + return; + desc_blob->data = cq->base; + desc_blob->size = (unsigned long)cq->num_descs * cq->desc_size; + debugfs_create_blob("desc_blob", 0400, cq_dentry, desc_blob); + + if (qcq->flags & IONIC_QCQ_F_INTR) { + intr_dentry = debugfs_create_dir("intr", qcq->dentry); + + debugfs_create_u32("index", 0400, intr_dentry, + &intr->index); + debugfs_create_u32("vector", 0400, intr_dentry, + &intr->vector); + + intr_ctrl_regset = devm_kzalloc(dev, sizeof(*intr_ctrl_regset), + GFP_KERNEL); + if (!intr_ctrl_regset) + return; + intr_ctrl_regset->regs = intr_ctrl_regs; + intr_ctrl_regset->nregs = ARRAY_SIZE(intr_ctrl_regs); + intr_ctrl_regset->base = &idev->intr_ctrl[intr->index]; + + debugfs_create_regset32("intr_ctrl", 0400, intr_dentry, + intr_ctrl_regset); + } + + if (qcq->flags & IONIC_QCQ_F_NOTIFYQ) { + stats_dentry = debugfs_create_dir("notifyblock", qcq->dentry); + + debugfs_create_u64("eid", 0400, stats_dentry, + (u64 *)&lif->info->status.eid); + debugfs_create_u16("link_status", 0400, stats_dentry, + (u16 *)&lif->info->status.link_status); + debugfs_create_u32("link_speed", 0400, stats_dentry, + (u32 *)&lif->info->status.link_speed); + debugfs_create_u16("link_down_count", 0400, stats_dentry, + (u16 *)&lif->info->status.link_down_count); + } +} + +static int netdev_show(struct seq_file *seq, void *v) +{ + struct net_device *netdev = seq->private; + + seq_printf(seq, "%s\n", netdev->name); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(netdev); + +void ionic_debugfs_add_lif(struct ionic_lif *lif) +{ + lif->dentry = debugfs_create_dir(lif->name, lif->ionic->dentry); + debugfs_create_file("netdev", 0400, lif->dentry, + lif->netdev, &netdev_fops); +} + +void ionic_debugfs_del_lif(struct ionic_lif *lif) +{ + debugfs_remove_recursive(lif->dentry); + lif->dentry = NULL; +} + +void ionic_debugfs_del_qcq(struct ionic_qcq *qcq) +{ + debugfs_remove_recursive(qcq->dentry); + qcq->dentry = NULL; +} + +#endif diff --git a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.h b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.h new file mode 100644 index 000000000000..c44ebde170b6 --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#ifndef _IONIC_DEBUGFS_H_ +#define _IONIC_DEBUGFS_H_ + +#include <linux/debugfs.h> + +#ifdef CONFIG_DEBUG_FS + +void ionic_debugfs_create(void); +void ionic_debugfs_destroy(void); +void ionic_debugfs_add_dev(struct ionic *ionic); +void ionic_debugfs_del_dev(struct ionic *ionic); +void ionic_debugfs_add_ident(struct ionic *ionic); +void ionic_debugfs_add_sizes(struct ionic *ionic); +void ionic_debugfs_add_lif(struct ionic_lif *lif); +void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq); +void ionic_debugfs_del_lif(struct ionic_lif *lif); +void ionic_debugfs_del_qcq(struct ionic_qcq *qcq); +#else +static inline void ionic_debugfs_create(void) { } +static inline void ionic_debugfs_destroy(void) { } +static inline void ionic_debugfs_add_dev(struct ionic *ionic) { } +static inline void ionic_debugfs_del_dev(struct 
ionic *ionic) { } +static inline void ionic_debugfs_add_ident(struct ionic *ionic) { } +static inline void ionic_debugfs_add_sizes(struct ionic *ionic) { } +static inline void ionic_debugfs_add_lif(struct ionic_lif *lif) { } +static inline void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq) { } +static inline void ionic_debugfs_del_lif(struct ionic_lif *lif) { } +static inline void ionic_debugfs_del_qcq(struct ionic_qcq *qcq) { } +#endif + +#endif /* _IONIC_DEBUGFS_H_ */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c new file mode 100644 index 000000000000..d168a6435322 --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c @@ -0,0 +1,500 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/io.h> +#include <linux/slab.h> +#include <linux/etherdevice.h> +#include "ionic.h" +#include "ionic_dev.h" +#include "ionic_lif.h" + +void ionic_init_devinfo(struct ionic *ionic) +{ + struct ionic_dev *idev = &ionic->idev; + + idev->dev_info.asic_type = ioread8(&idev->dev_info_regs->asic_type); + idev->dev_info.asic_rev = ioread8(&idev->dev_info_regs->asic_rev); + + memcpy_fromio(idev->dev_info.fw_version, + idev->dev_info_regs->fw_version, + IONIC_DEVINFO_FWVERS_BUFLEN); + + memcpy_fromio(idev->dev_info.serial_num, + idev->dev_info_regs->serial_num, + IONIC_DEVINFO_SERIAL_BUFLEN); + + idev->dev_info.fw_version[IONIC_DEVINFO_FWVERS_BUFLEN] = 0; + idev->dev_info.serial_num[IONIC_DEVINFO_SERIAL_BUFLEN] = 0; + + dev_dbg(ionic->dev, "fw_version %s\n", idev->dev_info.fw_version); +} + +int ionic_dev_setup(struct ionic *ionic) +{ + struct ionic_dev_bar *bar = ionic->bars; + unsigned int num_bars = ionic->num_bars; + struct ionic_dev *idev = &ionic->idev; + struct device *dev = ionic->dev; + u32 sig; + + /* BAR0: dev_cmd and interrupts */ + if (num_bars < 1) { + dev_err(dev, "No bars found, aborting\n"); + return -EFAULT; + } + + if (bar->len < IONIC_BAR0_SIZE) { + dev_err(dev, "Resource bar size %lu too small, aborting\n", + bar->len); + return -EFAULT; + } + + idev->dev_info_regs = bar->vaddr + IONIC_BAR0_DEV_INFO_REGS_OFFSET; + idev->dev_cmd_regs = bar->vaddr + IONIC_BAR0_DEV_CMD_REGS_OFFSET; + idev->intr_status = bar->vaddr + IONIC_BAR0_INTR_STATUS_OFFSET; + idev->intr_ctrl = bar->vaddr + IONIC_BAR0_INTR_CTRL_OFFSET; + + sig = ioread32(&idev->dev_info_regs->signature); + if (sig != IONIC_DEV_INFO_SIGNATURE) { + dev_err(dev, "Incompatible firmware signature %x", sig); + return -EFAULT; + } + + ionic_init_devinfo(ionic); + + /* BAR1: doorbells */ + bar++; + if (num_bars < 2) { + dev_err(dev, "Doorbell bar missing, aborting\n"); + return -EFAULT; + } + + idev->db_pages = bar->vaddr; + idev->phy_db_pages = bar->bus_addr; + + return 0; +} + +void ionic_dev_teardown(struct ionic *ionic) +{ + /* place holder */ +} + +/* Devcmd Interface */ +u8 ionic_dev_cmd_status(struct ionic_dev *idev) +{ + return ioread8(&idev->dev_cmd_regs->comp.comp.status); +} + +bool ionic_dev_cmd_done(struct ionic_dev *idev) +{ + return ioread32(&idev->dev_cmd_regs->done) & IONIC_DEV_CMD_DONE; +} + +void ionic_dev_cmd_comp(struct ionic_dev *idev, union ionic_dev_cmd_comp *comp) +{ + memcpy_fromio(comp, &idev->dev_cmd_regs->comp, sizeof(*comp)); +} + +void ionic_dev_cmd_go(struct ionic_dev *idev, union ionic_dev_cmd *cmd) +{ + memcpy_toio(&idev->dev_cmd_regs->cmd, cmd, sizeof(*cmd)); + iowrite32(0, 
&idev->dev_cmd_regs->done); + iowrite32(1, &idev->dev_cmd_regs->doorbell); +} + +/* Device commands */ +void ionic_dev_cmd_identify(struct ionic_dev *idev, u8 ver) +{ + union ionic_dev_cmd cmd = { + .identify.opcode = IONIC_CMD_IDENTIFY, + .identify.ver = ver, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void ionic_dev_cmd_init(struct ionic_dev *idev) +{ + union ionic_dev_cmd cmd = { + .init.opcode = IONIC_CMD_INIT, + .init.type = 0, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void ionic_dev_cmd_reset(struct ionic_dev *idev) +{ + union ionic_dev_cmd cmd = { + .reset.opcode = IONIC_CMD_RESET, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +/* Port commands */ +void ionic_dev_cmd_port_identify(struct ionic_dev *idev) +{ + union ionic_dev_cmd cmd = { + .port_init.opcode = IONIC_CMD_PORT_IDENTIFY, + .port_init.index = 0, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void ionic_dev_cmd_port_init(struct ionic_dev *idev) +{ + union ionic_dev_cmd cmd = { + .port_init.opcode = IONIC_CMD_PORT_INIT, + .port_init.index = 0, + .port_init.info_pa = cpu_to_le64(idev->port_info_pa), + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void ionic_dev_cmd_port_reset(struct ionic_dev *idev) +{ + union ionic_dev_cmd cmd = { + .port_reset.opcode = IONIC_CMD_PORT_RESET, + .port_reset.index = 0, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void ionic_dev_cmd_port_state(struct ionic_dev *idev, u8 state) +{ + union ionic_dev_cmd cmd = { + .port_setattr.opcode = IONIC_CMD_PORT_SETATTR, + .port_setattr.index = 0, + .port_setattr.attr = IONIC_PORT_ATTR_STATE, + .port_setattr.state = state, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void ionic_dev_cmd_port_speed(struct ionic_dev *idev, u32 speed) +{ + union ionic_dev_cmd cmd = { + .port_setattr.opcode = IONIC_CMD_PORT_SETATTR, + .port_setattr.index = 0, + .port_setattr.attr = IONIC_PORT_ATTR_SPEED, + .port_setattr.speed = cpu_to_le32(speed), + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void ionic_dev_cmd_port_autoneg(struct ionic_dev *idev, u8 an_enable) +{ + union ionic_dev_cmd cmd = { + .port_setattr.opcode = IONIC_CMD_PORT_SETATTR, + .port_setattr.index = 0, + .port_setattr.attr = IONIC_PORT_ATTR_AUTONEG, + .port_setattr.an_enable = an_enable, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void ionic_dev_cmd_port_fec(struct ionic_dev *idev, u8 fec_type) +{ + union ionic_dev_cmd cmd = { + .port_setattr.opcode = IONIC_CMD_PORT_SETATTR, + .port_setattr.index = 0, + .port_setattr.attr = IONIC_PORT_ATTR_FEC, + .port_setattr.fec_type = fec_type, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void ionic_dev_cmd_port_pause(struct ionic_dev *idev, u8 pause_type) +{ + union ionic_dev_cmd cmd = { + .port_setattr.opcode = IONIC_CMD_PORT_SETATTR, + .port_setattr.index = 0, + .port_setattr.attr = IONIC_PORT_ATTR_PAUSE, + .port_setattr.pause_type = pause_type, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +/* LIF commands */ +void ionic_dev_cmd_lif_identify(struct ionic_dev *idev, u8 type, u8 ver) +{ + union ionic_dev_cmd cmd = { + .lif_identify.opcode = IONIC_CMD_LIF_IDENTIFY, + .lif_identify.type = type, + .lif_identify.ver = ver, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void ionic_dev_cmd_lif_init(struct ionic_dev *idev, u16 lif_index, + dma_addr_t info_pa) +{ + union ionic_dev_cmd cmd = { + .lif_init.opcode = IONIC_CMD_LIF_INIT, + .lif_init.index = cpu_to_le16(lif_index), + .lif_init.info_pa = cpu_to_le64(info_pa), + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void ionic_dev_cmd_lif_reset(struct ionic_dev *idev, u16 lif_index) +{ + union ionic_dev_cmd cmd = { + .lif_init.opcode = 
IONIC_CMD_LIF_RESET, + .lif_init.index = cpu_to_le16(lif_index), + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void ionic_dev_cmd_adminq_init(struct ionic_dev *idev, struct ionic_qcq *qcq, + u16 lif_index, u16 intr_index) +{ + struct ionic_queue *q = &qcq->q; + struct ionic_cq *cq = &qcq->cq; + + union ionic_dev_cmd cmd = { + .q_init.opcode = IONIC_CMD_Q_INIT, + .q_init.lif_index = cpu_to_le16(lif_index), + .q_init.type = q->type, + .q_init.index = cpu_to_le32(q->index), + .q_init.flags = cpu_to_le16(IONIC_QINIT_F_IRQ | + IONIC_QINIT_F_ENA), + .q_init.pid = cpu_to_le16(q->pid), + .q_init.intr_index = cpu_to_le16(intr_index), + .q_init.ring_size = ilog2(q->num_descs), + .q_init.ring_base = cpu_to_le64(q->base_pa), + .q_init.cq_ring_base = cpu_to_le64(cq->base_pa), + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +int ionic_db_page_num(struct ionic_lif *lif, int pid) +{ + return (lif->hw_index * lif->dbid_count) + pid; +} + +int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq, + struct ionic_intr_info *intr, + unsigned int num_descs, size_t desc_size) +{ + struct ionic_cq_info *cur; + unsigned int ring_size; + unsigned int i; + + if (desc_size == 0 || !is_power_of_2(num_descs)) + return -EINVAL; + + ring_size = ilog2(num_descs); + if (ring_size < 2 || ring_size > 16) + return -EINVAL; + + cq->lif = lif; + cq->bound_intr = intr; + cq->num_descs = num_descs; + cq->desc_size = desc_size; + cq->tail = cq->info; + cq->done_color = 1; + + cur = cq->info; + + for (i = 0; i < num_descs; i++) { + if (i + 1 == num_descs) { + cur->next = cq->info; + cur->last = true; + } else { + cur->next = cur + 1; + } + cur->index = i; + cur++; + } + + return 0; +} + +void ionic_cq_map(struct ionic_cq *cq, void *base, dma_addr_t base_pa) +{ + struct ionic_cq_info *cur; + unsigned int i; + + cq->base = base; + cq->base_pa = base_pa; + + for (i = 0, cur = cq->info; i < cq->num_descs; i++, cur++) + cur->cq_desc = base + (i * cq->desc_size); +} + +void ionic_cq_bind(struct ionic_cq *cq, struct ionic_queue *q) +{ + cq->bound_q = q; +} + +unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do, + ionic_cq_cb cb, ionic_cq_done_cb done_cb, + void *done_arg) +{ + unsigned int work_done = 0; + + if (work_to_do == 0) + return 0; + + while (cb(cq, cq->tail)) { + if (cq->tail->last) + cq->done_color = !cq->done_color; + cq->tail = cq->tail->next; + DEBUG_STATS_CQE_CNT(cq); + + if (++work_done >= work_to_do) + break; + } + + if (work_done && done_cb) + done_cb(done_arg); + + return work_done; +} + +int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev, + struct ionic_queue *q, unsigned int index, const char *name, + unsigned int num_descs, size_t desc_size, + size_t sg_desc_size, unsigned int pid) +{ + struct ionic_desc_info *cur; + unsigned int ring_size; + unsigned int i; + + if (desc_size == 0 || !is_power_of_2(num_descs)) + return -EINVAL; + + ring_size = ilog2(num_descs); + if (ring_size < 2 || ring_size > 16) + return -EINVAL; + + q->lif = lif; + q->idev = idev; + q->index = index; + q->num_descs = num_descs; + q->desc_size = desc_size; + q->sg_desc_size = sg_desc_size; + q->tail = q->info; + q->head = q->tail; + q->pid = pid; + + snprintf(q->name, sizeof(q->name), "L%d-%s%u", lif->index, name, index); + + cur = q->info; + + for (i = 0; i < num_descs; i++) { + if (i + 1 == num_descs) + cur->next = q->info; + else + cur->next = cur + 1; + cur->index = i; + cur->left = num_descs - i; + cur++; + } + + return 0; +} + +void ionic_q_map(struct ionic_queue *q, void *base, dma_addr_t base_pa) +{ + 
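/* point each info slot at its descriptor within the newly mapped ring */ +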
struct ionic_desc_info *cur; + unsigned int i; + + q->base = base; + q->base_pa = base_pa; + + for (i = 0, cur = q->info; i < q->num_descs; i++, cur++) + cur->desc = base + (i * q->desc_size); +} + +void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa) +{ + struct ionic_desc_info *cur; + unsigned int i; + + q->sg_base = base; + q->sg_base_pa = base_pa; + + for (i = 0, cur = q->info; i < q->num_descs; i++, cur++) + cur->sg_desc = base + (i * q->sg_desc_size); +} + +void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb, + void *cb_arg) +{ + struct device *dev = q->lif->ionic->dev; + struct ionic_lif *lif = q->lif; + + q->head->cb = cb; + q->head->cb_arg = cb_arg; + q->head = q->head->next; + + dev_dbg(dev, "lif=%d qname=%s qid=%d qtype=%d p_index=%d ringdb=%d\n", + q->lif->index, q->name, q->hw_type, q->hw_index, + q->head->index, ring_doorbell); + + if (ring_doorbell) + ionic_dbell_ring(lif->kern_dbpage, q->hw_type, + q->dbval | q->head->index); +} + +static bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos) +{ + unsigned int mask, tail, head; + + mask = q->num_descs - 1; + tail = q->tail->index; + head = q->head->index; + + return ((pos - tail) & mask) < ((head - tail) & mask); +} + +void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info, + unsigned int stop_index) +{ + struct ionic_desc_info *desc_info; + ionic_desc_cb cb; + void *cb_arg; + + /* check for empty queue */ + if (q->tail->index == q->head->index) + return; + + /* stop index must be for a descriptor that is not yet completed */ + if (unlikely(!ionic_q_is_posted(q, stop_index))) + dev_err(q->lif->ionic->dev, + "ionic stop is not posted %s stop %u tail %u head %u\n", + q->name, stop_index, q->tail->index, q->head->index); + + do { + desc_info = q->tail; + q->tail = desc_info->next; + + cb = desc_info->cb; + cb_arg = desc_info->cb_arg; + + desc_info->cb = NULL; + desc_info->cb_arg = NULL; + + if (cb) + cb(q, desc_info, cq_info, cb_arg); + } while (desc_info->index != stop_index); +} diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h new file mode 100644 index 000000000000..9610aeb7d5f4 --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h @@ -0,0 +1,299 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#ifndef _IONIC_DEV_H_ +#define _IONIC_DEV_H_ + +#include <linux/mutex.h> +#include <linux/workqueue.h> + +#include "ionic_if.h" +#include "ionic_regs.h" + +#define IONIC_MIN_MTU ETH_MIN_MTU +#define IONIC_MAX_MTU 9194 +#define IONIC_MAX_TXRX_DESC 16384 +#define IONIC_MIN_TXRX_DESC 16 +#define IONIC_DEF_TXRX_DESC 4096 +#define IONIC_LIFS_MAX 1024 +#define IONIC_ITR_COAL_USEC_DEFAULT 64 + +#define IONIC_DEV_CMD_REG_VERSION 1 +#define IONIC_DEV_INFO_REG_COUNT 32 +#define IONIC_DEV_CMD_REG_COUNT 32 + +struct ionic_dev_bar { + void __iomem *vaddr; + phys_addr_t bus_addr; + unsigned long len; + int res_index; +}; + +/* Registers */ +static_assert(sizeof(struct ionic_intr) == 32); + +static_assert(sizeof(struct ionic_doorbell) == 8); +static_assert(sizeof(struct ionic_intr_status) == 8); +static_assert(sizeof(union ionic_dev_regs) == 4096); +static_assert(sizeof(union ionic_dev_info_regs) == 2048); +static_assert(sizeof(union ionic_dev_cmd_regs) == 2048); +static_assert(sizeof(struct ionic_lif_stats) == 1024); + +static_assert(sizeof(struct ionic_admin_cmd) == 64); +static_assert(sizeof(struct ionic_admin_comp) == 16); 
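+/* the no-op command uses the same fixed 64-byte cmd / 16-byte comp framing */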
+static_assert(sizeof(struct ionic_nop_cmd) == 64); +static_assert(sizeof(struct ionic_nop_comp) == 16); + +/* Device commands */ +static_assert(sizeof(struct ionic_dev_identify_cmd) == 64); +static_assert(sizeof(struct ionic_dev_identify_comp) == 16); +static_assert(sizeof(struct ionic_dev_init_cmd) == 64); +static_assert(sizeof(struct ionic_dev_init_comp) == 16); +static_assert(sizeof(struct ionic_dev_reset_cmd) == 64); +static_assert(sizeof(struct ionic_dev_reset_comp) == 16); +static_assert(sizeof(struct ionic_dev_getattr_cmd) == 64); +static_assert(sizeof(struct ionic_dev_getattr_comp) == 16); +static_assert(sizeof(struct ionic_dev_setattr_cmd) == 64); +static_assert(sizeof(struct ionic_dev_setattr_comp) == 16); + +/* Port commands */ +static_assert(sizeof(struct ionic_port_identify_cmd) == 64); +static_assert(sizeof(struct ionic_port_identify_comp) == 16); +static_assert(sizeof(struct ionic_port_init_cmd) == 64); +static_assert(sizeof(struct ionic_port_init_comp) == 16); +static_assert(sizeof(struct ionic_port_reset_cmd) == 64); +static_assert(sizeof(struct ionic_port_reset_comp) == 16); +static_assert(sizeof(struct ionic_port_getattr_cmd) == 64); +static_assert(sizeof(struct ionic_port_getattr_comp) == 16); +static_assert(sizeof(struct ionic_port_setattr_cmd) == 64); +static_assert(sizeof(struct ionic_port_setattr_comp) == 16); + +/* LIF commands */ +static_assert(sizeof(struct ionic_lif_init_cmd) == 64); +static_assert(sizeof(struct ionic_lif_init_comp) == 16); +static_assert(sizeof(struct ionic_lif_reset_cmd) == 64); +static_assert(sizeof(ionic_lif_reset_comp) == 16); +static_assert(sizeof(struct ionic_lif_getattr_cmd) == 64); +static_assert(sizeof(struct ionic_lif_getattr_comp) == 16); +static_assert(sizeof(struct ionic_lif_setattr_cmd) == 64); +static_assert(sizeof(struct ionic_lif_setattr_comp) == 16); + +static_assert(sizeof(struct ionic_q_init_cmd) == 64); +static_assert(sizeof(struct ionic_q_init_comp) == 16); +static_assert(sizeof(struct ionic_q_control_cmd) == 64); +static_assert(sizeof(ionic_q_control_comp) == 16); + +static_assert(sizeof(struct ionic_rx_mode_set_cmd) == 64); +static_assert(sizeof(ionic_rx_mode_set_comp) == 16); +static_assert(sizeof(struct ionic_rx_filter_add_cmd) == 64); +static_assert(sizeof(struct ionic_rx_filter_add_comp) == 16); +static_assert(sizeof(struct ionic_rx_filter_del_cmd) == 64); +static_assert(sizeof(ionic_rx_filter_del_comp) == 16); + +/* RDMA commands */ +static_assert(sizeof(struct ionic_rdma_reset_cmd) == 64); +static_assert(sizeof(struct ionic_rdma_queue_cmd) == 64); + +/* Events */ +static_assert(sizeof(struct ionic_notifyq_cmd) == 4); +static_assert(sizeof(union ionic_notifyq_comp) == 64); +static_assert(sizeof(struct ionic_notifyq_event) == 64); +static_assert(sizeof(struct ionic_link_change_event) == 64); +static_assert(sizeof(struct ionic_reset_event) == 64); +static_assert(sizeof(struct ionic_heartbeat_event) == 64); +static_assert(sizeof(struct ionic_log_event) == 64); + +/* I/O */ +static_assert(sizeof(struct ionic_txq_desc) == 16); +static_assert(sizeof(struct ionic_txq_sg_desc) == 128); +static_assert(sizeof(struct ionic_txq_comp) == 16); + +static_assert(sizeof(struct ionic_rxq_desc) == 16); +static_assert(sizeof(struct ionic_rxq_sg_desc) == 128); +static_assert(sizeof(struct ionic_rxq_comp) == 16); + +struct ionic_devinfo { + u8 asic_type; + u8 asic_rev; + char fw_version[IONIC_DEVINFO_FWVERS_BUFLEN + 1]; + char serial_num[IONIC_DEVINFO_SERIAL_BUFLEN + 1]; +}; + +struct ionic_dev { + union ionic_dev_info_regs __iomem 
*dev_info_regs; + union ionic_dev_cmd_regs __iomem *dev_cmd_regs; + + u64 __iomem *db_pages; + dma_addr_t phy_db_pages; + + struct ionic_intr __iomem *intr_ctrl; + u64 __iomem *intr_status; + + u32 port_info_sz; + struct ionic_port_info *port_info; + dma_addr_t port_info_pa; + + struct ionic_devinfo dev_info; +}; + +struct ionic_cq_info { + void *cq_desc; + struct ionic_cq_info *next; + unsigned int index; + bool last; +}; + +struct ionic_queue; +struct ionic_qcq; +struct ionic_desc_info; + +typedef void (*ionic_desc_cb)(struct ionic_queue *q, + struct ionic_desc_info *desc_info, + struct ionic_cq_info *cq_info, void *cb_arg); + +struct ionic_desc_info { + void *desc; + void *sg_desc; + struct ionic_desc_info *next; + unsigned int index; + unsigned int left; + ionic_desc_cb cb; + void *cb_arg; +}; + +#define QUEUE_NAME_MAX_SZ 32 + +struct ionic_queue { + u64 dbell_count; + u64 drop; + u64 stop; + u64 wake; + struct ionic_lif *lif; + struct ionic_desc_info *info; + struct ionic_desc_info *tail; + struct ionic_desc_info *head; + struct ionic_dev *idev; + unsigned int index; + unsigned int type; + unsigned int hw_index; + unsigned int hw_type; + u64 dbval; + void *base; + void *sg_base; + dma_addr_t base_pa; + dma_addr_t sg_base_pa; + unsigned int num_descs; + unsigned int desc_size; + unsigned int sg_desc_size; + unsigned int pid; + char name[QUEUE_NAME_MAX_SZ]; +}; + +#define INTR_INDEX_NOT_ASSIGNED -1 +#define INTR_NAME_MAX_SZ 32 + +struct ionic_intr_info { + char name[INTR_NAME_MAX_SZ]; + unsigned int index; + unsigned int vector; + u64 rearm_count; + unsigned int cpu; + cpumask_t affinity_mask; +}; + +struct ionic_cq { + void *base; + dma_addr_t base_pa; + struct ionic_lif *lif; + struct ionic_cq_info *info; + struct ionic_cq_info *tail; + struct ionic_queue *bound_q; + struct ionic_intr_info *bound_intr; + bool done_color; + unsigned int num_descs; + u64 compl_count; + unsigned int desc_size; +}; + +struct ionic; + +static inline void ionic_intr_init(struct ionic_dev *idev, + struct ionic_intr_info *intr, + unsigned long index) +{ + ionic_intr_clean(idev->intr_ctrl, index); + intr->index = index; +} + +static inline unsigned int ionic_q_space_avail(struct ionic_queue *q) +{ + unsigned int avail = q->tail->index; + + if (q->head->index >= avail) + avail += q->head->left - 1; + else + avail -= q->head->index + 1; + + return avail; +} + +static inline bool ionic_q_has_space(struct ionic_queue *q, unsigned int want) +{ + return ionic_q_space_avail(q) >= want; +} + +void ionic_init_devinfo(struct ionic *ionic); +int ionic_dev_setup(struct ionic *ionic); +void ionic_dev_teardown(struct ionic *ionic); + +void ionic_dev_cmd_go(struct ionic_dev *idev, union ionic_dev_cmd *cmd); +u8 ionic_dev_cmd_status(struct ionic_dev *idev); +bool ionic_dev_cmd_done(struct ionic_dev *idev); +void ionic_dev_cmd_comp(struct ionic_dev *idev, union ionic_dev_cmd_comp *comp); + +void ionic_dev_cmd_identify(struct ionic_dev *idev, u8 ver); +void ionic_dev_cmd_init(struct ionic_dev *idev); +void ionic_dev_cmd_reset(struct ionic_dev *idev); + +void ionic_dev_cmd_port_identify(struct ionic_dev *idev); +void ionic_dev_cmd_port_init(struct ionic_dev *idev); +void ionic_dev_cmd_port_reset(struct ionic_dev *idev); +void ionic_dev_cmd_port_state(struct ionic_dev *idev, u8 state); +void ionic_dev_cmd_port_speed(struct ionic_dev *idev, u32 speed); +void ionic_dev_cmd_port_autoneg(struct ionic_dev *idev, u8 an_enable); +void ionic_dev_cmd_port_fec(struct ionic_dev *idev, u8 fec_type); +void ionic_dev_cmd_port_pause(struct 
ionic_dev *idev, u8 pause_type); + +void ionic_dev_cmd_lif_identify(struct ionic_dev *idev, u8 type, u8 ver); +void ionic_dev_cmd_lif_init(struct ionic_dev *idev, u16 lif_index, + dma_addr_t addr); +void ionic_dev_cmd_lif_reset(struct ionic_dev *idev, u16 lif_index); +void ionic_dev_cmd_adminq_init(struct ionic_dev *idev, struct ionic_qcq *qcq, + u16 lif_index, u16 intr_index); + +int ionic_db_page_num(struct ionic_lif *lif, int pid); + +int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq, + struct ionic_intr_info *intr, + unsigned int num_descs, size_t desc_size); +void ionic_cq_map(struct ionic_cq *cq, void *base, dma_addr_t base_pa); +void ionic_cq_bind(struct ionic_cq *cq, struct ionic_queue *q); +typedef bool (*ionic_cq_cb)(struct ionic_cq *cq, struct ionic_cq_info *cq_info); +typedef void (*ionic_cq_done_cb)(void *done_arg); +unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do, + ionic_cq_cb cb, ionic_cq_done_cb done_cb, + void *done_arg); + +int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev, + struct ionic_queue *q, unsigned int index, const char *name, + unsigned int num_descs, size_t desc_size, + size_t sg_desc_size, unsigned int pid); +void ionic_q_map(struct ionic_queue *q, void *base, dma_addr_t base_pa); +void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa); +void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb, + void *cb_arg); +void ionic_q_rewind(struct ionic_queue *q, struct ionic_desc_info *start); +void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info, + unsigned int stop_index); + +#endif /* _IONIC_DEV_H_ */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c new file mode 100644 index 000000000000..af1647afa4e8 --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c @@ -0,0 +1,99 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#include <linux/module.h> +#include <linux/netdevice.h> + +#include "ionic.h" +#include "ionic_bus.h" +#include "ionic_lif.h" +#include "ionic_devlink.h" + +static int ionic_dl_info_get(struct devlink *dl, struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ + struct ionic *ionic = devlink_priv(dl); + struct ionic_dev *idev = &ionic->idev; + char buf[16]; + int err = 0; + + err = devlink_info_driver_name_put(req, IONIC_DRV_NAME); + if (err) + goto info_out; + + err = devlink_info_version_running_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW, + idev->dev_info.fw_version); + if (err) + goto info_out; + + snprintf(buf, sizeof(buf), "0x%x", idev->dev_info.asic_type); + err = devlink_info_version_fixed_put(req, + DEVLINK_INFO_VERSION_GENERIC_ASIC_ID, + buf); + if (err) + goto info_out; + + snprintf(buf, sizeof(buf), "0x%x", idev->dev_info.asic_rev); + err = devlink_info_version_fixed_put(req, + DEVLINK_INFO_VERSION_GENERIC_ASIC_REV, + buf); + if (err) + goto info_out; + + err = devlink_info_serial_number_put(req, idev->dev_info.serial_num); + +info_out: + return err; +} + +static const struct devlink_ops ionic_dl_ops = { + .info_get = ionic_dl_info_get, +}; + +struct ionic *ionic_devlink_alloc(struct device *dev) +{ + struct devlink *dl; + + dl = devlink_alloc(&ionic_dl_ops, sizeof(struct ionic)); + + return devlink_priv(dl); +} + +void ionic_devlink_free(struct ionic *ionic) +{ + struct devlink *dl = priv_to_devlink(ionic); + + devlink_free(dl); +} + +int ionic_devlink_register(struct ionic *ionic) 
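+/* called from probe after the LIFs are registered, so the port can be bound to master_lif->netdev */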
+{ + struct devlink *dl = priv_to_devlink(ionic); + int err; + + err = devlink_register(dl, ionic->dev); + if (err) { + dev_warn(ionic->dev, "devlink_register failed: %d\n", err); + return err; + } + + devlink_port_attrs_set(&ionic->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL, + 0, false, 0, NULL, 0); + err = devlink_port_register(dl, &ionic->dl_port, 0); + if (err) + dev_err(ionic->dev, "devlink_port_register failed: %d\n", err); + else + devlink_port_type_eth_set(&ionic->dl_port, + ionic->master_lif->netdev); + + return err; +} + +void ionic_devlink_unregister(struct ionic *ionic) +{ + struct devlink *dl = priv_to_devlink(ionic); + + devlink_port_unregister(&ionic->dl_port); + devlink_unregister(dl); +} diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.h b/drivers/net/ethernet/pensando/ionic/ionic_devlink.h new file mode 100644 index 000000000000..0690172fc57a --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#ifndef _IONIC_DEVLINK_H_ +#define _IONIC_DEVLINK_H_ + +#include <net/devlink.h> + +struct ionic *ionic_devlink_alloc(struct device *dev); +void ionic_devlink_free(struct ionic *ionic); +int ionic_devlink_register(struct ionic *ionic); +void ionic_devlink_unregister(struct ionic *ionic); + +#endif /* _IONIC_DEVLINK_H_ */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c new file mode 100644 index 000000000000..7d10265f782a --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c @@ -0,0 +1,779 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#include <linux/module.h> +#include <linux/netdevice.h> + +#include "ionic.h" +#include "ionic_bus.h" +#include "ionic_lif.h" +#include "ionic_ethtool.h" +#include "ionic_stats.h" + +static const char ionic_priv_flags_strings[][ETH_GSTRING_LEN] = { +#define PRIV_F_SW_DBG_STATS BIT(0) + "sw-dbg-stats", +}; +#define PRIV_FLAGS_COUNT ARRAY_SIZE(ionic_priv_flags_strings) + +static void ionic_get_stats_strings(struct ionic_lif *lif, u8 *buf) +{ + u32 i; + + for (i = 0; i < ionic_num_stats_grps; i++) + ionic_stats_groups[i].get_strings(lif, &buf); +} + +static void ionic_get_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *buf) +{ + struct ionic_lif *lif; + u32 i; + + lif = netdev_priv(netdev); + + memset(buf, 0, stats->n_stats * sizeof(*buf)); + for (i = 0; i < ionic_num_stats_grps; i++) + ionic_stats_groups[i].get_values(lif, &buf); +} + +static int ionic_get_stats_count(struct ionic_lif *lif) +{ + int i, num_stats = 0; + + for (i = 0; i < ionic_num_stats_grps; i++) + num_stats += ionic_stats_groups[i].get_count(lif); + + return num_stats; +} + +static int ionic_get_sset_count(struct net_device *netdev, int sset) +{ + struct ionic_lif *lif = netdev_priv(netdev); + int count = 0; + + switch (sset) { + case ETH_SS_STATS: + count = ionic_get_stats_count(lif); + break; + case ETH_SS_PRIV_FLAGS: + count = PRIV_FLAGS_COUNT; + break; + } + return count; +} + +static void ionic_get_strings(struct net_device *netdev, + u32 sset, u8 *buf) +{ + struct ionic_lif *lif = netdev_priv(netdev); + + switch (sset) { + case ETH_SS_STATS: + ionic_get_stats_strings(lif, buf); + break; + case ETH_SS_PRIV_FLAGS: + memcpy(buf, ionic_priv_flags_strings, + PRIV_FLAGS_COUNT * ETH_GSTRING_LEN); + break; + } +} + +static void ionic_get_drvinfo(struct net_device *netdev, + struct 
ethtool_drvinfo *drvinfo) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic *ionic = lif->ionic; + + strlcpy(drvinfo->driver, IONIC_DRV_NAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, IONIC_DRV_VERSION, sizeof(drvinfo->version)); + strlcpy(drvinfo->fw_version, ionic->idev.dev_info.fw_version, + sizeof(drvinfo->fw_version)); + strlcpy(drvinfo->bus_info, ionic_bus_info(ionic), + sizeof(drvinfo->bus_info)); +} + +static int ionic_get_regs_len(struct net_device *netdev) +{ + return (IONIC_DEV_INFO_REG_COUNT + IONIC_DEV_CMD_REG_COUNT) * sizeof(u32); +} + +static void ionic_get_regs(struct net_device *netdev, struct ethtool_regs *regs, + void *p) +{ + struct ionic_lif *lif = netdev_priv(netdev); + unsigned int size; + + regs->version = IONIC_DEV_CMD_REG_VERSION; + + size = IONIC_DEV_INFO_REG_COUNT * sizeof(u32); + memcpy_fromio(p, lif->ionic->idev.dev_info_regs->words, size); + + size = IONIC_DEV_CMD_REG_COUNT * sizeof(u32); + memcpy_fromio(p, lif->ionic->idev.dev_cmd_regs->words, size); +} + +static int ionic_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *ks) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic_dev *idev = &lif->ionic->idev; + int copper_seen = 0; + + ethtool_link_ksettings_zero_link_mode(ks, supported); + + /* The port_info data is found in a DMA space that the NIC keeps + * up-to-date, so there's no need to request the data from the + * NIC, we already have it in our memory space. + */ + + switch (le16_to_cpu(idev->port_info->status.xcvr.pid)) { + /* Copper */ + case IONIC_XCVR_PID_QSFP_100G_CR4: + ethtool_link_ksettings_add_link_mode(ks, supported, + 100000baseCR4_Full); + copper_seen++; + break; + case IONIC_XCVR_PID_QSFP_40GBASE_CR4: + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseCR4_Full); + copper_seen++; + break; + case IONIC_XCVR_PID_SFP_25GBASE_CR_S: + case IONIC_XCVR_PID_SFP_25GBASE_CR_L: + case IONIC_XCVR_PID_SFP_25GBASE_CR_N: + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseCR_Full); + copper_seen++; + break; + case IONIC_XCVR_PID_SFP_10GBASE_AOC: + case IONIC_XCVR_PID_SFP_10GBASE_CU: + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseCR_Full); + copper_seen++; + break; + + /* Fibre */ + case IONIC_XCVR_PID_QSFP_100G_SR4: + case IONIC_XCVR_PID_QSFP_100G_AOC: + ethtool_link_ksettings_add_link_mode(ks, supported, + 100000baseSR4_Full); + break; + case IONIC_XCVR_PID_QSFP_100G_LR4: + ethtool_link_ksettings_add_link_mode(ks, supported, + 100000baseLR4_ER4_Full); + break; + case IONIC_XCVR_PID_QSFP_100G_ER4: + ethtool_link_ksettings_add_link_mode(ks, supported, + 100000baseLR4_ER4_Full); + break; + case IONIC_XCVR_PID_QSFP_40GBASE_SR4: + case IONIC_XCVR_PID_QSFP_40GBASE_AOC: + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseSR4_Full); + break; + case IONIC_XCVR_PID_QSFP_40GBASE_LR4: + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseLR4_Full); + break; + case IONIC_XCVR_PID_SFP_25GBASE_SR: + case IONIC_XCVR_PID_SFP_25GBASE_AOC: + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseSR_Full); + break; + case IONIC_XCVR_PID_SFP_10GBASE_SR: + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseSR_Full); + break; + case IONIC_XCVR_PID_SFP_10GBASE_LR: + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseLR_Full); + break; + case IONIC_XCVR_PID_SFP_10GBASE_LRM: + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseLRM_Full); + break; + case IONIC_XCVR_PID_SFP_10GBASE_ER: + 
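/* extended-reach 10G, the last of the fibre module types handled here */ +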
ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseER_Full); + break; + case IONIC_XCVR_PID_UNKNOWN: + /* This means there's no module plugged in */ + break; + default: + dev_info(lif->ionic->dev, "unknown xcvr type pid=%d / 0x%x\n", + idev->port_info->status.xcvr.pid, + idev->port_info->status.xcvr.pid); + break; + } + + bitmap_copy(ks->link_modes.advertising, ks->link_modes.supported, + __ETHTOOL_LINK_MODE_MASK_NBITS); + + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); + if (idev->port_info->config.fec_type == IONIC_PORT_FEC_TYPE_FC) + ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_BASER); + else if (idev->port_info->config.fec_type == IONIC_PORT_FEC_TYPE_RS) + ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); + + ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(ks, supported, Pause); + + if (idev->port_info->status.xcvr.phy == IONIC_PHY_TYPE_COPPER || + copper_seen) + ks->base.port = PORT_DA; + else if (idev->port_info->status.xcvr.phy == IONIC_PHY_TYPE_FIBER) + ks->base.port = PORT_FIBRE; + else + ks->base.port = PORT_NONE; + + if (ks->base.port != PORT_NONE) { + ks->base.speed = le32_to_cpu(lif->info->status.link_speed); + + if (le16_to_cpu(lif->info->status.link_status)) + ks->base.duplex = DUPLEX_FULL; + else + ks->base.duplex = DUPLEX_UNKNOWN; + + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + + if (idev->port_info->config.an_enable) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + Autoneg); + ks->base.autoneg = AUTONEG_ENABLE; + } + } + + return 0; +} + +static int ionic_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *ks) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic *ionic = lif->ionic; + struct ionic_dev *idev; + u32 req_rs, req_fc; + u8 fec_type; + int err = 0; + + idev = &lif->ionic->idev; + fec_type = IONIC_PORT_FEC_TYPE_NONE; + + /* set autoneg */ + if (ks->base.autoneg != idev->port_info->config.an_enable) { + mutex_lock(&ionic->dev_cmd_lock); + ionic_dev_cmd_port_autoneg(idev, ks->base.autoneg); + err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); + mutex_unlock(&ionic->dev_cmd_lock); + if (err) + return err; + } + + /* set speed */ + if (ks->base.speed != le32_to_cpu(idev->port_info->config.speed)) { + mutex_lock(&ionic->dev_cmd_lock); + ionic_dev_cmd_port_speed(idev, ks->base.speed); + err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); + mutex_unlock(&ionic->dev_cmd_lock); + if (err) + return err; + } + + /* set FEC */ + req_rs = ethtool_link_ksettings_test_link_mode(ks, advertising, FEC_RS); + req_fc = ethtool_link_ksettings_test_link_mode(ks, advertising, FEC_BASER); + if (req_rs && req_fc) { + netdev_info(netdev, "Only select one FEC mode at a time\n"); + return -EINVAL; + } else if (req_fc) { + fec_type = IONIC_PORT_FEC_TYPE_FC; + } else if (req_rs) { + fec_type = IONIC_PORT_FEC_TYPE_RS; + } else if (!(req_rs | req_fc)) { + fec_type = IONIC_PORT_FEC_TYPE_NONE; + } + + if (fec_type != idev->port_info->config.fec_type) { + mutex_lock(&ionic->dev_cmd_lock); + ionic_dev_cmd_port_fec(idev, fec_type); + err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); + mutex_unlock(&ionic->dev_cmd_lock); + if (err) + return err; + } + + return 0; +} + +static void ionic_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ionic_lif *lif = netdev_priv(netdev); + u8 pause_type; + + pause->autoneg = 0; + + 
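/* like the link settings above, pause config is read from the NIC-maintained port_info */ +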
pause_type = lif->ionic->idev.port_info->config.pause_type; + if (pause_type) { + pause->rx_pause = pause_type & IONIC_PAUSE_F_RX ? 1 : 0; + pause->tx_pause = pause_type & IONIC_PAUSE_F_TX ? 1 : 0; + } +} + +static int ionic_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic *ionic = lif->ionic; + u32 requested_pause; + int err; + + if (pause->autoneg) + return -EOPNOTSUPP; + + /* change both at the same time */ + requested_pause = IONIC_PORT_PAUSE_TYPE_LINK; + if (pause->rx_pause) + requested_pause |= IONIC_PAUSE_F_RX; + if (pause->tx_pause) + requested_pause |= IONIC_PAUSE_F_TX; + + if (requested_pause == lif->ionic->idev.port_info->config.pause_type) + return 0; + + mutex_lock(&ionic->dev_cmd_lock); + ionic_dev_cmd_port_pause(&lif->ionic->idev, requested_pause); + err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); + mutex_unlock(&ionic->dev_cmd_lock); + if (err) + return err; + + return 0; +} + +static int ionic_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coalesce) +{ + struct ionic_lif *lif = netdev_priv(netdev); + + /* Tx uses Rx interrupt */ + coalesce->tx_coalesce_usecs = lif->rx_coalesce_usecs; + coalesce->rx_coalesce_usecs = lif->rx_coalesce_usecs; + + return 0; +} + +static int ionic_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coalesce) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic_identity *ident; + struct ionic_qcq *qcq; + unsigned int i; + u32 usecs; + u32 coal; + + if (coalesce->rx_max_coalesced_frames || + coalesce->rx_coalesce_usecs_irq || + coalesce->rx_max_coalesced_frames_irq || + coalesce->tx_max_coalesced_frames || + coalesce->tx_coalesce_usecs_irq || + coalesce->tx_max_coalesced_frames_irq || + coalesce->stats_block_coalesce_usecs || + coalesce->use_adaptive_rx_coalesce || + coalesce->use_adaptive_tx_coalesce || + coalesce->pkt_rate_low || + coalesce->rx_coalesce_usecs_low || + coalesce->rx_max_coalesced_frames_low || + coalesce->tx_coalesce_usecs_low || + coalesce->tx_max_coalesced_frames_low || + coalesce->pkt_rate_high || + coalesce->rx_coalesce_usecs_high || + coalesce->rx_max_coalesced_frames_high || + coalesce->tx_coalesce_usecs_high || + coalesce->tx_max_coalesced_frames_high || + coalesce->rate_sample_interval) + return -EINVAL; + + ident = &lif->ionic->ident; + if (ident->dev.intr_coal_div == 0) { + netdev_warn(netdev, "bad HW value in dev.intr_coal_div = %u\n", + le32_to_cpu(ident->dev.intr_coal_div)); + return -EIO; + } + + /* Tx uses Rx interrupt, so only change Rx */ + if (coalesce->tx_coalesce_usecs != lif->rx_coalesce_usecs) { + netdev_warn(netdev, "only the rx-usecs can be changed\n"); + return -EINVAL; + } + + coal = ionic_coal_usec_to_hw(lif->ionic, coalesce->rx_coalesce_usecs); + + if (coal > IONIC_INTR_CTRL_COAL_MAX) + return -ERANGE; + + /* If they asked for non-zero and it resolved to zero, bump it up */ + if (!coal && coalesce->rx_coalesce_usecs) + coal = 1; + + /* Convert it back to get device resolution */ + usecs = ionic_coal_hw_to_usec(lif->ionic, coal); + + if (usecs != lif->rx_coalesce_usecs) { + lif->rx_coalesce_usecs = usecs; + + if (test_bit(IONIC_LIF_UP, lif->state)) { + for (i = 0; i < lif->nxqs; i++) { + qcq = lif->rxqcqs[i].qcq; + ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, + qcq->intr.index, coal); + } + } + } + + return 0; +} + +static void ionic_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct ionic_lif *lif = netdev_priv(netdev); + + 
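+ /* Both rings share the one device-wide descriptor limit, + * IONIC_MAX_TXRX_DESC; the current counts come from the lif. + */ 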
ring->tx_max_pending = IONIC_MAX_TXRX_DESC; + ring->tx_pending = lif->ntxq_descs; + ring->rx_max_pending = IONIC_MAX_TXRX_DESC; + ring->rx_pending = lif->nrxq_descs; +} + +static int ionic_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct ionic_lif *lif = netdev_priv(netdev); + bool running; + + if (ring->rx_mini_pending || ring->rx_jumbo_pending) { + netdev_info(netdev, "Changing jumbo or mini descriptors not supported\n"); + return -EINVAL; + } + + if (!is_power_of_2(ring->tx_pending) || + !is_power_of_2(ring->rx_pending)) { + netdev_info(netdev, "Descriptor count must be a power of 2\n"); + return -EINVAL; + } + + /* if nothing to do return success */ + if (ring->tx_pending == lif->ntxq_descs && + ring->rx_pending == lif->nrxq_descs) + return 0; + + if (!ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET)) + return -EBUSY; + + running = test_bit(IONIC_LIF_UP, lif->state); + if (running) + ionic_stop(netdev); + + lif->ntxq_descs = ring->tx_pending; + lif->nrxq_descs = ring->rx_pending; + + if (running) + ionic_open(netdev); + clear_bit(IONIC_LIF_QUEUE_RESET, lif->state); + + return 0; +} + +static void ionic_get_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct ionic_lif *lif = netdev_priv(netdev); + + /* report maximum channels */ + ch->max_combined = lif->ionic->ntxqs_per_lif; + + /* report current channels */ + ch->combined_count = lif->nxqs; +} + +static int ionic_set_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct ionic_lif *lif = netdev_priv(netdev); + bool running; + + if (!ch->combined_count || ch->other_count || + ch->rx_count || ch->tx_count) + return -EINVAL; + + if (ch->combined_count == lif->nxqs) + return 0; + + if (!ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET)) + return -EBUSY; + + running = test_bit(IONIC_LIF_UP, lif->state); + if (running) + ionic_stop(netdev); + + lif->nxqs = ch->combined_count; + + if (running) + ionic_open(netdev); + clear_bit(IONIC_LIF_QUEUE_RESET, lif->state); + + return 0; +} + +static u32 ionic_get_priv_flags(struct net_device *netdev) +{ + struct ionic_lif *lif = netdev_priv(netdev); + u32 priv_flags = 0; + + if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) + priv_flags |= PRIV_F_SW_DBG_STATS; + + return priv_flags; +} + +static int ionic_set_priv_flags(struct net_device *netdev, u32 priv_flags) +{ + struct ionic_lif *lif = netdev_priv(netdev); + + /* only the SW_DEBUG_STATS state bit is affected here */ + clear_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state); + if (priv_flags & PRIV_F_SW_DBG_STATS) + set_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state); + + return 0; +} + +static int ionic_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *info, u32 *rules) +{ + struct ionic_lif *lif = netdev_priv(netdev); + int err = 0; + + switch (info->cmd) { + case ETHTOOL_GRXRINGS: + info->data = lif->nxqs; + break; + default: + netdev_err(netdev, "Command parameter %d is not supported\n", + info->cmd); + err = -EOPNOTSUPP; + } + + return err; +} + +static u32 ionic_get_rxfh_indir_size(struct net_device *netdev) +{ + struct ionic_lif *lif = netdev_priv(netdev); + + return le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); +} + +static u32 ionic_get_rxfh_key_size(struct net_device *netdev) +{ + return IONIC_RSS_HASH_KEY_SIZE; +} + +static int ionic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct ionic_lif *lif = netdev_priv(netdev); + unsigned int i, tbl_sz; + + if (indir) { + tbl_sz = 
le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); + for (i = 0; i < tbl_sz; i++) + indir[i] = lif->rss_ind_tbl[i]; + } + + if (key) + memcpy(key, lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE); + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + return 0; +} + +static int ionic_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct ionic_lif *lif = netdev_priv(netdev); + int err; + + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + err = ionic_lif_rss_config(lif, lif->rss_types, key, indir); + if (err) + return err; + + return 0; +} + +static int ionic_set_tunable(struct net_device *dev, + const struct ethtool_tunable *tuna, + const void *data) +{ + struct ionic_lif *lif = netdev_priv(dev); + + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + lif->rx_copybreak = *(u32 *)data; + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int ionic_get_tunable(struct net_device *netdev, + const struct ethtool_tunable *tuna, void *data) +{ + struct ionic_lif *lif = netdev_priv(netdev); + + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + *(u32 *)data = lif->rx_copybreak; + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int ionic_get_module_info(struct net_device *netdev, + struct ethtool_modinfo *modinfo) + +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic_dev *idev = &lif->ionic->idev; + struct ionic_xcvr_status *xcvr; + + xcvr = &idev->port_info->status.xcvr; + + /* report the module data type and length */ + switch (xcvr->sprom[0]) { + case 0x03: /* SFP */ + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + break; + case 0x0D: /* QSFP */ + case 0x11: /* QSFP28 */ + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + break; + default: + netdev_info(netdev, "unknown xcvr type 0x%02x\n", + xcvr->sprom[0]); + break; + } + + return 0; +} + +static int ionic_get_module_eeprom(struct net_device *netdev, + struct ethtool_eeprom *ee, + u8 *data) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic_dev *idev = &lif->ionic->idev; + struct ionic_xcvr_status *xcvr; + char tbuf[sizeof(xcvr->sprom)]; + int count = 10; + u32 len; + + /* The NIC keeps the module prom up-to-date in the DMA space + * so we can simply copy the module bytes into the data buffer. 
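+ * The copy is done twice and the copies compared below so that a + * snapshot torn by a concurrent firmware update of the prom contents + * is retried instead of being returned to the user. 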
+ */ + xcvr = &idev->port_info->status.xcvr; + len = min_t(u32, sizeof(xcvr->sprom), ee->len); + + do { + memcpy(data, xcvr->sprom, len); + memcpy(tbuf, xcvr->sprom, len); + + /* Let's make sure we got a consistent copy */ + if (!memcmp(data, tbuf, len)) + break; + + } while (--count); + + if (!count) + return -ETIMEDOUT; + + return 0; +} + +static int ionic_nway_reset(struct net_device *netdev) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic *ionic = lif->ionic; + int err = 0; + + /* flap the link to force auto-negotiation */ + + mutex_lock(&ionic->dev_cmd_lock); + + ionic_dev_cmd_port_state(&ionic->idev, IONIC_PORT_ADMIN_STATE_DOWN); + err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); + + if (!err) { + ionic_dev_cmd_port_state(&ionic->idev, IONIC_PORT_ADMIN_STATE_UP); + err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); + } + + mutex_unlock(&ionic->dev_cmd_lock); + + return err; +} + +static const struct ethtool_ops ionic_ethtool_ops = { + .get_drvinfo = ionic_get_drvinfo, + .get_regs_len = ionic_get_regs_len, + .get_regs = ionic_get_regs, + .get_link = ethtool_op_get_link, + .get_link_ksettings = ionic_get_link_ksettings, + .get_coalesce = ionic_get_coalesce, + .set_coalesce = ionic_set_coalesce, + .get_ringparam = ionic_get_ringparam, + .set_ringparam = ionic_set_ringparam, + .get_channels = ionic_get_channels, + .set_channels = ionic_set_channels, + .get_strings = ionic_get_strings, + .get_ethtool_stats = ionic_get_stats, + .get_sset_count = ionic_get_sset_count, + .get_priv_flags = ionic_get_priv_flags, + .set_priv_flags = ionic_set_priv_flags, + .get_rxnfc = ionic_get_rxnfc, + .get_rxfh_indir_size = ionic_get_rxfh_indir_size, + .get_rxfh_key_size = ionic_get_rxfh_key_size, + .get_rxfh = ionic_get_rxfh, + .set_rxfh = ionic_set_rxfh, + .get_tunable = ionic_get_tunable, + .set_tunable = ionic_set_tunable, + .get_module_info = ionic_get_module_info, + .get_module_eeprom = ionic_get_module_eeprom, + .get_pauseparam = ionic_get_pauseparam, + .set_pauseparam = ionic_set_pauseparam, + .set_link_ksettings = ionic_set_link_ksettings, + .nway_reset = ionic_nway_reset, +}; + +void ionic_ethtool_set_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &ionic_ethtool_ops; +} diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.h b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.h new file mode 100644 index 000000000000..38b91b1d70ae --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#ifndef _IONIC_ETHTOOL_H_ +#define _IONIC_ETHTOOL_H_ + +void ionic_ethtool_set_ops(struct net_device *netdev); + +#endif /* _IONIC_ETHTOOL_H_ */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h new file mode 100644 index 000000000000..5bfdda19f64d --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h @@ -0,0 +1,2482 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB OR BSD-2-Clause */ +/* Copyright (c) 2017-2019 Pensando Systems, Inc. All rights reserved. 
*/ + +#ifndef _IONIC_IF_H_ +#define _IONIC_IF_H_ + +#pragma pack(push, 1) + +#define IONIC_DEV_INFO_SIGNATURE 0x44455649 /* 'DEVI' */ +#define IONIC_DEV_INFO_VERSION 1 +#define IONIC_IFNAMSIZ 16 + +/** + * Commands + */ +enum ionic_cmd_opcode { + IONIC_CMD_NOP = 0, + + /* Device commands */ + IONIC_CMD_IDENTIFY = 1, + IONIC_CMD_INIT = 2, + IONIC_CMD_RESET = 3, + IONIC_CMD_GETATTR = 4, + IONIC_CMD_SETATTR = 5, + + /* Port commands */ + IONIC_CMD_PORT_IDENTIFY = 10, + IONIC_CMD_PORT_INIT = 11, + IONIC_CMD_PORT_RESET = 12, + IONIC_CMD_PORT_GETATTR = 13, + IONIC_CMD_PORT_SETATTR = 14, + + /* LIF commands */ + IONIC_CMD_LIF_IDENTIFY = 20, + IONIC_CMD_LIF_INIT = 21, + IONIC_CMD_LIF_RESET = 22, + IONIC_CMD_LIF_GETATTR = 23, + IONIC_CMD_LIF_SETATTR = 24, + + IONIC_CMD_RX_MODE_SET = 30, + IONIC_CMD_RX_FILTER_ADD = 31, + IONIC_CMD_RX_FILTER_DEL = 32, + + /* Queue commands */ + IONIC_CMD_Q_INIT = 40, + IONIC_CMD_Q_CONTROL = 41, + + /* RDMA commands */ + IONIC_CMD_RDMA_RESET_LIF = 50, + IONIC_CMD_RDMA_CREATE_EQ = 51, + IONIC_CMD_RDMA_CREATE_CQ = 52, + IONIC_CMD_RDMA_CREATE_ADMINQ = 53, + + /* QoS commands */ + IONIC_CMD_QOS_CLASS_IDENTIFY = 240, + IONIC_CMD_QOS_CLASS_INIT = 241, + IONIC_CMD_QOS_CLASS_RESET = 242, + + /* Firmware commands */ + IONIC_CMD_FW_DOWNLOAD = 254, + IONIC_CMD_FW_CONTROL = 255, +}; + +/** + * Command Return codes + */ +enum ionic_status_code { + IONIC_RC_SUCCESS = 0, /* Success */ + IONIC_RC_EVERSION = 1, /* Incorrect version for request */ + IONIC_RC_EOPCODE = 2, /* Invalid cmd opcode */ + IONIC_RC_EIO = 3, /* I/O error */ + IONIC_RC_EPERM = 4, /* Permission denied */ + IONIC_RC_EQID = 5, /* Bad qid */ + IONIC_RC_EQTYPE = 6, /* Bad qtype */ + IONIC_RC_ENOENT = 7, /* No such element */ + IONIC_RC_EINTR = 8, /* operation interrupted */ + IONIC_RC_EAGAIN = 9, /* Try again */ + IONIC_RC_ENOMEM = 10, /* Out of memory */ + IONIC_RC_EFAULT = 11, /* Bad address */ + IONIC_RC_EBUSY = 12, /* Device or resource busy */ + IONIC_RC_EEXIST = 13, /* object already exists */ + IONIC_RC_EINVAL = 14, /* Invalid argument */ + IONIC_RC_ENOSPC = 15, /* No space left or alloc failure */ + IONIC_RC_ERANGE = 16, /* Parameter out of range */ + IONIC_RC_BAD_ADDR = 17, /* Descriptor contains a bad ptr */ + IONIC_RC_DEV_CMD = 18, /* Device cmd attempted on AdminQ */ + IONIC_RC_ENOSUPP = 19, /* Operation not supported */ + IONIC_RC_ERROR = 29, /* Generic error */ + + IONIC_RC_ERDMA = 30, /* Generic RDMA error */ +}; + +enum ionic_notifyq_opcode { + IONIC_EVENT_LINK_CHANGE = 1, + IONIC_EVENT_RESET = 2, + IONIC_EVENT_HEARTBEAT = 3, + IONIC_EVENT_LOG = 4, +}; + +/** + * struct cmd - General admin command format + * @opcode: Opcode for the command + * @lif_index: LIF index + * @cmd_data: Opcode-specific command bytes + */ +struct ionic_admin_cmd { + u8 opcode; + u8 rsvd; + __le16 lif_index; + u8 cmd_data[60]; +}; + +/** + * struct admin_comp - General admin command completion format + * @status: The status of the command (enum status_code) + * @comp_index: The index in the descriptor ring for which this + * is the completion. + * @cmd_data: Command-specific bytes. + * @color: Color bit. (Always 0 for commands issued to the + * Device Cmd Registers.) 
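+ * + * As an illustrative sketch only (not part of the interface), a + * completion ring consumer can poll on the color bit instead of a + * produced-entries count; ring, tail, ring_size and done_color here + * are hypothetical driver state, with done_color flipping on each + * ring wrap: + * + * struct ionic_admin_comp *comp = &ring[tail]; + * + * while (color_match(comp->color, done_color)) { + * handle_completion(comp); /* hypothetical consumer */ + * if (++tail == ring_size) { + * tail = 0; + * done_color = !done_color; + * } + * comp = &ring[tail]; + * } + * 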
+ */ +struct ionic_admin_comp { + u8 status; + u8 rsvd; + __le16 comp_index; + u8 cmd_data[11]; + u8 color; +#define IONIC_COMP_COLOR_MASK 0x80 +}; + +static inline u8 color_match(u8 color, u8 done_color) +{ + return (!!(color & IONIC_COMP_COLOR_MASK)) == done_color; +} + +/** + * struct nop_cmd - NOP command + * @opcode: opcode + */ +struct ionic_nop_cmd { + u8 opcode; + u8 rsvd[63]; +}; + +/** + * struct nop_comp - NOP command completion + * @status: The status of the command (enum status_code) + */ +struct ionic_nop_comp { + u8 status; + u8 rsvd[15]; +}; + +/** + * struct dev_init_cmd - Device init command + * @opcode: opcode + * @type: device type + */ +struct ionic_dev_init_cmd { + u8 opcode; + u8 type; + u8 rsvd[62]; +}; + +/** + * struct init_comp - Device init command completion + * @status: The status of the command (enum status_code) + */ +struct ionic_dev_init_comp { + u8 status; + u8 rsvd[15]; +}; + +/** + * struct dev_reset_cmd - Device reset command + * @opcode: opcode + */ +struct ionic_dev_reset_cmd { + u8 opcode; + u8 rsvd[63]; +}; + +/** + * struct reset_comp - Reset command completion + * @status: The status of the command (enum status_code) + */ +struct ionic_dev_reset_comp { + u8 status; + u8 rsvd[15]; +}; + +#define IONIC_IDENTITY_VERSION_1 1 + +/** + * struct dev_identify_cmd - Driver/device identify command + * @opcode: opcode + * @ver: Highest version of identify supported by driver + */ +struct ionic_dev_identify_cmd { + u8 opcode; + u8 ver; + u8 rsvd[62]; +}; + +/** + * struct dev_identify_comp - Driver/device identify command completion + * @status: The status of the command (enum status_code) + * @ver: Version of identify returned by device + */ +struct ionic_dev_identify_comp { + u8 status; + u8 ver; + u8 rsvd[14]; +}; + +enum ionic_os_type { + IONIC_OS_TYPE_LINUX = 1, + IONIC_OS_TYPE_WIN = 2, + IONIC_OS_TYPE_DPDK = 3, + IONIC_OS_TYPE_FREEBSD = 4, + IONIC_OS_TYPE_IPXE = 5, + IONIC_OS_TYPE_ESXI = 6, +}; + +/** + * union drv_identity - driver identity information + * @os_type: OS type (see enum os_type) + * @os_dist: OS distribution, numeric format + * @os_dist_str: OS distribution, string format + * @kernel_ver: Kernel version, numeric format + * @kernel_ver_str: Kernel version, string format + * @driver_ver_str: Driver version, string format + */ +union ionic_drv_identity { + struct { + __le32 os_type; + __le32 os_dist; + char os_dist_str[128]; + __le32 kernel_ver; + char kernel_ver_str[32]; + char driver_ver_str[32]; + }; + __le32 words[512]; +}; + +/** + * union dev_identity - device identity information + * @version: Version of device identify + * @type: Identify type (0 for now) + * @nports: Number of ports provisioned + * @nlifs: Number of LIFs provisioned + * @nintrs: Number of interrupts provisioned + * @ndbpgs_per_lif: Number of doorbell pages per LIF + * @intr_coal_mult: Interrupt coalescing multiplication factor. + * Scale user-supplied interrupt coalescing + * value in usecs to device units using: + * device units = usecs * mult / div + * @intr_coal_div: Interrupt coalescing division factor. 
+ * Scale user-supplied interrupt coalescing + * value in usecs to device units using: + * device units = usecs * mult / div + * + */ +union ionic_dev_identity { + struct { + u8 version; + u8 type; + u8 rsvd[2]; + u8 nports; + u8 rsvd2[3]; + __le32 nlifs; + __le32 nintrs; + __le32 ndbpgs_per_lif; + __le32 intr_coal_mult; + __le32 intr_coal_div; + }; + __le32 words[512]; +}; + +enum ionic_lif_type { + IONIC_LIF_TYPE_CLASSIC = 0, + IONIC_LIF_TYPE_MACVLAN = 1, + IONIC_LIF_TYPE_NETQUEUE = 2, +}; + +/** + * struct lif_identify_cmd - lif identify command + * @opcode: opcode + * @type: lif type (enum lif_type) + * @ver: version of identify returned by device + */ +struct ionic_lif_identify_cmd { + u8 opcode; + u8 type; + u8 ver; + u8 rsvd[61]; +}; + +/** + * struct lif_identify_comp - lif identify command completion + * @status: status of the command (enum status_code) + * @ver: version of identify returned by device + */ +struct ionic_lif_identify_comp { + u8 status; + u8 ver; + u8 rsvd2[14]; +}; + +enum ionic_lif_capability { + IONIC_LIF_CAP_ETH = BIT(0), + IONIC_LIF_CAP_RDMA = BIT(1), +}; + +/** + * Logical Queue Types + */ +enum ionic_logical_qtype { + IONIC_QTYPE_ADMINQ = 0, + IONIC_QTYPE_NOTIFYQ = 1, + IONIC_QTYPE_RXQ = 2, + IONIC_QTYPE_TXQ = 3, + IONIC_QTYPE_EQ = 4, + IONIC_QTYPE_MAX = 16, +}; + +/** + * struct lif_logical_qtype - Descriptor of logical to hardware queue type. + * @qtype: Hardware Queue Type. + * @qid_count: Number of Queue IDs of the logical type. + * @qid_base: Minimum Queue ID of the logical type. + */ +struct ionic_lif_logical_qtype { + u8 qtype; + u8 rsvd[3]; + __le32 qid_count; + __le32 qid_base; +}; + +enum ionic_lif_state { + IONIC_LIF_DISABLE = 0, + IONIC_LIF_ENABLE = 1, + IONIC_LIF_HANG_RESET = 2, +}; + +/** + * LIF configuration + * @state: lif state (enum lif_state) + * @name: lif name + * @mtu: mtu + * @mac: station mac address + * @features: features (enum eth_hw_features) + * @queue_count: queue counts per queue-type + */ +union ionic_lif_config { + struct { + u8 state; + u8 rsvd[3]; + char name[IONIC_IFNAMSIZ]; + __le32 mtu; + u8 mac[6]; + u8 rsvd2[2]; + __le64 features; + __le32 queue_count[IONIC_QTYPE_MAX]; + }; + __le32 words[64]; +}; + +/** + * struct lif_identity - lif identity information (type-specific) + * + * @capabilities: LIF capabilities + * + * Ethernet: + * @version: Ethernet identify structure version. + * @features: Ethernet features supported on this lif type. + * @max_ucast_filters: Number of perfect unicast addresses supported. + * @max_mcast_filters: Number of perfect multicast addresses supported. + * @rss_ind_tbl_sz: RSS indirection table size. + * @min_frame_size: Minimum size of frames to be sent + * @max_frame_size: Maximum size of frames to be sent + * @config: LIF config struct with features, mtu, mac, q counts + * + * RDMA: + * @version: RDMA version of opcodes and queue descriptors. + * @qp_opcodes: Number of rdma queue pair opcodes supported. + * @admin_opcodes: Number of rdma admin opcodes supported. + * @npts_per_lif: Page table size per lif + * @nmrs_per_lif: Number of memory regions per lif + * @nahs_per_lif: Number of address handles per lif + * @max_stride: Max work request stride. + * @cl_stride: Cache line stride. + * @pte_stride: Page table entry stride. + * @rrq_stride: Remote RQ work request stride. + * @rsq_stride: Remote SQ work request stride. + * @dcqcn_profiles: Number of DCQCN profiles + * @aq_qtype: RDMA Admin Qtype. + * @sq_qtype: RDMA Send Qtype. + * @rq_qtype: RDMA Receive Qtype. + * @cq_qtype: RDMA Completion Qtype. + * @eq_qtype: RDMA Event Qtype. 
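+ * + * As with the other identity unions, the typed fields are overlaid on + * a raw word array so the data can be copied to and from the device + * as plain __le32 words and then read through the typed view. 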
+ */ +union ionic_lif_identity { + struct { + __le64 capabilities; + + struct { + u8 version; + u8 rsvd[3]; + __le32 max_ucast_filters; + __le32 max_mcast_filters; + __le16 rss_ind_tbl_sz; + __le32 min_frame_size; + __le32 max_frame_size; + u8 rsvd2[106]; + union ionic_lif_config config; + } eth; + + struct { + u8 version; + u8 qp_opcodes; + u8 admin_opcodes; + u8 rsvd; + __le32 npts_per_lif; + __le32 nmrs_per_lif; + __le32 nahs_per_lif; + u8 max_stride; + u8 cl_stride; + u8 pte_stride; + u8 rrq_stride; + u8 rsq_stride; + u8 dcqcn_profiles; + u8 rsvd_dimensions[10]; + struct ionic_lif_logical_qtype aq_qtype; + struct ionic_lif_logical_qtype sq_qtype; + struct ionic_lif_logical_qtype rq_qtype; + struct ionic_lif_logical_qtype cq_qtype; + struct ionic_lif_logical_qtype eq_qtype; + } rdma; + }; + __le32 words[512]; +}; + +/** + * struct lif_init_cmd - LIF init command + * @opcode: opcode + * @type: LIF type (enum lif_type) + * @index: LIF index + * @info_pa: destination address for lif info (struct lif_info) + */ +struct ionic_lif_init_cmd { + u8 opcode; + u8 type; + __le16 index; + __le32 rsvd; + __le64 info_pa; + u8 rsvd2[48]; +}; + +/** + * struct lif_init_comp - LIF init command completion + * @status: The status of the command (enum status_code) + */ +struct ionic_lif_init_comp { + u8 status; + u8 rsvd; + __le16 hw_index; + u8 rsvd2[12]; +}; + +/** + * struct q_init_cmd - Queue init command + * @opcode: opcode + * @type: Logical queue type + * @ver: Queue version (defines opcode/descriptor scope) + * @lif_index: LIF index + * @index: (lif, qtype) relative admin queue index + * @intr_index: Interrupt control register index + * @pid: Process ID + * @flags: + * IRQ: Interrupt requested on completion + * ENA: Enable the queue. If ENA=0 the queue is initialized + * but remains disabled, to be later enabled with the + * Queue Enable command. If ENA=1, then queue is + * initialized and then enabled. + * SG: Enable Scatter-Gather on the queue. + * EQ: Enable the Event Queue + * @cos: Class of service for this queue. + * @ring_size: Queue ring size, encoded as a log2(size), + * in number of descs. The actual ring size is + * (1 << ring_size). For example, to + * select a ring size of 64 descriptors write + * ring_size = 6. The minimum ring_size value is 2 + * for a ring size of 4 descriptors. The maximum + * ring_size value is 16 for a ring size of 64k + * descriptors. Values of ring_size <2 and >16 are + * reserved. + * @ring_base: Queue ring base address + * @cq_ring_base: Completion queue ring base address + * @sg_ring_base: Scatter/Gather ring base address + * @eq_index: Event queue index + */ +struct ionic_q_init_cmd { + u8 opcode; + u8 rsvd; + __le16 lif_index; + u8 type; + u8 ver; + u8 rsvd1[2]; + __le32 index; + __le16 pid; + __le16 intr_index; + __le16 flags; +#define IONIC_QINIT_F_IRQ 0x01 /* Request interrupt on completion */ +#define IONIC_QINIT_F_ENA 0x02 /* Enable the queue */ +#define IONIC_QINIT_F_SG 0x04 /* Enable scatter/gather on the queue */ +#define IONIC_QINIT_F_EQ 0x08 /* Enable event queue */ +#define IONIC_QINIT_F_DEBUG 0x80 /* Enable queue debugging */ + u8 cos; + u8 ring_size; + __le64 ring_base; + __le64 cq_ring_base; + __le64 sg_ring_base; + __le32 eq_index; + u8 rsvd2[16]; +}; + +/** + * struct q_init_comp - Queue init command completion + * @status: The status of the command (enum status_code) + * @ver: Queue version (defines opcode/descriptor scope) + * @comp_index: The index in the descriptor ring for which this + * is the completion. 
+ * @hw_index: Hardware Queue ID + * @hw_type: Hardware Queue type + * @color: Color + */ +struct ionic_q_init_comp { + u8 status; + u8 ver; + __le16 comp_index; + __le32 hw_index; + u8 hw_type; + u8 rsvd2[6]; + u8 color; +}; + +/* the device's internal addressing uses up to 52 bits */ +#define IONIC_ADDR_LEN 52 +#define IONIC_ADDR_MASK (BIT_ULL(IONIC_ADDR_LEN) - 1) + +enum ionic_txq_desc_opcode { + IONIC_TXQ_DESC_OPCODE_CSUM_NONE = 0, + IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL = 1, + IONIC_TXQ_DESC_OPCODE_CSUM_HW = 2, + IONIC_TXQ_DESC_OPCODE_TSO = 3, +}; + +/** + * struct txq_desc - Ethernet Tx queue descriptor format + * @opcode: Tx operation, see TXQ_DESC_OPCODE_*: + * + * IONIC_TXQ_DESC_OPCODE_CSUM_NONE: + * + * Non-offload send. No segmentation, + * fragmentation or checksum calc/insertion is + * performed by device; packet is prepared + * to send by software stack and requires + * no further manipulation from device. + * + * IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL: + * + * Offload 16-bit L4 checksum + * calculation/insertion. The device will + * calculate the L4 checksum value and + * insert the result in the packet's L4 + * header checksum field. The L4 checksum + * is calculated starting at @csum_start bytes + * into the packet to the end of the packet. + * The checksum insertion position is given + * in @csum_offset. This feature is only + * applicable to protocols such as TCP, UDP + * and ICMP where a standard (i.e. the + * 'IP-style' checksum) one's complement + * 16-bit checksum is used, using an IP + * pseudo-header to seed the calculation. + * Software will preload the L4 checksum + * field with the IP pseudo-header checksum. + * + * For tunnel encapsulation, @csum_start and + * @csum_offset refer to the inner L4 + * header. Supported tunnel encapsulations + * are: IPIP, GRE, and UDP. If @encap + * is clear, no further processing by the + * device is required; software will + * calculate the outer header checksums. If + * @encap is set, the device will + * offload the outer header checksums using + * LCO (local checksum offload) (see + * Documentation/networking/checksum- + * offloads.txt for more info). + * + * IONIC_TXQ_DESC_OPCODE_CSUM_HW: + * + * Offload 16-bit checksum computation to hardware. + * If @csum_l3 is set then the packet's L3 checksum is + * updated. Similarly, if @csum_l4 is set then the L4 + * checksum is updated. If @encap is set then encap header + * checksums are also updated. + * + * IONIC_TXQ_DESC_OPCODE_TSO: + * + * Device performs TCP segmentation offload + * (TSO). @hdr_len is the number of bytes + * to the end of TCP header (the offset to + * the TCP payload). @mss is the desired + * MSS, the TCP payload length for each + * segment. The device will calculate/ + * insert IP (IPv4 only) and TCP checksums + * for each segment. In the first data + * buffer containing the header template, + * the driver will set IPv4 checksum to 0 + * and preload TCP checksum with the IP + * pseudo header calculated with IP length = 0. + * + * Supported tunnel encapsulations are IPIP, + * layer-3 GRE, and UDP. @hdr_len includes + * both outer and inner headers. The driver + * will set IPv4 checksum to zero and + * preload TCP checksum with IP pseudo + * header on the inner header. + * + * TCP ECN offload is supported. The device + * will set CWR flag in the first segment if + * CWR is set in the template header, and + * clear CWR in remaining segments. + * @flags: + * vlan: + * Insert an L2 VLAN header using @vlan_tci. + * encap: + * Calculate encap header checksum. + 
+ * csum_l3: + * Compute L3 header checksum. + * csum_l4: + * Compute L4 header checksum. + * tso_sot: + * TSO start + * tso_eot: + * TSO end + * @num_sg_elems: Number of scatter-gather elements in SG + * descriptor + * @addr: First data buffer's DMA address. + * (Subsequent data buffers are on txq_sg_desc). + * @len: First data buffer's length, in bytes + * @vlan_tci: VLAN tag to insert in the packet (if requested + * by the vlan flag). Includes .1p and .1q tags + * @hdr_len: Length of packet headers, including + * encapsulating outer header, if applicable. + * Valid for opcodes TXQ_DESC_OPCODE_CSUM_PARTIAL and + * TXQ_DESC_OPCODE_TSO. Should be set to zero for + * all other modes. For + * TXQ_DESC_OPCODE_CSUM_PARTIAL, @hdr_len is length + * of headers up to inner-most L4 header. For + * TXQ_DESC_OPCODE_TSO, @hdr_len is up to + * inner-most L4 payload, so inclusive of + * inner-most L4 header. + * @mss: Desired MSS value for TSO. Only applicable for + * TXQ_DESC_OPCODE_TSO. + * @csum_start: Offset into inner-most L3 header of checksum + * @csum_offset: Offset into inner-most L4 header of checksum + */ + +#define IONIC_TXQ_DESC_OPCODE_MASK 0xf +#define IONIC_TXQ_DESC_OPCODE_SHIFT 4 +#define IONIC_TXQ_DESC_FLAGS_MASK 0xf +#define IONIC_TXQ_DESC_FLAGS_SHIFT 0 +#define IONIC_TXQ_DESC_NSGE_MASK 0xf +#define IONIC_TXQ_DESC_NSGE_SHIFT 8 +#define IONIC_TXQ_DESC_ADDR_MASK (BIT_ULL(IONIC_ADDR_LEN) - 1) +#define IONIC_TXQ_DESC_ADDR_SHIFT 12 + +/* common flags */ +#define IONIC_TXQ_DESC_FLAG_VLAN 0x1 +#define IONIC_TXQ_DESC_FLAG_ENCAP 0x2 + +/* flags for csum_hw opcode */ +#define IONIC_TXQ_DESC_FLAG_CSUM_L3 0x4 +#define IONIC_TXQ_DESC_FLAG_CSUM_L4 0x8 + +/* flags for tso opcode */ +#define IONIC_TXQ_DESC_FLAG_TSO_SOT 0x4 +#define IONIC_TXQ_DESC_FLAG_TSO_EOT 0x8 + +struct ionic_txq_desc { + __le64 cmd; + __le16 len; + union { + __le16 vlan_tci; + __le16 hword0; + }; + union { + __le16 csum_start; + __le16 hdr_len; + __le16 hword1; + }; + union { + __le16 csum_offset; + __le16 mss; + __le16 hword2; + }; +}; + +static inline u64 encode_txq_desc_cmd(u8 opcode, u8 flags, + u8 nsge, u64 addr) +{ + u64 cmd; + + cmd = (opcode & IONIC_TXQ_DESC_OPCODE_MASK) << IONIC_TXQ_DESC_OPCODE_SHIFT; + cmd |= (flags & IONIC_TXQ_DESC_FLAGS_MASK) << IONIC_TXQ_DESC_FLAGS_SHIFT; + cmd |= (nsge & IONIC_TXQ_DESC_NSGE_MASK) << IONIC_TXQ_DESC_NSGE_SHIFT; + cmd |= (addr & IONIC_TXQ_DESC_ADDR_MASK) << IONIC_TXQ_DESC_ADDR_SHIFT; + + return cmd; +} + +static inline void decode_txq_desc_cmd(u64 cmd, u8 *opcode, u8 *flags, + u8 *nsge, u64 *addr) +{ + *opcode = (cmd >> IONIC_TXQ_DESC_OPCODE_SHIFT) & IONIC_TXQ_DESC_OPCODE_MASK; + *flags = (cmd >> IONIC_TXQ_DESC_FLAGS_SHIFT) & IONIC_TXQ_DESC_FLAGS_MASK; + *nsge = (cmd >> IONIC_TXQ_DESC_NSGE_SHIFT) & IONIC_TXQ_DESC_NSGE_MASK; + *addr = (cmd >> IONIC_TXQ_DESC_ADDR_SHIFT) & IONIC_TXQ_DESC_ADDR_MASK; +} + +#define IONIC_TX_MAX_SG_ELEMS 8 +#define IONIC_RX_MAX_SG_ELEMS 8 + +/** + * struct txq_sg_desc - Transmit scatter-gather (SG) list + * @addr: DMA address of SG element data buffer + * @len: Length of SG element data buffer, in bytes + */ +struct ionic_txq_sg_desc { + struct ionic_txq_sg_elem { + __le64 addr; + __le16 len; + __le16 rsvd[3]; + } elems[IONIC_TX_MAX_SG_ELEMS]; +}; + +/** + * struct txq_comp - Ethernet transmit queue completion descriptor + * @status: The status of the command (enum status_code) + * @comp_index: The index in the descriptor ring for which this + * is the completion. + * @color: Color bit. 
+ */ +struct ionic_txq_comp { + u8 status; + u8 rsvd; + __le16 comp_index; + u8 rsvd2[11]; + u8 color; +}; + +enum ionic_rxq_desc_opcode { + IONIC_RXQ_DESC_OPCODE_SIMPLE = 0, + IONIC_RXQ_DESC_OPCODE_SG = 1, +}; + +/** + * struct rxq_desc - Ethernet Rx queue descriptor format + * @opcode: Rx operation, see RXQ_DESC_OPCODE_*: + * + * RXQ_DESC_OPCODE_SIMPLE: + * + * Receive full packet into data buffer + * starting at @addr. Results of + * receive, including actual bytes received, + * are recorded in Rx completion descriptor. + * + * @len: Data buffer's length, in bytes. + * @addr: Data buffer's DMA address + */ +struct ionic_rxq_desc { + u8 opcode; + u8 rsvd[5]; + __le16 len; + __le64 addr; +}; + +/** + * struct rxq_sg_desc - Receive scatter-gather (SG) list + * @addr: DMA address of SG element data buffer + * @len: Length of SG element data buffer, in bytes + */ +struct ionic_rxq_sg_desc { + struct ionic_rxq_sg_elem { + __le64 addr; + __le16 len; + __le16 rsvd[3]; + } elems[IONIC_RX_MAX_SG_ELEMS]; +}; + +/** + * struct rxq_comp - Ethernet receive queue completion descriptor + * @status: The status of the command (enum status_code) + * @num_sg_elems: Number of SG elements used by this descriptor + * @comp_index: The index in the descriptor ring for which this + * is the completion. + * @rss_hash: 32-bit RSS hash + * @csum: 16-bit sum of the packet's L2 payload. + * If the packet's L2 payload is odd length, an extra + * zero-value byte is included in the @csum calculation but + * not included in @len. + * @vlan_tci: VLAN tag stripped from the packet. Valid if @VLAN is + * set. Includes .1p and .1q tags. + * @len: Received packet length, in bytes. Excludes FCS. + * @csum_calc: Whether the L2 payload checksum was computed + * @csum_tcp_ok: The TCP checksum calculated by the device + * matched the checksum in the receive packet's + * TCP header + * @csum_tcp_bad: The TCP checksum calculated by the device did + * not match the checksum in the receive packet's + * TCP header. + * @csum_udp_ok: The UDP checksum calculated by the device + * matched the checksum in the receive packet's + * UDP header + * @csum_udp_bad: The UDP checksum calculated by the device did + * not match the checksum in the receive packet's + * UDP header. + * @csum_ip_ok: The IPv4 checksum calculated by the device + * matched the checksum in the receive packet's + * first IPv4 header. If the receive packet + * contains both a tunnel IPv4 header and a + * transport IPv4 header, the device validates the + * checksum for both IPv4 headers. + * @csum_ip_bad: The IPv4 checksum calculated by the device did + * not match the checksum in the receive packet's + * first IPv4 header. If the receive packet + * contains both a tunnel IPv4 header and a + * transport IPv4 header, the device validates the + * checksum for both IP headers. + * @VLAN: VLAN header was stripped and placed in @vlan_tci. + * @pkt_type: Packet type + * @color: Color bit. 
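+ * + * @pkt_type and @color share the last byte of the completion: the + * packet type sits in the low nibble (IONIC_RXQ_COMP_PKT_TYPE_MASK) + * and the color bit is bit 7 (IONIC_COMP_COLOR_MASK), tested with + * color_match(). 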
+ */ +struct ionic_rxq_comp { + u8 status; + u8 num_sg_elems; + __le16 comp_index; + __le32 rss_hash; + __le16 csum; + __le16 vlan_tci; + __le16 len; + u8 csum_flags; +#define IONIC_RXQ_COMP_CSUM_F_TCP_OK 0x01 +#define IONIC_RXQ_COMP_CSUM_F_TCP_BAD 0x02 +#define IONIC_RXQ_COMP_CSUM_F_UDP_OK 0x04 +#define IONIC_RXQ_COMP_CSUM_F_UDP_BAD 0x08 +#define IONIC_RXQ_COMP_CSUM_F_IP_OK 0x10 +#define IONIC_RXQ_COMP_CSUM_F_IP_BAD 0x20 +#define IONIC_RXQ_COMP_CSUM_F_VLAN 0x40 +#define IONIC_RXQ_COMP_CSUM_F_CALC 0x80 + u8 pkt_type_color; +#define IONIC_RXQ_COMP_PKT_TYPE_MASK 0x0f +}; + +enum ionic_pkt_type { + IONIC_PKT_TYPE_NON_IP = 0x000, + IONIC_PKT_TYPE_IPV4 = 0x001, + IONIC_PKT_TYPE_IPV4_TCP = 0x003, + IONIC_PKT_TYPE_IPV4_UDP = 0x005, + IONIC_PKT_TYPE_IPV6 = 0x008, + IONIC_PKT_TYPE_IPV6_TCP = 0x018, + IONIC_PKT_TYPE_IPV6_UDP = 0x028, +}; + +enum ionic_eth_hw_features { + IONIC_ETH_HW_VLAN_TX_TAG = BIT(0), + IONIC_ETH_HW_VLAN_RX_STRIP = BIT(1), + IONIC_ETH_HW_VLAN_RX_FILTER = BIT(2), + IONIC_ETH_HW_RX_HASH = BIT(3), + IONIC_ETH_HW_RX_CSUM = BIT(4), + IONIC_ETH_HW_TX_SG = BIT(5), + IONIC_ETH_HW_RX_SG = BIT(6), + IONIC_ETH_HW_TX_CSUM = BIT(7), + IONIC_ETH_HW_TSO = BIT(8), + IONIC_ETH_HW_TSO_IPV6 = BIT(9), + IONIC_ETH_HW_TSO_ECN = BIT(10), + IONIC_ETH_HW_TSO_GRE = BIT(11), + IONIC_ETH_HW_TSO_GRE_CSUM = BIT(12), + IONIC_ETH_HW_TSO_IPXIP4 = BIT(13), + IONIC_ETH_HW_TSO_IPXIP6 = BIT(14), + IONIC_ETH_HW_TSO_UDP = BIT(15), + IONIC_ETH_HW_TSO_UDP_CSUM = BIT(16), +}; + +/** + * struct q_control_cmd - Queue control command + * @opcode: opcode + * @type: Queue type + * @lif_index: LIF index + * @index: Queue index + * @oper: Operation (enum q_control_oper) + */ +struct ionic_q_control_cmd { + u8 opcode; + u8 type; + __le16 lif_index; + __le32 index; + u8 oper; + u8 rsvd[55]; +}; + +typedef struct ionic_admin_comp ionic_q_control_comp; + +enum q_control_oper { + IONIC_Q_DISABLE = 0, + IONIC_Q_ENABLE = 1, + IONIC_Q_HANG_RESET = 2, +}; + +/** + * Physical connection type + */ +enum ionic_phy_type { + IONIC_PHY_TYPE_NONE = 0, + IONIC_PHY_TYPE_COPPER = 1, + IONIC_PHY_TYPE_FIBER = 2, +}; + +/** + * Transceiver status + */ +enum ionic_xcvr_state { + IONIC_XCVR_STATE_REMOVED = 0, + IONIC_XCVR_STATE_INSERTED = 1, + IONIC_XCVR_STATE_PENDING = 2, + IONIC_XCVR_STATE_SPROM_READ = 3, + IONIC_XCVR_STATE_SPROM_READ_ERR = 4, +}; + +/** + * Supported link modes + */ +enum ionic_xcvr_pid { + IONIC_XCVR_PID_UNKNOWN = 0, + + /* CU */ + IONIC_XCVR_PID_QSFP_100G_CR4 = 1, + IONIC_XCVR_PID_QSFP_40GBASE_CR4 = 2, + IONIC_XCVR_PID_SFP_25GBASE_CR_S = 3, + IONIC_XCVR_PID_SFP_25GBASE_CR_L = 4, + IONIC_XCVR_PID_SFP_25GBASE_CR_N = 5, + + /* Fiber */ + IONIC_XCVR_PID_QSFP_100G_AOC = 50, + IONIC_XCVR_PID_QSFP_100G_ACC = 51, + IONIC_XCVR_PID_QSFP_100G_SR4 = 52, + IONIC_XCVR_PID_QSFP_100G_LR4 = 53, + IONIC_XCVR_PID_QSFP_100G_ER4 = 54, + IONIC_XCVR_PID_QSFP_40GBASE_ER4 = 55, + IONIC_XCVR_PID_QSFP_40GBASE_SR4 = 56, + IONIC_XCVR_PID_QSFP_40GBASE_LR4 = 57, + IONIC_XCVR_PID_QSFP_40GBASE_AOC = 58, + IONIC_XCVR_PID_SFP_25GBASE_SR = 59, + IONIC_XCVR_PID_SFP_25GBASE_LR = 60, + IONIC_XCVR_PID_SFP_25GBASE_ER = 61, + IONIC_XCVR_PID_SFP_25GBASE_AOC = 62, + IONIC_XCVR_PID_SFP_10GBASE_SR = 63, + IONIC_XCVR_PID_SFP_10GBASE_LR = 64, + IONIC_XCVR_PID_SFP_10GBASE_LRM = 65, + IONIC_XCVR_PID_SFP_10GBASE_ER = 66, + IONIC_XCVR_PID_SFP_10GBASE_AOC = 67, + IONIC_XCVR_PID_SFP_10GBASE_CU = 68, + IONIC_XCVR_PID_QSFP_100G_CWDM4 = 69, + IONIC_XCVR_PID_QSFP_100G_PSM4 = 70, +}; + +/** + * Port types + */ +enum ionic_port_type { + IONIC_PORT_TYPE_NONE = 0, /* port type not 
configured */ + IONIC_PORT_TYPE_ETH = 1, /* port carries Ethernet traffic (inband) */ + IONIC_PORT_TYPE_MGMT = 2, /* port carries mgmt traffic (out-of-band) */ +}; + +/** + * Port config state + */ +enum ionic_port_admin_state { + IONIC_PORT_ADMIN_STATE_NONE = 0, /* port admin state not configured */ + IONIC_PORT_ADMIN_STATE_DOWN = 1, /* port is admin disabled */ + IONIC_PORT_ADMIN_STATE_UP = 2, /* port is admin enabled */ +}; + +/** + * Port operational status + */ +enum ionic_port_oper_status { + IONIC_PORT_OPER_STATUS_NONE = 0, /* port is disabled */ + IONIC_PORT_OPER_STATUS_UP = 1, /* port is linked up */ + IONIC_PORT_OPER_STATUS_DOWN = 2, /* port link status is down */ +}; + +/** + * Ethernet Forward error correction (fec) modes + */ +enum ionic_port_fec_type { + IONIC_PORT_FEC_TYPE_NONE = 0, /* Disabled */ + IONIC_PORT_FEC_TYPE_FC = 1, /* FireCode */ + IONIC_PORT_FEC_TYPE_RS = 2, /* ReedSolomon */ +}; + +/** + * Ethernet pause (flow control) modes + */ +enum ionic_port_pause_type { + IONIC_PORT_PAUSE_TYPE_NONE = 0, /* Disable Pause */ + IONIC_PORT_PAUSE_TYPE_LINK = 1, /* Link level pause */ + IONIC_PORT_PAUSE_TYPE_PFC = 2, /* Priority-Flow control */ +}; + +/** + * Loopback modes + */ +enum ionic_port_loopback_mode { + IONIC_PORT_LOOPBACK_MODE_NONE = 0, /* Disable loopback */ + IONIC_PORT_LOOPBACK_MODE_MAC = 1, /* MAC loopback */ + IONIC_PORT_LOOPBACK_MODE_PHY = 2, /* PHY/Serdes loopback */ +}; + +/** + * Transceiver Status information + * @state: Transceiver status (enum xcvr_state) + * @phy: Physical connection type (enum phy_type) + * @pid: Transceiver link mode (enum xcvr_pid) + * @sprom: Transceiver sprom contents + */ +struct ionic_xcvr_status { + u8 state; + u8 phy; + __le16 pid; + u8 sprom[256]; +}; + +/** + * Port configuration + * @speed: port speed (in Mbps) + * @mtu: mtu + * @state: port admin state (enum port_admin_state) + * @an_enable: autoneg enable + * @fec_type: fec type (enum port_fec_type) + * @pause_type: pause type (enum port_pause_type) + * @loopback_mode: loopback mode (enum port_loopback_mode) + */ +union ionic_port_config { + struct { +#define IONIC_SPEED_100G 100000 /* 100G in Mbps */ +#define IONIC_SPEED_50G 50000 /* 50G in Mbps */ +#define IONIC_SPEED_40G 40000 /* 40G in Mbps */ +#define IONIC_SPEED_25G 25000 /* 25G in Mbps */ +#define IONIC_SPEED_10G 10000 /* 10G in Mbps */ +#define IONIC_SPEED_1G 1000 /* 1G in Mbps */ + __le32 speed; + __le32 mtu; + u8 state; + u8 an_enable; + u8 fec_type; +#define IONIC_PAUSE_TYPE_MASK 0x0f +#define IONIC_PAUSE_FLAGS_MASK 0xf0 +#define IONIC_PAUSE_F_TX 0x10 +#define IONIC_PAUSE_F_RX 0x20 + u8 pause_type; + u8 loopback_mode; + }; + __le32 words[64]; +}; + +/** + * Port Status information + * @status: link status (enum port_oper_status) + * @id: port id + * @speed: link speed (in Mbps) + * @xcvr: transceiver status + */ +struct ionic_port_status { + __le32 id; + __le32 speed; + u8 status; + u8 rsvd[51]; + struct ionic_xcvr_status xcvr; +}; + +/** + * struct port_identify_cmd - Port identify command + * @opcode: opcode + * @index: port index + * @ver: Highest version of identify supported by driver + */ +struct ionic_port_identify_cmd { + u8 opcode; + u8 index; + u8 ver; + u8 rsvd[61]; +}; + +/** + * struct port_identify_comp - Port identify command completion + * @status: The status of the command (enum status_code) + * @ver: Version of identify returned by device + */ +struct ionic_port_identify_comp { + u8 status; + u8 ver; + u8 rsvd[14]; +}; + +/** + * struct port_init_cmd - Port initialization command + * @opcode: opcode + 
* @index: port index + * @info_pa: destination address for port info (struct port_info) + */ +struct ionic_port_init_cmd { + u8 opcode; + u8 index; + u8 rsvd[6]; + __le64 info_pa; + u8 rsvd2[48]; +}; + +/** + * struct port_init_comp - Port initialization command completion + * @status: The status of the command (enum status_code) + */ +struct ionic_port_init_comp { + u8 status; + u8 rsvd[15]; +}; + +/** + * struct port_reset_cmd - Port reset command + * @opcode: opcode + * @index: port index + */ +struct ionic_port_reset_cmd { + u8 opcode; + u8 index; + u8 rsvd[62]; +}; + +/** + * struct port_reset_comp - Port reset command completion + * @status: The status of the command (enum status_code) + */ +struct ionic_port_reset_comp { + u8 status; + u8 rsvd[15]; +}; + +/** + * enum stats_ctl_cmd - List of commands for stats control + */ +enum ionic_stats_ctl_cmd { + IONIC_STATS_CTL_RESET = 0, +}; + + +/** + * enum ionic_port_attr - List of device attributes + */ +enum ionic_port_attr { + IONIC_PORT_ATTR_STATE = 0, + IONIC_PORT_ATTR_SPEED = 1, + IONIC_PORT_ATTR_MTU = 2, + IONIC_PORT_ATTR_AUTONEG = 3, + IONIC_PORT_ATTR_FEC = 4, + IONIC_PORT_ATTR_PAUSE = 5, + IONIC_PORT_ATTR_LOOPBACK = 6, + IONIC_PORT_ATTR_STATS_CTRL = 7, +}; + +/** + * struct port_setattr_cmd - Set port attributes on the NIC + * @opcode: Opcode + * @index: port index + * @attr: Attribute type (enum ionic_port_attr) + */ +struct ionic_port_setattr_cmd { + u8 opcode; + u8 index; + u8 attr; + u8 rsvd; + union { + u8 state; + __le32 speed; + __le32 mtu; + u8 an_enable; + u8 fec_type; + u8 pause_type; + u8 loopback_mode; + u8 stats_ctl; + u8 rsvd2[60]; + }; +}; + +/** + * struct port_setattr_comp - Port set attr command completion + * @status: The status of the command (enum status_code) + * @color: Color bit + */ +struct ionic_port_setattr_comp { + u8 status; + u8 rsvd[14]; + u8 color; +}; + +/** + * struct port_getattr_cmd - Get port attributes from the NIC + * @opcode: Opcode + * @index: port index + * @attr: Attribute type (enum ionic_port_attr) + */ +struct ionic_port_getattr_cmd { + u8 opcode; + u8 index; + u8 attr; + u8 rsvd[61]; +}; + +/** + * struct port_getattr_comp - Port get attr command completion + * @status: The status of the command (enum status_code) + * @color: Color bit + */ +struct ionic_port_getattr_comp { + u8 status; + u8 rsvd[3]; + union { + u8 state; + __le32 speed; + __le32 mtu; + u8 an_enable; + u8 fec_type; + u8 pause_type; + u8 loopback_mode; + u8 rsvd2[11]; + }; + u8 color; +}; + +/** + * struct lif_status - Lif status register + * @eid: most recent NotifyQ event id + * @port_num: port the lif is connected to + * @link_status: port status (enum port_oper_status) + * @link_speed: speed of link in Mbps + * @link_down_count: number of times link status changes + */ +struct ionic_lif_status { + __le64 eid; + u8 port_num; + u8 rsvd; + __le16 link_status; + __le32 link_speed; /* units of 1Mbps: eg 10000 = 10Gbps */ + __le16 link_down_count; + u8 rsvd2[46]; +}; + +/** + * struct lif_reset_cmd - LIF reset command + * @opcode: opcode + * @index: LIF index + */ +struct ionic_lif_reset_cmd { + u8 opcode; + u8 rsvd; + __le16 index; + __le32 rsvd2[15]; +}; + +typedef struct ionic_admin_comp ionic_lif_reset_comp; + +enum ionic_dev_state { + IONIC_DEV_DISABLE = 0, + IONIC_DEV_ENABLE = 1, + IONIC_DEV_HANG_RESET = 2, +}; + +/** + * enum dev_attr - List of device attributes + */ +enum ionic_dev_attr { + IONIC_DEV_ATTR_STATE = 0, + IONIC_DEV_ATTR_NAME = 1, + IONIC_DEV_ATTR_FEATURES = 2, +}; + +/** + * struct dev_setattr_cmd - 
Set Device attributes on the NIC + * @opcode: Opcode + * @attr: Attribute type (enum dev_attr) + * @state: Device state (enum dev_state) + * @name: The bus info, e.g. PCI slot-device-function, 0 terminated + * @features: Device features + */ +struct ionic_dev_setattr_cmd { + u8 opcode; + u8 attr; + __le16 rsvd; + union { + u8 state; + char name[IONIC_IFNAMSIZ]; + __le64 features; + u8 rsvd2[60]; + }; +}; + +/** + * struct dev_setattr_comp - Device set attr command completion + * @status: The status of the command (enum status_code) + * @features: Device features + * @color: Color bit + */ +struct ionic_dev_setattr_comp { + u8 status; + u8 rsvd[3]; + union { + __le64 features; + u8 rsvd2[11]; + }; + u8 color; +}; + +/** + * struct dev_getattr_cmd - Get Device attributes from the NIC + * @opcode: opcode + * @attr: Attribute type (enum dev_attr) + */ +struct ionic_dev_getattr_cmd { + u8 opcode; + u8 attr; + u8 rsvd[62]; +}; + +/** + * struct dev_getattr_comp - Device get attr command completion + * @status: The status of the command (enum status_code) + * @features: Device features + * @color: Color bit + */ +struct ionic_dev_getattr_comp { + u8 status; + u8 rsvd[3]; + union { + __le64 features; + u8 rsvd2[11]; + }; + u8 color; +}; + +/** + * RSS parameters + */ +#define IONIC_RSS_HASH_KEY_SIZE 40 + +enum ionic_rss_hash_types { + IONIC_RSS_TYPE_IPV4 = BIT(0), + IONIC_RSS_TYPE_IPV4_TCP = BIT(1), + IONIC_RSS_TYPE_IPV4_UDP = BIT(2), + IONIC_RSS_TYPE_IPV6 = BIT(3), + IONIC_RSS_TYPE_IPV6_TCP = BIT(4), + IONIC_RSS_TYPE_IPV6_UDP = BIT(5), +}; + +/** + * enum lif_attr - List of LIF attributes + */ +enum ionic_lif_attr { + IONIC_LIF_ATTR_STATE = 0, + IONIC_LIF_ATTR_NAME = 1, + IONIC_LIF_ATTR_MTU = 2, + IONIC_LIF_ATTR_MAC = 3, + IONIC_LIF_ATTR_FEATURES = 4, + IONIC_LIF_ATTR_RSS = 5, + IONIC_LIF_ATTR_STATS_CTRL = 6, +}; + +/** + * struct lif_setattr_cmd - Set LIF attributes on the NIC + * @opcode: Opcode + * @attr: Attribute type (enum lif_attr) + * @index: LIF index + * @state: lif state (enum lif_state) + * @name: The netdev name string, 0 terminated + * @mtu: Mtu + * @mac: Station mac + * @features: Features (enum eth_hw_features) + * @rss: RSS properties + * @types: The hash types to enable (see rss_hash_types). + * @key: The hash secret key. + * @addr: Address for the indirection table shared memory. + * @stats_ctl: stats control commands (enum stats_ctl_cmd) + */ +struct ionic_lif_setattr_cmd { + u8 opcode; + u8 attr; + __le16 index; + union { + u8 state; + char name[IONIC_IFNAMSIZ]; + __le32 mtu; + u8 mac[6]; + __le64 features; + struct { + __le16 types; + u8 key[IONIC_RSS_HASH_KEY_SIZE]; + u8 rsvd[6]; + __le64 addr; + } rss; + u8 stats_ctl; + u8 rsvd[60]; + }; +}; + +/** + * struct lif_setattr_comp - LIF set attr command completion + * @status: The status of the command (enum status_code) + * @comp_index: The index in the descriptor ring for which this + * is the completion. 
+ * @features: features (enum eth_hw_features) + * @color: Color bit + */ +struct ionic_lif_setattr_comp { + u8 status; + u8 rsvd; + __le16 comp_index; + union { + __le64 features; + u8 rsvd2[11]; + }; + u8 color; +}; + +/** + * struct lif_getattr_cmd - Get LIF attributes from the NIC + * @opcode: Opcode + * @attr: Attribute type (enum lif_attr) + * @index: LIF index + */ +struct ionic_lif_getattr_cmd { + u8 opcode; + u8 attr; + __le16 index; + u8 rsvd[60]; +}; + +/** + * struct lif_getattr_comp - LIF get attr command completion + * @status: The status of the command (enum status_code) + * @comp_index: The index in the descriptor ring for which this + * is the completion. + * @state: lif state (enum lif_state) + * @name: The netdev name string, 0 terminated + * @mtu: Mtu + * @mac: Station mac + * @features: Features (enum eth_hw_features) + * @color: Color bit + */ +struct ionic_lif_getattr_comp { + u8 status; + u8 rsvd; + __le16 comp_index; + union { + u8 state; + __le32 mtu; + u8 mac[6]; + __le64 features; + u8 rsvd2[11]; + }; + u8 color; +}; + +enum ionic_rx_mode { + IONIC_RX_MODE_F_UNICAST = BIT(0), + IONIC_RX_MODE_F_MULTICAST = BIT(1), + IONIC_RX_MODE_F_BROADCAST = BIT(2), + IONIC_RX_MODE_F_PROMISC = BIT(3), + IONIC_RX_MODE_F_ALLMULTI = BIT(4), +}; + +/** + * struct rx_mode_set_cmd - Set LIF's Rx mode command + * @opcode: opcode + * @lif_index: LIF index + * @rx_mode: Rx mode flags: + * IONIC_RX_MODE_F_UNICAST: Accept known unicast packets. + * IONIC_RX_MODE_F_MULTICAST: Accept known multicast packets. + * IONIC_RX_MODE_F_BROADCAST: Accept broadcast packets. + * IONIC_RX_MODE_F_PROMISC: Accept any packets. + * IONIC_RX_MODE_F_ALLMULTI: Accept any multicast packets. + */ +struct ionic_rx_mode_set_cmd { + u8 opcode; + u8 rsvd; + __le16 lif_index; + __le16 rx_mode; + __le16 rsvd2[29]; +}; + +typedef struct ionic_admin_comp ionic_rx_mode_set_comp; + +enum ionic_rx_filter_match_type { + IONIC_RX_FILTER_MATCH_VLAN = 0, + IONIC_RX_FILTER_MATCH_MAC, + IONIC_RX_FILTER_MATCH_MAC_VLAN, +}; + +/** + * struct rx_filter_add_cmd - Add LIF Rx filter command + * @opcode: opcode + * @qtype: Queue type + * @lif_index: LIF index + * @qid: Queue ID + * @match: Rx filter match type. (See IONIC_RX_FILTER_MATCH_xxx) + * @vlan: VLAN ID + * @addr: MAC address (network-byte order) + */ +struct ionic_rx_filter_add_cmd { + u8 opcode; + u8 qtype; + __le16 lif_index; + __le32 qid; + __le16 match; + union { + struct { + __le16 vlan; + } vlan; + struct { + u8 addr[6]; + } mac; + struct { + __le16 vlan; + u8 addr[6]; + } mac_vlan; + u8 rsvd[54]; + }; +}; + +/** + * struct rx_filter_add_comp - Add LIF Rx filter command completion + * @status: The status of the command (enum status_code) + * @comp_index: The index in the descriptor ring for which this + * is the completion. + * @filter_id: Filter ID + * @color: Color bit. 
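+ * + * The @filter_id returned here is the handle later passed in + * struct rx_filter_del_cmd to remove the filter. 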
+ */ +struct ionic_rx_filter_add_comp { + u8 status; + u8 rsvd; + __le16 comp_index; + __le32 filter_id; + u8 rsvd2[7]; + u8 color; +}; + +/** + * struct rx_filter_del_cmd - Delete LIF Rx filter command + * @opcode: opcode + * @lif_index: LIF index + * @filter_id: Filter ID + */ +struct ionic_rx_filter_del_cmd { + u8 opcode; + u8 rsvd; + __le16 lif_index; + __le32 filter_id; + u8 rsvd2[56]; +}; + +typedef struct ionic_admin_comp ionic_rx_filter_del_comp; + +/** + * struct qos_identify_cmd - QoS identify command + * @opcode: opcode + * @ver: Highest version of identify supported by driver + * + */ +struct ionic_qos_identify_cmd { + u8 opcode; + u8 ver; + u8 rsvd[62]; +}; + +/** + * struct qos_identify_comp - QoS identify command completion + * @status: The status of the command (enum status_code) + * @ver: Version of identify returned by device + */ +struct ionic_qos_identify_comp { + u8 status; + u8 ver; + u8 rsvd[14]; +}; + +#define IONIC_QOS_CLASS_MAX 7 +#define IONIC_QOS_CLASS_NAME_SZ 32 +#define IONIC_QOS_DSCP_MAX_VALUES 64 + +/** + * enum qos_class + */ +enum ionic_qos_class { + IONIC_QOS_CLASS_DEFAULT = 0, + IONIC_QOS_CLASS_USER_DEFINED_1 = 1, + IONIC_QOS_CLASS_USER_DEFINED_2 = 2, + IONIC_QOS_CLASS_USER_DEFINED_3 = 3, + IONIC_QOS_CLASS_USER_DEFINED_4 = 4, + IONIC_QOS_CLASS_USER_DEFINED_5 = 5, + IONIC_QOS_CLASS_USER_DEFINED_6 = 6, +}; + +/** + * enum qos_class_type - Traffic classification criteria + */ +enum ionic_qos_class_type { + IONIC_QOS_CLASS_TYPE_NONE = 0, + IONIC_QOS_CLASS_TYPE_PCP = 1, /* Dot1Q pcp */ + IONIC_QOS_CLASS_TYPE_DSCP = 2, /* IP dscp */ +}; + +/** + * enum qos_sched_type - Qos class scheduling type + */ +enum ionic_qos_sched_type { + IONIC_QOS_SCHED_TYPE_STRICT = 0, /* Strict priority */ + IONIC_QOS_SCHED_TYPE_DWRR = 1, /* Deficit weighted round-robin */ +}; + +/** + * union qos_config - Qos configuration structure + * @flags: Configuration flags + * IONIC_QOS_CONFIG_F_ENABLE enable + * IONIC_QOS_CONFIG_F_DROP drop/nodrop + * IONIC_QOS_CONFIG_F_RW_DOT1Q_PCP enable dot1q pcp rewrite + * IONIC_QOS_CONFIG_F_RW_IP_DSCP enable ip dscp rewrite + * @sched_type: Qos class scheduling type (enum qos_sched_type) + * @class_type: Qos class type (enum qos_class_type) + * @pause_type: Qos pause type (enum qos_pause_type) + * @name: Qos class name + * @mtu: MTU of the class + * @pfc_cos: Pcp value for pause frames (valid iff F_NODROP) + * @dwrr_weight: Qos class scheduling weight + * @strict_rlmt: Rate limit for strict priority scheduling + * @rw_dot1q_pcp: Rewrite dot1q pcp to this value (valid iff F_RW_DOT1Q_PCP) + * @rw_ip_dscp: Rewrite ip dscp to this value (valid iff F_RW_IP_DSCP) + * @dot1q_pcp: Dot1q pcp value + * @ndscp: Number of valid dscp values in the ip_dscp field + * @ip_dscp: IP dscp values + */ +union ionic_qos_config { + struct { +#define IONIC_QOS_CONFIG_F_ENABLE BIT(0) +#define IONIC_QOS_CONFIG_F_DROP BIT(1) +#define IONIC_QOS_CONFIG_F_RW_DOT1Q_PCP BIT(2) +#define IONIC_QOS_CONFIG_F_RW_IP_DSCP BIT(3) + u8 flags; + u8 sched_type; + u8 class_type; + u8 pause_type; + char name[IONIC_QOS_CLASS_NAME_SZ]; + __le32 mtu; + /* flow control */ + u8 pfc_cos; + /* scheduler */ + union { + u8 dwrr_weight; + __le64 strict_rlmt; + }; + /* marking */ + union { + u8 rw_dot1q_pcp; + u8 rw_ip_dscp; + }; + /* classification */ + union { + u8 dot1q_pcp; + struct { + u8 ndscp; + u8 ip_dscp[IONIC_QOS_DSCP_MAX_VALUES]; + }; + }; + }; + __le32 words[64]; +}; + +/** + * union qos_identity - QoS identity structure + * @version: Version of the identify structure + * @type: QoS 
system type + * @nclasses: Number of usable QoS classes + * @config: Current configuration of classes + */ +union ionic_qos_identity { + struct { + u8 version; + u8 type; + u8 rsvd[62]; + union ionic_qos_config config[IONIC_QOS_CLASS_MAX]; + }; + __le32 words[512]; +}; + +/** + * struct qos_init_cmd - QoS config init command + * @opcode: Opcode + * @group: Qos class id + * @info_pa: destination address for qos info + */ +struct ionic_qos_init_cmd { + u8 opcode; + u8 group; + u8 rsvd[6]; + __le64 info_pa; + u8 rsvd1[48]; +}; + +typedef struct ionic_admin_comp ionic_qos_init_comp; + +/** + * struct qos_reset_cmd - Qos config reset command + * @opcode: Opcode + * @group: Qos class id + */ +struct ionic_qos_reset_cmd { + u8 opcode; + u8 group; + u8 rsvd[62]; +}; + +typedef struct ionic_admin_comp ionic_qos_reset_comp; + +/** + * struct fw_download_cmd - Firmware download command + * @opcode: opcode + * @addr: dma address of the firmware buffer + * @offset: offset of the firmware buffer within the full image + * @length: number of valid bytes in the firmware buffer + */ +struct ionic_fw_download_cmd { + u8 opcode; + u8 rsvd[3]; + __le32 offset; + __le64 addr; + __le32 length; +}; + +typedef struct ionic_admin_comp ionic_fw_download_comp; + +enum ionic_fw_control_oper { + IONIC_FW_RESET = 0, /* Reset firmware */ + IONIC_FW_INSTALL = 1, /* Install firmware */ + IONIC_FW_ACTIVATE = 2, /* Activate firmware */ +}; + +/** + * struct fw_control_cmd - Firmware control command + * @opcode: opcode + * @oper: firmware control operation (enum fw_control_oper) + * @slot: slot to activate + */ +struct ionic_fw_control_cmd { + u8 opcode; + u8 rsvd[3]; + u8 oper; + u8 slot; + u8 rsvd1[58]; +}; + +/** + * struct fw_control_comp - Firmware control completion + * @status: The status of the command (enum status_code) + * @comp_index: The index in the descriptor ring for which this + * is the completion. + * @slot: slot where the firmware was installed + * @color: Color bit + */ +struct ionic_fw_control_comp { + u8 status; + u8 rsvd; + __le16 comp_index; + u8 slot; + u8 rsvd1[10]; + u8 color; +}; + +/****************************************************************** + ******************* RDMA Commands ******************************** + ******************************************************************/ + +/** + * struct rdma_reset_cmd - Reset RDMA LIF cmd + * @opcode: opcode + * @lif_index: lif index + * + * There is no rdma specific dev command completion struct. Completion uses + * the common struct admin_comp. Only the status is indicated. Nonzero status + * means the LIF does not support rdma. + **/ +struct ionic_rdma_reset_cmd { + u8 opcode; + u8 rsvd; + __le16 lif_index; + u8 rsvd2[60]; +}; + +/** + * struct rdma_queue_cmd - Create RDMA Queue command + * @opcode: opcode, 52, 53 + * @lif_index: lif index + * @qid_ver: (qid | (rdma version << 24)) + * @cid: intr, eq_id, or cq_id + * @dbid: doorbell page id + * @depth_log2: log base two of queue depth + * @stride_log2: log base two of queue stride + * @dma_addr: address of the queue memory + * @xxx_table_index: temporary, but should not need pgtbl for contig. queues. + * + * The same command struct is used to create an rdma event queue, completion + * queue, or rdma admin queue. The cid is an interrupt number for an event + * queue, an event queue id for a completion queue, or a completion queue id + * for an rdma admin queue. + * + * The queue created via a dev command must be contiguous in dma space. + * + * The dev commands are intended only to be used during driver initialization, + * to create queues supporting the rdma admin queue. 
Other queues, and other + * types of rdma resources like memory regions, will be created and registered + * via the rdma admin queue, and will support a more complete interface + * providing scatter gather lists for larger, scattered queue buffers and + * memory registration. + * + * There is no rdma specific dev command completion struct. Completion uses + * the common struct admin_comp. Only the status is indicated. + **/ +struct ionic_rdma_queue_cmd { + u8 opcode; + u8 rsvd; + __le16 lif_index; + __le32 qid_ver; + __le32 cid; + __le16 dbid; + u8 depth_log2; + u8 stride_log2; + __le64 dma_addr; + u8 rsvd2[36]; + __le32 xxx_table_index; +}; + +/****************************************************************** + ******************* Notify Events ******************************** + ******************************************************************/ + +/** + * struct notifyq_event + * @eid: event number + * @ecode: event code + * @data: unspecified data about the event + * + * This is the generic event report struct from which the other + * actual events will be formed. + */ +struct ionic_notifyq_event { + __le64 eid; + __le16 ecode; + u8 data[54]; +}; + +/** + * struct link_change_event + * @eid: event number + * @ecode: event code = EVENT_OPCODE_LINK_CHANGE + * @link_status: link up or down, with error bits (enum port_status) + * @link_speed: speed of the network link + * + * Sent when the network link state changes between UP and DOWN + */ +struct ionic_link_change_event { + __le64 eid; + __le16 ecode; + __le16 link_status; + __le32 link_speed; /* units of 1Mbps: e.g. 10000 = 10Gbps */ + u8 rsvd[48]; +}; + +/** + * struct reset_event + * @eid: event number + * @ecode: event code = EVENT_OPCODE_RESET + * @reset_code: reset type + * @state: 0=pending, 1=complete, 2=error + * + * Sent when the NIC or some subsystem is going to be or + * has been reset. + */ +struct ionic_reset_event { + __le64 eid; + __le16 ecode; + u8 reset_code; + u8 state; + u8 rsvd[52]; +}; + +/** + * struct heartbeat_event + * @eid: event number + * @ecode: event code = EVENT_OPCODE_HEARTBEAT + * + * Sent periodically by the NIC to indicate continued health + */ +struct ionic_heartbeat_event { + __le64 eid; + __le16 ecode; + u8 rsvd[54]; +}; + +/** + * struct log_event + * @eid: event number + * @ecode: event code = EVENT_OPCODE_LOG + * @data: log data + * + * Sent to notify the driver of an internal error. 
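+ * Like the other notifyq events, the log event is a 64-byte message: + * an 8-byte eid and a 2-byte ecode followed by the payload bytes.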
+ */ +struct ionic_log_event { + __le64 eid; + __le16 ecode; + u8 data[54]; +}; + +/** + * struct port_stats + */ +struct ionic_port_stats { + __le64 frames_rx_ok; + __le64 frames_rx_all; + __le64 frames_rx_bad_fcs; + __le64 frames_rx_bad_all; + __le64 octets_rx_ok; + __le64 octets_rx_all; + __le64 frames_rx_unicast; + __le64 frames_rx_multicast; + __le64 frames_rx_broadcast; + __le64 frames_rx_pause; + __le64 frames_rx_bad_length; + __le64 frames_rx_undersized; + __le64 frames_rx_oversized; + __le64 frames_rx_fragments; + __le64 frames_rx_jabber; + __le64 frames_rx_pripause; + __le64 frames_rx_stomped_crc; + __le64 frames_rx_too_long; + __le64 frames_rx_vlan_good; + __le64 frames_rx_dropped; + __le64 frames_rx_less_than_64b; + __le64 frames_rx_64b; + __le64 frames_rx_65b_127b; + __le64 frames_rx_128b_255b; + __le64 frames_rx_256b_511b; + __le64 frames_rx_512b_1023b; + __le64 frames_rx_1024b_1518b; + __le64 frames_rx_1519b_2047b; + __le64 frames_rx_2048b_4095b; + __le64 frames_rx_4096b_8191b; + __le64 frames_rx_8192b_9215b; + __le64 frames_rx_other; + __le64 frames_tx_ok; + __le64 frames_tx_all; + __le64 frames_tx_bad; + __le64 octets_tx_ok; + __le64 octets_tx_total; + __le64 frames_tx_unicast; + __le64 frames_tx_multicast; + __le64 frames_tx_broadcast; + __le64 frames_tx_pause; + __le64 frames_tx_pripause; + __le64 frames_tx_vlan; + __le64 frames_tx_less_than_64b; + __le64 frames_tx_64b; + __le64 frames_tx_65b_127b; + __le64 frames_tx_128b_255b; + __le64 frames_tx_256b_511b; + __le64 frames_tx_512b_1023b; + __le64 frames_tx_1024b_1518b; + __le64 frames_tx_1519b_2047b; + __le64 frames_tx_2048b_4095b; + __le64 frames_tx_4096b_8191b; + __le64 frames_tx_8192b_9215b; + __le64 frames_tx_other; + __le64 frames_tx_pri_0; + __le64 frames_tx_pri_1; + __le64 frames_tx_pri_2; + __le64 frames_tx_pri_3; + __le64 frames_tx_pri_4; + __le64 frames_tx_pri_5; + __le64 frames_tx_pri_6; + __le64 frames_tx_pri_7; + __le64 frames_rx_pri_0; + __le64 frames_rx_pri_1; + __le64 frames_rx_pri_2; + __le64 frames_rx_pri_3; + __le64 frames_rx_pri_4; + __le64 frames_rx_pri_5; + __le64 frames_rx_pri_6; + __le64 frames_rx_pri_7; + __le64 tx_pripause_0_1us_count; + __le64 tx_pripause_1_1us_count; + __le64 tx_pripause_2_1us_count; + __le64 tx_pripause_3_1us_count; + __le64 tx_pripause_4_1us_count; + __le64 tx_pripause_5_1us_count; + __le64 tx_pripause_6_1us_count; + __le64 tx_pripause_7_1us_count; + __le64 rx_pripause_0_1us_count; + __le64 rx_pripause_1_1us_count; + __le64 rx_pripause_2_1us_count; + __le64 rx_pripause_3_1us_count; + __le64 rx_pripause_4_1us_count; + __le64 rx_pripause_5_1us_count; + __le64 rx_pripause_6_1us_count; + __le64 rx_pripause_7_1us_count; + __le64 rx_pause_1us_count; + __le64 frames_tx_truncated; +}; + +struct ionic_mgmt_port_stats { + __le64 frames_rx_ok; + __le64 frames_rx_all; + __le64 frames_rx_bad_fcs; + __le64 frames_rx_bad_all; + __le64 octets_rx_ok; + __le64 octets_rx_all; + __le64 frames_rx_unicast; + __le64 frames_rx_multicast; + __le64 frames_rx_broadcast; + __le64 frames_rx_pause; + __le64 frames_rx_bad_length; + __le64 frames_rx_undersized; + __le64 frames_rx_oversized; + __le64 frames_rx_fragments; + __le64 frames_rx_jabber; + __le64 frames_rx_64b; + __le64 frames_rx_65b_127b; + __le64 frames_rx_128b_255b; + __le64 frames_rx_256b_511b; + __le64 frames_rx_512b_1023b; + __le64 frames_rx_1024b_1518b; + __le64 frames_rx_gt_1518b; + __le64 frames_rx_fifo_full; + __le64 frames_tx_ok; + __le64 frames_tx_all; + __le64 frames_tx_bad; + __le64 octets_tx_ok; + __le64 
octets_tx_total; + __le64 frames_tx_unicast; + __le64 frames_tx_multicast; + __le64 frames_tx_broadcast; + __le64 frames_tx_pause; +}; + +/** + * union port_identity - port identity structure + * @version: identity structure version + * @type: type of port (enum port_type) + * @num_lanes: number of lanes for the port + * @autoneg: autoneg supported + * @min_frame_size: minimum frame size supported + * @max_frame_size: maximum frame size supported + * @fec_type: supported fec types + * @pause_type: supported pause types + * @loopback_mode: supported loopback mode + * @speeds: supported speeds + * @config: current port configuration + */ +union ionic_port_identity { + struct { + u8 version; + u8 type; + u8 num_lanes; + u8 autoneg; + __le32 min_frame_size; + __le32 max_frame_size; + u8 fec_type[4]; + u8 pause_type[2]; + u8 loopback_mode[2]; + __le32 speeds[16]; + u8 rsvd2[44]; + union ionic_port_config config; + }; + __le32 words[512]; +}; + +/** + * struct port_info - port info structure + * @config: current port configuration + * @status: port status + * @stats: port stats + */ +struct ionic_port_info { + union ionic_port_config config; + struct ionic_port_status status; + struct ionic_port_stats stats; +}; + +/** + * struct lif_stats + */ +struct ionic_lif_stats { + /* RX */ + __le64 rx_ucast_bytes; + __le64 rx_ucast_packets; + __le64 rx_mcast_bytes; + __le64 rx_mcast_packets; + __le64 rx_bcast_bytes; + __le64 rx_bcast_packets; + __le64 rsvd0; + __le64 rsvd1; + /* RX drops */ + __le64 rx_ucast_drop_bytes; + __le64 rx_ucast_drop_packets; + __le64 rx_mcast_drop_bytes; + __le64 rx_mcast_drop_packets; + __le64 rx_bcast_drop_bytes; + __le64 rx_bcast_drop_packets; + __le64 rx_dma_error; + __le64 rsvd2; + /* TX */ + __le64 tx_ucast_bytes; + __le64 tx_ucast_packets; + __le64 tx_mcast_bytes; + __le64 tx_mcast_packets; + __le64 tx_bcast_bytes; + __le64 tx_bcast_packets; + __le64 rsvd3; + __le64 rsvd4; + /* TX drops */ + __le64 tx_ucast_drop_bytes; + __le64 tx_ucast_drop_packets; + __le64 tx_mcast_drop_bytes; + __le64 tx_mcast_drop_packets; + __le64 tx_bcast_drop_bytes; + __le64 tx_bcast_drop_packets; + __le64 tx_dma_error; + __le64 rsvd5; + /* Rx Queue/Ring drops */ + __le64 rx_queue_disabled; + __le64 rx_queue_empty; + __le64 rx_queue_error; + __le64 rx_desc_fetch_error; + __le64 rx_desc_data_error; + __le64 rsvd6; + __le64 rsvd7; + __le64 rsvd8; + /* Tx Queue/Ring drops */ + __le64 tx_queue_disabled; + __le64 tx_queue_error; + __le64 tx_desc_fetch_error; + __le64 tx_desc_data_error; + __le64 rsvd9; + __le64 rsvd10; + __le64 rsvd11; + __le64 rsvd12; + + /* RDMA/ROCE TX */ + __le64 tx_rdma_ucast_bytes; + __le64 tx_rdma_ucast_packets; + __le64 tx_rdma_mcast_bytes; + __le64 tx_rdma_mcast_packets; + __le64 tx_rdma_cnp_packets; + __le64 rsvd13; + __le64 rsvd14; + __le64 rsvd15; + + /* RDMA/ROCE RX */ + __le64 rx_rdma_ucast_bytes; + __le64 rx_rdma_ucast_packets; + __le64 rx_rdma_mcast_bytes; + __le64 rx_rdma_mcast_packets; + __le64 rx_rdma_cnp_packets; + __le64 rx_rdma_ecn_packets; + __le64 rsvd16; + __le64 rsvd17; + + __le64 rsvd18; + __le64 rsvd19; + __le64 rsvd20; + __le64 rsvd21; + __le64 rsvd22; + __le64 rsvd23; + __le64 rsvd24; + __le64 rsvd25; + + __le64 rsvd26; + __le64 rsvd27; + __le64 rsvd28; + __le64 rsvd29; + __le64 rsvd30; + __le64 rsvd31; + __le64 rsvd32; + __le64 rsvd33; + + __le64 rsvd34; + __le64 rsvd35; + __le64 rsvd36; + __le64 rsvd37; + __le64 rsvd38; + __le64 rsvd39; + __le64 rsvd40; + __le64 rsvd41; + + __le64 rsvd42; + __le64 rsvd43; + __le64 rsvd44; + __le64 rsvd45; + __le64 rsvd46; + __le64 rsvd47; + 
__le64 rsvd48; + __le64 rsvd49; + + /* RDMA/ROCE REQ Error/Debugs (768 - 895) */ + __le64 rdma_req_rx_pkt_seq_err; + __le64 rdma_req_rx_rnr_retry_err; + __le64 rdma_req_rx_remote_access_err; + __le64 rdma_req_rx_remote_inv_req_err; + __le64 rdma_req_rx_remote_oper_err; + __le64 rdma_req_rx_implied_nak_seq_err; + __le64 rdma_req_rx_cqe_err; + __le64 rdma_req_rx_cqe_flush_err; + + __le64 rdma_req_rx_dup_responses; + __le64 rdma_req_rx_invalid_packets; + __le64 rdma_req_tx_local_access_err; + __le64 rdma_req_tx_local_oper_err; + __le64 rdma_req_tx_memory_mgmt_err; + __le64 rsvd52; + __le64 rsvd53; + __le64 rsvd54; + + /* RDMA/ROCE RESP Error/Debugs (896 - 1023) */ + __le64 rdma_resp_rx_dup_requests; + __le64 rdma_resp_rx_out_of_buffer; + __le64 rdma_resp_rx_out_of_seq_pkts; + __le64 rdma_resp_rx_cqe_err; + __le64 rdma_resp_rx_cqe_flush_err; + __le64 rdma_resp_rx_local_len_err; + __le64 rdma_resp_rx_inv_request_err; + __le64 rdma_resp_rx_local_qp_oper_err; + + __le64 rdma_resp_rx_out_of_atomic_resource; + __le64 rdma_resp_tx_pkt_seq_err; + __le64 rdma_resp_tx_remote_inv_req_err; + __le64 rdma_resp_tx_remote_access_err; + __le64 rdma_resp_tx_remote_oper_err; + __le64 rdma_resp_tx_rnr_retry_err; + __le64 rsvd57; + __le64 rsvd58; +}; + +/** + * struct lif_info - lif info structure + */ +struct ionic_lif_info { + union ionic_lif_config config; + struct ionic_lif_status status; + struct ionic_lif_stats stats; +}; + +union ionic_dev_cmd { + u32 words[16]; + struct ionic_admin_cmd cmd; + struct ionic_nop_cmd nop; + + struct ionic_dev_identify_cmd identify; + struct ionic_dev_init_cmd init; + struct ionic_dev_reset_cmd reset; + struct ionic_dev_getattr_cmd getattr; + struct ionic_dev_setattr_cmd setattr; + + struct ionic_port_identify_cmd port_identify; + struct ionic_port_init_cmd port_init; + struct ionic_port_reset_cmd port_reset; + struct ionic_port_getattr_cmd port_getattr; + struct ionic_port_setattr_cmd port_setattr; + + struct ionic_lif_identify_cmd lif_identify; + struct ionic_lif_init_cmd lif_init; + struct ionic_lif_reset_cmd lif_reset; + + struct ionic_qos_identify_cmd qos_identify; + struct ionic_qos_init_cmd qos_init; + struct ionic_qos_reset_cmd qos_reset; + + struct ionic_q_init_cmd q_init; +}; + +union ionic_dev_cmd_comp { + u32 words[4]; + u8 status; + struct ionic_admin_comp comp; + struct ionic_nop_comp nop; + + struct ionic_dev_identify_comp identify; + struct ionic_dev_init_comp init; + struct ionic_dev_reset_comp reset; + struct ionic_dev_getattr_comp getattr; + struct ionic_dev_setattr_comp setattr; + + struct ionic_port_identify_comp port_identify; + struct ionic_port_init_comp port_init; + struct ionic_port_reset_comp port_reset; + struct ionic_port_getattr_comp port_getattr; + struct ionic_port_setattr_comp port_setattr; + + struct ionic_lif_identify_comp lif_identify; + struct ionic_lif_init_comp lif_init; + ionic_lif_reset_comp lif_reset; + + struct ionic_qos_identify_comp qos_identify; + ionic_qos_init_comp qos_init; + ionic_qos_reset_comp qos_reset; + + struct ionic_q_init_comp q_init; +}; + +/** + * union dev_info - Device info register format (read-only) + * @signature: Signature value of 0x44455649 ('DEVI'). + * @version: Current version of info. + * @asic_type: Asic type. + * @asic_rev: Asic revision. + * @fw_status: Firmware status. + * @fw_heartbeat: Firmware heartbeat counter. + * @serial_num: Serial number. + * @fw_version: Firmware version. 
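+ * + * A driver probe can, for example, sanity-check its BAR mapping by + * reading signature and comparing it against 0x44455649 before + * trusting the other fields.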
+ */ +union ionic_dev_info_regs { +#define IONIC_DEVINFO_FWVERS_BUFLEN 32 +#define IONIC_DEVINFO_SERIAL_BUFLEN 32 + struct { + u32 signature; + u8 version; + u8 asic_type; + u8 asic_rev; + u8 fw_status; + u32 fw_heartbeat; + char fw_version[IONIC_DEVINFO_FWVERS_BUFLEN]; + char serial_num[IONIC_DEVINFO_SERIAL_BUFLEN]; + }; + u32 words[512]; +}; + +/** + * union dev_cmd_regs - Device command register format (read-write) + * @doorbell: Device Cmd Doorbell, write-only. + * Write a 1 to signal device to process cmd, + * poll done for completion. + * @done: Done indicator, bit 0 == 1 when command is complete. + * @cmd: Opcode-specific command bytes + * @comp: Opcode-specific response bytes + * @data: Opcode-specific side-data + */ +union ionic_dev_cmd_regs { + struct { + u32 doorbell; + u32 done; + union ionic_dev_cmd cmd; + union ionic_dev_cmd_comp comp; + u8 rsvd[48]; + u32 data[478]; + }; + u32 words[512]; +}; + +/** + * union dev_regs - Device register format for BAR 0 page 0 + * @info: Device info registers + * @devcmd: Device command registers + */ +union ionic_dev_regs { + struct { + union ionic_dev_info_regs info; + union ionic_dev_cmd_regs devcmd; + }; + __le32 words[1024]; +}; + +union ionic_adminq_cmd { + struct ionic_admin_cmd cmd; + struct ionic_nop_cmd nop; + struct ionic_q_init_cmd q_init; + struct ionic_q_control_cmd q_control; + struct ionic_lif_setattr_cmd lif_setattr; + struct ionic_lif_getattr_cmd lif_getattr; + struct ionic_rx_mode_set_cmd rx_mode_set; + struct ionic_rx_filter_add_cmd rx_filter_add; + struct ionic_rx_filter_del_cmd rx_filter_del; + struct ionic_rdma_reset_cmd rdma_reset; + struct ionic_rdma_queue_cmd rdma_queue; + struct ionic_fw_download_cmd fw_download; + struct ionic_fw_control_cmd fw_control; +}; + +union ionic_adminq_comp { + struct ionic_admin_comp comp; + struct ionic_nop_comp nop; + struct ionic_q_init_comp q_init; + struct ionic_lif_setattr_comp lif_setattr; + struct ionic_lif_getattr_comp lif_getattr; + struct ionic_rx_filter_add_comp rx_filter_add; + struct ionic_fw_control_comp fw_control; +}; + +#define IONIC_BARS_MAX 6 +#define IONIC_PCI_BAR_DBELL 1 + +/* BAR0 */ +#define IONIC_BAR0_SIZE 0x8000 + +#define IONIC_BAR0_DEV_INFO_REGS_OFFSET 0x0000 +#define IONIC_BAR0_DEV_CMD_REGS_OFFSET 0x0800 +#define IONIC_BAR0_DEV_CMD_DATA_REGS_OFFSET 0x0c00 +#define IONIC_BAR0_INTR_STATUS_OFFSET 0x1000 +#define IONIC_BAR0_INTR_CTRL_OFFSET 0x2000 +#define IONIC_DEV_CMD_DONE 0x00000001 + +#define IONIC_ASIC_TYPE_CAPRI 0 + +/** + * struct doorbell - Doorbell register layout + * @p_index: Producer index + * @ring: Selects the specific ring of the queue to update. + * Type-specific meaning: + * ring=0: Default producer/consumer queue. + * ring=1: (CQ, EQ) Re-Arm queue. RDMA CQs + * send events to EQs when armed. EQs send + * interrupts when armed. + * @qid: The queue id selects the queue destination for the + * producer index and flags. 
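+ * + * For example, assuming the 64-bit doorbell write mirrors the + * struct ionic_doorbell layout below (p_index in bits 0-15, ring + * in bits 16-23, qid starting at bit 24), qid=5/ring=0/p_index=42 + * would be written as (5ULL << 24) | (0ULL << 16) | 42.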
+ */ +struct ionic_doorbell { + __le16 p_index; + u8 ring; + u8 qid_lo; + __le16 qid_hi; + u16 rsvd2; +}; + +struct ionic_intr_status { + u32 status[2]; +}; + +struct ionic_notifyq_cmd { + __le32 data; /* Not used but needed for qcq structure */ +}; + +union ionic_notifyq_comp { + struct ionic_notifyq_event event; + struct ionic_link_change_event link_change; + struct ionic_reset_event reset; + struct ionic_heartbeat_event heartbeat; + struct ionic_log_event log; +}; + +/* Deprecate */ +struct ionic_identity { + union ionic_drv_identity drv; + union ionic_dev_identity dev; + union ionic_lif_identity lif; + union ionic_port_identity port; + union ionic_qos_identity qos; +}; + +#pragma pack(pop) + +#endif /* _IONIC_IF_H_ */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c new file mode 100644 index 000000000000..db7c82742828 --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -0,0 +1,2274 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/rtnetlink.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/cpumask.h> + +#include "ionic.h" +#include "ionic_bus.h" +#include "ionic_lif.h" +#include "ionic_txrx.h" +#include "ionic_ethtool.h" +#include "ionic_debugfs.h" + +static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode); +static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr); +static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr); +static void ionic_link_status_check(struct ionic_lif *lif); + +static void ionic_lif_deferred_work(struct work_struct *work) +{ + struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work); + struct ionic_deferred *def = &lif->deferred; + struct ionic_deferred_work *w = NULL; + + spin_lock_bh(&def->lock); + if (!list_empty(&def->list)) { + w = list_first_entry(&def->list, + struct ionic_deferred_work, list); + list_del(&w->list); + } + spin_unlock_bh(&def->lock); + + if (w) { + switch (w->type) { + case IONIC_DW_TYPE_RX_MODE: + ionic_lif_rx_mode(lif, w->rx_mode); + break; + case IONIC_DW_TYPE_RX_ADDR_ADD: + ionic_lif_addr_add(lif, w->addr); + break; + case IONIC_DW_TYPE_RX_ADDR_DEL: + ionic_lif_addr_del(lif, w->addr); + break; + case IONIC_DW_TYPE_LINK_STATUS: + ionic_link_status_check(lif); + break; + default: + break; + } + kfree(w); + schedule_work(&def->work); + } +} + +static void ionic_lif_deferred_enqueue(struct ionic_deferred *def, + struct ionic_deferred_work *work) +{ + spin_lock_bh(&def->lock); + list_add_tail(&work->list, &def->list); + spin_unlock_bh(&def->lock); + schedule_work(&def->work); +} + +static void ionic_link_status_check(struct ionic_lif *lif) +{ + struct net_device *netdev = lif->netdev; + u16 link_status; + bool link_up; + + link_status = le16_to_cpu(lif->info->status.link_status); + link_up = link_status == IONIC_PORT_OPER_STATUS_UP; + + /* filter out the no-change cases */ + if (link_up == netif_carrier_ok(netdev)) + goto link_out; + + if (link_up) { + netdev_info(netdev, "Link up - %d Gbps\n", + le32_to_cpu(lif->info->status.link_speed) / 1000); + + if (test_bit(IONIC_LIF_UP, lif->state)) { + netif_tx_wake_all_queues(lif->netdev); + netif_carrier_on(netdev); + } + } else { + netdev_info(netdev, "Link down\n"); + + /* carrier off first to avoid watchdog timeout */ + netif_carrier_off(netdev); + if (test_bit(IONIC_LIF_UP, lif->state)) + 
netif_tx_stop_all_queues(netdev); + } + +link_out: + clear_bit(IONIC_LIF_LINK_CHECK_REQUESTED, lif->state); +} + +static void ionic_link_status_check_request(struct ionic_lif *lif) +{ + struct ionic_deferred_work *work; + + /* we only need one request outstanding at a time */ + if (test_and_set_bit(IONIC_LIF_LINK_CHECK_REQUESTED, lif->state)) + return; + + if (in_interrupt()) { + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) + return; + + work->type = IONIC_DW_TYPE_LINK_STATUS; + ionic_lif_deferred_enqueue(&lif->deferred, work); + } else { + ionic_link_status_check(lif); + } +} + +static irqreturn_t ionic_isr(int irq, void *data) +{ + struct napi_struct *napi = data; + + napi_schedule_irqoff(napi); + + return IRQ_HANDLED; +} + +static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq) +{ + struct ionic_intr_info *intr = &qcq->intr; + struct device *dev = lif->ionic->dev; + struct ionic_queue *q = &qcq->q; + const char *name; + + if (lif->registered) + name = lif->netdev->name; + else + name = dev_name(dev); + + snprintf(intr->name, sizeof(intr->name), + "%s-%s-%s", IONIC_DRV_NAME, name, q->name); + + return devm_request_irq(dev, intr->vector, ionic_isr, + 0, intr->name, &qcq->napi); +} + +static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr) +{ + struct ionic *ionic = lif->ionic; + int index; + + index = find_first_zero_bit(ionic->intrs, ionic->nintrs); + if (index == ionic->nintrs) { + netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n", + __func__, index, ionic->nintrs); + return -ENOSPC; + } + + set_bit(index, ionic->intrs); + ionic_intr_init(&ionic->idev, intr, index); + + return 0; +} + +static void ionic_intr_free(struct ionic_lif *lif, int index) +{ + if (index != INTR_INDEX_NOT_ASSIGNED && index < lif->ionic->nintrs) + clear_bit(index, lif->ionic->intrs); +} + +static int ionic_qcq_enable(struct ionic_qcq *qcq) +{ + struct ionic_queue *q = &qcq->q; + struct ionic_lif *lif = q->lif; + struct ionic_dev *idev; + struct device *dev; + + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.q_control = { + .opcode = IONIC_CMD_Q_CONTROL, + .lif_index = cpu_to_le16(lif->index), + .type = q->type, + .index = cpu_to_le32(q->index), + .oper = IONIC_Q_ENABLE, + }, + }; + + idev = &lif->ionic->idev; + dev = lif->ionic->dev; + + dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n", + ctx.cmd.q_control.index, ctx.cmd.q_control.type); + + if (qcq->flags & IONIC_QCQ_F_INTR) { + irq_set_affinity_hint(qcq->intr.vector, + &qcq->intr.affinity_mask); + napi_enable(&qcq->napi); + ionic_intr_clean(idev->intr_ctrl, qcq->intr.index); + ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, + IONIC_INTR_MASK_CLEAR); + } + + return ionic_adminq_post_wait(lif, &ctx); +} + +static int ionic_qcq_disable(struct ionic_qcq *qcq) +{ + struct ionic_queue *q = &qcq->q; + struct ionic_lif *lif = q->lif; + struct ionic_dev *idev; + struct device *dev; + + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.q_control = { + .opcode = IONIC_CMD_Q_CONTROL, + .lif_index = cpu_to_le16(lif->index), + .type = q->type, + .index = cpu_to_le32(q->index), + .oper = IONIC_Q_DISABLE, + }, + }; + + idev = &lif->ionic->idev; + dev = lif->ionic->dev; + + dev_dbg(dev, "q_disable.index %d q_disable.qtype %d\n", + ctx.cmd.q_control.index, ctx.cmd.q_control.type); + + if (qcq->flags & IONIC_QCQ_F_INTR) { + ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, + IONIC_INTR_MASK_SET); + 
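/* the interrupt is now masked, so wait out any handler still + * running on this vector before NAPI is torn down + */ + 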
synchronize_irq(qcq->intr.vector); + irq_set_affinity_hint(qcq->intr.vector, NULL); + napi_disable(&qcq->napi); + } + + return ionic_adminq_post_wait(lif, &ctx); +} + +static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq) +{ + struct ionic_dev *idev = &lif->ionic->idev; + struct device *dev = lif->ionic->dev; + + if (!qcq) + return; + + ionic_debugfs_del_qcq(qcq); + + if (!(qcq->flags & IONIC_QCQ_F_INITED)) + return; + + if (qcq->flags & IONIC_QCQ_F_INTR) { + ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, + IONIC_INTR_MASK_SET); + devm_free_irq(dev, qcq->intr.vector, &qcq->napi); + netif_napi_del(&qcq->napi); + } + + qcq->flags &= ~IONIC_QCQ_F_INITED; +} + +static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq) +{ + struct device *dev = lif->ionic->dev; + + if (!qcq) + return; + + dma_free_coherent(dev, qcq->total_size, qcq->base, qcq->base_pa); + qcq->base = NULL; + qcq->base_pa = 0; + + if (qcq->flags & IONIC_QCQ_F_INTR) + ionic_intr_free(lif, qcq->intr.index); + + devm_kfree(dev, qcq->cq.info); + qcq->cq.info = NULL; + devm_kfree(dev, qcq->q.info); + qcq->q.info = NULL; + devm_kfree(dev, qcq); +} + +static void ionic_qcqs_free(struct ionic_lif *lif) +{ + struct device *dev = lif->ionic->dev; + unsigned int i; + + if (lif->notifyqcq) { + ionic_qcq_free(lif, lif->notifyqcq); + lif->notifyqcq = NULL; + } + + if (lif->adminqcq) { + ionic_qcq_free(lif, lif->adminqcq); + lif->adminqcq = NULL; + } + + for (i = 0; i < lif->nxqs; i++) + if (lif->rxqcqs[i].stats) + devm_kfree(dev, lif->rxqcqs[i].stats); + + devm_kfree(dev, lif->rxqcqs); + lif->rxqcqs = NULL; + + for (i = 0; i < lif->nxqs; i++) + if (lif->txqcqs[i].stats) + devm_kfree(dev, lif->txqcqs[i].stats); + + devm_kfree(dev, lif->txqcqs); + lif->txqcqs = NULL; +} + +static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq, + struct ionic_qcq *n_qcq) +{ + if (WARN_ON(n_qcq->flags & IONIC_QCQ_F_INTR)) { + ionic_intr_free(n_qcq->cq.lif, n_qcq->intr.index); + n_qcq->flags &= ~IONIC_QCQ_F_INTR; + } + + n_qcq->intr.vector = src_qcq->intr.vector; + n_qcq->intr.index = src_qcq->intr.index; +} + +static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, + unsigned int index, + const char *name, unsigned int flags, + unsigned int num_descs, unsigned int desc_size, + unsigned int cq_desc_size, + unsigned int sg_desc_size, + unsigned int pid, struct ionic_qcq **qcq) +{ + struct ionic_dev *idev = &lif->ionic->idev; + u32 q_size, cq_size, sg_size, total_size; + struct device *dev = lif->ionic->dev; + void *q_base, *cq_base, *sg_base; + dma_addr_t cq_base_pa = 0; + dma_addr_t sg_base_pa = 0; + dma_addr_t q_base_pa = 0; + struct ionic_qcq *new; + int err; + + *qcq = NULL; + + q_size = num_descs * desc_size; + cq_size = num_descs * cq_desc_size; + sg_size = num_descs * sg_desc_size; + + total_size = ALIGN(q_size, PAGE_SIZE) + ALIGN(cq_size, PAGE_SIZE); + /* Note: aligning q_size/cq_size is not enough due to cq_base + * address aligning as q_base could be not aligned to the page. + * Adding PAGE_SIZE. 
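+ * e.g. with 4 KiB pages, if q_base starts 0x100 bytes into a + * page then cq_base = ALIGN(q_base + q_size, PAGE_SIZE) can sit + * up to a page beyond base + ALIGN(q_size, PAGE_SIZE); the extra + * page absorbs that worst case.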
+ */ + total_size += PAGE_SIZE; + if (flags & IONIC_QCQ_F_SG) { + total_size += ALIGN(sg_size, PAGE_SIZE); + total_size += PAGE_SIZE; + } + + new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL); + if (!new) { + netdev_err(lif->netdev, "Cannot allocate queue structure\n"); + err = -ENOMEM; + goto err_out; + } + + new->flags = flags; + + new->q.info = devm_kzalloc(dev, sizeof(*new->q.info) * num_descs, + GFP_KERNEL); + if (!new->q.info) { + netdev_err(lif->netdev, "Cannot allocate queue info\n"); + err = -ENOMEM; + goto err_out; + } + + new->q.type = type; + + err = ionic_q_init(lif, idev, &new->q, index, name, num_descs, + desc_size, sg_desc_size, pid); + if (err) { + netdev_err(lif->netdev, "Cannot initialize queue\n"); + goto err_out; + } + + if (flags & IONIC_QCQ_F_INTR) { + err = ionic_intr_alloc(lif, &new->intr); + if (err) { + netdev_warn(lif->netdev, "no intr for %s: %d\n", + name, err); + goto err_out; + } + + err = ionic_bus_get_irq(lif->ionic, new->intr.index); + if (err < 0) { + netdev_warn(lif->netdev, "no vector for %s: %d\n", + name, err); + goto err_out_free_intr; + } + new->intr.vector = err; + ionic_intr_mask_assert(idev->intr_ctrl, new->intr.index, + IONIC_INTR_MASK_SET); + + new->intr.cpu = new->intr.index % num_online_cpus(); + if (cpu_online(new->intr.cpu)) + cpumask_set_cpu(new->intr.cpu, + &new->intr.affinity_mask); + } else { + new->intr.index = INTR_INDEX_NOT_ASSIGNED; + } + + new->cq.info = devm_kzalloc(dev, sizeof(*new->cq.info) * num_descs, + GFP_KERNEL); + if (!new->cq.info) { + netdev_err(lif->netdev, "Cannot allocate completion queue info\n"); + err = -ENOMEM; + goto err_out_free_intr; + } + + err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size); + if (err) { + netdev_err(lif->netdev, "Cannot initialize completion queue\n"); + goto err_out_free_intr; + } + + new->base = dma_alloc_coherent(dev, total_size, &new->base_pa, + GFP_KERNEL); + if (!new->base) { + netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n"); + err = -ENOMEM; + goto err_out_free_intr; + } + + new->total_size = total_size; + + q_base = new->base; + q_base_pa = new->base_pa; + + cq_base = (void *)ALIGN((uintptr_t)q_base + q_size, PAGE_SIZE); + cq_base_pa = ALIGN(q_base_pa + q_size, PAGE_SIZE); + + if (flags & IONIC_QCQ_F_SG) { + sg_base = (void *)ALIGN((uintptr_t)cq_base + cq_size, + PAGE_SIZE); + sg_base_pa = ALIGN(cq_base_pa + cq_size, PAGE_SIZE); + ionic_q_sg_map(&new->q, sg_base, sg_base_pa); + } + + ionic_q_map(&new->q, q_base, q_base_pa); + ionic_cq_map(&new->cq, cq_base, cq_base_pa); + ionic_cq_bind(&new->cq, &new->q); + + *qcq = new; + + return 0; + +err_out_free_intr: + ionic_intr_free(lif, new->intr.index); +err_out: + dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err); + return err; +} + +static int ionic_qcqs_alloc(struct ionic_lif *lif) +{ + struct device *dev = lif->ionic->dev; + unsigned int q_list_size; + unsigned int flags; + int err; + int i; + + flags = IONIC_QCQ_F_INTR; + err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags, + IONIC_ADMINQ_LENGTH, + sizeof(struct ionic_admin_cmd), + sizeof(struct ionic_admin_comp), + 0, lif->kern_pid, &lif->adminqcq); + if (err) + return err; + + if (lif->ionic->nnqs_per_lif) { + flags = IONIC_QCQ_F_NOTIFYQ; + err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq", + flags, IONIC_NOTIFYQ_LENGTH, + sizeof(struct ionic_notifyq_cmd), + sizeof(union ionic_notifyq_comp), + 0, lif->kern_pid, &lif->notifyqcq); + if (err) + goto err_out_free_adminqcq; + + /* Let the notifyq ride on the adminq 
interrupt */ + ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq); + } + + q_list_size = sizeof(*lif->txqcqs) * lif->nxqs; + err = -ENOMEM; + lif->txqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL); + if (!lif->txqcqs) + goto err_out_free_notifyqcq; + for (i = 0; i < lif->nxqs; i++) { + lif->txqcqs[i].stats = devm_kzalloc(dev, + sizeof(struct ionic_q_stats), + GFP_KERNEL); + if (!lif->txqcqs[i].stats) + goto err_out_free_tx_stats; + } + + lif->rxqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL); + if (!lif->rxqcqs) + goto err_out_free_tx_stats; + for (i = 0; i < lif->nxqs; i++) { + lif->rxqcqs[i].stats = devm_kzalloc(dev, + sizeof(struct ionic_q_stats), + GFP_KERNEL); + if (!lif->rxqcqs[i].stats) + goto err_out_free_rx_stats; + } + + return 0; + +err_out_free_rx_stats: + for (i = 0; i < lif->nxqs; i++) + if (lif->rxqcqs[i].stats) + devm_kfree(dev, lif->rxqcqs[i].stats); + devm_kfree(dev, lif->rxqcqs); + lif->rxqcqs = NULL; +err_out_free_tx_stats: + for (i = 0; i < lif->nxqs; i++) + if (lif->txqcqs[i].stats) + devm_kfree(dev, lif->txqcqs[i].stats); + devm_kfree(dev, lif->txqcqs); + lif->txqcqs = NULL; +err_out_free_notifyqcq: + if (lif->notifyqcq) { + ionic_qcq_free(lif, lif->notifyqcq); + lif->notifyqcq = NULL; + } +err_out_free_adminqcq: + ionic_qcq_free(lif, lif->adminqcq); + lif->adminqcq = NULL; + + return err; +} + +static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) +{ + struct device *dev = lif->ionic->dev; + struct ionic_queue *q = &qcq->q; + struct ionic_cq *cq = &qcq->cq; + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.q_init = { + .opcode = IONIC_CMD_Q_INIT, + .lif_index = cpu_to_le16(lif->index), + .type = q->type, + .index = cpu_to_le32(q->index), + .flags = cpu_to_le16(IONIC_QINIT_F_IRQ | + IONIC_QINIT_F_SG), + .intr_index = cpu_to_le16(lif->rxqcqs[q->index].qcq->intr.index), + .pid = cpu_to_le16(q->pid), + .ring_size = ilog2(q->num_descs), + .ring_base = cpu_to_le64(q->base_pa), + .cq_ring_base = cpu_to_le64(cq->base_pa), + .sg_ring_base = cpu_to_le64(q->sg_base_pa), + }, + }; + int err; + + dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid); + dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index); + dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base); + dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size); + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + return err; + + q->hw_type = ctx.comp.q_init.hw_type; + q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index); + q->dbval = IONIC_DBELL_QID(q->hw_index); + + dev_dbg(dev, "txq->hw_type %d\n", q->hw_type); + dev_dbg(dev, "txq->hw_index %d\n", q->hw_index); + + qcq->flags |= IONIC_QCQ_F_INITED; + + ionic_debugfs_add_qcq(lif, qcq); + + return 0; +} + +static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) +{ + struct device *dev = lif->ionic->dev; + struct ionic_queue *q = &qcq->q; + struct ionic_cq *cq = &qcq->cq; + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.q_init = { + .opcode = IONIC_CMD_Q_INIT, + .lif_index = cpu_to_le16(lif->index), + .type = q->type, + .index = cpu_to_le32(q->index), + .flags = cpu_to_le16(IONIC_QINIT_F_IRQ), + .intr_index = cpu_to_le16(cq->bound_intr->index), + .pid = cpu_to_le16(q->pid), + .ring_size = ilog2(q->num_descs), + .ring_base = cpu_to_le64(q->base_pa), + .cq_ring_base = cpu_to_le64(cq->base_pa), + }, + }; + int err; + + dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid); + dev_dbg(dev, "rxq_init.index 
%d\n", ctx.cmd.q_init.index); + dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base); + dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size); + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + return err; + + q->hw_type = ctx.comp.q_init.hw_type; + q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index); + q->dbval = IONIC_DBELL_QID(q->hw_index); + + dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type); + dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index); + + netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi, + NAPI_POLL_WEIGHT); + + err = ionic_request_irq(lif, qcq); + if (err) { + netif_napi_del(&qcq->napi); + return err; + } + + qcq->flags |= IONIC_QCQ_F_INITED; + + ionic_debugfs_add_qcq(lif, qcq); + + return 0; +} + +static bool ionic_notifyq_service(struct ionic_cq *cq, + struct ionic_cq_info *cq_info) +{ + union ionic_notifyq_comp *comp = cq_info->cq_desc; + struct net_device *netdev; + struct ionic_queue *q; + struct ionic_lif *lif; + u64 eid; + + q = cq->bound_q; + lif = q->info[0].cb_arg; + netdev = lif->netdev; + eid = le64_to_cpu(comp->event.eid); + + /* Have we run out of new completions to process? */ + if (eid <= lif->last_eid) + return false; + + lif->last_eid = eid; + + dev_dbg(lif->ionic->dev, "notifyq event:\n"); + dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1, + comp, sizeof(*comp), true); + + switch (le16_to_cpu(comp->event.ecode)) { + case IONIC_EVENT_LINK_CHANGE: + ionic_link_status_check_request(lif); + break; + case IONIC_EVENT_RESET: + netdev_info(netdev, "Notifyq IONIC_EVENT_RESET eid=%lld\n", + eid); + netdev_info(netdev, " reset_code=%d state=%d\n", + comp->reset.reset_code, + comp->reset.state); + break; + default: + netdev_warn(netdev, "Notifyq unknown event ecode=%d eid=%lld\n", + comp->event.ecode, eid); + break; + } + + return true; +} + +static int ionic_notifyq_clean(struct ionic_lif *lif, int budget) +{ + struct ionic_dev *idev = &lif->ionic->idev; + struct ionic_cq *cq = &lif->notifyqcq->cq; + u32 work_done; + + work_done = ionic_cq_service(cq, budget, ionic_notifyq_service, + NULL, NULL); + if (work_done) + ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index, + work_done, IONIC_INTR_CRED_RESET_COALESCE); + + return work_done; +} + +static bool ionic_adminq_service(struct ionic_cq *cq, + struct ionic_cq_info *cq_info) +{ + struct ionic_admin_comp *comp = cq_info->cq_desc; + + if (!color_match(comp->color, cq->done_color)) + return false; + + ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index)); + + return true; +} + +static int ionic_adminq_napi(struct napi_struct *napi, int budget) +{ + struct ionic_lif *lif = napi_to_cq(napi)->lif; + int n_work = 0; + int a_work = 0; + + if (likely(lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED)) + n_work = ionic_notifyq_clean(lif, budget); + a_work = ionic_napi(napi, budget, ionic_adminq_service, NULL, NULL); + + return max(n_work, a_work); +} + +static void ionic_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *ns) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic_lif_stats *ls; + + memset(ns, 0, sizeof(*ns)); + ls = &lif->info->stats; + + ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) + + le64_to_cpu(ls->rx_mcast_packets) + + le64_to_cpu(ls->rx_bcast_packets); + + ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) + + le64_to_cpu(ls->tx_mcast_packets) + + le64_to_cpu(ls->tx_bcast_packets); + + ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) + + le64_to_cpu(ls->rx_mcast_bytes) + + 
le64_to_cpu(ls->rx_bcast_bytes); + + ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) + + le64_to_cpu(ls->tx_mcast_bytes) + + le64_to_cpu(ls->tx_bcast_bytes); + + ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) + + le64_to_cpu(ls->rx_mcast_drop_packets) + + le64_to_cpu(ls->rx_bcast_drop_packets); + + ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) + + le64_to_cpu(ls->tx_mcast_drop_packets) + + le64_to_cpu(ls->tx_bcast_drop_packets); + + ns->multicast = le64_to_cpu(ls->rx_mcast_packets); + + ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty); + + ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) + + le64_to_cpu(ls->rx_queue_disabled) + + le64_to_cpu(ls->rx_desc_fetch_error) + + le64_to_cpu(ls->rx_desc_data_error); + + ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) + + le64_to_cpu(ls->tx_queue_disabled) + + le64_to_cpu(ls->tx_desc_fetch_error) + + le64_to_cpu(ls->tx_desc_data_error); + + ns->rx_errors = ns->rx_over_errors + + ns->rx_missed_errors; + + ns->tx_errors = ns->tx_aborted_errors; +} + +static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr) +{ + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.rx_filter_add = { + .opcode = IONIC_CMD_RX_FILTER_ADD, + .lif_index = cpu_to_le16(lif->index), + .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC), + }, + }; + struct ionic_rx_filter *f; + int err; + + /* don't bother if we already have it */ + spin_lock_bh(&lif->rx_filters.lock); + f = ionic_rx_filter_by_addr(lif, addr); + spin_unlock_bh(&lif->rx_filters.lock); + if (f) + return 0; + + memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN); + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + return err; + + /* the filter id is only valid in the completion, after the + * command has been posted and has finished + */ + netdev_dbg(lif->netdev, "rx_filter add ADDR %pM (id %d)\n", addr, + le32_to_cpu(ctx.comp.rx_filter_add.filter_id)); + + return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx); +} + +static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr) +{ + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.rx_filter_del = { + .opcode = IONIC_CMD_RX_FILTER_DEL, + .lif_index = cpu_to_le16(lif->index), + }, + }; + struct ionic_rx_filter *f; + int err; + + spin_lock_bh(&lif->rx_filters.lock); + f = ionic_rx_filter_by_addr(lif, addr); + if (!f) { + spin_unlock_bh(&lif->rx_filters.lock); + return -ENOENT; + } + + ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id); + ionic_rx_filter_free(lif, f); + spin_unlock_bh(&lif->rx_filters.lock); + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + return err; + + netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n", addr, + le32_to_cpu(ctx.cmd.rx_filter_del.filter_id)); + + return 0; +} + +static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add) +{ + struct ionic *ionic = lif->ionic; + struct ionic_deferred_work *work; + unsigned int nmfilters; + unsigned int nufilters; + + if (add) { + /* Do we have space for this filter? We test the counters + * here before checking the need for deferral so that we + * can return an overflow error to the stack. 
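+ * e.g. with max_ucast_filters == 32, an attempt to add a 33rd + * unicast address fails here with -ENOSPC instead of being + * queued for deferred processing.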
+ */ + nmfilters = le32_to_cpu(ionic->ident.lif.eth.max_mcast_filters); + nufilters = le32_to_cpu(ionic->ident.lif.eth.max_ucast_filters); + + if ((is_multicast_ether_addr(addr) && lif->nmcast < nmfilters)) + lif->nmcast++; + else if (!is_multicast_ether_addr(addr) && + lif->nucast < nufilters) + lif->nucast++; + else + return -ENOSPC; + } else { + if (is_multicast_ether_addr(addr) && lif->nmcast) + lif->nmcast--; + else if (!is_multicast_ether_addr(addr) && lif->nucast) + lif->nucast--; + } + + if (in_interrupt()) { + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) { + netdev_err(lif->netdev, "%s OOM\n", __func__); + return -ENOMEM; + } + work->type = add ? IONIC_DW_TYPE_RX_ADDR_ADD : + IONIC_DW_TYPE_RX_ADDR_DEL; + memcpy(work->addr, addr, ETH_ALEN); + netdev_dbg(lif->netdev, "deferred: rx_filter %s %pM\n", + add ? "add" : "del", addr); + ionic_lif_deferred_enqueue(&lif->deferred, work); + } else { + netdev_dbg(lif->netdev, "rx_filter %s %pM\n", + add ? "add" : "del", addr); + if (add) + return ionic_lif_addr_add(lif, addr); + else + return ionic_lif_addr_del(lif, addr); + } + + return 0; +} + +static int ionic_addr_add(struct net_device *netdev, const u8 *addr) +{ + return ionic_lif_addr(netdev_priv(netdev), addr, true); +} + +static int ionic_addr_del(struct net_device *netdev, const u8 *addr) +{ + return ionic_lif_addr(netdev_priv(netdev), addr, false); +} + +static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode) +{ + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.rx_mode_set = { + .opcode = IONIC_CMD_RX_MODE_SET, + .lif_index = cpu_to_le16(lif->index), + .rx_mode = cpu_to_le16(rx_mode), + }, + }; + char buf[128]; + int err; + int i; +#define REMAIN(__x) (sizeof(buf) - (__x)) + + i = snprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:", + lif->rx_mode, rx_mode); + if (rx_mode & IONIC_RX_MODE_F_UNICAST) + i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST"); + if (rx_mode & IONIC_RX_MODE_F_MULTICAST) + i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST"); + if (rx_mode & IONIC_RX_MODE_F_BROADCAST) + i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST"); + if (rx_mode & IONIC_RX_MODE_F_PROMISC) + i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC"); + if (rx_mode & IONIC_RX_MODE_F_ALLMULTI) + i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI"); + netdev_dbg(lif->netdev, "lif%d %s\n", lif->index, buf); + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + netdev_warn(lif->netdev, "set rx_mode 0x%04x failed: %d\n", + rx_mode, err); + else + lif->rx_mode = rx_mode; +} + +static void _ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode) +{ + struct ionic_deferred_work *work; + + if (in_interrupt()) { + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) { + netdev_err(lif->netdev, "%s OOM\n", __func__); + return; + } + work->type = IONIC_DW_TYPE_RX_MODE; + work->rx_mode = rx_mode; + netdev_dbg(lif->netdev, "deferred: rx_mode\n"); + ionic_lif_deferred_enqueue(&lif->deferred, work); + } else { + ionic_lif_rx_mode(lif, rx_mode); + } +} + +static void ionic_set_rx_mode(struct net_device *netdev) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic_identity *ident; + unsigned int nfilters; + unsigned int rx_mode; + + ident = &lif->ionic->ident; + + rx_mode = IONIC_RX_MODE_F_UNICAST; + rx_mode |= (netdev->flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0; + rx_mode |= (netdev->flags & IFF_BROADCAST) ? 
IONIC_RX_MODE_F_BROADCAST : 0; + rx_mode |= (netdev->flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0; + rx_mode |= (netdev->flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0; + + /* sync unicast addresses + * next check to see if we're in an overflow state + * if so, we track that we overflowed and enable NIC PROMISC + * else if the overflow is set and not needed + * we remove our overflow flag and check the netdev flags + * to see if we can disable NIC PROMISC + */ + __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del); + nfilters = le32_to_cpu(ident->lif.eth.max_ucast_filters); + if (netdev_uc_count(netdev) + 1 > nfilters) { + rx_mode |= IONIC_RX_MODE_F_PROMISC; + lif->uc_overflow = true; + } else if (lif->uc_overflow) { + lif->uc_overflow = false; + if (!(netdev->flags & IFF_PROMISC)) + rx_mode &= ~IONIC_RX_MODE_F_PROMISC; + } + + /* same for multicast */ + __dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del); + nfilters = le32_to_cpu(ident->lif.eth.max_mcast_filters); + if (netdev_mc_count(netdev) > nfilters) { + rx_mode |= IONIC_RX_MODE_F_ALLMULTI; + lif->mc_overflow = true; + } else if (lif->mc_overflow) { + lif->mc_overflow = false; + if (!(netdev->flags & IFF_ALLMULTI)) + rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI; + } + + if (lif->rx_mode != rx_mode) + _ionic_lif_rx_mode(lif, rx_mode); +} + +static __le64 ionic_netdev_features_to_nic(netdev_features_t features) +{ + u64 wanted = 0; + + if (features & NETIF_F_HW_VLAN_CTAG_TX) + wanted |= IONIC_ETH_HW_VLAN_TX_TAG; + if (features & NETIF_F_HW_VLAN_CTAG_RX) + wanted |= IONIC_ETH_HW_VLAN_RX_STRIP; + if (features & NETIF_F_HW_VLAN_CTAG_FILTER) + wanted |= IONIC_ETH_HW_VLAN_RX_FILTER; + if (features & NETIF_F_RXHASH) + wanted |= IONIC_ETH_HW_RX_HASH; + if (features & NETIF_F_RXCSUM) + wanted |= IONIC_ETH_HW_RX_CSUM; + if (features & NETIF_F_SG) + wanted |= IONIC_ETH_HW_TX_SG; + if (features & NETIF_F_HW_CSUM) + wanted |= IONIC_ETH_HW_TX_CSUM; + if (features & NETIF_F_TSO) + wanted |= IONIC_ETH_HW_TSO; + if (features & NETIF_F_TSO6) + wanted |= IONIC_ETH_HW_TSO_IPV6; + if (features & NETIF_F_TSO_ECN) + wanted |= IONIC_ETH_HW_TSO_ECN; + if (features & NETIF_F_GSO_GRE) + wanted |= IONIC_ETH_HW_TSO_GRE; + if (features & NETIF_F_GSO_GRE_CSUM) + wanted |= IONIC_ETH_HW_TSO_GRE_CSUM; + if (features & NETIF_F_GSO_IPXIP4) + wanted |= IONIC_ETH_HW_TSO_IPXIP4; + if (features & NETIF_F_GSO_IPXIP6) + wanted |= IONIC_ETH_HW_TSO_IPXIP6; + if (features & NETIF_F_GSO_UDP_TUNNEL) + wanted |= IONIC_ETH_HW_TSO_UDP; + if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM) + wanted |= IONIC_ETH_HW_TSO_UDP_CSUM; + + return cpu_to_le64(wanted); +} + +static int ionic_set_nic_features(struct ionic_lif *lif, + netdev_features_t features) +{ + struct device *dev = lif->ionic->dev; + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.lif_setattr = { + .opcode = IONIC_CMD_LIF_SETATTR, + .index = cpu_to_le16(lif->index), + .attr = IONIC_LIF_ATTR_FEATURES, + }, + }; + u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG | + IONIC_ETH_HW_VLAN_RX_STRIP | + IONIC_ETH_HW_VLAN_RX_FILTER; + int err; + + ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features); + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + return err; + + lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features & + ctx.comp.lif_setattr.features); + + if ((vlan_flags & features) && + !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features))) + dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n"); + + if (lif->hw_features & 
IONIC_ETH_HW_VLAN_TX_TAG) + dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n"); + if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP) + dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n"); + if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER) + dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n"); + if (lif->hw_features & IONIC_ETH_HW_RX_HASH) + dev_dbg(dev, "feature ETH_HW_RX_HASH\n"); + if (lif->hw_features & IONIC_ETH_HW_TX_SG) + dev_dbg(dev, "feature ETH_HW_TX_SG\n"); + if (lif->hw_features & IONIC_ETH_HW_TX_CSUM) + dev_dbg(dev, "feature ETH_HW_TX_CSUM\n"); + if (lif->hw_features & IONIC_ETH_HW_RX_CSUM) + dev_dbg(dev, "feature ETH_HW_RX_CSUM\n"); + if (lif->hw_features & IONIC_ETH_HW_TSO) + dev_dbg(dev, "feature ETH_HW_TSO\n"); + if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6) + dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n"); + if (lif->hw_features & IONIC_ETH_HW_TSO_ECN) + dev_dbg(dev, "feature ETH_HW_TSO_ECN\n"); + if (lif->hw_features & IONIC_ETH_HW_TSO_GRE) + dev_dbg(dev, "feature ETH_HW_TSO_GRE\n"); + if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM) + dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n"); + if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4) + dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n"); + if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6) + dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n"); + if (lif->hw_features & IONIC_ETH_HW_TSO_UDP) + dev_dbg(dev, "feature ETH_HW_TSO_UDP\n"); + if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM) + dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n"); + + return 0; +} + +static int ionic_init_nic_features(struct ionic_lif *lif) +{ + struct net_device *netdev = lif->netdev; + netdev_features_t features; + int err; + + /* set up what we expect to support by default */ + features = NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_RXHASH | + NETIF_F_SG | + NETIF_F_HW_CSUM | + NETIF_F_RXCSUM | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_TSO_ECN; + + err = ionic_set_nic_features(lif, features); + if (err) + return err; + + /* tell the netdev what we actually can support */ + netdev->features |= NETIF_F_HIGHDMA; + + if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG) + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; + if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP) + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; + if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER) + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + if (lif->hw_features & IONIC_ETH_HW_RX_HASH) + netdev->hw_features |= NETIF_F_RXHASH; + if (lif->hw_features & IONIC_ETH_HW_TX_SG) + netdev->hw_features |= NETIF_F_SG; + + if (lif->hw_features & IONIC_ETH_HW_TX_CSUM) + netdev->hw_enc_features |= NETIF_F_HW_CSUM; + if (lif->hw_features & IONIC_ETH_HW_RX_CSUM) + netdev->hw_enc_features |= NETIF_F_RXCSUM; + if (lif->hw_features & IONIC_ETH_HW_TSO) + netdev->hw_enc_features |= NETIF_F_TSO; + if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6) + netdev->hw_enc_features |= NETIF_F_TSO6; + if (lif->hw_features & IONIC_ETH_HW_TSO_ECN) + netdev->hw_enc_features |= NETIF_F_TSO_ECN; + if (lif->hw_features & IONIC_ETH_HW_TSO_GRE) + netdev->hw_enc_features |= NETIF_F_GSO_GRE; + if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM) + netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM; + if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4) + netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4; + if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6) + netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6; + if (lif->hw_features & IONIC_ETH_HW_TSO_UDP) + netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL; + if (lif->hw_features 
& IONIC_ETH_HW_TSO_UDP_CSUM) + netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; + + netdev->hw_features |= netdev->hw_enc_features; + netdev->features |= netdev->hw_features; + + netdev->priv_flags |= IFF_UNICAST_FLT; + + return 0; +} + +static int ionic_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct ionic_lif *lif = netdev_priv(netdev); + int err; + + netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n", + __func__, (u64)lif->netdev->features, (u64)features); + + err = ionic_set_nic_features(lif, features); + + return err; +} + +static int ionic_set_mac_address(struct net_device *netdev, void *sa) +{ + struct sockaddr *addr = sa; + u8 *mac; + int err; + + mac = (u8 *)addr->sa_data; + if (ether_addr_equal(netdev->dev_addr, mac)) + return 0; + + err = eth_prepare_mac_addr_change(netdev, addr); + if (err) + return err; + + if (!is_zero_ether_addr(netdev->dev_addr)) { + netdev_info(netdev, "deleting mac addr %pM\n", + netdev->dev_addr); + ionic_addr_del(netdev, netdev->dev_addr); + } + + eth_commit_mac_addr_change(netdev, addr); + netdev_info(netdev, "updating mac addr %pM\n", mac); + + return ionic_addr_add(netdev, mac); +} + +static int ionic_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.lif_setattr = { + .opcode = IONIC_CMD_LIF_SETATTR, + .index = cpu_to_le16(lif->index), + .attr = IONIC_LIF_ATTR_MTU, + .mtu = cpu_to_le32(new_mtu), + }, + }; + int err; + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + return err; + + netdev->mtu = new_mtu; + err = ionic_reset_queues(lif); + + return err; +} + +static void ionic_tx_timeout_work(struct work_struct *ws) +{ + struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work); + + netdev_info(lif->netdev, "Tx Timeout recovery\n"); + + rtnl_lock(); + ionic_reset_queues(lif); + rtnl_unlock(); +} + +static void ionic_tx_timeout(struct net_device *netdev) +{ + struct ionic_lif *lif = netdev_priv(netdev); + + schedule_work(&lif->tx_timeout_work); +} + +static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, + u16 vid) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.rx_filter_add = { + .opcode = IONIC_CMD_RX_FILTER_ADD, + .lif_index = cpu_to_le16(lif->index), + .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN), + .vlan.vlan = cpu_to_le16(vid), + }, + }; + int err; + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + return err; + + netdev_dbg(netdev, "rx_filter add VLAN %d (id %d)\n", vid, + le32_to_cpu(ctx.comp.rx_filter_add.filter_id)); + + return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx); +} + +static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, + u16 vid) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.rx_filter_del = { + .opcode = IONIC_CMD_RX_FILTER_DEL, + .lif_index = cpu_to_le16(lif->index), + }, + }; + struct ionic_rx_filter *f; + + spin_lock_bh(&lif->rx_filters.lock); + + f = ionic_rx_filter_by_vlan(lif, vid); + if (!f) { + spin_unlock_bh(&lif->rx_filters.lock); + return -ENOENT; + } + + netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n", vid, + f->filter_id); + + ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id); + ionic_rx_filter_free(lif, 
f); + spin_unlock_bh(&lif->rx_filters.lock); + + return ionic_adminq_post_wait(lif, &ctx); +} + +int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types, + const u8 *key, const u32 *indir) +{ + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.lif_setattr = { + .opcode = IONIC_CMD_LIF_SETATTR, + .attr = IONIC_LIF_ATTR_RSS, + .rss.types = cpu_to_le16(types), + .rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa), + }, + }; + unsigned int i, tbl_sz; + + lif->rss_types = types; + + if (key) + memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE); + + if (indir) { + tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); + for (i = 0; i < tbl_sz; i++) + lif->rss_ind_tbl[i] = indir[i]; + } + + memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key, + IONIC_RSS_HASH_KEY_SIZE); + + return ionic_adminq_post_wait(lif, &ctx); +} + +static int ionic_lif_rss_init(struct ionic_lif *lif) +{ + u8 rss_key[IONIC_RSS_HASH_KEY_SIZE]; + unsigned int tbl_sz; + unsigned int i; + + netdev_rss_key_fill(rss_key, IONIC_RSS_HASH_KEY_SIZE); + + lif->rss_types = IONIC_RSS_TYPE_IPV4 | + IONIC_RSS_TYPE_IPV4_TCP | + IONIC_RSS_TYPE_IPV4_UDP | + IONIC_RSS_TYPE_IPV6 | + IONIC_RSS_TYPE_IPV6_TCP | + IONIC_RSS_TYPE_IPV6_UDP; + + /* Fill indirection table with 'default' values */ + tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); + for (i = 0; i < tbl_sz; i++) + lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs); + + return ionic_lif_rss_config(lif, lif->rss_types, rss_key, NULL); +} + +static int ionic_lif_rss_deinit(struct ionic_lif *lif) +{ + return ionic_lif_rss_config(lif, 0x0, NULL, NULL); +} + +static void ionic_txrx_disable(struct ionic_lif *lif) +{ + unsigned int i; + + for (i = 0; i < lif->nxqs; i++) { + ionic_qcq_disable(lif->txqcqs[i].qcq); + ionic_qcq_disable(lif->rxqcqs[i].qcq); + } +} + +static void ionic_txrx_deinit(struct ionic_lif *lif) +{ + unsigned int i; + + for (i = 0; i < lif->nxqs; i++) { + ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq); + ionic_tx_flush(&lif->txqcqs[i].qcq->cq); + + ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq); + ionic_rx_flush(&lif->rxqcqs[i].qcq->cq); + ionic_rx_empty(&lif->rxqcqs[i].qcq->q); + } +} + +static void ionic_txrx_free(struct ionic_lif *lif) +{ + unsigned int i; + + for (i = 0; i < lif->nxqs; i++) { + ionic_qcq_free(lif, lif->txqcqs[i].qcq); + lif->txqcqs[i].qcq = NULL; + + ionic_qcq_free(lif, lif->rxqcqs[i].qcq); + lif->rxqcqs[i].qcq = NULL; + } +} + +static int ionic_txrx_alloc(struct ionic_lif *lif) +{ + unsigned int flags; + unsigned int i; + int err = 0; + u32 coal; + + flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG; + for (i = 0; i < lif->nxqs; i++) { + err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags, + lif->ntxq_descs, + sizeof(struct ionic_txq_desc), + sizeof(struct ionic_txq_comp), + sizeof(struct ionic_txq_sg_desc), + lif->kern_pid, &lif->txqcqs[i].qcq); + if (err) + goto err_out; + + lif->txqcqs[i].qcq->stats = lif->txqcqs[i].stats; + } + + flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_INTR; + coal = ionic_coal_usec_to_hw(lif->ionic, lif->rx_coalesce_usecs); + for (i = 0; i < lif->nxqs; i++) { + err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags, + lif->nrxq_descs, + sizeof(struct ionic_rxq_desc), + sizeof(struct ionic_rxq_comp), + 0, lif->kern_pid, &lif->rxqcqs[i].qcq); + if (err) + goto err_out; + + lif->rxqcqs[i].qcq->stats = lif->rxqcqs[i].stats; + + ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, + lif->rxqcqs[i].qcq->intr.index, coal); + 
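/* the tx qcq shares its paired rx qcq's interrupt, just as + * the notifyq rides on the adminq interrupt above + */ + 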
ionic_link_qcq_interrupts(lif->rxqcqs[i].qcq, + lif->txqcqs[i].qcq); + } + + return 0; + +err_out: + ionic_txrx_free(lif); + + return err; +} + +static int ionic_txrx_init(struct ionic_lif *lif) +{ + unsigned int i; + int err; + + for (i = 0; i < lif->nxqs; i++) { + err = ionic_lif_txq_init(lif, lif->txqcqs[i].qcq); + if (err) + goto err_out; + + err = ionic_lif_rxq_init(lif, lif->rxqcqs[i].qcq); + if (err) { + ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq); + goto err_out; + } + } + + if (lif->netdev->features & NETIF_F_RXHASH) + ionic_lif_rss_init(lif); + + ionic_set_rx_mode(lif->netdev); + + return 0; + +err_out: + while (i--) { + ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq); + ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq); + } + + return err; +} + +static int ionic_txrx_enable(struct ionic_lif *lif) +{ + int i, err; + + for (i = 0; i < lif->nxqs; i++) { + err = ionic_qcq_enable(lif->txqcqs[i].qcq); + if (err) + goto err_out; + + ionic_rx_fill(&lif->rxqcqs[i].qcq->q); + err = ionic_qcq_enable(lif->rxqcqs[i].qcq); + if (err) { + ionic_qcq_disable(lif->txqcqs[i].qcq); + goto err_out; + } + } + + return 0; + +err_out: + while (i--) { + ionic_qcq_disable(lif->rxqcqs[i].qcq); + ionic_qcq_disable(lif->txqcqs[i].qcq); + } + + return err; +} + +int ionic_open(struct net_device *netdev) +{ + struct ionic_lif *lif = netdev_priv(netdev); + int err; + + netif_carrier_off(netdev); + + err = ionic_txrx_alloc(lif); + if (err) + return err; + + err = ionic_txrx_init(lif); + if (err) + goto err_txrx_free; + + err = ionic_txrx_enable(lif); + if (err) + goto err_txrx_deinit; + + netif_set_real_num_tx_queues(netdev, lif->nxqs); + netif_set_real_num_rx_queues(netdev, lif->nxqs); + + set_bit(IONIC_LIF_UP, lif->state); + + ionic_link_status_check_request(lif); + if (netif_carrier_ok(netdev)) + netif_tx_wake_all_queues(netdev); + + return 0; + +err_txrx_deinit: + ionic_txrx_deinit(lif); +err_txrx_free: + ionic_txrx_free(lif); + return err; +} + +int ionic_stop(struct net_device *netdev) +{ + struct ionic_lif *lif = netdev_priv(netdev); + int err = 0; + + if (!test_bit(IONIC_LIF_UP, lif->state)) { + dev_dbg(lif->ionic->dev, "%s: %s state=DOWN\n", + __func__, lif->name); + return 0; + } + dev_dbg(lif->ionic->dev, "%s: %s state=UP\n", __func__, lif->name); + clear_bit(IONIC_LIF_UP, lif->state); + + /* carrier off before disabling queues to avoid watchdog timeout */ + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + netif_tx_disable(netdev); + + ionic_txrx_disable(lif); + ionic_txrx_deinit(lif); + ionic_txrx_free(lif); + + return err; +} + +static const struct net_device_ops ionic_netdev_ops = { + .ndo_open = ionic_open, + .ndo_stop = ionic_stop, + .ndo_start_xmit = ionic_start_xmit, + .ndo_get_stats64 = ionic_get_stats64, + .ndo_set_rx_mode = ionic_set_rx_mode, + .ndo_set_features = ionic_set_features, + .ndo_set_mac_address = ionic_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_tx_timeout = ionic_tx_timeout, + .ndo_change_mtu = ionic_change_mtu, + .ndo_vlan_rx_add_vid = ionic_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ionic_vlan_rx_kill_vid, +}; + +int ionic_reset_queues(struct ionic_lif *lif) +{ + bool running; + int err = 0; + + /* Put off the next watchdog timeout */ + netif_trans_update(lif->netdev); + + if (!ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET)) + return -EBUSY; + + running = netif_running(lif->netdev); + if (running) + err = ionic_stop(lif->netdev); + if (!err && running) + ionic_open(lif->netdev); + + clear_bit(IONIC_LIF_QUEUE_RESET, lif->state); + + return 
err; +} + +static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index) +{ + struct device *dev = ionic->dev; + struct net_device *netdev; + struct ionic_lif *lif; + int tbl_sz; + u32 coal; + int err; + + netdev = alloc_etherdev_mqs(sizeof(*lif), + ionic->ntxqs_per_lif, ionic->ntxqs_per_lif); + if (!netdev) { + dev_err(dev, "Cannot allocate netdev, aborting\n"); + return ERR_PTR(-ENOMEM); + } + + SET_NETDEV_DEV(netdev, dev); + + lif = netdev_priv(netdev); + lif->netdev = netdev; + ionic->master_lif = lif; + netdev->netdev_ops = &ionic_netdev_ops; + ionic_ethtool_set_ops(netdev); + + netdev->watchdog_timeo = 2 * HZ; + netdev->min_mtu = IONIC_MIN_MTU; + netdev->max_mtu = IONIC_MAX_MTU; + + lif->neqs = ionic->neqs_per_lif; + lif->nxqs = ionic->ntxqs_per_lif; + + lif->ionic = ionic; + lif->index = index; + lif->ntxq_descs = IONIC_DEF_TXRX_DESC; + lif->nrxq_descs = IONIC_DEF_TXRX_DESC; + + /* Convert the default coalesce value to actual hw resolution */ + coal = ionic_coal_usec_to_hw(lif->ionic, IONIC_ITR_COAL_USEC_DEFAULT); + lif->rx_coalesce_usecs = ionic_coal_hw_to_usec(lif->ionic, coal); + + snprintf(lif->name, sizeof(lif->name), "lif%u", index); + + spin_lock_init(&lif->adminq_lock); + + spin_lock_init(&lif->deferred.lock); + INIT_LIST_HEAD(&lif->deferred.list); + INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work); + + /* allocate lif info */ + lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE); + lif->info = dma_alloc_coherent(dev, lif->info_sz, + &lif->info_pa, GFP_KERNEL); + if (!lif->info) { + dev_err(dev, "Failed to allocate lif info, aborting\n"); + err = -ENOMEM; + goto err_out_free_netdev; + } + + /* allocate queues */ + err = ionic_qcqs_alloc(lif); + if (err) + goto err_out_free_lif_info; + + /* allocate rss indirection table */ + tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); + lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz; + lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz, + &lif->rss_ind_tbl_pa, + GFP_KERNEL); + + if (!lif->rss_ind_tbl) { + dev_err(dev, "Failed to allocate rss indirection table, aborting\n"); + err = -ENOMEM; + goto err_out_free_qcqs; + } + + list_add_tail(&lif->list, &ionic->lifs); + + return lif; + +err_out_free_qcqs: + ionic_qcqs_free(lif); +err_out_free_lif_info: + dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa); + lif->info = NULL; + lif->info_pa = 0; +err_out_free_netdev: + free_netdev(lif->netdev); + lif = NULL; + + return ERR_PTR(err); +} + +int ionic_lifs_alloc(struct ionic *ionic) +{ + struct ionic_lif *lif; + + INIT_LIST_HEAD(&ionic->lifs); + + /* only build the first lif, others are for later features */ + set_bit(0, ionic->lifbits); + lif = ionic_lif_alloc(ionic, 0); + + return PTR_ERR_OR_ZERO(lif); +} + +static void ionic_lif_reset(struct ionic_lif *lif) +{ + struct ionic_dev *idev = &lif->ionic->idev; + + mutex_lock(&lif->ionic->dev_cmd_lock); + ionic_dev_cmd_lif_reset(idev, lif->index); + ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT); + mutex_unlock(&lif->ionic->dev_cmd_lock); +} + +static void ionic_lif_free(struct ionic_lif *lif) +{ + struct device *dev = lif->ionic->dev; + + /* free rss indirection table */ + dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl, + lif->rss_ind_tbl_pa); + lif->rss_ind_tbl = NULL; + lif->rss_ind_tbl_pa = 0; + + /* free queues */ + ionic_qcqs_free(lif); + ionic_lif_reset(lif); + + /* free lif info */ + dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa); + lif->info = NULL; + lif->info_pa = 0; + + /* unmap doorbell page */ + 
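/* kern_dbpage was mapped from the kernel pid's doorbell page in + * ionic_lif_init(); release it before the dbid bitmap below + */ +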
ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage); + lif->kern_dbpage = NULL; + kfree(lif->dbid_inuse); + lif->dbid_inuse = NULL; + + /* free netdev & lif */ + ionic_debugfs_del_lif(lif); + list_del(&lif->list); + free_netdev(lif->netdev); +} + +void ionic_lifs_free(struct ionic *ionic) +{ + struct list_head *cur, *tmp; + struct ionic_lif *lif; + + list_for_each_safe(cur, tmp, &ionic->lifs) { + lif = list_entry(cur, struct ionic_lif, list); + + ionic_lif_free(lif); + } +} + +static void ionic_lif_deinit(struct ionic_lif *lif) +{ + if (!test_bit(IONIC_LIF_INITED, lif->state)) + return; + + clear_bit(IONIC_LIF_INITED, lif->state); + + ionic_rx_filters_deinit(lif); + ionic_lif_rss_deinit(lif); + + napi_disable(&lif->adminqcq->napi); + ionic_lif_qcq_deinit(lif, lif->notifyqcq); + ionic_lif_qcq_deinit(lif, lif->adminqcq); + + ionic_lif_reset(lif); +} + +void ionic_lifs_deinit(struct ionic *ionic) +{ + struct list_head *cur, *tmp; + struct ionic_lif *lif; + + list_for_each_safe(cur, tmp, &ionic->lifs) { + lif = list_entry(cur, struct ionic_lif, list); + ionic_lif_deinit(lif); + } +} + +static int ionic_lif_adminq_init(struct ionic_lif *lif) +{ + struct device *dev = lif->ionic->dev; + struct ionic_q_init_comp comp; + struct ionic_dev *idev; + struct ionic_qcq *qcq; + struct ionic_queue *q; + int err; + + idev = &lif->ionic->idev; + qcq = lif->adminqcq; + q = &qcq->q; + + mutex_lock(&lif->ionic->dev_cmd_lock); + ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index); + err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT); + ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp); + mutex_unlock(&lif->ionic->dev_cmd_lock); + if (err) { + netdev_err(lif->netdev, "adminq init failed %d\n", err); + return err; + } + + q->hw_type = comp.hw_type; + q->hw_index = le32_to_cpu(comp.hw_index); + q->dbval = IONIC_DBELL_QID(q->hw_index); + + dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type); + dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index); + + netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi, + NAPI_POLL_WEIGHT); + + err = ionic_request_irq(lif, qcq); + if (err) { + netdev_warn(lif->netdev, "adminq irq request failed %d\n", err); + netif_napi_del(&qcq->napi); + return err; + } + + napi_enable(&qcq->napi); + + if (qcq->flags & IONIC_QCQ_F_INTR) + ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, + IONIC_INTR_MASK_CLEAR); + + qcq->flags |= IONIC_QCQ_F_INITED; + + ionic_debugfs_add_qcq(lif, qcq); + + return 0; +} + +static int ionic_lif_notifyq_init(struct ionic_lif *lif) +{ + struct ionic_qcq *qcq = lif->notifyqcq; + struct device *dev = lif->ionic->dev; + struct ionic_queue *q = &qcq->q; + int err; + + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.q_init = { + .opcode = IONIC_CMD_Q_INIT, + .lif_index = cpu_to_le16(lif->index), + .type = q->type, + .index = cpu_to_le32(q->index), + .flags = cpu_to_le16(IONIC_QINIT_F_IRQ | + IONIC_QINIT_F_ENA), + .intr_index = cpu_to_le16(lif->adminqcq->intr.index), + .pid = cpu_to_le16(q->pid), + .ring_size = ilog2(q->num_descs), + .ring_base = cpu_to_le64(q->base_pa), + } + }; + + dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid); + dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index); + dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base); + dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size); + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + return err; + + q->hw_type = ctx.comp.q_init.hw_type; + q->hw_index = 
le32_to_cpu(ctx.comp.q_init.hw_index); + q->dbval = IONIC_DBELL_QID(q->hw_index); + + dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type); + dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index); + + /* preset the callback info */ + q->info[0].cb_arg = lif; + + qcq->flags |= IONIC_QCQ_F_INITED; + + ionic_debugfs_add_qcq(lif, qcq); + + return 0; +} + +static int ionic_station_set(struct ionic_lif *lif) +{ + struct net_device *netdev = lif->netdev; + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.lif_getattr = { + .opcode = IONIC_CMD_LIF_GETATTR, + .index = cpu_to_le16(lif->index), + .attr = IONIC_LIF_ATTR_MAC, + }, + }; + struct sockaddr addr; + int err; + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + return err; + + memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len); + addr.sa_family = AF_INET; + err = eth_prepare_mac_addr_change(netdev, &addr); + if (err) + return err; + + if (!is_zero_ether_addr(netdev->dev_addr)) { + netdev_dbg(lif->netdev, "deleting station MAC addr %pM\n", + netdev->dev_addr); + ionic_lif_addr(lif, netdev->dev_addr, false); + } + + eth_commit_mac_addr_change(netdev, &addr); + netdev_dbg(lif->netdev, "adding station MAC addr %pM\n", + netdev->dev_addr); + ionic_lif_addr(lif, netdev->dev_addr, true); + + return 0; +} + +static int ionic_lif_init(struct ionic_lif *lif) +{ + struct ionic_dev *idev = &lif->ionic->idev; + struct device *dev = lif->ionic->dev; + struct ionic_lif_init_comp comp; + int dbpage_num; + int err; + + ionic_debugfs_add_lif(lif); + + mutex_lock(&lif->ionic->dev_cmd_lock); + ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa); + err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT); + ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp); + mutex_unlock(&lif->ionic->dev_cmd_lock); + if (err) + return err; + + lif->hw_index = le16_to_cpu(comp.hw_index); + + /* now that we have the hw_index we can figure out our doorbell page */ + lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif); + if (!lif->dbid_count) { + dev_err(dev, "No doorbell pages, aborting\n"); + return -EINVAL; + } + + lif->dbid_inuse = bitmap_alloc(lif->dbid_count, GFP_KERNEL); + if (!lif->dbid_inuse) { + dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n"); + return -ENOMEM; + } + + /* first doorbell id reserved for kernel (dbid aka pid == zero) */ + set_bit(0, lif->dbid_inuse); + lif->kern_pid = 0; + + dbpage_num = ionic_db_page_num(lif, lif->kern_pid); + lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num); + if (!lif->kern_dbpage) { + dev_err(dev, "Cannot map dbpage, aborting\n"); + err = -ENOMEM; + goto err_out_free_dbid; + } + + err = ionic_lif_adminq_init(lif); + if (err) + goto err_out_adminq_deinit; + + if (lif->ionic->nnqs_per_lif) { + err = ionic_lif_notifyq_init(lif); + if (err) + goto err_out_notifyq_deinit; + } + + err = ionic_init_nic_features(lif); + if (err) + goto err_out_notifyq_deinit; + + err = ionic_rx_filters_init(lif); + if (err) + goto err_out_notifyq_deinit; + + err = ionic_station_set(lif); + if (err) + goto err_out_notifyq_deinit; + + lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT; + + set_bit(IONIC_LIF_INITED, lif->state); + + INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work); + + return 0; + +err_out_notifyq_deinit: + ionic_lif_qcq_deinit(lif, lif->notifyqcq); +err_out_adminq_deinit: + ionic_lif_qcq_deinit(lif, lif->adminqcq); + ionic_lif_reset(lif); + ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage); + lif->kern_dbpage = NULL; +err_out_free_dbid: 
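+ /* teardown runs in reverse of init: queue deinit, lif reset and + * dbpage unmap above, then the doorbell id bitmap here + */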
+ kfree(lif->dbid_inuse); + lif->dbid_inuse = NULL; + + return err; +} + +int ionic_lifs_init(struct ionic *ionic) +{ + struct list_head *cur, *tmp; + struct ionic_lif *lif; + int err; + + list_for_each_safe(cur, tmp, &ionic->lifs) { + lif = list_entry(cur, struct ionic_lif, list); + err = ionic_lif_init(lif); + if (err) + return err; + } + + return 0; +} + +static void ionic_lif_notify_work(struct work_struct *ws) +{ +} + +static void ionic_lif_set_netdev_info(struct ionic_lif *lif) +{ + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.lif_setattr = { + .opcode = IONIC_CMD_LIF_SETATTR, + .index = cpu_to_le16(lif->index), + .attr = IONIC_LIF_ATTR_NAME, + }, + }; + + strlcpy(ctx.cmd.lif_setattr.name, lif->netdev->name, + sizeof(ctx.cmd.lif_setattr.name)); + + ionic_adminq_post_wait(lif, &ctx); +} + +static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev) +{ + if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit) + return NULL; + + return netdev_priv(netdev); +} + +static int ionic_lif_notify(struct notifier_block *nb, + unsigned long event, void *info) +{ + struct net_device *ndev = netdev_notifier_info_to_dev(info); + struct ionic *ionic = container_of(nb, struct ionic, nb); + struct ionic_lif *lif = ionic_netdev_lif(ndev); + + if (!lif || lif->ionic != ionic) + return NOTIFY_DONE; + + switch (event) { + case NETDEV_CHANGENAME: + ionic_lif_set_netdev_info(lif); + break; + } + + return NOTIFY_DONE; +} + +int ionic_lifs_register(struct ionic *ionic) +{ + int err; + + INIT_WORK(&ionic->nb_work, ionic_lif_notify_work); + + ionic->nb.notifier_call = ionic_lif_notify; + + err = register_netdevice_notifier(&ionic->nb); + if (err) + ionic->nb.notifier_call = NULL; + + /* only register LIF0 for now */ + err = register_netdev(ionic->master_lif->netdev); + if (err) { + dev_err(ionic->dev, "Cannot register net device, aborting\n"); + return err; + } + + ionic_link_status_check_request(ionic->master_lif); + ionic->master_lif->registered = true; + + return 0; +} + +void ionic_lifs_unregister(struct ionic *ionic) +{ + if (ionic->nb.notifier_call) { + unregister_netdevice_notifier(&ionic->nb); + cancel_work_sync(&ionic->nb_work); + ionic->nb.notifier_call = NULL; + } + + /* There is only one lif ever registered in the + * current model, so don't bother searching the + * ionic->lif for candidates to unregister + */ + cancel_work_sync(&ionic->master_lif->deferred.work); + cancel_work_sync(&ionic->master_lif->tx_timeout_work); + if (ionic->master_lif->netdev->reg_state == NETREG_REGISTERED) + unregister_netdev(ionic->master_lif->netdev); +} + +int ionic_lif_identify(struct ionic *ionic, u8 lif_type, + union ionic_lif_identity *lid) +{ + struct ionic_dev *idev = &ionic->idev; + size_t sz; + int err; + + sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data)); + + mutex_lock(&ionic->dev_cmd_lock); + ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1); + err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); + memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz); + mutex_unlock(&ionic->dev_cmd_lock); + if (err) + return (err); + + dev_dbg(ionic->dev, "capabilities 0x%llx\n", + le64_to_cpu(lid->capabilities)); + + dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n", + le32_to_cpu(lid->eth.max_ucast_filters)); + dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n", + le32_to_cpu(lid->eth.max_mcast_filters)); + dev_dbg(ionic->dev, "eth.features 0x%llx\n", + le64_to_cpu(lid->eth.config.features)); + dev_dbg(ionic->dev, 
"eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n", + le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ])); + dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n", + le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ])); + dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n", + le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ])); + dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n", + le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ])); + dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name); + dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac); + dev_dbg(ionic->dev, "eth.config.mtu %d\n", + le32_to_cpu(lid->eth.config.mtu)); + + return 0; +} + +int ionic_lifs_size(struct ionic *ionic) +{ + struct ionic_identity *ident = &ionic->ident; + unsigned int nintrs, dev_nintrs; + union ionic_lif_config *lc; + unsigned int ntxqs_per_lif; + unsigned int nrxqs_per_lif; + unsigned int neqs_per_lif; + unsigned int nnqs_per_lif; + unsigned int nxqs, neqs; + unsigned int min_intrs; + int err; + + lc = &ident->lif.eth.config; + dev_nintrs = le32_to_cpu(ident->dev.nintrs); + neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count); + nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]); + ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]); + nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]); + + nxqs = min(ntxqs_per_lif, nrxqs_per_lif); + nxqs = min(nxqs, num_online_cpus()); + neqs = min(neqs_per_lif, num_online_cpus()); + +try_again: + /* interrupt usage: + * 1 for master lif adminq/notifyq + * 1 for each CPU for master lif TxRx queue pairs + * whatever's left is for RDMA queues + */ + nintrs = 1 + nxqs + neqs; + min_intrs = 2; /* adminq + 1 TxRx queue pair */ + + if (nintrs > dev_nintrs) + goto try_fewer; + + err = ionic_bus_alloc_irq_vectors(ionic, nintrs); + if (err < 0 && err != -ENOSPC) { + dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err); + return err; + } + if (err == -ENOSPC) + goto try_fewer; + + if (err != nintrs) { + ionic_bus_free_irq_vectors(ionic); + goto try_fewer; + } + + ionic->nnqs_per_lif = nnqs_per_lif; + ionic->neqs_per_lif = neqs; + ionic->ntxqs_per_lif = nxqs; + ionic->nrxqs_per_lif = nxqs; + ionic->nintrs = nintrs; + + ionic_debugfs_add_sizes(ionic); + + return 0; + +try_fewer: + if (nnqs_per_lif > 1) { + nnqs_per_lif >>= 1; + goto try_again; + } + if (neqs > 1) { + neqs >>= 1; + goto try_again; + } + if (nxqs > 1) { + nxqs >>= 1; + goto try_again; + } + dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs); + return -ENOSPC; +} diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h new file mode 100644 index 000000000000..812190e729c2 --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h @@ -0,0 +1,277 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#ifndef _IONIC_LIF_H_ +#define _IONIC_LIF_H_ + +#include <linux/pci.h> +#include "ionic_rx_filter.h" + +#define IONIC_ADMINQ_LENGTH 16 /* must be a power of two */ +#define IONIC_NOTIFYQ_LENGTH 64 /* must be a power of two */ + +#define IONIC_MAX_NUM_NAPI_CNTR (NAPI_POLL_WEIGHT + 1) +#define IONIC_MAX_NUM_SG_CNTR (IONIC_TX_MAX_SG_ELEMS + 1) +#define IONIC_RX_COPYBREAK_DEFAULT 256 + +struct ionic_tx_stats { + u64 dma_map_err; + u64 pkts; + u64 bytes; + u64 clean; + u64 linearize; + u64 no_csum; + u64 csum; + u64 crc32_csum; + u64 tso; + u64 frags; + u64 
sg_cntr[IONIC_MAX_NUM_SG_CNTR]; +}; + +struct ionic_rx_stats { + u64 dma_map_err; + u64 alloc_err; + u64 pkts; + u64 bytes; + u64 csum_none; + u64 csum_complete; + u64 csum_error; + u64 buffers_posted; +}; + +#define IONIC_QCQ_F_INITED BIT(0) +#define IONIC_QCQ_F_SG BIT(1) +#define IONIC_QCQ_F_INTR BIT(2) +#define IONIC_QCQ_F_TX_STATS BIT(3) +#define IONIC_QCQ_F_RX_STATS BIT(4) +#define IONIC_QCQ_F_NOTIFYQ BIT(5) + +struct ionic_napi_stats { + u64 poll_count; + u64 work_done_cntr[IONIC_MAX_NUM_NAPI_CNTR]; +}; + +struct ionic_q_stats { + union { + struct ionic_tx_stats tx; + struct ionic_rx_stats rx; + }; +}; + +struct ionic_qcq { + void *base; + dma_addr_t base_pa; + unsigned int total_size; + struct ionic_queue q; + struct ionic_cq cq; + struct ionic_intr_info intr; + struct napi_struct napi; + struct ionic_napi_stats napi_stats; + struct ionic_q_stats *stats; + unsigned int flags; + struct dentry *dentry; +}; + +struct ionic_qcqst { + struct ionic_qcq *qcq; + struct ionic_q_stats *stats; +}; + +#define q_to_qcq(q) container_of(q, struct ionic_qcq, q) +#define q_to_tx_stats(q) (&q_to_qcq(q)->stats->tx) +#define q_to_rx_stats(q) (&q_to_qcq(q)->stats->rx) +#define napi_to_qcq(napi) container_of(napi, struct ionic_qcq, napi) +#define napi_to_cq(napi) (&napi_to_qcq(napi)->cq) + +enum ionic_deferred_work_type { + IONIC_DW_TYPE_RX_MODE, + IONIC_DW_TYPE_RX_ADDR_ADD, + IONIC_DW_TYPE_RX_ADDR_DEL, + IONIC_DW_TYPE_LINK_STATUS, + IONIC_DW_TYPE_LIF_RESET, +}; + +struct ionic_deferred_work { + struct list_head list; + enum ionic_deferred_work_type type; + union { + unsigned int rx_mode; + u8 addr[ETH_ALEN]; + }; +}; + +struct ionic_deferred { + spinlock_t lock; /* lock for deferred work list */ + struct list_head list; + struct work_struct work; +}; + +struct ionic_lif_sw_stats { + u64 tx_packets; + u64 tx_bytes; + u64 rx_packets; + u64 rx_bytes; + u64 tx_tso; + u64 tx_no_csum; + u64 tx_csum; + u64 rx_csum_none; + u64 rx_csum_complete; + u64 rx_csum_error; +}; + +enum ionic_lif_state_flags { + IONIC_LIF_INITED, + IONIC_LIF_SW_DEBUG_STATS, + IONIC_LIF_UP, + IONIC_LIF_LINK_CHECK_REQUESTED, + IONIC_LIF_QUEUE_RESET, + + /* leave this as last */ + IONIC_LIF_STATE_SIZE +}; + +#define IONIC_LIF_NAME_MAX_SZ 32 +struct ionic_lif { + char name[IONIC_LIF_NAME_MAX_SZ]; + struct list_head list; + struct net_device *netdev; + DECLARE_BITMAP(state, IONIC_LIF_STATE_SIZE); + struct ionic *ionic; + bool registered; + unsigned int index; + unsigned int hw_index; + unsigned int kern_pid; + u64 __iomem *kern_dbpage; + spinlock_t adminq_lock; /* lock for AdminQ operations */ + struct ionic_qcq *adminqcq; + struct ionic_qcq *notifyqcq; + struct ionic_qcqst *txqcqs; + struct ionic_qcqst *rxqcqs; + u64 last_eid; + unsigned int neqs; + unsigned int nxqs; + unsigned int ntxq_descs; + unsigned int nrxq_descs; + u32 rx_copybreak; + unsigned int rx_mode; + u64 hw_features; + bool mc_overflow; + unsigned int nmcast; + bool uc_overflow; + unsigned int nucast; + + struct ionic_lif_info *info; + dma_addr_t info_pa; + u32 info_sz; + + u16 rss_types; + u8 rss_hash_key[IONIC_RSS_HASH_KEY_SIZE]; + u8 *rss_ind_tbl; + dma_addr_t rss_ind_tbl_pa; + u32 rss_ind_tbl_sz; + + struct ionic_rx_filters rx_filters; + struct ionic_deferred deferred; + unsigned long *dbid_inuse; + unsigned int dbid_count; + struct dentry *dentry; + u32 rx_coalesce_usecs; + u32 flags; + struct work_struct tx_timeout_work; +}; + +#define lif_to_txqcq(lif, i) ((lif)->txqcqs[i].qcq) +#define lif_to_rxqcq(lif, i) ((lif)->rxqcqs[i].qcq) +#define lif_to_txq(lif, i) 
(&lif_to_txqcq((lif), i)->q) +#define lif_to_rxq(lif, i) (&lif_to_rxqcq((lif), i)->q) + +static inline int ionic_wait_for_bit(struct ionic_lif *lif, int bitname) +{ + unsigned long tlimit = jiffies + HZ; + + while (test_and_set_bit(bitname, lif->state) && + time_before(jiffies, tlimit)) + usleep_range(100, 200); + + return test_bit(bitname, lif->state); +} + +static inline u32 ionic_coal_usec_to_hw(struct ionic *ionic, u32 usecs) +{ + u32 mult = le32_to_cpu(ionic->ident.dev.intr_coal_mult); + u32 div = le32_to_cpu(ionic->ident.dev.intr_coal_div); + + /* Div-by-zero should never be an issue, but check anyway */ + if (!div || !mult) + return 0; + + /* Round up in case usecs is close to the next hw unit */ + usecs += (div / mult) >> 1; + + /* Convert from usecs to device units */ + return (usecs * mult) / div; +} + +static inline u32 ionic_coal_hw_to_usec(struct ionic *ionic, u32 units) +{ + u32 mult = le32_to_cpu(ionic->ident.dev.intr_coal_mult); + u32 div = le32_to_cpu(ionic->ident.dev.intr_coal_div); + + /* Div-by-zero should never be an issue, but check anyway */ + if (!div || !mult) + return 0; + + /* Convert from device units to usec */ + return (units * div) / mult; +} + +int ionic_lifs_alloc(struct ionic *ionic); +void ionic_lifs_free(struct ionic *ionic); +void ionic_lifs_deinit(struct ionic *ionic); +int ionic_lifs_init(struct ionic *ionic); +int ionic_lifs_register(struct ionic *ionic); +void ionic_lifs_unregister(struct ionic *ionic); +int ionic_lif_identify(struct ionic *ionic, u8 lif_type, + union ionic_lif_identity *lif_ident); +int ionic_lifs_size(struct ionic *ionic); +int ionic_lif_rss_config(struct ionic_lif *lif, u16 types, + const u8 *key, const u32 *indir); + +int ionic_open(struct net_device *netdev); +int ionic_stop(struct net_device *netdev); +int ionic_reset_queues(struct ionic_lif *lif); + +static inline void debug_stats_txq_post(struct ionic_qcq *qcq, + struct ionic_txq_desc *desc, bool dbell) +{ + u8 num_sg_elems = ((le64_to_cpu(desc->cmd) >> IONIC_TXQ_DESC_NSGE_SHIFT) + & IONIC_TXQ_DESC_NSGE_MASK); + + qcq->q.dbell_count += dbell; + + if (num_sg_elems > (IONIC_MAX_NUM_SG_CNTR - 1)) + num_sg_elems = IONIC_MAX_NUM_SG_CNTR - 1; + + qcq->stats->tx.sg_cntr[num_sg_elems]++; +} + +static inline void debug_stats_napi_poll(struct ionic_qcq *qcq, + unsigned int work_done) +{ + qcq->napi_stats.poll_count++; + + if (work_done > (IONIC_MAX_NUM_NAPI_CNTR - 1)) + work_done = IONIC_MAX_NUM_NAPI_CNTR - 1; + + qcq->napi_stats.work_done_cntr[work_done]++; +} + +#define DEBUG_STATS_CQE_CNT(cq) ((cq)->compl_count++) +#define DEBUG_STATS_RX_BUFF_CNT(qcq) ((qcq)->stats->rx.buffers_posted++) +#define DEBUG_STATS_INTR_REARM(intr) ((intr)->rearm_count++) +#define DEBUG_STATS_TXQ_POST(qcq, txdesc, dbell) \ + debug_stats_txq_post(qcq, txdesc, dbell) +#define DEBUG_STATS_NAPI_POLL(qcq, work_done) \ + debug_stats_napi_poll(qcq, work_done) + +#endif /* _IONIC_LIF_H_ */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c new file mode 100644 index 000000000000..15e432386b35 --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c @@ -0,0 +1,548 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/utsname.h> + +#include "ionic.h" +#include "ionic_bus.h" +#include "ionic_lif.h" +#include "ionic_debugfs.h" + +MODULE_DESCRIPTION(IONIC_DRV_DESCRIPTION); +MODULE_AUTHOR("Pensando Systems, Inc"); 
+MODULE_LICENSE("GPL"); +MODULE_VERSION(IONIC_DRV_VERSION); + +static const char *ionic_error_to_str(enum ionic_status_code code) +{ + switch (code) { + case IONIC_RC_SUCCESS: + return "IONIC_RC_SUCCESS"; + case IONIC_RC_EVERSION: + return "IONIC_RC_EVERSION"; + case IONIC_RC_EOPCODE: + return "IONIC_RC_EOPCODE"; + case IONIC_RC_EIO: + return "IONIC_RC_EIO"; + case IONIC_RC_EPERM: + return "IONIC_RC_EPERM"; + case IONIC_RC_EQID: + return "IONIC_RC_EQID"; + case IONIC_RC_EQTYPE: + return "IONIC_RC_EQTYPE"; + case IONIC_RC_ENOENT: + return "IONIC_RC_ENOENT"; + case IONIC_RC_EINTR: + return "IONIC_RC_EINTR"; + case IONIC_RC_EAGAIN: + return "IONIC_RC_EAGAIN"; + case IONIC_RC_ENOMEM: + return "IONIC_RC_ENOMEM"; + case IONIC_RC_EFAULT: + return "IONIC_RC_EFAULT"; + case IONIC_RC_EBUSY: + return "IONIC_RC_EBUSY"; + case IONIC_RC_EEXIST: + return "IONIC_RC_EEXIST"; + case IONIC_RC_EINVAL: + return "IONIC_RC_EINVAL"; + case IONIC_RC_ENOSPC: + return "IONIC_RC_ENOSPC"; + case IONIC_RC_ERANGE: + return "IONIC_RC_ERANGE"; + case IONIC_RC_BAD_ADDR: + return "IONIC_RC_BAD_ADDR"; + case IONIC_RC_DEV_CMD: + return "IONIC_RC_DEV_CMD"; + case IONIC_RC_ERROR: + return "IONIC_RC_ERROR"; + case IONIC_RC_ERDMA: + return "IONIC_RC_ERDMA"; + default: + return "IONIC_RC_UNKNOWN"; + } +} + +static int ionic_error_to_errno(enum ionic_status_code code) +{ + switch (code) { + case IONIC_RC_SUCCESS: + return 0; + case IONIC_RC_EVERSION: + case IONIC_RC_EQTYPE: + case IONIC_RC_EQID: + case IONIC_RC_EINVAL: + return -EINVAL; + case IONIC_RC_EPERM: + return -EPERM; + case IONIC_RC_ENOENT: + return -ENOENT; + case IONIC_RC_EAGAIN: + return -EAGAIN; + case IONIC_RC_ENOMEM: + return -ENOMEM; + case IONIC_RC_EFAULT: + return -EFAULT; + case IONIC_RC_EBUSY: + return -EBUSY; + case IONIC_RC_EEXIST: + return -EEXIST; + case IONIC_RC_ENOSPC: + return -ENOSPC; + case IONIC_RC_ERANGE: + return -ERANGE; + case IONIC_RC_BAD_ADDR: + return -EFAULT; + case IONIC_RC_EOPCODE: + case IONIC_RC_EINTR: + case IONIC_RC_DEV_CMD: + case IONIC_RC_ERROR: + case IONIC_RC_ERDMA: + case IONIC_RC_EIO: + default: + return -EIO; + } +} + +static const char *ionic_opcode_to_str(enum ionic_cmd_opcode opcode) +{ + switch (opcode) { + case IONIC_CMD_NOP: + return "IONIC_CMD_NOP"; + case IONIC_CMD_INIT: + return "IONIC_CMD_INIT"; + case IONIC_CMD_RESET: + return "IONIC_CMD_RESET"; + case IONIC_CMD_IDENTIFY: + return "IONIC_CMD_IDENTIFY"; + case IONIC_CMD_GETATTR: + return "IONIC_CMD_GETATTR"; + case IONIC_CMD_SETATTR: + return "IONIC_CMD_SETATTR"; + case IONIC_CMD_PORT_IDENTIFY: + return "IONIC_CMD_PORT_IDENTIFY"; + case IONIC_CMD_PORT_INIT: + return "IONIC_CMD_PORT_INIT"; + case IONIC_CMD_PORT_RESET: + return "IONIC_CMD_PORT_RESET"; + case IONIC_CMD_PORT_GETATTR: + return "IONIC_CMD_PORT_GETATTR"; + case IONIC_CMD_PORT_SETATTR: + return "IONIC_CMD_PORT_SETATTR"; + case IONIC_CMD_LIF_INIT: + return "IONIC_CMD_LIF_INIT"; + case IONIC_CMD_LIF_RESET: + return "IONIC_CMD_LIF_RESET"; + case IONIC_CMD_LIF_IDENTIFY: + return "IONIC_CMD_LIF_IDENTIFY"; + case IONIC_CMD_LIF_SETATTR: + return "IONIC_CMD_LIF_SETATTR"; + case IONIC_CMD_LIF_GETATTR: + return "IONIC_CMD_LIF_GETATTR"; + case IONIC_CMD_RX_MODE_SET: + return "IONIC_CMD_RX_MODE_SET"; + case IONIC_CMD_RX_FILTER_ADD: + return "IONIC_CMD_RX_FILTER_ADD"; + case IONIC_CMD_RX_FILTER_DEL: + return "IONIC_CMD_RX_FILTER_DEL"; + case IONIC_CMD_Q_INIT: + return "IONIC_CMD_Q_INIT"; + case IONIC_CMD_Q_CONTROL: + return "IONIC_CMD_Q_CONTROL"; + case IONIC_CMD_RDMA_RESET_LIF: + return "IONIC_CMD_RDMA_RESET_LIF"; + case 
IONIC_CMD_RDMA_CREATE_EQ: + return "IONIC_CMD_RDMA_CREATE_EQ"; + case IONIC_CMD_RDMA_CREATE_CQ: + return "IONIC_CMD_RDMA_CREATE_CQ"; + case IONIC_CMD_RDMA_CREATE_ADMINQ: + return "IONIC_CMD_RDMA_CREATE_ADMINQ"; + case IONIC_CMD_FW_DOWNLOAD: + return "IONIC_CMD_FW_DOWNLOAD"; + case IONIC_CMD_FW_CONTROL: + return "IONIC_CMD_FW_CONTROL"; + default: + return "DEVCMD_UNKNOWN"; + } +} + +static void ionic_adminq_flush(struct ionic_lif *lif) +{ + struct ionic_queue *adminq = &lif->adminqcq->q; + + spin_lock(&lif->adminq_lock); + + while (adminq->tail != adminq->head) { + memset(adminq->tail->desc, 0, sizeof(union ionic_adminq_cmd)); + adminq->tail->cb = NULL; + adminq->tail->cb_arg = NULL; + adminq->tail = adminq->tail->next; + } + spin_unlock(&lif->adminq_lock); +} + +static int ionic_adminq_check_err(struct ionic_lif *lif, + struct ionic_admin_ctx *ctx, + bool timeout) +{ + struct net_device *netdev = lif->netdev; + const char *opcode_str; + const char *status_str; + int err = 0; + + if (ctx->comp.comp.status || timeout) { + opcode_str = ionic_opcode_to_str(ctx->cmd.cmd.opcode); + status_str = ionic_error_to_str(ctx->comp.comp.status); + err = timeout ? -ETIMEDOUT : + ionic_error_to_errno(ctx->comp.comp.status); + + netdev_err(netdev, "%s (%d) failed: %s (%d)\n", + opcode_str, ctx->cmd.cmd.opcode, + timeout ? "TIMEOUT" : status_str, err); + + if (timeout) + ionic_adminq_flush(lif); + } + + return err; +} + +static void ionic_adminq_cb(struct ionic_queue *q, + struct ionic_desc_info *desc_info, + struct ionic_cq_info *cq_info, void *cb_arg) +{ + struct ionic_admin_ctx *ctx = cb_arg; + struct ionic_admin_comp *comp; + struct device *dev; + + if (!ctx) + return; + + comp = cq_info->cq_desc; + dev = &q->lif->netdev->dev; + + memcpy(&ctx->comp, comp, sizeof(*comp)); + + dev_dbg(dev, "comp admin queue command:\n"); + dynamic_hex_dump("comp ", DUMP_PREFIX_OFFSET, 16, 1, + &ctx->comp, sizeof(ctx->comp), true); + + complete_all(&ctx->work); +} + +static int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx) +{ + struct ionic_queue *adminq = &lif->adminqcq->q; + int err = 0; + + WARN_ON(in_interrupt()); + + spin_lock(&lif->adminq_lock); + if (!ionic_q_has_space(adminq, 1)) { + err = -ENOSPC; + goto err_out; + } + + memcpy(adminq->head->desc, &ctx->cmd, sizeof(ctx->cmd)); + + dev_dbg(&lif->netdev->dev, "post admin queue command:\n"); + dynamic_hex_dump("cmd ", DUMP_PREFIX_OFFSET, 16, 1, + &ctx->cmd, sizeof(ctx->cmd), true); + + ionic_q_post(adminq, true, ionic_adminq_cb, ctx); + +err_out: + spin_unlock(&lif->adminq_lock); + + return err; +} + +int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx) +{ + struct net_device *netdev = lif->netdev; + unsigned long remaining; + const char *name; + int err; + + err = ionic_adminq_post(lif, ctx); + if (err) { + name = ionic_opcode_to_str(ctx->cmd.cmd.opcode); + netdev_err(netdev, "Posting of %s (%d) failed: %d\n", + name, ctx->cmd.cmd.opcode, err); + return err; + } + + remaining = wait_for_completion_timeout(&ctx->work, + HZ * (ulong)DEVCMD_TIMEOUT); + return ionic_adminq_check_err(lif, ctx, (remaining == 0)); +} + +int ionic_napi(struct napi_struct *napi, int budget, ionic_cq_cb cb, + ionic_cq_done_cb done_cb, void *done_arg) +{ + struct ionic_qcq *qcq = napi_to_qcq(napi); + struct ionic_cq *cq = &qcq->cq; + u32 work_done, flags = 0; + + work_done = ionic_cq_service(cq, budget, cb, done_cb, done_arg); + + if (work_done < budget && napi_complete_done(napi, work_done)) { + flags |= IONIC_INTR_CRED_UNMASK; + 
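/* napi is done: returning credits with IONIC_INTR_CRED_UNMASK set + * re-enables the interrupt in the same register write below + */ +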
DEBUG_STATS_INTR_REARM(cq->bound_intr); + } + + if (work_done || flags) { + flags |= IONIC_INTR_CRED_RESET_COALESCE; + ionic_intr_credits(cq->lif->ionic->idev.intr_ctrl, + cq->bound_intr->index, + work_done, flags); + } + + DEBUG_STATS_NAPI_POLL(qcq, work_done); + + return work_done; +} + +int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds) +{ + struct ionic_dev *idev = &ionic->idev; + unsigned long start_time; + unsigned long max_wait; + unsigned long duration; + int opcode; + int done; + int err; + + WARN_ON(in_interrupt()); + + /* Wait for dev cmd to complete, retrying if we get EAGAIN, + * but don't wait any longer than max_seconds. + */ + max_wait = jiffies + (max_seconds * HZ); +try_again: + start_time = jiffies; + do { + done = ionic_dev_cmd_done(idev); + if (done) + break; + msleep(20); + } while (!done && time_before(jiffies, max_wait)); + duration = jiffies - start_time; + + opcode = idev->dev_cmd_regs->cmd.cmd.opcode; + dev_dbg(ionic->dev, "DEVCMD %s (%d) done=%d took %ld secs (%ld jiffies)\n", + ionic_opcode_to_str(opcode), opcode, + done, duration / HZ, duration); + + if (!done && !time_before(jiffies, max_wait)) { + dev_warn(ionic->dev, "DEVCMD %s (%d) timeout after %ld secs\n", + ionic_opcode_to_str(opcode), opcode, max_seconds); + return -ETIMEDOUT; + } + + err = ionic_dev_cmd_status(&ionic->idev); + if (err) { + if (err == IONIC_RC_EAGAIN && !time_after(jiffies, max_wait)) { + dev_err(ionic->dev, "DEV_CMD %s (%d) error, %s (%d) retrying...\n", + ionic_opcode_to_str(opcode), opcode, + ionic_error_to_str(err), err); + + msleep(1000); + iowrite32(0, &idev->dev_cmd_regs->done); + iowrite32(1, &idev->dev_cmd_regs->doorbell); + goto try_again; + } + + dev_err(ionic->dev, "DEV_CMD %s (%d) error, %s (%d) failed\n", + ionic_opcode_to_str(opcode), opcode, + ionic_error_to_str(err), err); + + return ionic_error_to_errno(err); + } + + return 0; +} + +int ionic_setup(struct ionic *ionic) +{ + int err; + + err = ionic_dev_setup(ionic); + if (err) + return err; + + return 0; +} + +int ionic_identify(struct ionic *ionic) +{ + struct ionic_identity *ident = &ionic->ident; + struct ionic_dev *idev = &ionic->idev; + size_t sz; + int err; + + memset(ident, 0, sizeof(*ident)); + + ident->drv.os_type = cpu_to_le32(IONIC_OS_TYPE_LINUX); + strncpy(ident->drv.driver_ver_str, IONIC_DRV_VERSION, + sizeof(ident->drv.driver_ver_str) - 1); + + mutex_lock(&ionic->dev_cmd_lock); + + sz = min(sizeof(ident->drv), sizeof(idev->dev_cmd_regs->data)); + memcpy_toio(&idev->dev_cmd_regs->data, &ident->drv, sz); + + ionic_dev_cmd_identify(idev, IONIC_IDENTITY_VERSION_1); + err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); + if (!err) { + sz = min(sizeof(ident->dev), sizeof(idev->dev_cmd_regs->data)); + memcpy_fromio(&ident->dev, &idev->dev_cmd_regs->data, sz); + } + + mutex_unlock(&ionic->dev_cmd_lock); + + if (err) + goto err_out_unmap; + + ionic_debugfs_add_ident(ionic); + + return 0; + +err_out_unmap: + return err; +} + +int ionic_init(struct ionic *ionic) +{ + struct ionic_dev *idev = &ionic->idev; + int err; + + mutex_lock(&ionic->dev_cmd_lock); + ionic_dev_cmd_init(idev); + err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); + mutex_unlock(&ionic->dev_cmd_lock); + + return err; +} + +int ionic_reset(struct ionic *ionic) +{ + struct ionic_dev *idev = &ionic->idev; + int err; + + mutex_lock(&ionic->dev_cmd_lock); + ionic_dev_cmd_reset(idev); + err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); + mutex_unlock(&ionic->dev_cmd_lock); + + return err; +} + +int ionic_port_identify(struct ionic *ionic) 
+{ + struct ionic_identity *ident = &ionic->ident; + struct ionic_dev *idev = &ionic->idev; + size_t sz; + int err; + + mutex_lock(&ionic->dev_cmd_lock); + + ionic_dev_cmd_port_identify(idev); + err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); + if (!err) { + sz = min(sizeof(ident->port), sizeof(idev->dev_cmd_regs->data)); + memcpy_fromio(&ident->port, &idev->dev_cmd_regs->data, sz); + } + + mutex_unlock(&ionic->dev_cmd_lock); + + return err; +} + +int ionic_port_init(struct ionic *ionic) +{ + struct ionic_identity *ident = &ionic->ident; + struct ionic_dev *idev = &ionic->idev; + size_t sz; + int err; + + if (idev->port_info) + return 0; + + idev->port_info_sz = ALIGN(sizeof(*idev->port_info), PAGE_SIZE); + idev->port_info = dma_alloc_coherent(ionic->dev, idev->port_info_sz, + &idev->port_info_pa, + GFP_KERNEL); + if (!idev->port_info) { + dev_err(ionic->dev, "Failed to allocate port info, aborting\n"); + return -ENOMEM; + } + + sz = min(sizeof(ident->port.config), sizeof(idev->dev_cmd_regs->data)); + + mutex_lock(&ionic->dev_cmd_lock); + + memcpy_toio(&idev->dev_cmd_regs->data, &ident->port.config, sz); + ionic_dev_cmd_port_init(idev); + err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); + + ionic_dev_cmd_port_state(&ionic->idev, IONIC_PORT_ADMIN_STATE_UP); + (void)ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); + + mutex_unlock(&ionic->dev_cmd_lock); + if (err) { + dev_err(ionic->dev, "Failed to init port\n"); + dma_free_coherent(ionic->dev, idev->port_info_sz, + idev->port_info, idev->port_info_pa); + idev->port_info = NULL; + idev->port_info_pa = 0; + } + + return err; +} + +int ionic_port_reset(struct ionic *ionic) +{ + struct ionic_dev *idev = &ionic->idev; + int err; + + if (!idev->port_info) + return 0; + + mutex_lock(&ionic->dev_cmd_lock); + ionic_dev_cmd_port_reset(idev); + err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); + mutex_unlock(&ionic->dev_cmd_lock); + + dma_free_coherent(ionic->dev, idev->port_info_sz, + idev->port_info, idev->port_info_pa); + + idev->port_info = NULL; + idev->port_info_pa = 0; + + if (err) + dev_err(ionic->dev, "Failed to reset port\n"); + + return err; +} + +static int __init ionic_init_module(void) +{ + pr_info("%s %s, ver %s\n", + IONIC_DRV_NAME, IONIC_DRV_DESCRIPTION, IONIC_DRV_VERSION); + ionic_debugfs_create(); + return ionic_bus_register_driver(); +} + +static void __exit ionic_cleanup_module(void) +{ + ionic_bus_unregister_driver(); + ionic_debugfs_destroy(); + + pr_info("%s removed\n", IONIC_DRV_NAME); +} + +module_init(ionic_init_module); +module_exit(ionic_cleanup_module); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_regs.h b/drivers/net/ethernet/pensando/ionic/ionic_regs.h new file mode 100644 index 000000000000..03ee5a36472b --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_regs.h @@ -0,0 +1,136 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB OR BSD-2-Clause */ +/* Copyright (c) 2018-2019 Pensando Systems, Inc. All rights reserved. */ + +#ifndef IONIC_REGS_H +#define IONIC_REGS_H + +#include <linux/io.h> + +/** struct ionic_intr - interrupt control register set. + * @coal_init: coalesce timer initial value. + * @mask: interrupt mask value. + * @credits: interrupt credit count and return. + * @mask_assert: interrupt mask value on assert. + * @coal: coalesce timer time remaining. 
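+ * @rsvd: reserved words that pad each interrupt's register set to an + * eight-word stride.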
+ */ +struct ionic_intr { + u32 coal_init; + u32 mask; + u32 credits; + u32 mask_assert; + u32 coal; + u32 rsvd[3]; +}; + +#define IONIC_INTR_CTRL_REGS_MAX 2048 +#define IONIC_INTR_CTRL_COAL_MAX 0x3F + +/** enum ionic_intr_mask_vals - valid values for mask and mask_assert. + * @IONIC_INTR_MASK_CLEAR: unmask interrupt. + * @IONIC_INTR_MASK_SET: mask interrupt. + */ +enum ionic_intr_mask_vals { + IONIC_INTR_MASK_CLEAR = 0, + IONIC_INTR_MASK_SET = 1, +}; + +/** enum ionic_intr_credits_bits - bitwise composition of credits values. + * @IONIC_INTR_CRED_COUNT: bit mask of credit count, no shift needed. + * @IONIC_INTR_CRED_COUNT_SIGNED: bit mask of credit count, including sign bit. + * @IONIC_INTR_CRED_UNMASK: unmask the interrupt. + * @IONIC_INTR_CRED_RESET_COALESCE: reset the coalesce timer. + * @IONIC_INTR_CRED_REARM: unmask and reset the coalesce timer. + */ +enum ionic_intr_credits_bits { + IONIC_INTR_CRED_COUNT = 0x7fffu, + IONIC_INTR_CRED_COUNT_SIGNED = 0xffffu, + IONIC_INTR_CRED_UNMASK = 0x10000u, + IONIC_INTR_CRED_RESET_COALESCE = 0x20000u, + IONIC_INTR_CRED_REARM = (IONIC_INTR_CRED_UNMASK | + IONIC_INTR_CRED_RESET_COALESCE), +}; + +static inline void ionic_intr_coal_init(struct ionic_intr __iomem *intr_ctrl, + int intr_idx, u32 coal) +{ + iowrite32(coal, &intr_ctrl[intr_idx].coal_init); +} + +static inline void ionic_intr_mask(struct ionic_intr __iomem *intr_ctrl, + int intr_idx, u32 mask) +{ + iowrite32(mask, &intr_ctrl[intr_idx].mask); +} + +static inline void ionic_intr_credits(struct ionic_intr __iomem *intr_ctrl, + int intr_idx, u32 cred, u32 flags) +{ + if (WARN_ON_ONCE(cred > IONIC_INTR_CRED_COUNT)) { + cred = ioread32(&intr_ctrl[intr_idx].credits); + cred &= IONIC_INTR_CRED_COUNT_SIGNED; + } + + iowrite32(cred | flags, &intr_ctrl[intr_idx].credits); +} + +static inline void ionic_intr_clean(struct ionic_intr __iomem *intr_ctrl, + int intr_idx) +{ + u32 cred; + + cred = ioread32(&intr_ctrl[intr_idx].credits); + cred &= IONIC_INTR_CRED_COUNT_SIGNED; + cred |= IONIC_INTR_CRED_RESET_COALESCE; + iowrite32(cred, &intr_ctrl[intr_idx].credits); +} + +static inline void ionic_intr_mask_assert(struct ionic_intr __iomem *intr_ctrl, + int intr_idx, u32 mask) +{ + iowrite32(mask, &intr_ctrl[intr_idx].mask_assert); +} + +/** enum ionic_dbell_bits - bitwise composition of dbell values. + * + * @IONIC_DBELL_QID_MASK: unshifted mask of valid queue id bits. + * @IONIC_DBELL_QID_SHIFT: queue id shift amount in dbell value. + * @IONIC_DBELL_QID: macro to build QID component of dbell value. + * + * @IONIC_DBELL_RING_MASK: unshifted mask of valid ring bits. + * @IONIC_DBELL_RING_SHIFT: ring shift amount in dbell value. + * @IONIC_DBELL_RING: macro to build ring component of dbell value. + * + * @IONIC_DBELL_RING_0: ring zero dbell component value. + * @IONIC_DBELL_RING_1: ring one dbell component value. + * @IONIC_DBELL_RING_2: ring two dbell component value. + * @IONIC_DBELL_RING_3: ring three dbell component value. + * + * @IONIC_DBELL_INDEX_MASK: bit mask of valid index bits, no shift needed. 
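+ * + * A complete doorbell value is composed by OR-ing the pieces, e.g. + * IONIC_DBELL_QID(qid) | IONIC_DBELL_RING_0 | (index & IONIC_DBELL_INDEX_MASK).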
+ */ +enum ionic_dbell_bits { + IONIC_DBELL_QID_MASK = 0xffffff, + IONIC_DBELL_QID_SHIFT = 24, + +#define IONIC_DBELL_QID(n) \ + (((u64)(n) & IONIC_DBELL_QID_MASK) << IONIC_DBELL_QID_SHIFT) + + IONIC_DBELL_RING_MASK = 0x7, + IONIC_DBELL_RING_SHIFT = 16, + +#define IONIC_DBELL_RING(n) \ + (((u64)(n) & IONIC_DBELL_RING_MASK) << IONIC_DBELL_RING_SHIFT) + + IONIC_DBELL_RING_0 = 0, + IONIC_DBELL_RING_1 = IONIC_DBELL_RING(1), + IONIC_DBELL_RING_2 = IONIC_DBELL_RING(2), + IONIC_DBELL_RING_3 = IONIC_DBELL_RING(3), + + IONIC_DBELL_INDEX_MASK = 0xffff, +}; + +static inline void ionic_dbell_ring(u64 __iomem *db_page, int qtype, u64 val) +{ + writeq(val, &db_page[qtype]); +} + +#endif /* IONIC_REGS_H */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c new file mode 100644 index 000000000000..7a093f148ee5 --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c @@ -0,0 +1,150 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#include <linux/netdevice.h> +#include <linux/etherdevice.h> + +#include "ionic.h" +#include "ionic_lif.h" +#include "ionic_rx_filter.h" + +void ionic_rx_filter_free(struct ionic_lif *lif, struct ionic_rx_filter *f) +{ + struct device *dev = lif->ionic->dev; + + hlist_del(&f->by_id); + hlist_del(&f->by_hash); + devm_kfree(dev, f); +} + +int ionic_rx_filter_del(struct ionic_lif *lif, struct ionic_rx_filter *f) +{ + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.rx_filter_del = { + .opcode = IONIC_CMD_RX_FILTER_DEL, + .filter_id = cpu_to_le32(f->filter_id), + }, + }; + + return ionic_adminq_post_wait(lif, &ctx); +} + +int ionic_rx_filters_init(struct ionic_lif *lif) +{ + unsigned int i; + + spin_lock_init(&lif->rx_filters.lock); + + for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) { + INIT_HLIST_HEAD(&lif->rx_filters.by_hash[i]); + INIT_HLIST_HEAD(&lif->rx_filters.by_id[i]); + } + + return 0; +} + +void ionic_rx_filters_deinit(struct ionic_lif *lif) +{ + struct ionic_rx_filter *f; + struct hlist_head *head; + struct hlist_node *tmp; + unsigned int i; + + for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) { + head = &lif->rx_filters.by_id[i]; + hlist_for_each_entry_safe(f, tmp, head, by_id) + ionic_rx_filter_free(lif, f); + } +} + +int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index, + u32 hash, struct ionic_admin_ctx *ctx) +{ + struct device *dev = lif->ionic->dev; + struct ionic_rx_filter_add_cmd *ac; + struct ionic_rx_filter *f; + struct hlist_head *head; + unsigned int key; + + ac = &ctx->cmd.rx_filter_add; + + switch (le16_to_cpu(ac->match)) { + case IONIC_RX_FILTER_MATCH_VLAN: + key = le16_to_cpu(ac->vlan.vlan); + break; + case IONIC_RX_FILTER_MATCH_MAC: + key = *(u32 *)ac->mac.addr; + break; + case IONIC_RX_FILTER_MATCH_MAC_VLAN: + key = le16_to_cpu(ac->mac_vlan.vlan); + break; + default: + return -EINVAL; + } + + f = devm_kzalloc(dev, sizeof(*f), GFP_KERNEL); + if (!f) + return -ENOMEM; + + f->flow_id = flow_id; + f->filter_id = le32_to_cpu(ctx->comp.rx_filter_add.filter_id); + f->rxq_index = rxq_index; + memcpy(&f->cmd, ac, sizeof(f->cmd)); + + INIT_HLIST_NODE(&f->by_hash); + INIT_HLIST_NODE(&f->by_id); + + spin_lock_bh(&lif->rx_filters.lock); + + key = hash_32(key, IONIC_RX_FILTER_HASH_BITS); + head = &lif->rx_filters.by_hash[key]; + hlist_add_head(&f->by_hash, head); + + key = f->filter_id & IONIC_RX_FILTER_HLISTS_MASK; + head = &lif->rx_filters.by_id[key]; + 
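/* each filter is linked twice: by match-key hash above for datapath + * lookups, and by firmware-assigned filter_id here so that deletes + * can find it + */ +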
hlist_add_head(&f->by_id, head); + + spin_unlock_bh(&lif->rx_filters.lock); + + return 0; +} + +struct ionic_rx_filter *ionic_rx_filter_by_vlan(struct ionic_lif *lif, u16 vid) +{ + struct ionic_rx_filter *f; + struct hlist_head *head; + unsigned int key; + + key = hash_32(vid, IONIC_RX_FILTER_HASH_BITS); + head = &lif->rx_filters.by_hash[key]; + + hlist_for_each_entry(f, head, by_hash) { + if (le16_to_cpu(f->cmd.match) != IONIC_RX_FILTER_MATCH_VLAN) + continue; + if (le16_to_cpu(f->cmd.vlan.vlan) == vid) + return f; + } + + return NULL; +} + +struct ionic_rx_filter *ionic_rx_filter_by_addr(struct ionic_lif *lif, + const u8 *addr) +{ + struct ionic_rx_filter *f; + struct hlist_head *head; + unsigned int key; + + key = hash_32(*(u32 *)addr, IONIC_RX_FILTER_HASH_BITS); + head = &lif->rx_filters.by_hash[key]; + + hlist_for_each_entry(f, head, by_hash) { + if (le16_to_cpu(f->cmd.match) != IONIC_RX_FILTER_MATCH_MAC) + continue; + if (memcmp(addr, f->cmd.mac.addr, ETH_ALEN) == 0) + return f; + } + + return NULL; +} diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h new file mode 100644 index 000000000000..b6aec9c19918 --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#ifndef _IONIC_RX_FILTER_H_ +#define _IONIC_RX_FILTER_H_ + +#define IONIC_RXQ_INDEX_ANY (0xFFFF) +struct ionic_rx_filter { + u32 flow_id; + u32 filter_id; + u16 rxq_index; + struct ionic_rx_filter_add_cmd cmd; + struct hlist_node by_hash; + struct hlist_node by_id; +}; + +#define IONIC_RX_FILTER_HASH_BITS 10 +#define IONIC_RX_FILTER_HLISTS BIT(IONIC_RX_FILTER_HASH_BITS) +#define IONIC_RX_FILTER_HLISTS_MASK (IONIC_RX_FILTER_HLISTS - 1) +struct ionic_rx_filters { + spinlock_t lock; /* filter list lock */ + struct hlist_head by_hash[IONIC_RX_FILTER_HLISTS]; /* by skb hash */ + struct hlist_head by_id[IONIC_RX_FILTER_HLISTS]; /* by filter_id */ +}; + +void ionic_rx_filter_free(struct ionic_lif *lif, struct ionic_rx_filter *f); +int ionic_rx_filter_del(struct ionic_lif *lif, struct ionic_rx_filter *f); +int ionic_rx_filters_init(struct ionic_lif *lif); +void ionic_rx_filters_deinit(struct ionic_lif *lif); +int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index, + u32 hash, struct ionic_admin_ctx *ctx); +struct ionic_rx_filter *ionic_rx_filter_by_vlan(struct ionic_lif *lif, u16 vid); +struct ionic_rx_filter *ionic_rx_filter_by_addr(struct ionic_lif *lif, const u8 *addr); + +#endif /* _IONIC_RX_FILTER_H_ */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c new file mode 100644 index 000000000000..e2907884f843 --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c @@ -0,0 +1,310 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#include <linux/kernel.h> +#include <linux/mutex.h> +#include <linux/netdevice.h> + +#include "ionic.h" +#include "ionic_lif.h" +#include "ionic_stats.h" + +static const struct ionic_stat_desc ionic_lif_stats_desc[] = { + IONIC_LIF_STAT_DESC(tx_packets), + IONIC_LIF_STAT_DESC(tx_bytes), + IONIC_LIF_STAT_DESC(rx_packets), + IONIC_LIF_STAT_DESC(rx_bytes), + IONIC_LIF_STAT_DESC(tx_tso), + IONIC_LIF_STAT_DESC(tx_no_csum), + IONIC_LIF_STAT_DESC(tx_csum), + IONIC_LIF_STAT_DESC(rx_csum_none), + IONIC_LIF_STAT_DESC(rx_csum_complete), + IONIC_LIF_STAT_DESC(rx_csum_error), +}; 
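+ +/* Each descriptor pairs an ethtool string with an offsetof() into its + * stats struct, so the get_strings/get_values walkers below can use + * IONIC_READ_STAT64() generically instead of copying counters by hand. + */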
+ +static const struct ionic_stat_desc ionic_tx_stats_desc[] = { + IONIC_TX_STAT_DESC(pkts), + IONIC_TX_STAT_DESC(bytes), + IONIC_TX_STAT_DESC(clean), + IONIC_TX_STAT_DESC(dma_map_err), + IONIC_TX_STAT_DESC(linearize), + IONIC_TX_STAT_DESC(frags), +}; + +static const struct ionic_stat_desc ionic_rx_stats_desc[] = { + IONIC_RX_STAT_DESC(pkts), + IONIC_RX_STAT_DESC(bytes), + IONIC_RX_STAT_DESC(dma_map_err), + IONIC_RX_STAT_DESC(alloc_err), + IONIC_RX_STAT_DESC(csum_none), + IONIC_RX_STAT_DESC(csum_complete), + IONIC_RX_STAT_DESC(csum_error), +}; + +static const struct ionic_stat_desc ionic_txq_stats_desc[] = { + IONIC_TX_Q_STAT_DESC(stop), + IONIC_TX_Q_STAT_DESC(wake), + IONIC_TX_Q_STAT_DESC(drop), + IONIC_TX_Q_STAT_DESC(dbell_count), +}; + +static const struct ionic_stat_desc ionic_dbg_cq_stats_desc[] = { + IONIC_CQ_STAT_DESC(compl_count), +}; + +static const struct ionic_stat_desc ionic_dbg_intr_stats_desc[] = { + IONIC_INTR_STAT_DESC(rearm_count), +}; + +static const struct ionic_stat_desc ionic_dbg_napi_stats_desc[] = { + IONIC_NAPI_STAT_DESC(poll_count), +}; + +#define IONIC_NUM_LIF_STATS ARRAY_SIZE(ionic_lif_stats_desc) +#define IONIC_NUM_TX_STATS ARRAY_SIZE(ionic_tx_stats_desc) +#define IONIC_NUM_RX_STATS ARRAY_SIZE(ionic_rx_stats_desc) +#define IONIC_NUM_TX_Q_STATS ARRAY_SIZE(ionic_txq_stats_desc) +#define IONIC_NUM_DBG_CQ_STATS ARRAY_SIZE(ionic_dbg_cq_stats_desc) +#define IONIC_NUM_DBG_INTR_STATS ARRAY_SIZE(ionic_dbg_intr_stats_desc) +#define IONIC_NUM_DBG_NAPI_STATS ARRAY_SIZE(ionic_dbg_napi_stats_desc) + +#define MAX_Q(lif) ((lif)->netdev->real_num_tx_queues) + +static void ionic_get_lif_stats(struct ionic_lif *lif, + struct ionic_lif_sw_stats *stats) +{ + struct ionic_tx_stats *tstats; + struct ionic_rx_stats *rstats; + struct ionic_qcq *txqcq; + struct ionic_qcq *rxqcq; + int q_num; + + memset(stats, 0, sizeof(*stats)); + + for (q_num = 0; q_num < MAX_Q(lif); q_num++) { + txqcq = lif_to_txqcq(lif, q_num); + if (txqcq && txqcq->stats) { + tstats = &txqcq->stats->tx; + stats->tx_packets += tstats->pkts; + stats->tx_bytes += tstats->bytes; + stats->tx_tso += tstats->tso; + stats->tx_no_csum += tstats->no_csum; + stats->tx_csum += tstats->csum; + } + + rxqcq = lif_to_rxqcq(lif, q_num); + if (rxqcq && rxqcq->stats) { + rstats = &rxqcq->stats->rx; + stats->rx_packets += rstats->pkts; + stats->rx_bytes += rstats->bytes; + stats->rx_csum_none += rstats->csum_none; + stats->rx_csum_complete += rstats->csum_complete; + stats->rx_csum_error += rstats->csum_error; + } + } +} + +static u64 ionic_sw_stats_get_count(struct ionic_lif *lif) +{ + u64 total = 0; + + /* lif stats */ + total += IONIC_NUM_LIF_STATS; + + /* tx stats */ + total += MAX_Q(lif) * IONIC_NUM_TX_STATS; + + /* rx stats */ + total += MAX_Q(lif) * IONIC_NUM_RX_STATS; + + if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) { + /* tx debug stats */ + total += MAX_Q(lif) * (IONIC_NUM_DBG_CQ_STATS + + IONIC_NUM_TX_Q_STATS + + IONIC_NUM_DBG_INTR_STATS + + IONIC_MAX_NUM_SG_CNTR); + + /* rx debug stats */ + total += MAX_Q(lif) * (IONIC_NUM_DBG_CQ_STATS + + IONIC_NUM_DBG_INTR_STATS + + IONIC_NUM_DBG_NAPI_STATS + + IONIC_MAX_NUM_NAPI_CNTR); + } + + return total; +} + +static void ionic_sw_stats_get_strings(struct ionic_lif *lif, u8 **buf) +{ + int i, q_num; + + for (i = 0; i < IONIC_NUM_LIF_STATS; i++) { + snprintf(*buf, ETH_GSTRING_LEN, ionic_lif_stats_desc[i].name); + *buf += ETH_GSTRING_LEN; + } + for (q_num = 0; q_num < MAX_Q(lif); q_num++) { + for (i = 0; i < IONIC_NUM_TX_STATS; i++) { + snprintf(*buf, ETH_GSTRING_LEN, "tx_%d_%s", 
+ q_num, ionic_tx_stats_desc[i].name); + *buf += ETH_GSTRING_LEN; + } + + if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) { + for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) { + snprintf(*buf, ETH_GSTRING_LEN, + "txq_%d_%s", + q_num, + ionic_txq_stats_desc[i].name); + *buf += ETH_GSTRING_LEN; + } + for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) { + snprintf(*buf, ETH_GSTRING_LEN, + "txq_%d_cq_%s", + q_num, + ionic_dbg_cq_stats_desc[i].name); + *buf += ETH_GSTRING_LEN; + } + for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) { + snprintf(*buf, ETH_GSTRING_LEN, + "txq_%d_intr_%s", + q_num, + ionic_dbg_intr_stats_desc[i].name); + *buf += ETH_GSTRING_LEN; + } + for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++) { + snprintf(*buf, ETH_GSTRING_LEN, + "txq_%d_sg_cntr_%d", + q_num, i); + *buf += ETH_GSTRING_LEN; + } + } + } + for (q_num = 0; q_num < MAX_Q(lif); q_num++) { + for (i = 0; i < IONIC_NUM_RX_STATS; i++) { + snprintf(*buf, ETH_GSTRING_LEN, + "rx_%d_%s", + q_num, ionic_rx_stats_desc[i].name); + *buf += ETH_GSTRING_LEN; + } + + if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) { + for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) { + snprintf(*buf, ETH_GSTRING_LEN, + "rxq_%d_cq_%s", + q_num, + ionic_dbg_cq_stats_desc[i].name); + *buf += ETH_GSTRING_LEN; + } + for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) { + snprintf(*buf, ETH_GSTRING_LEN, + "rxq_%d_intr_%s", + q_num, + ionic_dbg_intr_stats_desc[i].name); + *buf += ETH_GSTRING_LEN; + } + for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++) { + snprintf(*buf, ETH_GSTRING_LEN, + "rxq_%d_napi_%s", + q_num, + ionic_dbg_napi_stats_desc[i].name); + *buf += ETH_GSTRING_LEN; + } + for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++) { + snprintf(*buf, ETH_GSTRING_LEN, + "rxq_%d_napi_work_done_%d", + q_num, i); + *buf += ETH_GSTRING_LEN; + } + } + } +} + +static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf) +{ + struct ionic_lif_sw_stats lif_stats; + struct ionic_qcq *txqcq, *rxqcq; + int i, q_num; + + ionic_get_lif_stats(lif, &lif_stats); + + for (i = 0; i < IONIC_NUM_LIF_STATS; i++) { + **buf = IONIC_READ_STAT64(&lif_stats, &ionic_lif_stats_desc[i]); + (*buf)++; + } + + for (q_num = 0; q_num < MAX_Q(lif); q_num++) { + txqcq = lif_to_txqcq(lif, q_num); + + for (i = 0; i < IONIC_NUM_TX_STATS; i++) { + **buf = IONIC_READ_STAT64(&txqcq->stats->tx, + &ionic_tx_stats_desc[i]); + (*buf)++; + } + + if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) { + for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) { + **buf = IONIC_READ_STAT64(&txqcq->q, + &ionic_txq_stats_desc[i]); + (*buf)++; + } + for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) { + **buf = IONIC_READ_STAT64(&txqcq->cq, + &ionic_dbg_cq_stats_desc[i]); + (*buf)++; + } + for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) { + **buf = IONIC_READ_STAT64(&txqcq->intr, + &ionic_dbg_intr_stats_desc[i]); + (*buf)++; + } + for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++) { + **buf = txqcq->stats->tx.sg_cntr[i]; + (*buf)++; + } + } + } + + for (q_num = 0; q_num < MAX_Q(lif); q_num++) { + rxqcq = lif_to_rxqcq(lif, q_num); + + for (i = 0; i < IONIC_NUM_RX_STATS; i++) { + **buf = IONIC_READ_STAT64(&rxqcq->stats->rx, + &ionic_rx_stats_desc[i]); + (*buf)++; + } + + if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) { + for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) { + **buf = IONIC_READ_STAT64(&rxqcq->cq, + &ionic_dbg_cq_stats_desc[i]); + (*buf)++; + } + for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) { + **buf = IONIC_READ_STAT64(&rxqcq->intr, + &ionic_dbg_intr_stats_desc[i]); + (*buf)++; + } + for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++) 
{ + **buf = IONIC_READ_STAT64(&rxqcq->napi_stats, + &ionic_dbg_napi_stats_desc[i]); + (*buf)++; + } + for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++) { + **buf = rxqcq->napi_stats.work_done_cntr[i]; + (*buf)++; + } + } + } +} + +const struct ionic_stats_group_intf ionic_stats_groups[] = { + /* SW Stats group */ + { + .get_strings = ionic_sw_stats_get_strings, + .get_values = ionic_sw_stats_get_values, + .get_count = ionic_sw_stats_get_count, + }, + /* Add more stat groups here */ +}; + +const int ionic_num_stats_grps = ARRAY_SIZE(ionic_stats_groups); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.h b/drivers/net/ethernet/pensando/ionic/ionic_stats.h new file mode 100644 index 000000000000..d2c1122a2c6e --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#ifndef _IONIC_STATS_H_ +#define _IONIC_STATS_H_ + +#define IONIC_STAT_TO_OFFSET(type, stat_name) (offsetof(type, stat_name)) + +#define IONIC_STAT_DESC(type, stat_name) { \ + .name = #stat_name, \ + .offset = IONIC_STAT_TO_OFFSET(type, stat_name) \ +} + +#define IONIC_LIF_STAT_DESC(stat_name) \ + IONIC_STAT_DESC(struct ionic_lif_sw_stats, stat_name) + +#define IONIC_TX_STAT_DESC(stat_name) \ + IONIC_STAT_DESC(struct ionic_tx_stats, stat_name) + +#define IONIC_RX_STAT_DESC(stat_name) \ + IONIC_STAT_DESC(struct ionic_rx_stats, stat_name) + +#define IONIC_TX_Q_STAT_DESC(stat_name) \ + IONIC_STAT_DESC(struct ionic_queue, stat_name) + +#define IONIC_CQ_STAT_DESC(stat_name) \ + IONIC_STAT_DESC(struct ionic_cq, stat_name) + +#define IONIC_INTR_STAT_DESC(stat_name) \ + IONIC_STAT_DESC(struct ionic_intr_info, stat_name) + +#define IONIC_NAPI_STAT_DESC(stat_name) \ + IONIC_STAT_DESC(struct ionic_napi_stats, stat_name) + +/* Interface structure for a particalar stats group */ +struct ionic_stats_group_intf { + void (*get_strings)(struct ionic_lif *lif, u8 **buf); + void (*get_values)(struct ionic_lif *lif, u64 **buf); + u64 (*get_count)(struct ionic_lif *lif); +}; + +extern const struct ionic_stats_group_intf ionic_stats_groups[]; +extern const int ionic_num_stats_grps; + +#define IONIC_READ_STAT64(base_ptr, desc_ptr) \ + (*((u64 *)(((u8 *)(base_ptr)) + (desc_ptr)->offset))) + +struct ionic_stat_desc { + char name[ETH_GSTRING_LEN]; + u64 offset; +}; + +#endif /* _IONIC_STATS_H_ */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c new file mode 100644 index 000000000000..ab6663d94f42 --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c @@ -0,0 +1,925 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/if_vlan.h> +#include <net/ip6_checksum.h> + +#include "ionic.h" +#include "ionic_lif.h" +#include "ionic_txrx.h" + +static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info, + struct ionic_cq_info *cq_info, void *cb_arg); + +static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell, + ionic_desc_cb cb_func, void *cb_arg) +{ + DEBUG_STATS_TXQ_POST(q_to_qcq(q), q->head->desc, ring_dbell); + + ionic_q_post(q, ring_dbell, cb_func, cb_arg); +} + +static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell, + ionic_desc_cb cb_func, void *cb_arg) +{ + ionic_q_post(q, ring_dbell, cb_func, cb_arg); + + DEBUG_STATS_RX_BUFF_CNT(q_to_qcq(q)); +} + +static inline struct netdev_queue 
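The IONIC_STAT_DESC/IONIC_READ_STAT64 pair in ionic_stats.h reduces each stats group to a name-plus-offsetof table that a single generic loop can walk, which is what keeps the string and value getters above symmetrical. The same technique in standalone form (invented stat struct):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct tx_stats {
        uint64_t pkts;
        uint64_t bytes;
    };

    struct stat_desc {
        const char *name;
        size_t offset;
    };

    #define STAT_DESC(type, field) { #field, offsetof(type, field) }

    static const struct stat_desc tx_descs[] = {
        STAT_DESC(struct tx_stats, pkts),
        STAT_DESC(struct tx_stats, bytes),
    };

    /* Same trick as IONIC_READ_STAT64: a byte offset into the base pointer. */
    static uint64_t read_stat(const void *base, const struct stat_desc *d)
    {
        return *(const uint64_t *)((const uint8_t *)base + d->offset);
    }

    int main(void)
    {
        struct tx_stats s = { .pkts = 42, .bytes = 4096 };

        for (size_t i = 0; i < sizeof(tx_descs) / sizeof(tx_descs[0]); i++)
            printf("%s = %llu\n", tx_descs[i].name,
                   (unsigned long long)read_stat(&s, &tx_descs[i]));
        return 0;
    }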
*q_to_ndq(struct ionic_queue *q) +{ + return netdev_get_tx_queue(q->lif->netdev, q->index); +} + +static void ionic_rx_recycle(struct ionic_queue *q, struct ionic_desc_info *desc_info, + struct sk_buff *skb) +{ + struct ionic_rxq_desc *old = desc_info->desc; + struct ionic_rxq_desc *new = q->head->desc; + + new->addr = old->addr; + new->len = old->len; + + ionic_rxq_post(q, true, ionic_rx_clean, skb); +} + +static bool ionic_rx_copybreak(struct ionic_queue *q, struct ionic_desc_info *desc_info, + struct ionic_cq_info *cq_info, struct sk_buff **skb) +{ + struct ionic_rxq_comp *comp = cq_info->cq_desc; + struct ionic_rxq_desc *desc = desc_info->desc; + struct net_device *netdev = q->lif->netdev; + struct device *dev = q->lif->ionic->dev; + struct sk_buff *new_skb; + u16 clen, dlen; + + clen = le16_to_cpu(comp->len); + dlen = le16_to_cpu(desc->len); + if (clen > q->lif->rx_copybreak) { + dma_unmap_single(dev, (dma_addr_t)le64_to_cpu(desc->addr), + dlen, DMA_FROM_DEVICE); + return false; + } + + new_skb = netdev_alloc_skb_ip_align(netdev, clen); + if (!new_skb) { + dma_unmap_single(dev, (dma_addr_t)le64_to_cpu(desc->addr), + dlen, DMA_FROM_DEVICE); + return false; + } + + dma_sync_single_for_cpu(dev, (dma_addr_t)le64_to_cpu(desc->addr), + clen, DMA_FROM_DEVICE); + + memcpy(new_skb->data, (*skb)->data, clen); + + ionic_rx_recycle(q, desc_info, *skb); + *skb = new_skb; + + return true; +} + +static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info, + struct ionic_cq_info *cq_info, void *cb_arg) +{ + struct ionic_rxq_comp *comp = cq_info->cq_desc; + struct ionic_qcq *qcq = q_to_qcq(q); + struct sk_buff *skb = cb_arg; + struct ionic_rx_stats *stats; + struct net_device *netdev; + + stats = q_to_rx_stats(q); + netdev = q->lif->netdev; + + if (comp->status) { + ionic_rx_recycle(q, desc_info, skb); + return; + } + + if (unlikely(test_bit(IONIC_LIF_QUEUE_RESET, q->lif->state))) { + /* no packet processing while resetting */ + ionic_rx_recycle(q, desc_info, skb); + return; + } + + stats->pkts++; + stats->bytes += le16_to_cpu(comp->len); + + ionic_rx_copybreak(q, desc_info, cq_info, &skb); + + skb_put(skb, le16_to_cpu(comp->len)); + skb->protocol = eth_type_trans(skb, netdev); + + skb_record_rx_queue(skb, q->index); + + if (netdev->features & NETIF_F_RXHASH) { + switch (comp->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) { + case IONIC_PKT_TYPE_IPV4: + case IONIC_PKT_TYPE_IPV6: + skb_set_hash(skb, le32_to_cpu(comp->rss_hash), + PKT_HASH_TYPE_L3); + break; + case IONIC_PKT_TYPE_IPV4_TCP: + case IONIC_PKT_TYPE_IPV6_TCP: + case IONIC_PKT_TYPE_IPV4_UDP: + case IONIC_PKT_TYPE_IPV6_UDP: + skb_set_hash(skb, le32_to_cpu(comp->rss_hash), + PKT_HASH_TYPE_L4); + break; + } + } + + if (netdev->features & NETIF_F_RXCSUM) { + if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) { + skb->ip_summed = CHECKSUM_COMPLETE; + skb->csum = (__wsum)le16_to_cpu(comp->csum); + stats->csum_complete++; + } + } else { + stats->csum_none++; + } + + if ((comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_BAD) || + (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_BAD) || + (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)) + stats->csum_error++; + + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { + if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + le16_to_cpu(comp->vlan_tci)); + } + + napi_gro_receive(&qcq->napi, skb); +} + +static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info) +{ + struct ionic_rxq_comp *comp = cq_info->cq_desc; + 
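ionic_rx_copybreak above is the usual receive copybreak trade-off: a frame no longer than rx_copybreak is copied into a freshly allocated, right-sized skb and the original large buffer is recycled onto the ring untouched, while a larger frame surrenders the ring buffer itself. A userspace model of just that decision (threshold and buffer sizes invented):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define COPYBREAK 256   /* stand-in for lif->rx_copybreak */

    struct buf { size_t len; unsigned char *data; };

    /* Returns the buffer to hand up the stack; *recycled tells the
     * caller whether ring_buf may be reposted as-is.
     */
    static struct buf *deliver(struct buf *ring_buf, size_t frame_len,
                               int *recycled)
    {
        if (frame_len <= COPYBREAK) {
            struct buf *small = malloc(sizeof(*small));

            small->data = malloc(frame_len);
            small->len = frame_len;
            memcpy(small->data, ring_buf->data, frame_len);
            *recycled = 1;  /* ring_buf stays on the rx ring */
            return small;
        }
        *recycled = 0;      /* ring_buf leaves the ring */
        ring_buf->len = frame_len;
        return ring_buf;
    }

    int main(void)
    {
        struct buf ring = { .len = 0, .data = malloc(2048) };
        int recycled;
        struct buf *up = deliver(&ring, 128, &recycled);

        printf("len=%zu recycled=%d\n", up->len, recycled);
        return 0;
    }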
struct ionic_queue *q = cq->bound_q; + struct ionic_desc_info *desc_info; + + if (!color_match(comp->pkt_type_color, cq->done_color)) + return false; + + /* check for empty queue */ + if (q->tail->index == q->head->index) + return false; + + desc_info = q->tail; + if (desc_info->index != le16_to_cpu(comp->comp_index)) + return false; + + q->tail = desc_info->next; + + /* clean the related q entry, only one per qc completion */ + ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg); + + desc_info->cb = NULL; + desc_info->cb_arg = NULL; + + return true; +} + +static u32 ionic_rx_walk_cq(struct ionic_cq *rxcq, u32 limit) +{ + u32 work_done = 0; + + while (ionic_rx_service(rxcq, rxcq->tail)) { + if (rxcq->tail->last) + rxcq->done_color = !rxcq->done_color; + rxcq->tail = rxcq->tail->next; + DEBUG_STATS_CQE_CNT(rxcq); + + if (++work_done >= limit) + break; + } + + return work_done; +} + +void ionic_rx_flush(struct ionic_cq *cq) +{ + struct ionic_dev *idev = &cq->lif->ionic->idev; + u32 work_done; + + work_done = ionic_rx_walk_cq(cq, cq->num_descs); + + if (work_done) + ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index, + work_done, IONIC_INTR_CRED_RESET_COALESCE); +} + +static struct sk_buff *ionic_rx_skb_alloc(struct ionic_queue *q, unsigned int len, + dma_addr_t *dma_addr) +{ + struct ionic_lif *lif = q->lif; + struct ionic_rx_stats *stats; + struct net_device *netdev; + struct sk_buff *skb; + struct device *dev; + + netdev = lif->netdev; + dev = lif->ionic->dev; + stats = q_to_rx_stats(q); + skb = netdev_alloc_skb_ip_align(netdev, len); + if (!skb) { + net_warn_ratelimited("%s: SKB alloc failed on %s!\n", + netdev->name, q->name); + stats->alloc_err++; + return NULL; + } + + *dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, *dma_addr)) { + dev_kfree_skb(skb); + net_warn_ratelimited("%s: DMA single map failed on %s!\n", + netdev->name, q->name); + stats->dma_map_err++; + return NULL; + } + + return skb; +} + +#define IONIC_RX_RING_DOORBELL_STRIDE ((1 << 2) - 1) + +void ionic_rx_fill(struct ionic_queue *q) +{ + struct net_device *netdev = q->lif->netdev; + struct ionic_rxq_desc *desc; + struct sk_buff *skb; + dma_addr_t dma_addr; + bool ring_doorbell; + unsigned int len; + unsigned int i; + + len = netdev->mtu + ETH_HLEN; + + for (i = ionic_q_space_avail(q); i; i--) { + skb = ionic_rx_skb_alloc(q, len, &dma_addr); + if (!skb) + return; + + desc = q->head->desc; + desc->addr = cpu_to_le64(dma_addr); + desc->len = cpu_to_le16(len); + desc->opcode = IONIC_RXQ_DESC_OPCODE_SIMPLE; + + ring_doorbell = ((q->head->index + 1) & + IONIC_RX_RING_DOORBELL_STRIDE) == 0; + + ionic_rxq_post(q, ring_doorbell, ionic_rx_clean, skb); + } +} + +static void ionic_rx_fill_cb(void *arg) +{ + ionic_rx_fill(arg); +} + +void ionic_rx_empty(struct ionic_queue *q) +{ + struct device *dev = q->lif->ionic->dev; + struct ionic_desc_info *cur; + struct ionic_rxq_desc *desc; + + for (cur = q->tail; cur != q->head; cur = cur->next) { + desc = cur->desc; + dma_unmap_single(dev, le64_to_cpu(desc->addr), + le16_to_cpu(desc->len), DMA_FROM_DEVICE); + dev_kfree_skb(cur->cb_arg); + cur->cb_arg = NULL; + } +} + +int ionic_rx_napi(struct napi_struct *napi, int budget) +{ + struct ionic_qcq *qcq = napi_to_qcq(napi); + struct ionic_cq *rxcq = napi_to_cq(napi); + unsigned int qi = rxcq->bound_q->index; + struct ionic_dev *idev; + struct ionic_lif *lif; + struct ionic_cq *txcq; + u32 work_done = 0; + u32 flags = 0; + + lif = rxcq->bound_q->lif; + idev = &lif->ionic->idev; + txcq = 
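ionic_rx_service() and ionic_rx_walk_cq() above consume the completion ring with a color (generation) bit instead of a shared read index: the device stamps each completion with the current generation's color, the driver accepts entries matching its expected done_color, and flips that expectation whenever the tail passes the ring's last slot. A runnable model of the handshake:

    #include <stdbool.h>
    #include <stdio.h>

    #define RING 4

    struct comp { char data; bool color; };

    static struct comp ring[RING];
    static bool hw_color = true;
    static unsigned int hw;

    static void hw_post(char c)
    {
        ring[hw].data = c;
        ring[hw].color = hw_color;
        if (++hw == RING) { hw = 0; hw_color = !hw_color; }
    }

    int main(void)
    {
        bool done_color = true;
        unsigned int tail = 0;
        const char *bursts[] = { "ABC", "DEF" };

        for (int b = 0; b < 2; b++) {
            for (const char *p = bursts[b]; *p; p++)
                hw_post(*p);
            /* Consume only entries whose color matches expectation. */
            while (ring[tail].color == done_color) {
                printf("consumed %c\n", ring[tail].data);
                if (tail == RING - 1)   /* last slot: flip, like ->last */
                    done_color = !done_color;
                tail = (tail + 1) % RING;
            }
        }
        return 0;   /* prints A..F in order across the wrap */
    }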
&lif->txqcqs[qi].qcq->cq; + + ionic_tx_flush(txcq); + + work_done = ionic_rx_walk_cq(rxcq, budget); + + if (work_done) + ionic_rx_fill_cb(rxcq->bound_q); + + if (work_done < budget && napi_complete_done(napi, work_done)) { + flags |= IONIC_INTR_CRED_UNMASK; + DEBUG_STATS_INTR_REARM(rxcq->bound_intr); + } + + if (work_done || flags) { + flags |= IONIC_INTR_CRED_RESET_COALESCE; + ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index, + work_done, flags); + } + + DEBUG_STATS_NAPI_POLL(qcq, work_done); + + return work_done; +} + +static dma_addr_t ionic_tx_map_single(struct ionic_queue *q, void *data, size_t len) +{ + struct ionic_tx_stats *stats = q_to_tx_stats(q); + struct device *dev = q->lif->ionic->dev; + dma_addr_t dma_addr; + + dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma_addr)) { + net_warn_ratelimited("%s: DMA single map failed on %s!\n", + q->lif->netdev->name, q->name); + stats->dma_map_err++; + return 0; + } + return dma_addr; +} + +static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q, const skb_frag_t *frag, + size_t offset, size_t len) +{ + struct ionic_tx_stats *stats = q_to_tx_stats(q); + struct device *dev = q->lif->ionic->dev; + dma_addr_t dma_addr; + + dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma_addr)) { + net_warn_ratelimited("%s: DMA frag map failed on %s!\n", + q->lif->netdev->name, q->name); + stats->dma_map_err++; + } + return dma_addr; +} + +static void ionic_tx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info, + struct ionic_cq_info *cq_info, void *cb_arg) +{ + struct ionic_txq_sg_desc *sg_desc = desc_info->sg_desc; + struct ionic_txq_sg_elem *elem = sg_desc->elems; + struct ionic_tx_stats *stats = q_to_tx_stats(q); + struct ionic_txq_desc *desc = desc_info->desc; + struct device *dev = q->lif->ionic->dev; + u8 opcode, flags, nsge; + u16 queue_index; + unsigned int i; + u64 addr; + + decode_txq_desc_cmd(le64_to_cpu(desc->cmd), + &opcode, &flags, &nsge, &addr); + + /* use unmap_single only if either this is not TSO, + * or this is first descriptor of a TSO + */ + if (opcode != IONIC_TXQ_DESC_OPCODE_TSO || + flags & IONIC_TXQ_DESC_FLAG_TSO_SOT) + dma_unmap_single(dev, (dma_addr_t)addr, + le16_to_cpu(desc->len), DMA_TO_DEVICE); + else + dma_unmap_page(dev, (dma_addr_t)addr, + le16_to_cpu(desc->len), DMA_TO_DEVICE); + + for (i = 0; i < nsge; i++, elem++) + dma_unmap_page(dev, (dma_addr_t)le64_to_cpu(elem->addr), + le16_to_cpu(elem->len), DMA_TO_DEVICE); + + if (cb_arg) { + struct sk_buff *skb = cb_arg; + u32 len = skb->len; + + queue_index = skb_get_queue_mapping(skb); + if (unlikely(__netif_subqueue_stopped(q->lif->netdev, + queue_index))) { + netif_wake_subqueue(q->lif->netdev, queue_index); + q->wake++; + } + dev_kfree_skb_any(skb); + stats->clean++; + netdev_tx_completed_queue(q_to_ndq(q), 1, len); + } +} + +void ionic_tx_flush(struct ionic_cq *cq) +{ + struct ionic_txq_comp *comp = cq->tail->cq_desc; + struct ionic_dev *idev = &cq->lif->ionic->idev; + struct ionic_queue *q = cq->bound_q; + struct ionic_desc_info *desc_info; + unsigned int work_done = 0; + + /* walk the completed cq entries */ + while (work_done < cq->num_descs && + color_match(comp->color, cq->done_color)) { + + /* clean the related q entries, there could be + * several q entries completed for each cq completion + */ + do { + desc_info = q->tail; + q->tail = desc_info->next; + ionic_tx_clean(q, desc_info, cq->tail, + desc_info->cb_arg); + desc_info->cb = NULL; + 
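A detail worth noting in ionic_tx_flush: one completion can retire a whole batch of transmit descriptors, so the inner do/while advances the queue tail until it has cleaned the entry named by the completion's comp_index. Reduced to plain ring indices (values invented):

    #include <stdio.h>

    #define NDESC 8

    int main(void)
    {
        unsigned int tail = 2;          /* oldest un-retired descriptor */
        unsigned int comp_index = 5;    /* carried by the cq entry */
        unsigned int idx;

        /* Several q entries retired for one cq completion. */
        do {
            idx = tail;
            printf("clean desc %u\n", idx);
            tail = (tail + 1) % NDESC;
        } while (idx != comp_index);

        printf("new tail %u\n", tail);  /* 6 */
        return 0;
    }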
desc_info->cb_arg = NULL; + } while (desc_info->index != le16_to_cpu(comp->comp_index)); + + if (cq->tail->last) + cq->done_color = !cq->done_color; + + cq->tail = cq->tail->next; + comp = cq->tail->cq_desc; + DEBUG_STATS_CQE_CNT(cq); + + work_done++; + } + + if (work_done) + ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index, + work_done, 0); +} + +static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb) +{ + int err; + + err = skb_cow_head(skb, 0); + if (err) + return err; + + if (skb->protocol == cpu_to_be16(ETH_P_IP)) { + inner_ip_hdr(skb)->check = 0; + inner_tcp_hdr(skb)->check = + ~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr, + inner_ip_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + } else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) { + inner_tcp_hdr(skb)->check = + ~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr, + &inner_ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + } + + return 0; +} + +static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb) +{ + int err; + + err = skb_cow_head(skb, 0); + if (err) + return err; + + if (skb->protocol == cpu_to_be16(ETH_P_IP)) { + ip_hdr(skb)->check = 0; + tcp_hdr(skb)->check = + ~csum_tcpudp_magic(ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + } else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) { + tcp_hdr(skb)->check = + ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + } + + return 0; +} + +static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc, + struct sk_buff *skb, + dma_addr_t addr, u8 nsge, u16 len, + unsigned int hdrlen, unsigned int mss, + bool outer_csum, + u16 vlan_tci, bool has_vlan, + bool start, bool done) +{ + u8 flags = 0; + u64 cmd; + + flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0; + flags |= outer_csum ? IONIC_TXQ_DESC_FLAG_ENCAP : 0; + flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0; + flags |= done ? 
IONIC_TXQ_DESC_FLAG_TSO_EOT : 0; + + cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO, flags, nsge, addr); + desc->cmd = cpu_to_le64(cmd); + desc->len = cpu_to_le16(len); + desc->vlan_tci = cpu_to_le16(vlan_tci); + desc->hdr_len = cpu_to_le16(hdrlen); + desc->mss = cpu_to_le16(mss); + + if (done) { + skb_tx_timestamp(skb); + netdev_tx_sent_queue(q_to_ndq(q), skb->len); + ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb); + } else { + ionic_txq_post(q, false, ionic_tx_clean, NULL); + } +} + +static struct ionic_txq_desc *ionic_tx_tso_next(struct ionic_queue *q, + struct ionic_txq_sg_elem **elem) +{ + struct ionic_txq_sg_desc *sg_desc = q->head->sg_desc; + struct ionic_txq_desc *desc = q->head->desc; + + *elem = sg_desc->elems; + return desc; +} + +static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb) +{ + struct ionic_tx_stats *stats = q_to_tx_stats(q); + struct ionic_desc_info *abort = q->head; + struct device *dev = q->lif->ionic->dev; + struct ionic_desc_info *rewind = abort; + struct ionic_txq_sg_elem *elem; + struct ionic_txq_desc *desc; + unsigned int frag_left = 0; + unsigned int offset = 0; + unsigned int len_left; + dma_addr_t desc_addr; + unsigned int hdrlen; + unsigned int nfrags; + unsigned int seglen; + u64 total_bytes = 0; + u64 total_pkts = 0; + unsigned int left; + unsigned int len; + unsigned int mss; + skb_frag_t *frag; + bool start, done; + bool outer_csum; + bool has_vlan; + u16 desc_len; + u8 desc_nsge; + u16 vlan_tci; + bool encap; + int err; + + mss = skb_shinfo(skb)->gso_size; + nfrags = skb_shinfo(skb)->nr_frags; + len_left = skb->len - skb_headlen(skb); + outer_csum = (skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM) || + (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM); + has_vlan = !!skb_vlan_tag_present(skb); + vlan_tci = skb_vlan_tag_get(skb); + encap = skb->encapsulation; + + /* Preload inner-most TCP csum field with IP pseudo hdr + * calculated with IP length set to zero. HW will later + * add in length to each TCP segment resulting from the TSO. + */ + + if (encap) + err = ionic_tx_tcp_inner_pseudo_csum(skb); + else + err = ionic_tx_tcp_pseudo_csum(skb); + if (err) + return err; + + if (encap) + hdrlen = skb_inner_transport_header(skb) - skb->data + + inner_tcp_hdrlen(skb); + else + hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb); + + seglen = hdrlen + mss; + left = skb_headlen(skb); + + desc = ionic_tx_tso_next(q, &elem); + start = true; + + /* Chop skb->data up into desc segments */ + + while (left > 0) { + len = min(seglen, left); + frag_left = seglen - len; + desc_addr = ionic_tx_map_single(q, skb->data + offset, len); + if (dma_mapping_error(dev, desc_addr)) + goto err_out_abort; + desc_len = len; + desc_nsge = 0; + left -= len; + offset += len; + if (nfrags > 0 && frag_left > 0) + continue; + done = (nfrags == 0 && left == 0); + ionic_tx_tso_post(q, desc, skb, + desc_addr, desc_nsge, desc_len, + hdrlen, mss, + outer_csum, + vlan_tci, has_vlan, + start, done); + total_pkts++; + total_bytes += start ? 
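The byte accounting in this function follows from how TSO replicates headers: the first segment's descriptors already cover the header bytes present in the linear area, while every later segment is charged len + hdrlen because the device regenerates the header for it. A worked example with assumed frame sizes:

    #include <stdio.h>

    int main(void)
    {
        unsigned int hdrlen = 66;   /* Eth 14 + IPv4 20 + TCP w/ options 32 */
        unsigned int mss = 1448;
        unsigned int payload = 4000;            /* assumed TCP payload */
        unsigned int segs = (payload + mss - 1) / mss;  /* 3 */

        /* First segment charged len (header included in the data),
         * later segments len + hdrlen, matching total_bytes above.
         */
        unsigned int wire = payload + segs * hdrlen;

        printf("%u segments, %u bytes on the wire\n", segs, wire);
        return 0;   /* 3 segments, 4198 bytes */
    }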
len : len + hdrlen; + desc = ionic_tx_tso_next(q, &elem); + start = false; + seglen = mss; + } + + /* Chop skb frags into desc segments */ + + for (frag = skb_shinfo(skb)->frags; len_left; frag++) { + offset = 0; + left = skb_frag_size(frag); + len_left -= left; + nfrags--; + stats->frags++; + + while (left > 0) { + if (frag_left > 0) { + len = min(frag_left, left); + frag_left -= len; + elem->addr = + cpu_to_le64(ionic_tx_map_frag(q, frag, + offset, len)); + if (dma_mapping_error(dev, elem->addr)) + goto err_out_abort; + elem->len = cpu_to_le16(len); + elem++; + desc_nsge++; + left -= len; + offset += len; + if (nfrags > 0 && frag_left > 0) + continue; + done = (nfrags == 0 && left == 0); + ionic_tx_tso_post(q, desc, skb, desc_addr, + desc_nsge, desc_len, + hdrlen, mss, outer_csum, + vlan_tci, has_vlan, + start, done); + total_pkts++; + total_bytes += start ? len : len + hdrlen; + desc = ionic_tx_tso_next(q, &elem); + start = false; + } else { + len = min(mss, left); + frag_left = mss - len; + desc_addr = ionic_tx_map_frag(q, frag, + offset, len); + if (dma_mapping_error(dev, desc_addr)) + goto err_out_abort; + desc_len = len; + desc_nsge = 0; + left -= len; + offset += len; + if (nfrags > 0 && frag_left > 0) + continue; + done = (nfrags == 0 && left == 0); + ionic_tx_tso_post(q, desc, skb, desc_addr, + desc_nsge, desc_len, + hdrlen, mss, outer_csum, + vlan_tci, has_vlan, + start, done); + total_pkts++; + total_bytes += start ? len : len + hdrlen; + desc = ionic_tx_tso_next(q, &elem); + start = false; + } + } + } + + stats->pkts += total_pkts; + stats->bytes += total_bytes; + stats->tso++; + + return 0; + +err_out_abort: + while (rewind->desc != q->head->desc) { + ionic_tx_clean(q, rewind, NULL, NULL); + rewind = rewind->next; + } + q->head = abort; + + return -ENOMEM; +} + +static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb) +{ + struct ionic_tx_stats *stats = q_to_tx_stats(q); + struct ionic_txq_desc *desc = q->head->desc; + struct device *dev = q->lif->ionic->dev; + dma_addr_t dma_addr; + bool has_vlan; + u8 flags = 0; + bool encap; + u64 cmd; + + has_vlan = !!skb_vlan_tag_present(skb); + encap = skb->encapsulation; + + dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb)); + if (dma_mapping_error(dev, dma_addr)) + return -ENOMEM; + + flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0; + flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0; + + cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL, + flags, skb_shinfo(skb)->nr_frags, dma_addr); + desc->cmd = cpu_to_le64(cmd); + desc->len = cpu_to_le16(skb_headlen(skb)); + desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb)); + desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb)); + desc->csum_offset = cpu_to_le16(skb->csum_offset); + + if (skb->csum_not_inet) + stats->crc32_csum++; + else + stats->csum++; + + return 0; +} + +static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb) +{ + struct ionic_tx_stats *stats = q_to_tx_stats(q); + struct ionic_txq_desc *desc = q->head->desc; + struct device *dev = q->lif->ionic->dev; + dma_addr_t dma_addr; + bool has_vlan; + u8 flags = 0; + bool encap; + u64 cmd; + + has_vlan = !!skb_vlan_tag_present(skb); + encap = skb->encapsulation; + + dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb)); + if (dma_mapping_error(dev, dma_addr)) + return -ENOMEM; + + flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0; + flags |= encap ? 
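ionic_tx_calc_csum above forwards the two cursors that CHECKSUM_PARTIAL hands every driver: csum_start, where the hardware begins summing, and csum_offset, where the 16-bit result is stored relative to that start. For a plain IPv4/TCP frame those land as follows (fixed example offsets, no IP options):

    #include <stdio.h>

    int main(void)
    {
        /* 14-byte Ethernet header + 20-byte IPv4 header. */
        unsigned int csum_start = 14 + 20;  /* transport header start */
        unsigned int csum_offset = 16;      /* TCP checksum field offset */

        /* Hardware sums from csum_start to the end of the frame and
         * writes the result at csum_start + csum_offset.
         */
        printf("sum from byte %u, store at byte %u\n",
               csum_start, csum_start + csum_offset);
        return 0;
    }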
IONIC_TXQ_DESC_FLAG_ENCAP : 0; + + cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE, + flags, skb_shinfo(skb)->nr_frags, dma_addr); + desc->cmd = cpu_to_le64(cmd); + desc->len = cpu_to_le16(skb_headlen(skb)); + desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb)); + + stats->no_csum++; + + return 0; +} + +static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb) +{ + struct ionic_txq_sg_desc *sg_desc = q->head->sg_desc; + unsigned int len_left = skb->len - skb_headlen(skb); + struct ionic_txq_sg_elem *elem = sg_desc->elems; + struct ionic_tx_stats *stats = q_to_tx_stats(q); + struct device *dev = q->lif->ionic->dev; + dma_addr_t dma_addr; + skb_frag_t *frag; + u16 len; + + for (frag = skb_shinfo(skb)->frags; len_left; frag++, elem++) { + len = skb_frag_size(frag); + elem->len = cpu_to_le16(len); + dma_addr = ionic_tx_map_frag(q, frag, 0, len); + if (dma_mapping_error(dev, dma_addr)) + return -ENOMEM; + elem->addr = cpu_to_le64(dma_addr); + len_left -= len; + stats->frags++; + } + + return 0; +} + +static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb) +{ + struct ionic_tx_stats *stats = q_to_tx_stats(q); + int err; + + /* set up the initial descriptor */ + if (skb->ip_summed == CHECKSUM_PARTIAL) + err = ionic_tx_calc_csum(q, skb); + else + err = ionic_tx_calc_no_csum(q, skb); + if (err) + return err; + + /* add frags */ + err = ionic_tx_skb_frags(q, skb); + if (err) + return err; + + skb_tx_timestamp(skb); + stats->pkts++; + stats->bytes += skb->len; + + netdev_tx_sent_queue(q_to_ndq(q), skb->len); + ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb); + + return 0; +} + +static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb) +{ + struct ionic_tx_stats *stats = q_to_tx_stats(q); + int err; + + /* If TSO, need roundup(skb->len/mss) descs */ + if (skb_is_gso(skb)) + return (skb->len / skb_shinfo(skb)->gso_size) + 1; + + /* If non-TSO, just need 1 desc and nr_frags sg elems */ + if (skb_shinfo(skb)->nr_frags <= IONIC_TX_MAX_SG_ELEMS) + return 1; + + /* Too many frags, so linearize */ + err = skb_linearize(skb); + if (err) + return err; + + stats->linearize++; + + /* Need 1 desc and zero sg elems */ + return 1; +} + +static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs) +{ + int stopped = 0; + + if (unlikely(!ionic_q_has_space(q, ndescs))) { + netif_stop_subqueue(q->lif->netdev, q->index); + q->stop++; + stopped = 1; + + /* Might race with ionic_tx_clean, check again */ + smp_rmb(); + if (ionic_q_has_space(q, ndescs)) { + netif_wake_subqueue(q->lif->netdev, q->index); + stopped = 0; + } + } + + return stopped; +} + +netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + u16 queue_index = skb_get_queue_mapping(skb); + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic_queue *q; + int ndescs; + int err; + + if (unlikely(!test_bit(IONIC_LIF_UP, lif->state))) { + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + + if (unlikely(!lif_to_txqcq(lif, queue_index))) + queue_index = 0; + q = lif_to_txq(lif, queue_index); + + ndescs = ionic_tx_descs_needed(q, skb); + if (ndescs < 0) + goto err_out_drop; + + if (unlikely(ionic_maybe_stop_tx(q, ndescs))) + return NETDEV_TX_BUSY; + + if (skb_is_gso(skb)) + err = ionic_tx_tso(q, skb); + else + err = ionic_tx(q, skb); + + if (err) + goto err_out_drop; + + /* Stop the queue if there aren't descriptors for the next packet. 
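ionic_maybe_stop_tx above is the standard lockless stop/wake handshake: stop the subqueue first, then re-check for room, because the completion path may free descriptors between the first check and the stop; the smp_rmb() orders the stopped state against the re-read. The pattern in miniature, using C11 atomics in place of the kernel primitives (a sketch, not the driver's code):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct txq {
        atomic_uint head, tail;     /* producer / consumer indices */
        atomic_bool stopped;
        unsigned int ndesc;
    };

    static unsigned int space(struct txq *q)
    {
        return q->ndesc - (atomic_load(&q->head) - atomic_load(&q->tail));
    }

    /* Returns true if the caller must back off (NETDEV_TX_BUSY). */
    static bool maybe_stop(struct txq *q, unsigned int needed)
    {
        if (space(q) >= needed)
            return false;

        atomic_store(&q->stopped, true);
        /* Re-check: the cleaner may have advanced tail after our
         * first read but before we marked the queue stopped.
         */
        if (space(q) >= needed) {
            atomic_store(&q->stopped, false);   /* wake */
            return false;
        }
        return true;
    }

    int main(void)
    {
        struct txq q = { .ndesc = 8 };

        atomic_store(&q.head, 8);       /* ring full */
        return maybe_stop(&q, 4);       /* 1: caller must stop */
    }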
+ * Since our SG lists per descriptor take care of most of the possible + * fragmentation, we don't need to have many descriptors available. + */ + ionic_maybe_stop_tx(q, 4); + + return NETDEV_TX_OK; + +err_out_drop: + q->stop++; + q->drop++; + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h new file mode 100644 index 000000000000..53775c62c85a --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#ifndef _IONIC_TXRX_H_ +#define _IONIC_TXRX_H_ + +void ionic_rx_flush(struct ionic_cq *cq); +void ionic_tx_flush(struct ionic_cq *cq); + +void ionic_rx_fill(struct ionic_queue *q); +void ionic_rx_empty(struct ionic_queue *q); +int ionic_rx_napi(struct napi_struct *napi, int budget); +netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev); + +#endif /* _IONIC_TXRX_H_ */ diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig index a391cf6ee4b2..55a29ec76680 100644 --- a/drivers/net/ethernet/qlogic/Kconfig +++ b/drivers/net/ethernet/qlogic/Kconfig @@ -66,15 +66,6 @@ config QLCNIC_HWMON This data is available via the hwmon sysfs interface. -config QLGE - tristate "QLogic QLGE 10Gb Ethernet Driver Support" - depends on PCI - ---help--- - This driver supports QLogic ISP8XXX 10Gb Ethernet cards. - - To compile this driver as a module, choose M here: the module - will be called qlge. - config NETXEN_NIC tristate "NetXen Multi port (1/10) Gigabit Ethernet NIC" depends on PCI diff --git a/drivers/net/ethernet/qlogic/Makefile b/drivers/net/ethernet/qlogic/Makefile index 6cd2e333a5fc..1ae4a0743bd5 100644 --- a/drivers/net/ethernet/qlogic/Makefile +++ b/drivers/net/ethernet/qlogic/Makefile @@ -5,7 +5,6 @@ obj-$(CONFIG_QLA3XXX) += qla3xxx.o obj-$(CONFIG_QLCNIC) += qlcnic/ -obj-$(CONFIG_QLGE) += qlge/ obj-$(CONFIG_NETXEN_NIC) += netxen/ obj-$(CONFIG_QED) += qed/ obj-$(CONFIG_QEDE)+= qede/ diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 58e2eaf77014..c692a41e4548 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -1980,7 +1980,7 @@ netxen_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb, struct netxen_cmd_buffer *pbuf) { struct netxen_skb_frag *nf; - struct skb_frag_struct *frag; + skb_frag_t *frag; int i, nr_frags; dma_addr_t map; @@ -2043,7 +2043,7 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) struct pci_dev *pdev; int i, k; int delta = 0; - struct skb_frag_struct *frag; + skb_frag_t *frag; u32 producer; int frag_count; diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index 5ea6c4fc6050..859caa6c1a1f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -1756,6 +1756,15 @@ static u32 qed_read_unaligned_dword(u8 *buf) return dword; } +/* Sets the value of the specified GRC param */ +static void qed_grc_set_param(struct qed_hwfn *p_hwfn, + enum dbg_grc_params grc_param, u32 val) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + + dev_data->grc.param_val[grc_param] = val; +} + /* Returns the value of the specified GRC param */ static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn, enum dbg_grc_params grc_param) @@ -5119,6 
+5128,69 @@ bool qed_read_fw_info(struct qed_hwfn *p_hwfn, return false; } +enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum dbg_grc_params grc_param, u32 val) +{ + enum dbg_status status; + int i; + + DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, + "dbg_grc_config: paramId = %d, val = %d\n", grc_param, val); + + status = qed_dbg_dev_init(p_hwfn, p_ptt); + if (status != DBG_STATUS_OK) + return status; + + /* Initializes the GRC parameters (if not initialized). Needed in order + * to set the default parameter values for the first time. + */ + qed_dbg_grc_init_params(p_hwfn); + + if (grc_param >= MAX_DBG_GRC_PARAMS) + return DBG_STATUS_INVALID_ARGS; + if (val < s_grc_param_defs[grc_param].min || + val > s_grc_param_defs[grc_param].max) + return DBG_STATUS_INVALID_ARGS; + + if (s_grc_param_defs[grc_param].is_preset) { + /* Preset param */ + + /* Disabling a preset is not allowed. Call + * dbg_grc_set_params_default instead. + */ + if (!val) + return DBG_STATUS_INVALID_ARGS; + + /* Update all params with the preset values */ + for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) { + u32 preset_val; + + /* Skip persistent params */ + if (s_grc_param_defs[i].is_persistent) + continue; + + /* Find preset value */ + if (grc_param == DBG_GRC_PARAM_EXCLUDE_ALL) + preset_val = + s_grc_param_defs[i].exclude_all_preset_val; + else if (grc_param == DBG_GRC_PARAM_CRASH) + preset_val = + s_grc_param_defs[i].crash_preset_val; + else + return DBG_STATUS_INVALID_ARGS; + + qed_grc_set_param(p_hwfn, + (enum dbg_grc_params)i, preset_val); + } + } else { + /* Regular param - set its value */ + qed_grc_set_param(p_hwfn, grc_param, val); + } + + return DBG_STATUS_OK; +} + /* Assign default GRC param values */ void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn) { @@ -7997,9 +8069,16 @@ static u32 qed_calc_regdump_header(enum debug_print_features feature, int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) { u8 cur_engine, omit_engine = 0, org_engine; + struct qed_hwfn *p_hwfn = + &cdev->hwfns[cdev->dbg_params.engine_for_debug]; + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + int grc_params[MAX_DBG_GRC_PARAMS], i; u32 offset = 0, feature_size; int rc; + for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) + grc_params[i] = dev_data->grc.param_val[i]; + if (cdev->num_hwfns == 1) omit_engine = 1; @@ -8087,6 +8166,9 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) rc); } + for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) + dev_data->grc.param_val[i] = grc_params[i]; + /* GRC dump - must be last because when mcp stuck it will * clutter idle_chk, reg_fifo, ... */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index e054f6c69e3a..cf3ceb62e397 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -3024,6 +3024,21 @@ void qed_read_regs(struct qed_hwfn *p_hwfn, */ bool qed_read_fw_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct fw_info *fw_info); +/** + * @brief qed_dbg_grc_config - Sets the value of a GRC parameter. + * + * @param p_hwfn - HW device data + * @param grc_param - GRC parameter + * @param val - Value to set. 
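qed_dbg_grc_config above gives preset parameters fan-out semantics: enabling a preset does not record a single value, it rewrites every non-persistent parameter with that preset's stored per-parameter value, which is also why clearing a preset is rejected in favour of qed_dbg_grc_set_params_default(). A compact model of the fan-out (invented parameter table; the model also skips preset slots to keep their state meaningful):

    #include <stdbool.h>
    #include <stdio.h>

    enum { PARAM_A, PARAM_B, PARAM_TIMEOUT, PARAM_CRASH_PRESET, NPARAMS };

    struct param_def {
        bool is_preset;
        bool is_persistent;
        unsigned int crash_preset_val;
    };

    static const struct param_def defs[NPARAMS] = {
        [PARAM_A]            = { .crash_preset_val = 1 },
        [PARAM_B]            = { .crash_preset_val = 0 },
        [PARAM_TIMEOUT]      = { .is_persistent = true },
        [PARAM_CRASH_PRESET] = { .is_preset = true },
    };

    static unsigned int vals[NPARAMS] = { 9, 9, 9, 0 };

    static void set_param(int p, unsigned int v)
    {
        if (!defs[p].is_preset) {
            vals[p] = v;            /* regular parameter */
            return;
        }
        /* Preset: fan its stored values out across the table. */
        for (int i = 0; i < NPARAMS; i++) {
            if (defs[i].is_persistent || defs[i].is_preset)
                continue;
            vals[i] = defs[i].crash_preset_val;
        }
    }

    int main(void)
    {
        set_param(PARAM_CRASH_PRESET, 1);
        for (int i = 0; i < NPARAMS; i++)
            printf("param %d = %u\n", i, vals[i]);
        return 0;   /* 1, 0, 9 (persistent untouched), 0 */
    }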
+ * + * @return error if one of the following holds: + * - the version wasn't set + * - grc_param is invalid + * - val is outside the allowed boundaries + */ +enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum dbg_grc_params grc_param, u32 val); /** * @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their @@ -12580,6 +12595,8 @@ struct public_drv_mb { #define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000 #define DRV_MSG_CODE_NIG_DRAIN 0x30000000 #define DRV_MSG_CODE_S_TAG_UPDATE_ACK 0x3b000000 +#define DRV_MSG_CODE_GET_NVM_CFG_OPTION 0x003e0000 +#define DRV_MSG_CODE_SET_NVM_CFG_OPTION 0x003f0000 #define DRV_MSG_CODE_INITIATE_PF_FLR 0x02010000 #define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000 #define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000 @@ -12748,6 +12765,21 @@ struct public_drv_mb { #define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002 #define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK 0x00010000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_SHIFT 0 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_MASK 0x0000FFFF +#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_SHIFT 16 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_MASK 0x00010000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_SHIFT 17 +#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_MASK 0x00020000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_SHIFT 18 +#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_MASK 0x00040000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_SHIFT 19 +#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_MASK 0x00080000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_SHIFT 20 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_MASK 0x00100000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_SHIFT 24 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_MASK 0x0f000000 + u32 fw_mb_header; #define FW_MSG_CODE_MASK 0xffff0000 #define FW_MSG_CODE_UNSUPPORTED 0x00000000 diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index f380fae8799d..65ec16a31658 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -382,7 +382,7 @@ qed_iwarp2roce_state(enum qed_iwarp_qp_state state) } } -const static char *iwarp_state_names[] = { +static const char * const iwarp_state_names[] = { "IDLE", "RTS", "TERMINATE", diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 9f36e7948222..1a5fc2ae351c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -1631,10 +1631,9 @@ static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn, } } -static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct qed_eth_stats *p_stats, - u16 statistics_bin) +static noinline_for_stack void +__qed_get_vport_pstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + struct qed_eth_stats *p_stats, u16 statistics_bin) { struct eth_pstorm_per_queue_stat pstats; u32 pstats_addr = 0, pstats_len = 0; @@ -1661,10 +1660,9 @@ static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn, HILO_64_REGPAIR(pstats.error_drop_pkts); } -static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct qed_eth_stats *p_stats, - u16 statistics_bin) +static noinline_for_stack void +__qed_get_vport_tstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + struct qed_eth_stats *p_stats, u16 statistics_bin) { struct tstorm_per_port_stat tstats; u32 tstats_addr, tstats_len; @@ -1709,10 +1707,9 @@ static void __qed_get_vport_ustats_addrlen(struct 
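The DRV_MB_PARAM_NVM_CFG_OPTION_*_SHIFT/_MASK pairs above are consumed by SET_FIELD-style helpers that mask a value to its field width and shift it into position within the 32-bit mailbox parameter. A standalone equivalent of that packing (generic macro; the kernel's QED_MFW_SET_FIELD may differ in exact spelling, not in effect):

    #include <stdint.h>
    #include <stdio.h>

    #define OPTION_ID_SHIFT  0
    #define OPTION_ID_MASK   0x0000FFFF
    #define ENTITY_ID_SHIFT  24
    #define ENTITY_ID_MASK   0x0f000000

    /* Pack 'val' into the field described by its mask/shift pair. */
    #define SET_FIELD(word, name, val)                              \
        ((word) = ((word) & ~name##_MASK) |                         \
                  (((uint32_t)(val) << name##_SHIFT) & name##_MASK))

    int main(void)
    {
        uint32_t mb_param = 0;

        SET_FIELD(mb_param, OPTION_ID, 0x1234);
        SET_FIELD(mb_param, ENTITY_ID, 0x5);
        printf("0x%08x\n", mb_param);   /* 0x05001234 */
        return 0;
    }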
qed_hwfn *p_hwfn, } } -static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct qed_eth_stats *p_stats, - u16 statistics_bin) +static noinline_for_stack +void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + struct qed_eth_stats *p_stats, u16 statistics_bin) { struct eth_ustorm_per_queue_stat ustats; u32 ustats_addr = 0, ustats_len = 0; @@ -1751,10 +1748,9 @@ static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn, } } -static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct qed_eth_stats *p_stats, - u16 statistics_bin) +static noinline_for_stack void +__qed_get_vport_mstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + struct qed_eth_stats *p_stats, u16 statistics_bin) { struct eth_mstorm_per_queue_stat mstats; u32 mstats_addr = 0, mstats_len = 0; @@ -1780,9 +1776,9 @@ static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn, HILO_64_REGPAIR(mstats.tpa_coalesced_bytes); } -static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct qed_eth_stats *p_stats) +static noinline_for_stack void +__qed_get_vport_port_stats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + struct qed_eth_stats *p_stats) { struct qed_eth_stats_common *p_common = &p_stats->common; struct port_stats port_stats; diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 1efff7f68ef6..2ce70097d018 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -67,6 +67,10 @@ #define QED_ROCE_QPS (8192) #define QED_ROCE_DPIS (8) #define QED_RDMA_SRQS QED_ROCE_QPS +#define QED_NVM_CFG_SET_FLAGS 0xE +#define QED_NVM_CFG_SET_PF_FLAGS 0x1E +#define QED_NVM_CFG_GET_FLAGS 0xA +#define QED_NVM_CFG_GET_PF_FLAGS 0x1A static char version[] = "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n"; @@ -1690,6 +1694,7 @@ static void qed_fill_link_capability(struct qed_hwfn *hwfn, switch (media_type) { case MEDIA_DA_TWINAX: + *if_capability |= QED_LM_FIBRE_BIT; if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) *if_capability |= QED_LM_20000baseKR2_Full_BIT; /* For DAC media multiple speed capabilities are supported*/ @@ -1709,6 +1714,7 @@ static void qed_fill_link_capability(struct qed_hwfn *hwfn, *if_capability |= QED_LM_100000baseCR4_Full_BIT; break; case MEDIA_BASE_T: + *if_capability |= QED_LM_TP_BIT; if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) { if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) { @@ -1720,6 +1726,7 @@ static void qed_fill_link_capability(struct qed_hwfn *hwfn, } } if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) { + *if_capability |= QED_LM_FIBRE_BIT; if (tcvr_type == ETH_TRANSCEIVER_TYPE_1000BASET) *if_capability |= QED_LM_1000baseT_Full_BIT; if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_BASET) @@ -1730,6 +1737,7 @@ static void qed_fill_link_capability(struct qed_hwfn *hwfn, case MEDIA_SFPP_10G_FIBER: case MEDIA_XFP_FIBER: case MEDIA_MODULE_FIBER: + *if_capability |= QED_LM_FIBRE_BIT; if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) { if ((tcvr_type == ETH_TRANSCEIVER_TYPE_1G_LX) || @@ -1772,6 +1780,7 @@ static void qed_fill_link_capability(struct qed_hwfn *hwfn, break; case MEDIA_KR: + *if_capability |= QED_LM_Backplane_BIT; if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) *if_capability |= QED_LM_20000baseKR2_Full_BIT; if (capability & @@ -1823,7 +1832,6 @@ static void qed_fill_link(struct qed_hwfn *hwfn, 
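The noinline_for_stack annotations on the __qed_get_vport_*stats helpers above address stack depth rather than speed: each helper keeps a sizeable firmware stats struct in its frame, and if the compiler inlined all five into their common caller those frames would accumulate. In the kernel the annotation is simply noinline kept for documentation value; a minimal illustration:

    /* Kernel's noinline_for_stack expands to noinline. */
    #define noinline_for_stack __attribute__((__noinline__))

    struct big { char buf[512]; };

    static noinline_for_stack void use_one(void)
    {
        struct big b = { { 0 } };
        (void)b;
        /* The 512-byte frame lives only for this call... */
    }

    void caller(void)
    {
        /* ...so four sequential helpers peak at ~512 bytes of stack,
         * not the ~2048 that inlining could otherwise accumulate.
         */
        use_one();
        use_one();
        use_one();
        use_one();
    }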
if_link->link_up = true; /* TODO - at the moment assume supported and advertised speed equal */ - if_link->supported_caps = QED_LM_FIBRE_BIT; if (link_caps.default_speed_autoneg) if_link->supported_caps |= QED_LM_Autoneg_BIT; if (params.pause.autoneg || @@ -2229,6 +2237,119 @@ static int qed_nvm_flash_image_validate(struct qed_dev *cdev, return 0; } +/* Binary file format - + * /----------------------------------------------------------------------\ + * 0B | 0x5 [command index] | + * 4B | Number of config attributes | Reserved | + * 4B | Config ID | Entity ID | Length | + * 4B | Value | + * | | + * \----------------------------------------------------------------------/ + * There can be several cfg_id-entity_id-Length-Value sets as specified by + * 'Number of config attributes'. + * + * The API parses config attributes from the user provided buffer and flashes + * them to the respective NVM path using Management FW inerface. + */ +static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + u8 entity_id, len, buf[32]; + struct qed_ptt *ptt; + u16 cfg_id, count; + int rc = 0, i; + u32 flags; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EAGAIN; + + /* NVM CFG ID attribute header */ + *data += 4; + count = *((u16 *)*data); + *data += 4; + + DP_VERBOSE(cdev, NETIF_MSG_DRV, + "Read config ids: num_attrs = %0d\n", count); + /* NVM CFG ID attributes */ + for (i = 0; i < count; i++) { + cfg_id = *((u16 *)*data); + *data += 2; + entity_id = **data; + (*data)++; + len = **data; + (*data)++; + memcpy(buf, *data, len); + *data += len; + + flags = entity_id ? QED_NVM_CFG_SET_PF_FLAGS : + QED_NVM_CFG_SET_FLAGS; + + DP_VERBOSE(cdev, NETIF_MSG_DRV, + "cfg_id = %d entity = %d len = %d\n", cfg_id, + entity_id, len); + rc = qed_mcp_nvm_set_cfg(hwfn, ptt, cfg_id, entity_id, flags, + buf, len); + if (rc) { + DP_ERR(cdev, "Error %d configuring %d\n", rc, cfg_id); + break; + } + } + + qed_ptt_release(hwfn, ptt); + + return rc; +} + +#define QED_MAX_NVM_BUF_LEN 32 +static int qed_nvm_flash_cfg_len(struct qed_dev *cdev, u32 cmd) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + u8 buf[QED_MAX_NVM_BUF_LEN]; + struct qed_ptt *ptt; + u32 len; + int rc; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return QED_MAX_NVM_BUF_LEN; + + rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, 0, QED_NVM_CFG_GET_FLAGS, buf, + &len); + if (rc || !len) { + DP_ERR(cdev, "Error %d reading %d\n", rc, cmd); + len = QED_MAX_NVM_BUF_LEN; + } + + qed_ptt_release(hwfn, ptt); + + return len; +} + +static int qed_nvm_flash_cfg_read(struct qed_dev *cdev, u8 **data, + u32 cmd, u32 entity_id) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *ptt; + u32 flags, len; + int rc = 0; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EAGAIN; + + DP_VERBOSE(cdev, NETIF_MSG_DRV, + "Read config cmd = %d entity id %d\n", cmd, entity_id); + flags = entity_id ? 
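qed_nvm_flash_cfg_write above walks the user buffer exactly as the format comment sketches: a command word, an attribute-count word, then repeated cfg_id(u16)/entity_id(u8)/len(u8)/value records. A standalone parser for the same record layout (little-endian host assumed, mirroring the driver's direct casts, with bounds checks added for safety):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static int parse_cfg(const uint8_t *data, size_t size)
    {
        const uint8_t *p = data, *end = data + size;
        uint16_t count;

        p += 4;                     /* command index word */
        memcpy(&count, p, 2);       /* attribute count (low 16 bits) */
        p += 4;                     /* count + reserved */

        for (uint16_t i = 0; i < count; i++) {
            uint16_t cfg_id;
            uint8_t entity_id, len;

            if (end - p < 4)
                return -1;
            memcpy(&cfg_id, p, 2); p += 2;
            entity_id = *p++;
            len = *p++;
            if (end - p < len)
                return -1;
            printf("cfg_id=%u entity=%u len=%u\n", cfg_id, entity_id, len);
            p += len;               /* value bytes would be flashed here */
        }
        return 0;
    }

    int main(void)
    {
        /* One attribute: cfg_id 7, entity 0, 4-byte value. */
        const uint8_t buf[] = { 5,0,0,0, 1,0,0,0,
                                7,0, 0, 4, 0xde,0xad,0xbe,0xef };
        return parse_cfg(buf, sizeof(buf));
    }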
QED_NVM_CFG_GET_PF_FLAGS : QED_NVM_CFG_GET_FLAGS; + rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, entity_id, flags, *data, &len); + if (rc) + DP_ERR(cdev, "Error %d reading %d\n", rc, cmd); + + qed_ptt_release(hwfn, ptt); + + return rc; +} + static int qed_nvm_flash(struct qed_dev *cdev, const char *name) { const struct firmware *image; @@ -2270,6 +2391,9 @@ static int qed_nvm_flash(struct qed_dev *cdev, const char *name) rc = qed_nvm_flash_image_access(cdev, &data, &check_resp); break; + case QED_NVM_FLASH_CMD_NVM_CFG_ID: + rc = qed_nvm_flash_cfg_write(cdev, &data); + break; default: DP_ERR(cdev, "Unknown command %08x\n", cmd_type); rc = -EINVAL; @@ -2485,6 +2609,26 @@ static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf, return rc; } +static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *ptt; + int rc = 0; + + if (IS_VF(cdev)) + return 0; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EAGAIN; + + rc = qed_dbg_grc_config(hwfn, ptt, cfg_id, val); + + qed_ptt_release(hwfn, ptt); + + return rc; +} + static u8 qed_get_affin_hwfn_idx(struct qed_dev *cdev) { return QED_AFFIN_HWFN_IDX(cdev); @@ -2538,6 +2682,9 @@ const struct qed_common_ops qed_common_ops_pass = { .db_recovery_del = &qed_db_recovery_del, .read_module_eeprom = &qed_read_module_eeprom, .get_affin_hwfn_idx = &qed_get_affin_hwfn_idx, + .read_nvm_cfg = &qed_nvm_flash_cfg_read, + .read_nvm_cfg_len = &qed_nvm_flash_cfg_len, + .set_grc_config = &qed_set_grc_config, }; void qed_get_protocol_stats(struct qed_dev *cdev, diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 758702c1ce9c..36ddb89856a8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -3750,3 +3750,64 @@ int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) return 0; } + +int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u16 option_id, u8 entity_id, u16 flags, u8 *p_buf, + u32 *p_len) +{ + u32 mb_param = 0, resp, param; + int rc; + + QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id); + if (flags & QED_NVM_CFG_OPTION_INIT) + QED_MFW_SET_FIELD(mb_param, + DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1); + if (flags & QED_NVM_CFG_OPTION_FREE) + QED_MFW_SET_FIELD(mb_param, + DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1); + if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) { + QED_MFW_SET_FIELD(mb_param, + DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1); + QED_MFW_SET_FIELD(mb_param, + DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID, + entity_id); + } + + rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, + DRV_MSG_CODE_GET_NVM_CFG_OPTION, + mb_param, &resp, ¶m, p_len, (u32 *)p_buf); + + return rc; +} + +int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u16 option_id, u8 entity_id, u16 flags, u8 *p_buf, + u32 len) +{ + u32 mb_param = 0, resp, param; + + QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id); + if (flags & QED_NVM_CFG_OPTION_ALL) + QED_MFW_SET_FIELD(mb_param, + DRV_MB_PARAM_NVM_CFG_OPTION_ALL, 1); + if (flags & QED_NVM_CFG_OPTION_INIT) + QED_MFW_SET_FIELD(mb_param, + DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1); + if (flags & QED_NVM_CFG_OPTION_COMMIT) + QED_MFW_SET_FIELD(mb_param, + DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT, 1); + if (flags & QED_NVM_CFG_OPTION_FREE) + QED_MFW_SET_FIELD(mb_param, + DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1); + if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) { + QED_MFW_SET_FIELD(mb_param, + 
DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1); + QED_MFW_SET_FIELD(mb_param, + DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID, + entity_id); + } + + return qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt, + DRV_MSG_CODE_SET_NVM_CFG_OPTION, + mb_param, &resp, ¶m, len, (u32 *)p_buf); +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index e4f8fe4bd062..9c4c2763de8d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -251,6 +251,12 @@ union qed_mfw_tlv_data { struct qed_mfw_tlv_iscsi iscsi; }; +#define QED_NVM_CFG_OPTION_ALL BIT(0) +#define QED_NVM_CFG_OPTION_INIT BIT(1) +#define QED_NVM_CFG_OPTION_COMMIT BIT(2) +#define QED_NVM_CFG_OPTION_FREE BIT(3) +#define QED_NVM_CFG_OPTION_ENTITY_SEL BIT(4) + /** * @brief - returns the link params of the hw function * @@ -1202,4 +1208,33 @@ int qed_mcp_get_engine_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); */ int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); +/** + * @brief Get NVM config attribute value. + * + * @param p_hwfn + * @param p_ptt + * @param option_id + * @param entity_id + * @param flags + * @param p_buf + * @param p_len + */ +int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u16 option_id, u8 entity_id, u16 flags, u8 *p_buf, + u32 *p_len); + +/** + * @brief Set NVM config attribute value. + * + * @param p_hwfn + * @param p_ptt + * @param option_id + * @param entity_id + * @param flags + * @param p_buf + * @param len + */ +int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u16 option_id, u8 entity_id, u16 flags, u8 *p_buf, + u32 len); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 5dda547772c1..856051f50eb7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -231,7 +231,7 @@ static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn, { DP_VERBOSE(p_hwfn, QED_MSG_IOV, - "PF unwilling to fullill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]. Try PF recommended amount\n", + "PF unwilling to fulfill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]. 
Try PF recommended amount\n", p_req->num_rxqs, p_resp->num_rxqs, p_req->num_rxqs, diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 0e931c04fecf..c303a92d5b06 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -177,6 +177,20 @@ enum qede_flags_bit { QEDE_FLAGS_TX_TIMESTAMPING_EN }; +#define QEDE_DUMP_MAX_ARGS 4 +enum qede_dump_cmd { + QEDE_DUMP_CMD_NONE = 0, + QEDE_DUMP_CMD_NVM_CFG, + QEDE_DUMP_CMD_GRCDUMP, + QEDE_DUMP_CMD_MAX +}; + +struct qede_dump_info { + enum qede_dump_cmd cmd; + u8 num_args; + u32 args[QEDE_DUMP_MAX_ARGS]; +}; + struct qede_dev { struct qed_dev *cdev; struct net_device *ndev; @@ -262,6 +276,7 @@ struct qede_dev { struct qede_rdma_dev rdma_info; struct bpf_prog *xdp_prog; + struct qede_dump_info dump_info; }; enum QEDE_STATE { diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index e85f9fef930c..8a426afb6a55 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -48,6 +48,8 @@ {QEDE_RQSTAT_OFFSET(stat_name), QEDE_RQSTAT_STRING(stat_name)} #define QEDE_SELFTEST_POLL_COUNT 100 +#define QEDE_DUMP_VERSION 0x1 +#define QEDE_DUMP_NVM_ARG_COUNT 2 static const struct { u64 offset; @@ -424,12 +426,13 @@ struct qede_link_mode_mapping { }; static const struct qede_link_mode_mapping qed_lm_map[] = { + {QED_LM_FIBRE_BIT, ETHTOOL_LINK_MODE_FIBRE_BIT}, {QED_LM_Autoneg_BIT, ETHTOOL_LINK_MODE_Autoneg_BIT}, {QED_LM_Asym_Pause_BIT, ETHTOOL_LINK_MODE_Asym_Pause_BIT}, {QED_LM_Pause_BIT, ETHTOOL_LINK_MODE_Pause_BIT}, {QED_LM_1000baseT_Full_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT}, {QED_LM_10000baseT_Full_BIT, ETHTOOL_LINK_MODE_10000baseT_Full_BIT}, - {QED_LM_2500baseX_Full_BIT, ETHTOOL_LINK_MODE_2500baseX_Full_BIT}, + {QED_LM_TP_BIT, ETHTOOL_LINK_MODE_TP_BIT}, {QED_LM_Backplane_BIT, ETHTOOL_LINK_MODE_Backplane_BIT}, {QED_LM_1000baseKX_Full_BIT, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT}, {QED_LM_10000baseKX4_Full_BIT, ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT}, @@ -1972,6 +1975,117 @@ static int qede_get_module_eeprom(struct net_device *dev, return rc; } +static int qede_set_dump(struct net_device *dev, struct ethtool_dump *val) +{ + struct qede_dev *edev = netdev_priv(dev); + int rc = 0; + + if (edev->dump_info.cmd == QEDE_DUMP_CMD_NONE) { + if (val->flag > QEDE_DUMP_CMD_MAX) { + DP_ERR(edev, "Invalid command %d\n", val->flag); + return -EINVAL; + } + edev->dump_info.cmd = val->flag; + edev->dump_info.num_args = 0; + return 0; + } + + if (edev->dump_info.num_args == QEDE_DUMP_MAX_ARGS) { + DP_ERR(edev, "Arg count = %d\n", edev->dump_info.num_args); + return -EINVAL; + } + + switch (edev->dump_info.cmd) { + case QEDE_DUMP_CMD_NVM_CFG: + edev->dump_info.args[edev->dump_info.num_args] = val->flag; + edev->dump_info.num_args++; + break; + case QEDE_DUMP_CMD_GRCDUMP: + rc = edev->ops->common->set_grc_config(edev->cdev, + val->flag, 1); + break; + default: + break; + } + + return rc; +} + +static int qede_get_dump_flag(struct net_device *dev, + struct ethtool_dump *dump) +{ + struct qede_dev *edev = netdev_priv(dev); + + if (!edev->ops || !edev->ops->common) { + DP_ERR(edev, "Edev ops not populated\n"); + return -EINVAL; + } + + dump->version = QEDE_DUMP_VERSION; + switch (edev->dump_info.cmd) { + case QEDE_DUMP_CMD_NVM_CFG: + dump->flag = QEDE_DUMP_CMD_NVM_CFG; + dump->len = edev->ops->common->read_nvm_cfg_len(edev->cdev, + edev->dump_info.args[0]); + break; + case 
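qede_set_dump above is deliberately stateful: the first ETHTOOL_SET_DUMP call selects the command, each subsequent call appends one argument up to QEDE_DUMP_MAX_ARGS, and qede_get_dump_data later consumes and resets the whole record. With a recent ethtool this roughly maps to repeated 'ethtool -W' invocations followed by one 'ethtool -w ... data <file>' read, though the exact syntax depends on the tool version. A userspace model of the accumulation:

    #include <stdio.h>

    #define MAX_ARGS 4
    enum cmd { CMD_NONE, CMD_NVM_CFG, CMD_GRCDUMP };

    struct dump_state {
        enum cmd cmd;
        unsigned int num_args;
        unsigned int args[MAX_ARGS];
    };

    /* Each ETHTOOL_SET_DUMP call lands here: the first one picks the
     * command, later ones append arguments.
     */
    static int set_dump(struct dump_state *s, unsigned int flag)
    {
        if (s->cmd == CMD_NONE) {
            s->cmd = flag;
            s->num_args = 0;
            return 0;
        }
        if (s->num_args == MAX_ARGS)
            return -1;
        s->args[s->num_args++] = flag;
        return 0;
    }

    int main(void)
    {
        struct dump_state s = { 0 };

        set_dump(&s, CMD_NVM_CFG);  /* select the command */
        set_dump(&s, 7);            /* arg 0: config option id */
        set_dump(&s, 0);            /* arg 1: entity id */
        printf("cmd=%d args=%u,%u\n", s.cmd, s.args[0], s.args[1]);
        return 0;   /* get_dump_data would consume and reset this */
    }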
QEDE_DUMP_CMD_GRCDUMP: + dump->flag = QEDE_DUMP_CMD_GRCDUMP; + dump->len = edev->ops->common->dbg_all_data_size(edev->cdev); + break; + default: + DP_ERR(edev, "Invalid cmd = %d\n", edev->dump_info.cmd); + return -EINVAL; + } + + DP_VERBOSE(edev, QED_MSG_DEBUG, + "dump->version = 0x%x dump->flag = %d dump->len = %d\n", + dump->version, dump->flag, dump->len); + return 0; +} + +static int qede_get_dump_data(struct net_device *dev, + struct ethtool_dump *dump, void *buf) +{ + struct qede_dev *edev = netdev_priv(dev); + int rc = 0; + + if (!edev->ops || !edev->ops->common) { + DP_ERR(edev, "Edev ops not populated\n"); + rc = -EINVAL; + goto err; + } + + switch (edev->dump_info.cmd) { + case QEDE_DUMP_CMD_NVM_CFG: + if (edev->dump_info.num_args != QEDE_DUMP_NVM_ARG_COUNT) { + DP_ERR(edev, "Arg count = %d required = %d\n", + edev->dump_info.num_args, + QEDE_DUMP_NVM_ARG_COUNT); + rc = -EINVAL; + goto err; + } + rc = edev->ops->common->read_nvm_cfg(edev->cdev, (u8 **)&buf, + edev->dump_info.args[0], + edev->dump_info.args[1]); + break; + case QEDE_DUMP_CMD_GRCDUMP: + memset(buf, 0, dump->len); + rc = edev->ops->common->dbg_all_data(edev->cdev, buf); + break; + default: + DP_ERR(edev, "Invalid cmd = %d\n", edev->dump_info.cmd); + rc = -EINVAL; + break; + } + +err: + edev->dump_info.cmd = QEDE_DUMP_CMD_NONE; + edev->dump_info.num_args = 0; + memset(edev->dump_info.args, 0, sizeof(edev->dump_info.args)); + + return rc; +} + static const struct ethtool_ops qede_ethtool_ops = { .get_link_ksettings = qede_get_link_ksettings, .set_link_ksettings = qede_set_link_ksettings, @@ -2013,6 +2127,9 @@ static const struct ethtool_ops qede_ethtool_ops = { .get_tunable = qede_get_tunable, .set_tunable = qede_set_tunable, .flash_device = qede_flash_device, + .get_dump_flag = qede_get_dump_flag, + .get_dump_data = qede_get_dump_data, + .set_dump = qede_set_dump, }; static const struct ethtool_ops qede_vf_ethtool_ops = { diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 14f26bf3b388..ac61f614de37 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -581,7 +581,7 @@ static int qlcnic_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf) { struct qlcnic_skb_frag *nf; - struct skb_frag_struct *frag; + skb_frag_t *frag; int i, nr_frags; dma_addr_t map; diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index 707665b62eb7..bebe38d74d66 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c @@ -1385,15 +1385,13 @@ static void emac_tx_fill_tpd(struct emac_adapter *adpt, } for (i = 0; i < nr_frags; i++) { - struct skb_frag_struct *frag; - - frag = &skb_shinfo(skb)->frags[i]; + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx); - tpbuf->length = frag->size; - tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent, - frag->page.p, frag->page_offset, - tpbuf->length, DMA_TO_DEVICE); + tpbuf->length = skb_frag_size(frag); + tpbuf->dma_addr = skb_frag_dma_map(adpt->netdev->dev.parent, + frag, 0, tpbuf->length, + DMA_TO_DEVICE); ret = dma_mapping_error(adpt->netdev->dev.parent, tpbuf->dma_addr); if (ret) diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index 59c2349b59df..c84ab052ef26 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ 
b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -544,7 +544,6 @@ static int emac_probe_resources(struct platform_device *pdev, struct emac_adapter *adpt) { struct net_device *netdev = adpt->netdev; - struct resource *res; char maddr[ETH_ALEN]; int ret = 0; @@ -556,22 +555,17 @@ static int emac_probe_resources(struct platform_device *pdev, /* Core 0 interrupt */ ret = platform_get_irq(pdev, 0); - if (ret < 0) { - dev_err(&pdev->dev, - "error: missing core0 irq resource (error=%i)\n", ret); + if (ret < 0) return ret; - } adpt->irq.irq = ret; /* base register address */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - adpt->base = devm_ioremap_resource(&pdev->dev, res); + adpt->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(adpt->base)) return PTR_ERR(adpt->base); /* CSR register address */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - adpt->csr = devm_ioremap_resource(&pdev->dev, res); + adpt->csr = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(adpt->csr)) return PTR_ERR(adpt->csr); diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c index bcb890b18a94..702aa217a27a 100644 --- a/drivers/net/ethernet/qualcomm/qca_debug.c +++ b/drivers/net/ethernet/qualcomm/qca_debug.c @@ -131,17 +131,10 @@ DEFINE_SHOW_ATTRIBUTE(qcaspi_info); void qcaspi_init_device_debugfs(struct qcaspi *qca) { - struct dentry *device_root; + qca->device_root = debugfs_create_dir(dev_name(&qca->net_dev->dev), + NULL); - device_root = debugfs_create_dir(dev_name(&qca->net_dev->dev), NULL); - qca->device_root = device_root; - - if (IS_ERR(device_root) || !device_root) { - pr_warn("failed to create debugfs directory for %s\n", - dev_name(&qca->net_dev->dev)); - return; - } - debugfs_create_file("info", S_IFREG | 0444, device_root, qca, + debugfs_create_file("info", S_IFREG | 0444, qca->device_root, qca, &qcaspi_info_fops); } diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index b28360bc2255..5ecf61df78bd 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -837,8 +837,7 @@ qcaspi_netdev_uninit(struct net_device *dev) kfree(qca->rx_buffer); qca->buffer_size = 0; - if (qca->rx_skb) - dev_kfree_skb(qca->rx_skb); + dev_kfree_skb(qca->rx_skb); } static const struct net_device_ops qcaspi_netdev_ops = { diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c index 590616846cd1..0981068504fa 100644 --- a/drivers/net/ethernet/qualcomm/qca_uart.c +++ b/drivers/net/ethernet/qualcomm/qca_uart.c @@ -285,8 +285,7 @@ static void qcauart_netdev_uninit(struct net_device *dev) { struct qcauart *qca = netdev_priv(dev); - if (qca->rx_skb) - dev_kfree_skb(qca->rx_skb); + dev_kfree_skb(qca->rx_skb); } static const struct net_device_ops qcauart_netdev_ops = { diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig index b18e7a91d5cd..5e0b9d2f14f7 100644 --- a/drivers/net/ethernet/realtek/Kconfig +++ b/drivers/net/ethernet/realtek/Kconfig @@ -96,14 +96,19 @@ config 8139_OLD_RX_RESET old RX-reset behavior. If unsure, say N. config R8169 - tristate "Realtek 8169 gigabit ethernet support" + tristate "Realtek 8169/8168/8101/8125 ethernet support" depends on PCI select FW_LOADER select CRC32 select PHYLIB select REALTEK_PHY ---help--- - Say Y here if you have a Realtek 8169 PCI Gigabit Ethernet adapter. 
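The emac_probe_resources() change above folds the platform_get_resource()/devm_ioremap_resource() pair into devm_platform_ioremap_resource(); the helper is, in essence, exactly those two calls. A sketch of the equivalence (matching the upstream helper's shape, from memory):

    #include <linux/io.h>
    #include <linux/platform_device.h>

    /* What callers used to open-code, and what the helper bundles up. */
    static void __iomem *ioremap_idx(struct platform_device *pdev,
                                     unsigned int index)
    {
        struct resource *res;

        res = platform_get_resource(pdev, IORESOURCE_MEM, index);
        return devm_ioremap_resource(&pdev->dev, res);
    }

The qca_spi/qca_uart hunks nearby make the same kind of simplification: dev_kfree_skb() already tolerates a NULL skb, so the callers' explicit NULL checks were redundant.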
+ Say Y here if you have a Realtek Ethernet adapter belonging to + the following families: + RTL8169 Gigabit Ethernet + RTL8168 Gigabit Ethernet + RTL8101 Fast Ethernet + RTL8125 2.5GBit Ethernet To compile this driver as a module, choose M here: the module will be called r8169. This is recommended. diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index bae0074ab9aa..0ef01db1f8b8 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -55,13 +55,14 @@ #define FIRMWARE_8168H_2 "rtl_nic/rtl8168h-2.fw" #define FIRMWARE_8107E_1 "rtl_nic/rtl8107e-1.fw" #define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw" +#define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw" #define R8169_MSG_DEFAULT \ (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN) /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). The RTL chips use a 64 element hash table based on the Ethernet CRC. */ -static const int multicast_filter_limit = 32; +#define MC_FILTER_LIMIT 32 #define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */ #define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ @@ -135,6 +136,8 @@ enum mac_version { RTL_GIGA_MAC_VER_49, RTL_GIGA_MAC_VER_50, RTL_GIGA_MAC_VER_51, + RTL_GIGA_MAC_VER_60, + RTL_GIGA_MAC_VER_61, RTL_GIGA_MAC_NONE }; @@ -200,6 +203,8 @@ static const struct { [RTL_GIGA_MAC_VER_49] = {"RTL8168ep/8111ep" }, [RTL_GIGA_MAC_VER_50] = {"RTL8168ep/8111ep" }, [RTL_GIGA_MAC_VER_51] = {"RTL8168ep/8111ep" }, + [RTL_GIGA_MAC_VER_60] = {"RTL8125" }, + [RTL_GIGA_MAC_VER_61] = {"RTL8125", FIRMWARE_8125A_3}, }; static const struct pci_device_id rtl8169_pci_tbl[] = { @@ -220,6 +225,8 @@ static const struct pci_device_id rtl8169_pci_tbl[] = { { PCI_VDEVICE(USR, 0x0116) }, { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0024 }, { 0x0001, 0x8168, PCI_ANY_ID, 0x2410 }, + { PCI_VDEVICE(REALTEK, 0x8125) }, + { PCI_VDEVICE(REALTEK, 0x3000) }, {} }; @@ -271,7 +278,6 @@ enum rtl_registers { Config3 = 0x54, Config4 = 0x55, Config5 = 0x56, - MultiIntr = 0x5c, PHYAR = 0x60, PHYstatus = 0x6c, RxMaxSize = 0xda, @@ -385,6 +391,19 @@ enum rtl8168_registers { #define EARLY_TALLY_EN (1 << 16) }; +enum rtl8125_registers { + IntrMask_8125 = 0x38, + IntrStatus_8125 = 0x3c, + TxPoll_8125 = 0x90, + MAC0_BKP = 0x19e0, +}; + +#define RX_VLAN_INNER_8125 BIT(22) +#define RX_VLAN_OUTER_8125 BIT(23) +#define RX_VLAN_8125 (RX_VLAN_INNER_8125 | RX_VLAN_OUTER_8125) + +#define RX_FETCH_DFLT_8125 (8 << 27) + enum rtl_register_content { /* InterruptStatusBits */ SYSErr = 0x8000, @@ -539,11 +558,11 @@ enum rtl_tx_desc_bit_1 { TD1_GTSENV4 = (1 << 26), /* Giant Send for IPv4 */ TD1_GTSENV6 = (1 << 25), /* Giant Send for IPv6 */ #define GTTCPHO_SHIFT 18 -#define GTTCPHO_MAX 0x7fU +#define GTTCPHO_MAX 0x7f /* Second doubleword. 
*/ #define TCPHO_SHIFT 18 -#define TCPHO_MAX 0x3ffU +#define TCPHO_MAX 0x3ff #define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */ TD1_IPv6_CS = (1 << 28), /* Calculate IPv6 checksum */ TD1_IPv4_CS = (1 << 29), /* Calculate IPv4 checksum */ @@ -569,6 +588,11 @@ enum rtl_rx_desc_bit { #define RsvdMask 0x3fffc000 +#define RTL_GSO_MAX_SIZE_V1 32000 +#define RTL_GSO_MAX_SEGS_V1 24 +#define RTL_GSO_MAX_SIZE_V2 64000 +#define RTL_GSO_MAX_SEGS_V2 64 + struct TxDesc { __le32 opts1; __le32 opts2; @@ -638,10 +662,10 @@ struct rtl8169_private { struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */ dma_addr_t TxPhyAddr; dma_addr_t RxPhyAddr; - void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */ + struct page *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */ struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */ u16 cp_cmd; - u16 irq_mask; + u32 irq_mask; struct clk *clk; struct { @@ -691,6 +715,7 @@ MODULE_FIRMWARE(FIRMWARE_8168H_1); MODULE_FIRMWARE(FIRMWARE_8168H_2); MODULE_FIRMWARE(FIRMWARE_8107E_1); MODULE_FIRMWARE(FIRMWARE_8107E_2); +MODULE_FIRMWARE(FIRMWARE_8125A_3); static inline struct device *tp_to_dev(struct rtl8169_private *tp) { @@ -723,12 +748,33 @@ static void rtl_tx_performance_tweak(struct rtl8169_private *tp, u16 force) PCI_EXP_DEVCTL_READRQ, force); } +static bool rtl_is_8125(struct rtl8169_private *tp) +{ + return tp->mac_version >= RTL_GIGA_MAC_VER_60; +} + static bool rtl_is_8168evl_up(struct rtl8169_private *tp) { return tp->mac_version >= RTL_GIGA_MAC_VER_34 && + tp->mac_version != RTL_GIGA_MAC_VER_39 && + tp->mac_version <= RTL_GIGA_MAC_VER_51; +} + +static bool rtl_supports_eee(struct rtl8169_private *tp) +{ + return tp->mac_version >= RTL_GIGA_MAC_VER_34 && + tp->mac_version != RTL_GIGA_MAC_VER_37 && tp->mac_version != RTL_GIGA_MAC_VER_39; } +static void rtl_read_mac_from_reg(struct rtl8169_private *tp, u8 *mac, int reg) +{ + int i; + + for (i = 0; i < ETH_ALEN; i++) + mac[i] = RTL_R8(tp, reg + i); +} + struct rtl_cond { bool (*check)(struct rtl8169_private *); const char *msg; @@ -846,6 +892,14 @@ static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg) return RTL_R32(tp, OCPDR); } +static void r8168_mac_ocp_modify(struct rtl8169_private *tp, u32 reg, u16 mask, + u16 set) +{ + u16 data = r8168_mac_ocp_read(tp, reg); + + r8168_mac_ocp_write(tp, reg, (data & ~mask) | set); +} + #define OCP_STD_PHY_BASE 0xa400 static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value) @@ -995,7 +1049,7 @@ static void rtl_writephy(struct rtl8169_private *tp, int location, int val) case RTL_GIGA_MAC_VER_31: r8168dp_2_mdio_write(tp, location, val); break; - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_61: r8168g_mdio_write(tp, location, val); break; default: @@ -1012,7 +1066,7 @@ static int rtl_readphy(struct rtl8169_private *tp, int location) case RTL_GIGA_MAC_VER_28: case RTL_GIGA_MAC_VER_31: return r8168dp_2_mdio_read(tp, location); - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: + case RTL_GIGA_MAC_VER_40 ... 
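The r8168_mac_ocp_modify() helper introduced above is what lets later hunks of this patch collapse four-line read/mask/or/write sequences into single calls. The shape of the idiom, with reg_read()/reg_write() and struct foo_priv as placeholders for the real accessors:

/* Generic read-modify-write: clear the bits in 'mask', then set the
 * bits in 'set'. reg_read()/reg_write() are stand-ins. */
static void reg_modify(struct foo_priv *priv, u32 reg, u16 mask, u16 set)
{
	u16 data = reg_read(priv, reg);

	reg_write(priv, reg, (data & ~mask) | set);
}

Callers such as r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0) then read as "clear bit 14", which is considerably harder to get wrong than the open-coded version.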
RTL_GIGA_MAC_VER_61: return r8168g_mdio_read(tp, location); default: return r8169_mdio_read(tp, location); @@ -1294,14 +1348,28 @@ static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr) RTL_R32(tp, EFUSEAR) & EFUSEAR_DATA_MASK : ~0; } -static void rtl_ack_events(struct rtl8169_private *tp, u16 bits) +static u32 rtl_get_events(struct rtl8169_private *tp) +{ + if (rtl_is_8125(tp)) + return RTL_R32(tp, IntrStatus_8125); + else + return RTL_R16(tp, IntrStatus); +} + +static void rtl_ack_events(struct rtl8169_private *tp, u32 bits) { - RTL_W16(tp, IntrStatus, bits); + if (rtl_is_8125(tp)) + RTL_W32(tp, IntrStatus_8125, bits); + else + RTL_W16(tp, IntrStatus, bits); } static void rtl_irq_disable(struct rtl8169_private *tp) { - RTL_W16(tp, IntrMask, 0); + if (rtl_is_8125(tp)) + RTL_W32(tp, IntrMask_8125, 0); + else + RTL_W16(tp, IntrMask, 0); tp->irq_enabled = 0; } @@ -1312,13 +1380,16 @@ static void rtl_irq_disable(struct rtl8169_private *tp) static void rtl_irq_enable(struct rtl8169_private *tp) { tp->irq_enabled = 1; - RTL_W16(tp, IntrMask, tp->irq_mask); + if (rtl_is_8125(tp)) + RTL_W32(tp, IntrMask_8125, tp->irq_mask); + else + RTL_W16(tp, IntrMask, tp->irq_mask); } static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp) { rtl_irq_disable(tp); - rtl_ack_events(tp, 0xffff); + rtl_ack_events(tp, 0xffffffff); /* PCI commit */ RTL_R8(tp, ChipCmd); } @@ -1377,7 +1448,6 @@ static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) { - unsigned int i, tmp; static const struct { u32 opt; u16 reg; @@ -1390,20 +1460,25 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) { WAKE_ANY, Config5, LanWake }, { WAKE_MAGIC, Config3, MagicPacket } }; + unsigned int i, tmp = ARRAY_SIZE(cfg); u8 options; rtl_unlock_config_regs(tp); if (rtl_is_8168evl_up(tp)) { - tmp = ARRAY_SIZE(cfg) - 1; + tmp--; if (wolopts & WAKE_MAGIC) rtl_eri_set_bits(tp, 0x0dc, ERIAR_MASK_0100, MagicPacket_v2); else rtl_eri_clear_bits(tp, 0x0dc, ERIAR_MASK_0100, MagicPacket_v2); - } else { - tmp = ARRAY_SIZE(cfg); + } else if (rtl_is_8125(tp)) { + tmp--; + if (wolopts & WAKE_MAGIC) + r8168_mac_ocp_modify(tp, 0xc0b6, 0, BIT(0)); + else + r8168_mac_ocp_modify(tp, 0xc0b6, BIT(0), 0); } for (i = 0; i < tmp; i++) { @@ -1414,18 +1489,22 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) } switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_17: + case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: options = RTL_R8(tp, Config1) & ~PMEnable; if (wolopts) options |= PMEnable; RTL_W8(tp, Config1, options); break; - default: + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_39 ... 
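rtl_get_events()/rtl_ack_events() above hide a register-width difference: pre-8125 chips expose 16-bit IntrStatus/IntrMask, while RTL8125 has 32-bit variants at new offsets. Condensing the pattern:

/* One accessor per direction, chosen by chip family, so the rest of
 * the IRQ path only ever sees a u32. */
static u32 get_events(struct rtl8169_private *tp)
{
	return rtl_is_8125(tp) ? RTL_R32(tp, IntrStatus_8125)
			       : RTL_R16(tp, IntrStatus);
}

This is also why the interrupt handler later in the patch tests (status & 0xffff) == 0xffff rather than status == 0xffff: a read from a surprise-removed PCI device returns all-ones, and only the low 16 bits are meaningful on the older chips.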
RTL_GIGA_MAC_VER_51: options = RTL_R8(tp, Config2) & ~PME_SIGNAL; if (wolopts) options |= PME_SIGNAL; RTL_W8(tp, Config2, options); break; + default: + break; } rtl_lock_config_regs(tp); @@ -1505,6 +1584,13 @@ static int rtl8169_set_features(struct net_device *dev, else rx_config &= ~(AcceptErr | AcceptRunt); + if (rtl_is_8125(tp)) { + if (features & NETIF_F_HW_VLAN_CTAG_RX) + rx_config |= RX_VLAN_8125; + else + rx_config &= ~RX_VLAN_8125; + } + RTL_W32(tp, RxConfig, rx_config); if (features & NETIF_F_RXCSUM) @@ -1512,10 +1598,12 @@ static int rtl8169_set_features(struct net_device *dev, else tp->cp_cmd &= ~RxChkSum; - if (features & NETIF_F_HW_VLAN_CTAG_RX) - tp->cp_cmd |= RxVlan; - else - tp->cp_cmd &= ~RxVlan; + if (!rtl_is_8125(tp)) { + if (features & NETIF_F_HW_VLAN_CTAG_RX) + tp->cp_cmd |= RxVlan; + else + tp->cp_cmd &= ~RxVlan; + } RTL_W16(tp, CPlusCmd, tp->cp_cmd); RTL_R16(tp, CPlusCmd); @@ -1814,6 +1902,9 @@ static int rtl_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) int i; u16 w; + if (rtl_is_8125(tp)) + return -EOPNOTSUPP; + memset(ec, 0, sizeof(*ec)); /* get rx/tx scale corresponding to current speed and CPlusCmd[0:1] */ @@ -1882,6 +1973,9 @@ static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) u16 w = 0, cp01; int i; + if (rtl_is_8125(tp)) + return -EOPNOTSUPP; + scale = rtl_coalesce_choose_scale(dev, max(p[0].usecs, p[1].usecs) * 1000, &cp01); if (IS_ERR(scale)) @@ -1929,144 +2023,40 @@ static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) return 0; } -static int rtl_get_eee_supp(struct rtl8169_private *tp) -{ - struct phy_device *phydev = tp->phydev; - int ret; - - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_34: - case RTL_GIGA_MAC_VER_35: - case RTL_GIGA_MAC_VER_36: - case RTL_GIGA_MAC_VER_38: - ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE); - break; - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: - ret = phy_read_paged(phydev, 0x0a5c, 0x12); - break; - default: - ret = -EPROTONOSUPPORT; - break; - } - - return ret; -} - -static int rtl_get_eee_lpadv(struct rtl8169_private *tp) -{ - struct phy_device *phydev = tp->phydev; - int ret; - - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_34: - case RTL_GIGA_MAC_VER_35: - case RTL_GIGA_MAC_VER_36: - case RTL_GIGA_MAC_VER_38: - ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE); - break; - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: - ret = phy_read_paged(phydev, 0x0a5d, 0x11); - break; - default: - ret = -EPROTONOSUPPORT; - break; - } - - return ret; -} - -static int rtl_get_eee_adv(struct rtl8169_private *tp) -{ - struct phy_device *phydev = tp->phydev; - int ret; - - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_34: - case RTL_GIGA_MAC_VER_35: - case RTL_GIGA_MAC_VER_36: - case RTL_GIGA_MAC_VER_38: - ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV); - break; - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: - ret = phy_read_paged(phydev, 0x0a5d, 0x10); - break; - default: - ret = -EPROTONOSUPPORT; - break; - } - - return ret; -} - -static int rtl_set_eee_adv(struct rtl8169_private *tp, int val) -{ - struct phy_device *phydev = tp->phydev; - int ret = 0; - - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_34: - case RTL_GIGA_MAC_VER_35: - case RTL_GIGA_MAC_VER_36: - case RTL_GIGA_MAC_VER_38: - ret = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val); - break; - case RTL_GIGA_MAC_VER_40 ... 
RTL_GIGA_MAC_VER_51: - phy_write_paged(phydev, 0x0a5d, 0x10, val); - break; - default: - ret = -EPROTONOSUPPORT; - break; - } - - return ret; -} - static int rtl8169_get_eee(struct net_device *dev, struct ethtool_eee *data) { struct rtl8169_private *tp = netdev_priv(dev); struct device *d = tp_to_dev(tp); int ret; + if (!rtl_supports_eee(tp)) + return -EOPNOTSUPP; + pm_runtime_get_noresume(d); if (!pm_runtime_active(d)) { ret = -EOPNOTSUPP; - goto out; + } else { + ret = phy_ethtool_get_eee(tp->phydev, data); } - /* Get Supported EEE */ - ret = rtl_get_eee_supp(tp); - if (ret < 0) - goto out; - data->supported = mmd_eee_cap_to_ethtool_sup_t(ret); - - /* Get advertisement EEE */ - ret = rtl_get_eee_adv(tp); - if (ret < 0) - goto out; - data->advertised = mmd_eee_adv_to_ethtool_adv_t(ret); - data->eee_enabled = !!data->advertised; - - /* Get LP advertisement EEE */ - ret = rtl_get_eee_lpadv(tp); - if (ret < 0) - goto out; - data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(ret); - data->eee_active = !!(data->advertised & data->lp_advertised); -out: pm_runtime_put_noidle(d); - return ret < 0 ? ret : 0; + + return ret; } static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data) { struct rtl8169_private *tp = netdev_priv(dev); struct device *d = tp_to_dev(tp); - int old_adv, adv = 0, cap, ret; + int ret; + + if (!rtl_supports_eee(tp)) + return -EOPNOTSUPP; pm_runtime_get_noresume(d); - if (!dev->phydev || !pm_runtime_active(d)) { + if (!pm_runtime_active(d)) { ret = -EOPNOTSUPP; goto out; } @@ -2077,38 +2067,10 @@ static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data) goto out; } - /* Get Supported EEE */ - ret = rtl_get_eee_supp(tp); - if (ret < 0) - goto out; - cap = ret; - - ret = rtl_get_eee_adv(tp); - if (ret < 0) - goto out; - old_adv = ret; - - if (data->eee_enabled) { - adv = !data->advertised ? cap : - ethtool_adv_to_mmd_eee_adv_t(data->advertised) & cap; - /* Mask prohibited EEE modes */ - adv &= ~dev->phydev->eee_broken_modes; - } - - if (old_adv != adv) { - ret = rtl_set_eee_adv(tp, adv); - if (ret < 0) - goto out; - - /* Restart autonegotiation so the new modes get sent to the - * link partner. - */ - ret = phy_restart_aneg(dev->phydev); - } - + ret = phy_ethtool_set_eee(tp->phydev, data); out: pm_runtime_put_noidle(d); - return ret < 0 ? ret : 0; + return ret; } static const struct ethtool_ops rtl8169_ethtool_ops = { @@ -2135,10 +2097,11 @@ static const struct ethtool_ops rtl8169_ethtool_ops = { static void rtl_enable_eee(struct rtl8169_private *tp) { - int supported = rtl_get_eee_supp(tp); + struct phy_device *phydev = tp->phydev; + int supported = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE); if (supported > 0) - rtl_set_eee_adv(tp, supported); + phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, supported); } static void rtl8169_get_mac_version(struct rtl8169_private *tp) @@ -2159,6 +2122,10 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp) u16 val; u16 mac_version; } mac_info[] = { + /* 8125 family. */ + { 0x7cf, 0x608, RTL_GIGA_MAC_VER_60 }, + { 0x7c8, 0x608, RTL_GIGA_MAC_VER_61 }, + /* 8168EP family. 
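The EEE rework above deletes four open-coded MMD register helpers in favour of phylib's phy_ethtool_get_eee()/phy_ethtool_set_eee(), which read the ability/advertisement/link-partner registers themselves and restart autonegotiation when the advertisement changes. A minimal ethtool hook in that style (foo_* names and supports_eee() are illustrative assumptions, not this driver's code):

#include <linux/phy.h>
#include <linux/ethtool.h>

static int foo_get_eee(struct net_device *dev, struct ethtool_eee *data)
{
	struct foo_priv *priv = netdev_priv(dev);

	if (!supports_eee(priv))	/* driver capability check */
		return -EOPNOTSUPP;

	/* phylib fills supported/advertised/lp_advertised itself */
	return phy_ethtool_get_eee(dev->phydev, data);
}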
*/ { 0x7cf, 0x502, RTL_GIGA_MAC_VER_51 }, { 0x7cf, 0x501, RTL_GIGA_MAC_VER_50 }, @@ -2304,6 +2271,12 @@ static void rtl8168_config_eee_mac(struct rtl8169_private *tp) rtl_eri_set_bits(tp, 0x1b0, ERIAR_MASK_1111, 0x0003); } +static void rtl8125_config_eee_mac(struct rtl8169_private *tp) +{ + r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0)); + r8168_mac_ocp_modify(tp, 0xeb62, 0, BIT(2) | BIT(1)); +} + static void rtl8168f_config_eee_phy(struct rtl8169_private *tp) { struct phy_device *phydev = tp->phydev; @@ -2324,6 +2297,26 @@ static void rtl8168g_config_eee_phy(struct rtl8169_private *tp) phy_modify_paged(tp->phydev, 0x0a43, 0x11, 0, BIT(4)); } +static void rtl8168h_config_eee_phy(struct rtl8169_private *tp) +{ + struct phy_device *phydev = tp->phydev; + + rtl8168g_config_eee_phy(tp); + + phy_modify_paged(phydev, 0xa4a, 0x11, 0x0000, 0x0200); + phy_modify_paged(phydev, 0xa42, 0x14, 0x0000, 0x0080); +} + +static void rtl8125_config_eee_phy(struct rtl8169_private *tp) +{ + struct phy_device *phydev = tp->phydev; + + rtl8168h_config_eee_phy(tp); + + phy_modify_paged(phydev, 0xa6d, 0x12, 0x0001, 0x0000); + phy_modify_paged(phydev, 0xa6d, 0x14, 0x0010, 0x0000); +} + static void rtl8169s_hw_phy_config(struct rtl8169_private *tp) { static const struct phy_reg phy_reg_init[] = { @@ -3391,7 +3384,7 @@ static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp) phy_modify_paged(tp->phydev, 0x0a44, 0x11, BIT(7), 0); rtl8168g_disable_aldps(tp); - rtl8168g_config_eee_phy(tp); + rtl8168h_config_eee_phy(tp); rtl_enable_eee(tp); } @@ -3644,6 +3637,134 @@ static void rtl8106e_hw_phy_config(struct rtl8169_private *tp) rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000); } +static void rtl8125_1_hw_phy_config(struct rtl8169_private *tp) +{ + struct phy_device *phydev = tp->phydev; + + phy_modify_paged(phydev, 0xad4, 0x10, 0x03ff, 0x0084); + phy_modify_paged(phydev, 0xad4, 0x17, 0x0000, 0x0010); + phy_modify_paged(phydev, 0xad1, 0x13, 0x03ff, 0x0006); + phy_modify_paged(phydev, 0xad3, 0x11, 0x003f, 0x0006); + phy_modify_paged(phydev, 0xac0, 0x14, 0x0000, 0x1100); + phy_modify_paged(phydev, 0xac8, 0x15, 0xf000, 0x7000); + phy_modify_paged(phydev, 0xad1, 0x14, 0x0000, 0x0400); + phy_modify_paged(phydev, 0xad1, 0x15, 0x0000, 0x03ff); + phy_modify_paged(phydev, 0xad1, 0x16, 0x0000, 0x03ff); + + phy_write(phydev, 0x1f, 0x0a43); + phy_write(phydev, 0x13, 0x80ea); + phy_modify(phydev, 0x14, 0xff00, 0xc400); + phy_write(phydev, 0x13, 0x80eb); + phy_modify(phydev, 0x14, 0x0700, 0x0300); + phy_write(phydev, 0x13, 0x80f8); + phy_modify(phydev, 0x14, 0xff00, 0x1c00); + phy_write(phydev, 0x13, 0x80f1); + phy_modify(phydev, 0x14, 0xff00, 0x3000); + phy_write(phydev, 0x13, 0x80fe); + phy_modify(phydev, 0x14, 0xff00, 0xa500); + phy_write(phydev, 0x13, 0x8102); + phy_modify(phydev, 0x14, 0xff00, 0x5000); + phy_write(phydev, 0x13, 0x8105); + phy_modify(phydev, 0x14, 0xff00, 0x3300); + phy_write(phydev, 0x13, 0x8100); + phy_modify(phydev, 0x14, 0xff00, 0x7000); + phy_write(phydev, 0x13, 0x8104); + phy_modify(phydev, 0x14, 0xff00, 0xf000); + phy_write(phydev, 0x13, 0x8106); + phy_modify(phydev, 0x14, 0xff00, 0x6500); + phy_write(phydev, 0x13, 0x80dc); + phy_modify(phydev, 0x14, 0xff00, 0xed00); + phy_write(phydev, 0x13, 0x80df); + phy_set_bits(phydev, 0x14, BIT(8)); + phy_write(phydev, 0x13, 0x80e1); + phy_clear_bits(phydev, 0x14, BIT(8)); + phy_write(phydev, 0x1f, 0x0000); + + phy_modify_paged(phydev, 0xbf0, 0x13, 0x003f, 0x0038); + phy_write_paged(phydev, 0xa43, 0x13, 0x819f); + phy_write_paged(phydev, 0xa43, 0x14, 
0xd0b6); + + phy_write_paged(phydev, 0xbc3, 0x12, 0x5555); + phy_modify_paged(phydev, 0xbf0, 0x15, 0x0e00, 0x0a00); + phy_modify_paged(phydev, 0xa5c, 0x10, 0x0400, 0x0000); + phy_modify_paged(phydev, 0xa44, 0x11, 0x0000, 0x0800); + + rtl8125_config_eee_phy(tp); + rtl_enable_eee(tp); +} + +static void rtl8125_2_hw_phy_config(struct rtl8169_private *tp) +{ + struct phy_device *phydev = tp->phydev; + int i; + + phy_modify_paged(phydev, 0xad4, 0x17, 0x0000, 0x0010); + phy_modify_paged(phydev, 0xad1, 0x13, 0x03ff, 0x03ff); + phy_modify_paged(phydev, 0xad3, 0x11, 0x003f, 0x0006); + phy_modify_paged(phydev, 0xac0, 0x14, 0x1100, 0x0000); + phy_modify_paged(phydev, 0xacc, 0x10, 0x0003, 0x0002); + phy_modify_paged(phydev, 0xad4, 0x10, 0x00e7, 0x0044); + phy_modify_paged(phydev, 0xac1, 0x12, 0x0080, 0x0000); + phy_modify_paged(phydev, 0xac8, 0x10, 0x0300, 0x0000); + phy_modify_paged(phydev, 0xac5, 0x17, 0x0007, 0x0002); + phy_write_paged(phydev, 0xad4, 0x16, 0x00a8); + phy_write_paged(phydev, 0xac5, 0x16, 0x01ff); + phy_modify_paged(phydev, 0xac8, 0x15, 0x00f0, 0x0030); + + phy_write(phydev, 0x1f, 0x0b87); + phy_write(phydev, 0x16, 0x80a2); + phy_write(phydev, 0x17, 0x0153); + phy_write(phydev, 0x16, 0x809c); + phy_write(phydev, 0x17, 0x0153); + phy_write(phydev, 0x1f, 0x0000); + + phy_write(phydev, 0x1f, 0x0a43); + phy_write(phydev, 0x13, 0x81B3); + phy_write(phydev, 0x14, 0x0043); + phy_write(phydev, 0x14, 0x00A7); + phy_write(phydev, 0x14, 0x00D6); + phy_write(phydev, 0x14, 0x00EC); + phy_write(phydev, 0x14, 0x00F6); + phy_write(phydev, 0x14, 0x00FB); + phy_write(phydev, 0x14, 0x00FD); + phy_write(phydev, 0x14, 0x00FF); + phy_write(phydev, 0x14, 0x00BB); + phy_write(phydev, 0x14, 0x0058); + phy_write(phydev, 0x14, 0x0029); + phy_write(phydev, 0x14, 0x0013); + phy_write(phydev, 0x14, 0x0009); + phy_write(phydev, 0x14, 0x0004); + phy_write(phydev, 0x14, 0x0002); + for (i = 0; i < 25; i++) + phy_write(phydev, 0x14, 0x0000); + + phy_write(phydev, 0x13, 0x8257); + phy_write(phydev, 0x14, 0x020F); + + phy_write(phydev, 0x13, 0x80EA); + phy_write(phydev, 0x14, 0x7843); + phy_write(phydev, 0x1f, 0x0000); + + rtl_apply_firmware(tp); + + phy_modify_paged(phydev, 0xd06, 0x14, 0x0000, 0x2000); + + phy_write(phydev, 0x1f, 0x0a43); + phy_write(phydev, 0x13, 0x81a2); + phy_set_bits(phydev, 0x14, BIT(8)); + phy_write(phydev, 0x1f, 0x0000); + + phy_modify_paged(phydev, 0xb54, 0x16, 0xff00, 0xdb00); + phy_modify_paged(phydev, 0xa45, 0x12, 0x0001, 0x0000); + phy_modify_paged(phydev, 0xa5d, 0x12, 0x0000, 0x0020); + phy_modify_paged(phydev, 0xad4, 0x17, 0x0010, 0x0000); + phy_modify_paged(phydev, 0xa86, 0x15, 0x0001, 0x0000); + phy_modify_paged(phydev, 0xa44, 0x11, 0x0000, 0x0800); + + rtl8125_config_eee_phy(tp); + rtl_enable_eee(tp); +} + static void rtl_hw_phy_config(struct net_device *dev) { static const rtl_generic_fct phy_configs[] = { @@ -3699,6 +3820,8 @@ static void rtl_hw_phy_config(struct net_device *dev) [RTL_GIGA_MAC_VER_49] = rtl8168ep_1_hw_phy_config, [RTL_GIGA_MAC_VER_50] = rtl8168ep_2_hw_phy_config, [RTL_GIGA_MAC_VER_51] = rtl8168ep_2_hw_phy_config, + [RTL_GIGA_MAC_VER_60] = rtl8125_1_hw_phy_config, + [RTL_GIGA_MAC_VER_61] = rtl8125_2_hw_phy_config, }; struct rtl8169_private *tp = netdev_priv(dev); @@ -3826,6 +3949,8 @@ static void rtl_pll_power_down(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_48: case RTL_GIGA_MAC_VER_50: case RTL_GIGA_MAC_VER_51: + case RTL_GIGA_MAC_VER_60: + case RTL_GIGA_MAC_VER_61: RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80); break; case RTL_GIGA_MAC_VER_40: @@ -3855,6 
+3980,8 @@ static void rtl_pll_power_up(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_48: case RTL_GIGA_MAC_VER_50: case RTL_GIGA_MAC_VER_51: + case RTL_GIGA_MAC_VER_60: + case RTL_GIGA_MAC_VER_61: RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0); break; case RTL_GIGA_MAC_VER_40: @@ -3887,6 +4014,10 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF); break; + case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_61: + RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_VLAN_8125 | + RX_DMA_BURST); + break; default: RTL_W32(tp, RxConfig, RX128_INT_EN | RX_DMA_BURST); break; @@ -4146,54 +4277,46 @@ static void rtl8169_set_magic_reg(struct rtl8169_private *tp, unsigned mac_versi static void rtl_set_rx_mode(struct net_device *dev) { + u32 rx_mode = AcceptBroadcast | AcceptMyPhys | AcceptMulticast; + /* Multicast hash filter */ + u32 mc_filter[2] = { 0xffffffff, 0xffffffff }; struct rtl8169_private *tp = netdev_priv(dev); - u32 mc_filter[2]; /* Multicast hash filter */ - int rx_mode; - u32 tmp = 0; + u32 tmp; if (dev->flags & IFF_PROMISC) { /* Unconditionally log net taps. */ netif_notice(tp, link, dev, "Promiscuous mode enabled\n"); - rx_mode = - AcceptBroadcast | AcceptMulticast | AcceptMyPhys | - AcceptAllPhys; - mc_filter[1] = mc_filter[0] = 0xffffffff; - } else if ((netdev_mc_count(dev) > multicast_filter_limit) || - (dev->flags & IFF_ALLMULTI)) { - /* Too many to filter perfectly -- accept all multicasts. */ - rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; - mc_filter[1] = mc_filter[0] = 0xffffffff; + rx_mode |= AcceptAllPhys; + } else if (netdev_mc_count(dev) > MC_FILTER_LIMIT || + dev->flags & IFF_ALLMULTI || + tp->mac_version == RTL_GIGA_MAC_VER_35) { + /* accept all multicasts */ + } else if (netdev_mc_empty(dev)) { + rx_mode &= ~AcceptMulticast; } else { struct netdev_hw_addr *ha; - rx_mode = AcceptBroadcast | AcceptMyPhys; mc_filter[1] = mc_filter[0] = 0; netdev_for_each_mc_addr(ha, dev) { - int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; - mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); - rx_mode |= AcceptMulticast; + u32 bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; + mc_filter[bit_nr >> 5] |= BIT(bit_nr & 31); + } + + if (tp->mac_version > RTL_GIGA_MAC_VER_06) { + tmp = mc_filter[0]; + mc_filter[0] = swab32(mc_filter[1]); + mc_filter[1] = swab32(tmp); } } if (dev->features & NETIF_F_RXALL) rx_mode |= (AcceptErr | AcceptRunt); - tmp = (RTL_R32(tp, RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode; - - if (tp->mac_version > RTL_GIGA_MAC_VER_06) { - u32 data = mc_filter[0]; - - mc_filter[0] = swab32(mc_filter[1]); - mc_filter[1] = swab32(data); - } - - if (tp->mac_version == RTL_GIGA_MAC_VER_35) - mc_filter[1] = mc_filter[0] = 0xffffffff; - RTL_W32(tp, MAR0 + 4, mc_filter[1]); RTL_W32(tp, MAR0 + 0, mc_filter[0]); - RTL_W32(tp, RxConfig, tmp); + tmp = RTL_R32(tp, RxConfig); + RTL_W32(tp, RxConfig, (tmp & ~RX_CONFIG_ACCEPT_MASK) | rx_mode); } DECLARE_RTL_COND(rtl_csiar_cond) @@ -4407,7 +4530,7 @@ static void rtl_hw_start_8168c_2(struct rtl8169_private *tp) { static const struct ephy_info e_info_8168c_2[] = { { 0x01, 0, 0x0001 }, - { 0x03, 0x0400, 0x0220 } + { 0x03, 0x0400, 0x0020 } }; rtl_set_def_aspm_entry_latency(tp); @@ -4454,7 +4577,8 @@ static void rtl_hw_start_8168d_4(struct rtl8169_private *tp) static const struct ephy_info e_info_8168d_4[] = { { 0x0b, 0x0000, 0x0048 }, { 0x19, 0x0020, 0x0050 }, - { 0x0c, 0x0100, 0x0020 } + { 0x0c, 0x0100, 0x0020 }, + 
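The rtl_set_rx_mode() rewrite above keeps the classic 64-bin multicast hash: the top six bits of the Ethernet CRC of each address select one bit in a pair of 32-bit filter words. The core computation, extracted as a sketch:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>

/* Build the 2x32-bit multicast hash filter from the device's
 * multicast list; bit_nr is the top six CRC bits (0..63). */
static void build_mc_filter(struct net_device *dev, u32 mc_filter[2])
{
	struct netdev_hw_addr *ha;

	mc_filter[0] = mc_filter[1] = 0;
	netdev_for_each_mc_addr(ha, dev) {
		u32 bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;

		mc_filter[bit_nr >> 5] |= BIT(bit_nr & 31);
	}
}

Starting the filter at all-ones and only narrowing it in the exact-filter branch, as the rewrite does, means the promiscuous, all-multi, and over-limit cases need no special assignment at all.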
{ 0x10, 0x0004, 0x0000 }, }; rtl_set_def_aspm_entry_latency(tp); @@ -4504,7 +4628,9 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) { static const struct ephy_info e_info_8168e_2[] = { { 0x09, 0x0000, 0x0080 }, - { 0x19, 0x0000, 0x0224 } + { 0x19, 0x0000, 0x0224 }, + { 0x00, 0x0000, 0x0004 }, + { 0x0c, 0x3df0, 0x0200 }, }; rtl_set_def_aspm_entry_latency(tp); @@ -4566,7 +4692,9 @@ static void rtl_hw_start_8168f_1(struct rtl8169_private *tp) { 0x06, 0x00c0, 0x0020 }, { 0x08, 0x0001, 0x0002 }, { 0x09, 0x0000, 0x0080 }, - { 0x19, 0x0000, 0x0224 } + { 0x19, 0x0000, 0x0224 }, + { 0x00, 0x0000, 0x0004 }, + { 0x0c, 0x3df0, 0x0200 }, }; rtl_hw_start_8168f(tp); @@ -4581,8 +4709,9 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp) static const struct ephy_info e_info_8168f_1[] = { { 0x06, 0x00c0, 0x0020 }, { 0x0f, 0xffff, 0x5200 }, - { 0x1e, 0x0000, 0x4000 }, - { 0x19, 0x0000, 0x0224 } + { 0x19, 0x0000, 0x0224 }, + { 0x00, 0x0000, 0x0004 }, + { 0x0c, 0x3df0, 0x0200 }, }; rtl_hw_start_8168f(tp); @@ -4621,8 +4750,8 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp) static void rtl_hw_start_8168g_1(struct rtl8169_private *tp) { static const struct ephy_info e_info_8168g_1[] = { - { 0x00, 0x0000, 0x0008 }, - { 0x0c, 0x37d0, 0x0820 }, + { 0x00, 0x0008, 0x0000 }, + { 0x0c, 0x3ff0, 0x0820 }, { 0x1e, 0x0000, 0x0001 }, { 0x19, 0x8000, 0x0000 } }; @@ -4638,10 +4767,15 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp) static void rtl_hw_start_8168g_2(struct rtl8169_private *tp) { static const struct ephy_info e_info_8168g_2[] = { - { 0x00, 0x0000, 0x0008 }, - { 0x0c, 0x3df0, 0x0200 }, - { 0x19, 0xffff, 0xfc00 }, - { 0x1e, 0xffff, 0x20eb } + { 0x00, 0x0008, 0x0000 }, + { 0x0c, 0x3ff0, 0x0820 }, + { 0x19, 0xffff, 0x7c00 }, + { 0x1e, 0xffff, 0x20eb }, + { 0x0d, 0xffff, 0x1666 }, + { 0x00, 0xffff, 0x10a3 }, + { 0x06, 0xffff, 0xf050 }, + { 0x04, 0x0000, 0x0010 }, + { 0x1d, 0x4000, 0x0000 }, }; rtl_hw_start_8168g(tp); @@ -4655,11 +4789,16 @@ static void rtl_hw_start_8168g_2(struct rtl8169_private *tp) static void rtl_hw_start_8411_2(struct rtl8169_private *tp) { static const struct ephy_info e_info_8411_2[] = { - { 0x00, 0x0000, 0x0008 }, - { 0x0c, 0x3df0, 0x0200 }, - { 0x0f, 0xffff, 0x5200 }, - { 0x19, 0x0020, 0x0000 }, - { 0x1e, 0x0000, 0x2000 } + { 0x00, 0x0008, 0x0000 }, + { 0x0c, 0x37d0, 0x0820 }, + { 0x1e, 0x0000, 0x0001 }, + { 0x19, 0x8021, 0x0000 }, + { 0x1e, 0x0000, 0x2000 }, + { 0x0d, 0x0100, 0x0200 }, + { 0x00, 0x0000, 0x0080 }, + { 0x06, 0x0000, 0x0010 }, + { 0x04, 0x0000, 0x0010 }, + { 0x1d, 0x0000, 0x4000 }, }; rtl_hw_start_8168g(tp); @@ -4809,16 +4948,15 @@ static void rtl_hw_start_8411_2(struct rtl8169_private *tp) static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) { - int rg_saw_cnt; - u32 data; static const struct ephy_info e_info_8168h_1[] = { { 0x1e, 0x0800, 0x0001 }, { 0x1d, 0x0000, 0x0800 }, { 0x05, 0xffff, 0x2089 }, { 0x06, 0xffff, 0x5881 }, - { 0x04, 0xffff, 0x154a }, + { 0x04, 0xffff, 0x854a }, { 0x01, 0xffff, 0x068b } }; + int rg_saw_cnt; /* disable aspm and clock request before access ephy */ rtl_hw_aspm_clkreq_enable(tp, false); @@ -4863,31 +5001,13 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) sw_cnt_1ms_ini = 16000000/rg_saw_cnt; sw_cnt_1ms_ini &= 0x0fff; - data = r8168_mac_ocp_read(tp, 0xd412); - data &= ~0x0fff; - data |= sw_cnt_1ms_ini; - r8168_mac_ocp_write(tp, 0xd412, data); + r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, sw_cnt_1ms_ini); } - data = r8168_mac_ocp_read(tp, 0xe056); - data &= ~0xf0; - data |= 
0x70; - r8168_mac_ocp_write(tp, 0xe056, data); - - data = r8168_mac_ocp_read(tp, 0xe052); - data &= ~0x6000; - data |= 0x8008; - r8168_mac_ocp_write(tp, 0xe052, data); - - data = r8168_mac_ocp_read(tp, 0xe0d6); - data &= ~0x01ff; - data |= 0x017f; - r8168_mac_ocp_write(tp, 0xe0d6, data); - - data = r8168_mac_ocp_read(tp, 0xd420); - data &= ~0x0fff; - data |= 0x047f; - r8168_mac_ocp_write(tp, 0xd420, data); + r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070); + r8168_mac_ocp_modify(tp, 0xe052, 0x6000, 0x8008); + r8168_mac_ocp_modify(tp, 0xe0d6, 0x01ff, 0x017f); + r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f); r8168_mac_ocp_write(tp, 0xe63e, 0x0001); r8168_mac_ocp_write(tp, 0xe63e, 0x0000); @@ -4969,12 +5089,11 @@ static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp) static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp) { - u32 data; static const struct ephy_info e_info_8168ep_3[] = { - { 0x00, 0xffff, 0x10a3 }, - { 0x19, 0xffff, 0x7c00 }, - { 0x1e, 0xffff, 0x20eb }, - { 0x0d, 0xffff, 0x1666 } + { 0x00, 0x0000, 0x0080 }, + { 0x0d, 0x0100, 0x0200 }, + { 0x19, 0x8021, 0x0000 }, + { 0x1e, 0x0000, 0x2000 }, }; /* disable aspm and clock request before access ephy */ @@ -4986,18 +5105,9 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp) RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); - data = r8168_mac_ocp_read(tp, 0xd3e2); - data &= 0xf000; - data |= 0x0271; - r8168_mac_ocp_write(tp, 0xd3e2, data); - - data = r8168_mac_ocp_read(tp, 0xd3e4); - data &= 0xff00; - r8168_mac_ocp_write(tp, 0xd3e4, data); - - data = r8168_mac_ocp_read(tp, 0xe860); - data |= 0x0080; - r8168_mac_ocp_write(tp, 0xe860, data); + r8168_mac_ocp_modify(tp, 0xd3e2, 0x0fff, 0x0271); + r8168_mac_ocp_modify(tp, 0xd3e4, 0x00ff, 0x0000); + r8168_mac_ocp_modify(tp, 0xe860, 0x0000, 0x0080); rtl_hw_aspm_clkreq_enable(tp, true); } @@ -5125,6 +5235,128 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp) rtl_hw_aspm_clkreq_enable(tp, true); } +DECLARE_RTL_COND(rtl_mac_ocp_e00e_cond) +{ + return r8168_mac_ocp_read(tp, 0xe00e) & BIT(13); +} + +static void rtl_hw_start_8125_common(struct rtl8169_private *tp) +{ + rtl_pcie_state_l2l3_disable(tp); + + RTL_W16(tp, 0x382, 0x221b); + RTL_W8(tp, 0x4500, 0); + RTL_W16(tp, 0x4800, 0); + + /* disable UPS */ + r8168_mac_ocp_modify(tp, 0xd40a, 0x0010, 0x0000); + + RTL_W8(tp, Config1, RTL_R8(tp, Config1) & ~0x10); + + r8168_mac_ocp_write(tp, 0xc140, 0xffff); + r8168_mac_ocp_write(tp, 0xc142, 0xffff); + + r8168_mac_ocp_modify(tp, 0xd3e2, 0x0fff, 0x03a9); + r8168_mac_ocp_modify(tp, 0xd3e4, 0x00ff, 0x0000); + r8168_mac_ocp_modify(tp, 0xe860, 0x0000, 0x0080); + + /* disable new tx descriptor format */ + r8168_mac_ocp_modify(tp, 0xeb58, 0x0001, 0x0000); + + r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400); + r8168_mac_ocp_modify(tp, 0xe63e, 0x0c30, 0x0020); + r8168_mac_ocp_modify(tp, 0xc0b4, 0x0000, 0x000c); + r8168_mac_ocp_modify(tp, 0xeb6a, 0x00ff, 0x0033); + r8168_mac_ocp_modify(tp, 0xeb50, 0x03e0, 0x0040); + r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0030); + r8168_mac_ocp_modify(tp, 0xe040, 0x1000, 0x0000); + r8168_mac_ocp_modify(tp, 0xe0c0, 0x4f0f, 0x4403); + r8168_mac_ocp_modify(tp, 0xe052, 0x0080, 0x0067); + r8168_mac_ocp_modify(tp, 0xc0ac, 0x0080, 0x1f00); + r8168_mac_ocp_modify(tp, 0xd430, 0x0fff, 0x047f); + r8168_mac_ocp_modify(tp, 0xe84c, 0x0000, 0x00c0); + r8168_mac_ocp_modify(tp, 0xea1c, 0x0004, 0x0000); + r8168_mac_ocp_modify(tp, 0xeb54, 0x0000, 0x0001); + udelay(1); + r8168_mac_ocp_modify(tp, 
0xeb54, 0x0001, 0x0000); + RTL_W16(tp, 0x1880, RTL_R16(tp, 0x1880) & ~0x0030); + + r8168_mac_ocp_write(tp, 0xe098, 0xc302); + + rtl_udelay_loop_wait_low(tp, &rtl_mac_ocp_e00e_cond, 1000, 10); + + rtl8125_config_eee_mac(tp); + + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); + udelay(10); +} + +static void rtl_hw_start_8125_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8125_1[] = { + { 0x01, 0xffff, 0xa812 }, + { 0x09, 0xffff, 0x520c }, + { 0x04, 0xffff, 0xd000 }, + { 0x0d, 0xffff, 0xf702 }, + { 0x0a, 0xffff, 0x8653 }, + { 0x06, 0xffff, 0x001e }, + { 0x08, 0xffff, 0x3595 }, + { 0x20, 0xffff, 0x9455 }, + { 0x21, 0xffff, 0x99ff }, + { 0x02, 0xffff, 0x6046 }, + { 0x29, 0xffff, 0xfe00 }, + { 0x23, 0xffff, 0xab62 }, + + { 0x41, 0xffff, 0xa80c }, + { 0x49, 0xffff, 0x520c }, + { 0x44, 0xffff, 0xd000 }, + { 0x4d, 0xffff, 0xf702 }, + { 0x4a, 0xffff, 0x8653 }, + { 0x46, 0xffff, 0x001e }, + { 0x48, 0xffff, 0x3595 }, + { 0x60, 0xffff, 0x9455 }, + { 0x61, 0xffff, 0x99ff }, + { 0x42, 0xffff, 0x6046 }, + { 0x69, 0xffff, 0xfe00 }, + { 0x63, 0xffff, 0xab62 }, + }; + + rtl_set_def_aspm_entry_latency(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8125_1); + + rtl_hw_start_8125_common(tp); +} + +static void rtl_hw_start_8125_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8125_2[] = { + { 0x04, 0xffff, 0xd000 }, + { 0x0a, 0xffff, 0x8653 }, + { 0x23, 0xffff, 0xab66 }, + { 0x20, 0xffff, 0x9455 }, + { 0x21, 0xffff, 0x99ff }, + { 0x29, 0xffff, 0xfe04 }, + + { 0x44, 0xffff, 0xd000 }, + { 0x4a, 0xffff, 0x8653 }, + { 0x63, 0xffff, 0xab66 }, + { 0x60, 0xffff, 0x9455 }, + { 0x61, 0xffff, 0x99ff }, + { 0x69, 0xffff, 0xfe04 }, + }; + + rtl_set_def_aspm_entry_latency(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8125_2); + + rtl_hw_start_8125_common(tp); +} + static void rtl_hw_config(struct rtl8169_private *tp) { static const rtl_generic_fct hw_configs[] = { @@ -5173,12 +5405,25 @@ static void rtl_hw_config(struct rtl8169_private *tp) [RTL_GIGA_MAC_VER_49] = rtl_hw_start_8168ep_1, [RTL_GIGA_MAC_VER_50] = rtl_hw_start_8168ep_2, [RTL_GIGA_MAC_VER_51] = rtl_hw_start_8168ep_3, + [RTL_GIGA_MAC_VER_60] = rtl_hw_start_8125_1, + [RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125_2, }; if (hw_configs[tp->mac_version]) hw_configs[tp->mac_version](tp); } +static void rtl_hw_start_8125(struct rtl8169_private *tp) +{ + int i; + + /* disable interrupt coalescing */ + for (i = 0xa00; i < 0xb00; i += 4) + RTL_W32(tp, i, 0); + + rtl_hw_config(tp); +} + static void rtl_hw_start_8168(struct rtl8169_private *tp) { if (tp->mac_version == RTL_GIGA_MAC_VER_13 || @@ -5192,6 +5437,9 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp) RTL_W8(tp, MaxTxPacketSize, TxPacketMax); rtl_hw_config(tp); + + /* disable interrupt coalescing */ + RTL_W16(tp, IntrMitigate, 0x0000); } static void rtl_hw_start_8169(struct rtl8169_private *tp) @@ -5215,6 +5463,9 @@ static void rtl_hw_start_8169(struct rtl8169_private *tp) rtl8169_set_magic_reg(tp, tp->mac_version); RTL_W32(tp, RxMissed, 0); + + /* disable interrupt coalescing */ + RTL_W16(tp, IntrMitigate, 0x0000); } static void rtl_hw_start(struct rtl8169_private *tp) @@ -5226,6 +5477,8 @@ static void rtl_hw_start(struct rtl8169_private *tp) if (tp->mac_version <= RTL_GIGA_MAC_VER_06) rtl_hw_start_8169(tp); + else if (rtl_is_8125(tp)) + rtl_hw_start_8125(tp); else rtl_hw_start_8168(tp); 
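rtl_hw_config() above shows the driver's dispatch idiom: a sparse array of function pointers indexed by mac_version, filled with designated initializers so unhandled versions stay NULL. Reduced to its skeleton:

typedef void (*rtl_generic_fct)(struct rtl8169_private *tp);

/* Sparse table: only named entries get a handler; the NULL check
 * makes a new enum value without a handler fail soft. */
static const rtl_generic_fct hw_configs[] = {
	[RTL_GIGA_MAC_VER_60] = rtl_hw_start_8125_1,
	[RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125_2,
};

if (hw_configs[tp->mac_version])
	hw_configs[tp->mac_version](tp);

The same pattern is used for phy_configs earlier in the patch, which keeps per-chip quirks out of the common start path.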
@@ -5233,17 +5486,12 @@ static void rtl_hw_start(struct rtl8169_private *tp) rtl_set_rx_tx_desc_registers(tp); rtl_lock_config_regs(tp); - /* disable interrupt coalescing */ - RTL_W16(tp, IntrMitigate, 0x0000); /* Initially a 10 us delay. Turned it into a PCI commit. - FR */ - RTL_R8(tp, IntrMask); + RTL_R16(tp, CPlusCmd); RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); rtl_init_rxcfg(tp); rtl_set_tx_config_registers(tp); - rtl_set_rx_mode(tp->dev); - /* no early-rx interrupts */ - RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000); rtl_irq_enable(tp); } @@ -5268,17 +5516,6 @@ static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc) desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask); } -static void rtl8169_free_rx_databuff(struct rtl8169_private *tp, - void **data_buff, struct RxDesc *desc) -{ - dma_unmap_single(tp_to_dev(tp), le64_to_cpu(desc->addr), - R8169_RX_BUF_SIZE, DMA_FROM_DEVICE); - - kfree(*data_buff); - *data_buff = NULL; - rtl8169_make_unusable_by_asic(desc); -} - static inline void rtl8169_mark_to_asic(struct RxDesc *desc) { u32 eor = le32_to_cpu(desc->opts1) & RingEnd; @@ -5289,49 +5526,43 @@ static inline void rtl8169_mark_to_asic(struct RxDesc *desc) desc->opts1 = cpu_to_le32(DescOwn | eor | R8169_RX_BUF_SIZE); } -static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp, - struct RxDesc *desc) +static struct page *rtl8169_alloc_rx_data(struct rtl8169_private *tp, + struct RxDesc *desc) { - void *data; - dma_addr_t mapping; struct device *d = tp_to_dev(tp); int node = dev_to_node(d); + dma_addr_t mapping; + struct page *data; - data = kmalloc_node(R8169_RX_BUF_SIZE, GFP_KERNEL, node); + data = alloc_pages_node(node, GFP_KERNEL, get_order(R8169_RX_BUF_SIZE)); if (!data) return NULL; - /* Memory should be properly aligned, but better check. */ - if (!IS_ALIGNED((unsigned long)data, 8)) { - netdev_err_once(tp->dev, "RX buffer not 8-byte-aligned\n"); - goto err_out; - } - - mapping = dma_map_single(d, data, R8169_RX_BUF_SIZE, DMA_FROM_DEVICE); + mapping = dma_map_page(d, data, 0, R8169_RX_BUF_SIZE, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(d, mapping))) { if (net_ratelimit()) netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n"); - goto err_out; + __free_pages(data, get_order(R8169_RX_BUF_SIZE)); + return NULL; } desc->addr = cpu_to_le64(mapping); rtl8169_mark_to_asic(desc); - return data; -err_out: - kfree(data); - return NULL; + return data; } static void rtl8169_rx_clear(struct rtl8169_private *tp) { unsigned int i; - for (i = 0; i < NUM_RX_DESC; i++) { - if (tp->Rx_databuff[i]) { - rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i, - tp->RxDescArray + i); - } + for (i = 0; i < NUM_RX_DESC && tp->Rx_databuff[i]; i++) { + dma_unmap_page(tp_to_dev(tp), + le64_to_cpu(tp->RxDescArray[i].addr), + R8169_RX_BUF_SIZE, DMA_FROM_DEVICE); + __free_pages(tp->Rx_databuff[i], get_order(R8169_RX_BUF_SIZE)); + tp->Rx_databuff[i] = NULL; + rtl8169_make_unusable_by_asic(tp->RxDescArray + i); } } @@ -5345,7 +5576,7 @@ static int rtl8169_rx_fill(struct rtl8169_private *tp) unsigned int i; for (i = 0; i < NUM_RX_DESC; i++) { - void *data; + struct page *data; data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i); if (!data) { @@ -5507,44 +5738,6 @@ static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb) return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34; } -static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, - struct net_device *dev); -/* r8169_csum_workaround() - * The hw limites the value the transport offset. 
When the offset is out of the - * range, calculate the checksum by sw. - */ -static void r8169_csum_workaround(struct rtl8169_private *tp, - struct sk_buff *skb) -{ - if (skb_is_gso(skb)) { - netdev_features_t features = tp->dev->features; - struct sk_buff *segs, *nskb; - - features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6); - segs = skb_gso_segment(skb, features); - if (IS_ERR(segs) || !segs) - goto drop; - - do { - nskb = segs; - segs = segs->next; - nskb->next = NULL; - rtl8169_start_xmit(nskb, tp->dev); - } while (segs); - - dev_consume_skb_any(skb); - } else if (skb->ip_summed == CHECKSUM_PARTIAL) { - if (skb_checksum_help(skb) < 0) - goto drop; - - rtl8169_start_xmit(skb, tp->dev); - } else { -drop: - tp->dev->stats.tx_dropped++; - dev_kfree_skb_any(skb); - } -} - /* msdn_giant_send_check() * According to the document of microsoft, the TCP Pseudo Header excludes the * packet length for IPv6 TCP large packets. @@ -5594,13 +5787,6 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp, u32 mss = skb_shinfo(skb)->gso_size; if (mss) { - if (transport_offset > GTTCPHO_MAX) { - netif_warn(tp, tx_err, tp->dev, - "Invalid transport offset 0x%x for TSO\n", - transport_offset); - return false; - } - switch (vlan_get_protocol(skb)) { case htons(ETH_P_IP): opts[0] |= TD1_GTSENV4; @@ -5623,16 +5809,6 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp, } else if (skb->ip_summed == CHECKSUM_PARTIAL) { u8 ip_protocol; - if (unlikely(rtl_test_hw_pad_bug(tp, skb))) - return !(skb_checksum_help(skb) || eth_skb_pad(skb)); - - if (transport_offset > TCPHO_MAX) { - netif_warn(tp, tx_err, tp->dev, - "Invalid transport offset 0x%x\n", - transport_offset); - return false; - } - switch (vlan_get_protocol(skb)) { case htons(ETH_P_IP): opts[1] |= TD1_IPv4_CS; @@ -5686,6 +5862,14 @@ static bool rtl_chip_supports_csum_v2(struct rtl8169_private *tp) } } +static void rtl8169_doorbell(struct rtl8169_private *tp) +{ + if (rtl_is_8125(tp)) + RTL_W16(tp, TxPoll_8125, BIT(0)); + else + RTL_W8(tp, TxPoll, NPQ); +} + static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev) { @@ -5695,6 +5879,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, struct device *d = tp_to_dev(tp); dma_addr_t mapping; u32 opts[2], len; + bool stop_queue; + bool door_bell; int frags; if (unlikely(!rtl_tx_slots_avail(tp, skb_shinfo(skb)->nr_frags))) { @@ -5709,10 +5895,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, opts[0] = DescOwn; if (rtl_chip_supports_csum_v2(tp)) { - if (!rtl8169_tso_csum_v2(tp, skb, opts)) { - r8169_csum_workaround(tp, skb); - return NETDEV_TX_OK; - } + if (!rtl8169_tso_csum_v2(tp, skb, opts)) + goto err_dma_0; } else { rtl8169_tso_csum_v1(skb, opts); } @@ -5740,13 +5924,13 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, txd->opts2 = cpu_to_le32(opts[1]); - netdev_sent_queue(dev, skb->len); - skb_tx_timestamp(skb); /* Force memory writes to complete before releasing descriptor */ dma_wmb(); + door_bell = __netdev_sent_queue(dev, skb->len, netdev_xmit_more()); + txd->opts1 = rtl8169_get_txd_opts1(opts[0], len, entry); /* Force all memory writes to complete before notifying device */ @@ -5754,14 +5938,20 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, tp->cur_tx += frags + 1; - RTL_W8(tp, TxPoll, NPQ); - - if (!rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) { + stop_queue = !rtl_tx_slots_avail(tp, MAX_SKB_FRAGS); + if (unlikely(stop_queue)) { /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must * not miss a ring 
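The transmit path below switches from netdev_sent_queue() plus an unconditional TxPoll write to __netdev_sent_queue(), which folds in netdev_xmit_more(): the doorbell is rung once per burst rather than once per packet, and always before stopping the queue. Condensed from the hunk:

/* Defer the doorbell while the stack promises more packets, but
 * always ring it when stopping the queue so the final descriptors
 * are not stranded waiting for a kick that never comes. */
bool door_bell = __netdev_sent_queue(dev, skb->len, netdev_xmit_more());

if (unlikely(stop_queue))
	door_bell = true;

if (door_bell)
	rtl8169_doorbell(tp);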
update when it notices a stopped queue. */ smp_wmb(); netif_stop_queue(dev); + door_bell = true; + } + + if (door_bell) + rtl8169_doorbell(tp); + + if (unlikely(stop_queue)) { /* Sync with rtl_tx: * - publish queue status and cur_tx ring index (write barrier) * - refresh dirty_tx ring index (read barrier). @@ -5789,6 +5979,39 @@ err_stop_0: return NETDEV_TX_BUSY; } +static netdev_features_t rtl8169_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + int transport_offset = skb_transport_offset(skb); + struct rtl8169_private *tp = netdev_priv(dev); + + if (skb_is_gso(skb)) { + if (transport_offset > GTTCPHO_MAX && + rtl_chip_supports_csum_v2(tp)) + features &= ~NETIF_F_ALL_TSO; + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + if (skb->len < ETH_ZLEN) { + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + case RTL_GIGA_MAC_VER_34: + features &= ~NETIF_F_CSUM_MASK; + break; + default: + break; + } + } + + if (transport_offset > TCPHO_MAX && + rtl_chip_supports_csum_v2(tp)) + features &= ~NETIF_F_CSUM_MASK; + } + + return vlan_features_check(skb, features); +} + static void rtl8169_pcierr_interrupt(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); @@ -5850,7 +6073,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp, rtl8169_unmap_tx_skb(tp_to_dev(tp), tx_skb, tp->TxDescArray + entry); - if (status & LastFrag) { + if (tx_skb->skb) { pkts_compl++; bytes_compl += tx_skb->skb->len; napi_consume_skb(tx_skb->skb, budget); @@ -5888,7 +6111,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp, * it is slow enough). -- FR */ if (tp->cur_tx != dirty_tx) - RTL_W8(tp, TxPoll, NPQ); + rtl8169_doorbell(tp); } } @@ -5908,24 +6131,6 @@ static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1) skb_checksum_none_assert(skb); } -static struct sk_buff *rtl8169_try_rx_copy(void *data, - struct rtl8169_private *tp, - int pkt_size, - dma_addr_t addr) -{ - struct sk_buff *skb; - struct device *d = tp_to_dev(tp); - - dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE); - prefetch(data); - skb = napi_alloc_skb(&tp->napi, pkt_size); - if (skb) - skb_copy_to_linear_data(skb, data, pkt_size); - dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE); - - return skb; -} - static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget) { unsigned int cur_rx, rx_left; @@ -5935,6 +6140,7 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget for (rx_left = min(budget, NUM_RX_DESC); rx_left > 0; rx_left--, cur_rx++) { unsigned int entry = cur_rx % NUM_RX_DESC; + const void *rx_buf = page_address(tp->Rx_databuff[entry]); struct RxDesc *desc = tp->RxDescArray + entry; u32 status; @@ -5961,17 +6167,13 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget goto process_pkt; } } else { + unsigned int pkt_size; struct sk_buff *skb; - dma_addr_t addr; - int pkt_size; process_pkt: - addr = le64_to_cpu(desc->addr); + pkt_size = status & GENMASK(13, 0); if (likely(!(dev->features & NETIF_F_RXFCS))) - pkt_size = (status & 0x00003fff) - 4; - else - pkt_size = status & 0x00003fff; - + pkt_size -= ETH_FCS_LEN; /* * The driver does not support incoming fragmented * frames. 
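rtl8169_features_check() above replaces the deleted r8169_csum_workaround(): instead of segmenting or checksumming problem packets inside the driver, the hook clears the offload feature bits per-skb and lets the core networking stack do the software fallback. A minimal hook of this shape (foo_features_check() and MAX_HDR_OFF are illustrative assumptions):

#include <linux/netdevice.h>
#include <linux/if_vlan.h>

/* Drop offload bits the hardware cannot honour for this skb; the
 * core then falls back to software GSO/checksumming. */
static netdev_features_t foo_features_check(struct sk_buff *skb,
					    struct net_device *dev,
					    netdev_features_t features)
{
	if (skb_transport_offset(skb) > MAX_HDR_OFF)
		features &= ~(NETIF_F_ALL_TSO | NETIF_F_CSUM_MASK);

	return vlan_features_check(skb, features);
}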
They are seen as a symptom of over-mtu @@ -5983,15 +6185,25 @@ process_pkt: goto release_descriptor; } - skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry], - tp, pkt_size, addr); - if (!skb) { + skb = napi_alloc_skb(&tp->napi, pkt_size); + if (unlikely(!skb)) { dev->stats.rx_dropped++; goto release_descriptor; } + dma_sync_single_for_cpu(tp_to_dev(tp), + le64_to_cpu(desc->addr), + pkt_size, DMA_FROM_DEVICE); + prefetch(rx_buf); + skb_copy_to_linear_data(skb, rx_buf, pkt_size); + skb->tail += pkt_size; + skb->len = pkt_size; + + dma_sync_single_for_device(tp_to_dev(tp), + le64_to_cpu(desc->addr), + pkt_size, DMA_FROM_DEVICE); + rtl8169_rx_csum(skb, status); - skb_put(skb, pkt_size); skb->protocol = eth_type_trans(skb, dev); rtl8169_rx_vlan_tag(desc, skb); @@ -6020,9 +6232,10 @@ release_descriptor: static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) { struct rtl8169_private *tp = dev_instance; - u16 status = RTL_R16(tp, IntrStatus); + u32 status = rtl_get_events(tp); - if (!tp->irq_enabled || status == 0xffff || !(status & tp->irq_mask)) + if (!tp->irq_enabled || (status & 0xffff) == 0xffff || + !(status & tp->irq_mask)) return IRQ_NONE; if (unlikely(status & SYSErr)) { @@ -6332,7 +6545,7 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->multicast = dev->stats.multicast; /* - * Fetch additonal counter values missing in stats collected by driver + * Fetch additional counter values missing in stats collected by driver * from tally counters. */ if (pm_runtime_active(&pdev->dev)) @@ -6556,6 +6769,7 @@ static const struct net_device_ops rtl_netdev_ops = { .ndo_stop = rtl8169_close, .ndo_get_stats64 = rtl8169_get_stats64, .ndo_start_xmit = rtl8169_start_xmit, + .ndo_features_check = rtl8169_features_check, .ndo_tx_timeout = rtl8169_tx_timeout, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = rtl8169_change_mtu, @@ -6619,6 +6833,8 @@ static void rtl_read_mac_address(struct rtl8169_private *tp, value = rtl_eri_read(tp, 0xe4); mac_addr[4] = (value >> 0) & 0xff; mac_addr[5] = (value >> 8) & 0xff; + } else if (rtl_is_8125(tp)) { + rtl_read_mac_from_reg(tp, mac_addr, MAC0_BKP); } } @@ -6692,8 +6908,6 @@ static int r8169_mdio_register(struct rtl8169_private *tp) static void rtl_hw_init_8168g(struct rtl8169_private *tp) { - u32 data; - tp->ocp_base = OCP_STD_PHY_BASE; RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN); @@ -6708,16 +6922,37 @@ static void rtl_hw_init_8168g(struct rtl8169_private *tp) msleep(1); RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); - data = r8168_mac_ocp_read(tp, 0xe8de); - data &= ~(1 << 14); - r8168_mac_ocp_write(tp, 0xe8de, data); + r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0); if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42)) return; - data = r8168_mac_ocp_read(tp, 0xe8de); - data |= (1 << 15); - r8168_mac_ocp_write(tp, 0xe8de, data); + r8168_mac_ocp_modify(tp, 0xe8de, 0, BIT(15)); + + rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42); +} + +static void rtl_hw_init_8125(struct rtl8169_private *tp) +{ + tp->ocp_base = OCP_STD_PHY_BASE; + + RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN); + + if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42)) + return; + + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb)); + msleep(1); + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + + r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0); + + if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42)) + return; + + r8168_mac_ocp_write(tp, 0xc0aa, 
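The rtl_rx() rewrite above moves to page-backed buffers with an inline copy-break: sync the buffer for CPU access, copy the frame into a freshly allocated skb, then hand the buffer straight back to the device without remapping it. The essential sequence, with skb_put() standing in for the patch's open-coded tail/len update:

dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
skb = napi_alloc_skb(&tp->napi, pkt_size);
if (skb) {
	skb_copy_to_linear_data(skb, rx_buf, pkt_size);
	skb_put(skb, pkt_size);		/* advances tail and len */
}
dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);

Because the receive buffer is recycled rather than re-posted, the old rtl8169_try_rx_copy() allocate-or-drop dance and the per-packet dma_map/unmap both disappear.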
0x07d0); + r8168_mac_ocp_write(tp, 0xc0a6, 0x0150); + r8168_mac_ocp_write(tp, 0xc01e, 0x5555); rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42); } @@ -6731,6 +6966,9 @@ static void rtl_hw_initialize(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48: rtl_hw_init_8168g(tp); break; + case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_61: + rtl_hw_init_8125(tp); + break; default: break; } @@ -6794,7 +7032,7 @@ static void rtl_init_mac_address(struct rtl8169_private *tp) { struct net_device *dev = tp->dev; u8 *mac_addr = dev->dev_addr; - int rc, i; + int rc; rc = eth_platform_get_mac_address(tp_to_dev(tp), mac_addr); if (!rc) @@ -6804,8 +7042,7 @@ static void rtl_init_mac_address(struct rtl8169_private *tp) if (is_valid_ether_addr(mac_addr)) goto done; - for (i = 0; i < ETH_ALEN; i++) - mac_addr[i] = RTL_R8(tp, MAC0 + i); + rtl_read_mac_from_reg(tp, mac_addr, MAC0); if (is_valid_ether_addr(mac_addr)) goto done; @@ -6917,11 +7154,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) netif_napi_add(dev, &tp->napi, rtl8169_poll, NAPI_POLL_WEIGHT); - /* don't enable SG, IP_CSUM and TSO by default - it might not work - * properly for all devices */ - dev->features |= NETIF_F_RXCSUM | - NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; - + dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | + NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; @@ -6929,8 +7164,10 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) NETIF_F_HIGHDMA; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; - tp->cp_cmd |= RxChkSum | RxVlan; - + tp->cp_cmd |= RxChkSum; + /* RTL8125 uses register RxConfig for VLAN offloading config */ + if (!rtl_is_8125(tp)) + tp->cp_cmd |= RxVlan; /* * Pretend we are using VLANs; This bypasses a nasty bug where * Interrupts stop flowing on high load on 8110SCd controllers. @@ -6939,8 +7176,22 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* Disallow toggling */ dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX; - if (rtl_chip_supports_csum_v2(tp)) + if (rtl_chip_supports_csum_v2(tp)) { dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6; + dev->features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6; + dev->gso_max_size = RTL_GSO_MAX_SIZE_V2; + dev->gso_max_segs = RTL_GSO_MAX_SEGS_V2; + } else { + dev->gso_max_size = RTL_GSO_MAX_SIZE_V1; + dev->gso_max_segs = RTL_GSO_MAX_SEGS_V1; + } + + /* RTL8168e-vl has a HW issue with TSO */ + if (tp->mac_version == RTL_GIGA_MAC_VER_34) { + dev->vlan_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG); + dev->hw_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG); + dev->features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG); + } dev->hw_features |= NETIF_F_RXALL; dev->hw_features |= NETIF_F_RXFCS; diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index ac9195add811..a9c89d5d8898 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -193,16 +193,12 @@ enum ravb_reg { GECMR = 0x05b0, MAHR = 0x05c0, MALR = 0x05c8, - TROCR = 0x0700, /* Undocumented? */ - CDCR = 0x0708, /* Undocumented? */ - LCCR = 0x0710, /* Undocumented? */ + TROCR = 0x0700, /* R-Car Gen3 only */ CEFCR = 0x0740, FRECR = 0x0748, TSFRCR = 0x0750, TLFRCR = 0x0758, RFCR = 0x0760, - CERCR = 0x0768, /* Undocumented? */ - CEECR = 0x0770, /* Undocumented? 
*/ MAFCR = 0x0778, }; @@ -220,7 +216,6 @@ enum CCC_BIT { CCC_CSEL_HPB = 0x00010000, CCC_CSEL_ETH_TX = 0x00020000, CCC_CSEL_GMII_REF = 0x00030000, - CCC_BOC = 0x00100000, /* Undocumented? */ CCC_LBME = 0x01000000, }; @@ -317,7 +312,7 @@ enum UFCD_BIT { /* SFO */ enum SFO_BIT { - SFO_FPB = 0x0000003F, + SFO_FBP = 0x0000003F, }; /* RTC */ diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 6cacd5e893ac..de9aa8c47f1c 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -447,12 +447,6 @@ static int ravb_dmac_init(struct net_device *ndev) ravb_ring_format(ndev, RAVB_BE); ravb_ring_format(ndev, RAVB_NC); -#if defined(__LITTLE_ENDIAN) - ravb_modify(ndev, CCC, CCC_BOC, 0); -#else - ravb_modify(ndev, CCC, CCC_BOC, CCC_BOC); -#endif - /* Set AVB RX */ ravb_write(ndev, RCR_EFFS | RCR_ENCF | RCR_ETS0 | RCR_ESF | 0x18000000, RCR); @@ -1627,17 +1621,10 @@ static struct net_device_stats *ravb_get_stats(struct net_device *ndev) stats0 = &priv->stats[RAVB_BE]; stats1 = &priv->stats[RAVB_NC]; - nstats->tx_dropped += ravb_read(ndev, TROCR); - ravb_write(ndev, 0, TROCR); /* (write clear) */ - nstats->collisions += ravb_read(ndev, CDCR); - ravb_write(ndev, 0, CDCR); /* (write clear) */ - nstats->tx_carrier_errors += ravb_read(ndev, LCCR); - ravb_write(ndev, 0, LCCR); /* (write clear) */ - - nstats->tx_carrier_errors += ravb_read(ndev, CERCR); - ravb_write(ndev, 0, CERCR); /* (write clear) */ - nstats->tx_carrier_errors += ravb_read(ndev, CEECR); - ravb_write(ndev, 0, CEECR); /* (write clear) */ + if (priv->chip_id == RCAR_GEN3) { + nstats->tx_dropped += ravb_read(ndev, TROCR); + ravb_write(ndev, 0, TROCR); /* (write clear) */ + } nstats->rx_packets = stats0->rx_packets + stats1->rx_packets; nstats->tx_packets = stats0->tx_packets + stats1->tx_packets; diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 2c5d3f5b84dd..786b158bd305 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -2189,6 +2189,9 @@ static int rocker_router_fib_event(struct notifier_block *nb, struct rocker_fib_event_work *fib_work; struct fib_notifier_info *info = ptr; + if (!net_eq(info->net, &init_net)) + return NOTIFY_DONE; + if (info->family != AF_INET) return NOTIFY_DONE; diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c index d2c48116f181..2412c87561e0 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c @@ -78,7 +78,6 @@ static int sxgbe_platform_probe(struct platform_device *pdev) { int ret; int i, chan; - struct resource *res; struct device *dev = &pdev->dev; void __iomem *addr; struct sxgbe_priv_data *priv = NULL; @@ -88,8 +87,7 @@ static int sxgbe_platform_probe(struct platform_device *pdev) struct device_node *node = dev->of_node; /* Get memory resource */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - addr = devm_ioremap_resource(dev, res); + addr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(addr)) return PTR_ERR(addr); diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 16d6952c312a..0ec13f520e90 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -508,7 +508,7 @@ static ssize_t efx_ef10_show_link_control_flag(struct device *dev, struct device_attribute *attr, char *buf) { - struct efx_nic *efx = 
pci_get_drvdata(to_pci_dev(dev)); + struct efx_nic *efx = dev_get_drvdata(dev); return sprintf(buf, "%d\n", ((efx->mcdi->fn_flags) & @@ -520,7 +520,7 @@ static ssize_t efx_ef10_show_primary_flag(struct device *dev, struct device_attribute *attr, char *buf) { - struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + struct efx_nic *efx = dev_get_drvdata(dev); return sprintf(buf, "%d\n", ((efx->mcdi->fn_flags) & diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index ab58b837df47..2fef7402233e 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -2517,7 +2517,7 @@ static struct notifier_block efx_netdev_notifier = { static ssize_t show_phy_type(struct device *dev, struct device_attribute *attr, char *buf) { - struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + struct efx_nic *efx = dev_get_drvdata(dev); return sprintf(buf, "%d\n", efx->phy_type); } static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL); @@ -2526,7 +2526,7 @@ static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL); static ssize_t show_mcdi_log(struct device *dev, struct device_attribute *attr, char *buf) { - struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + struct efx_nic *efx = dev_get_drvdata(dev); struct efx_mcdi_iface *mcdi = efx_mcdi(efx); return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled); @@ -2534,7 +2534,7 @@ static ssize_t show_mcdi_log(struct device *dev, struct device_attribute *attr, static ssize_t set_mcdi_log(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + struct efx_nic *efx = dev_get_drvdata(dev); struct efx_mcdi_iface *mcdi = efx_mcdi(efx); bool enable = count > 0 && *buf != '0'; @@ -3654,7 +3654,7 @@ static int efx_pci_sriov_configure(struct pci_dev *dev, int num_vfs) static int efx_pm_freeze(struct device *dev) { - struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + struct efx_nic *efx = dev_get_drvdata(dev); rtnl_lock(); @@ -3675,7 +3675,7 @@ static int efx_pm_freeze(struct device *dev) static int efx_pm_thaw(struct device *dev) { int rc; - struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + struct efx_nic *efx = dev_get_drvdata(dev); rtnl_lock(); diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c index 9b15c39ac670..eecc348b1c32 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.c +++ b/drivers/net/ethernet/sfc/falcon/efx.c @@ -2256,7 +2256,7 @@ static struct notifier_block ef4_netdev_notifier = { static ssize_t show_phy_type(struct device *dev, struct device_attribute *attr, char *buf) { - struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + struct ef4_nic *efx = dev_get_drvdata(dev); return sprintf(buf, "%d\n", efx->phy_type); } static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL); @@ -2999,7 +2999,7 @@ static int ef4_pci_probe(struct pci_dev *pci_dev, static int ef4_pm_freeze(struct device *dev) { - struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + struct ef4_nic *efx = dev_get_drvdata(dev); rtnl_lock(); @@ -3020,7 +3020,7 @@ static int ef4_pm_freeze(struct device *dev) static int ef4_pm_thaw(struct device *dev) { int rc; - struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + struct ef4_nic *efx = dev_get_drvdata(dev); rtnl_lock(); diff --git a/drivers/net/ethernet/sfc/falcon/falcon_boards.c b/drivers/net/ethernet/sfc/falcon/falcon_boards.c index 839189dab98e..605f486fa675 100644 --- a/drivers/net/ethernet/sfc/falcon/falcon_boards.c +++ 
b/drivers/net/ethernet/sfc/falcon/falcon_boards.c @@ -357,7 +357,7 @@ fail_on: static ssize_t show_phy_flash_cfg(struct device *dev, struct device_attribute *attr, char *buf) { - struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + struct ef4_nic *efx = dev_get_drvdata(dev); return sprintf(buf, "%d\n", !!(efx->phy_mode & PHY_MODE_SPECIAL)); } @@ -365,7 +365,7 @@ static ssize_t set_phy_flash_cfg(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + struct ef4_nic *efx = dev_get_drvdata(dev); enum ef4_phy_mode old_mode, new_mode; int err; @@ -454,13 +454,13 @@ static int sfe4001_init(struct ef4_nic *efx) #if IS_ENABLED(CONFIG_SENSORS_LM90) board->hwmon_client = - i2c_new_device(&board->i2c_adap, &sfe4001_hwmon_info); + i2c_new_client_device(&board->i2c_adap, &sfe4001_hwmon_info); #else board->hwmon_client = - i2c_new_dummy(&board->i2c_adap, sfe4001_hwmon_info.addr); + i2c_new_dummy_device(&board->i2c_adap, sfe4001_hwmon_info.addr); #endif - if (!board->hwmon_client) - return -EIO; + if (IS_ERR(board->hwmon_client)) + return PTR_ERR(board->hwmon_client); /* Raise board/PHY high limit from 85 to 90 degrees Celsius */ rc = i2c_smbus_write_byte_data(board->hwmon_client, @@ -468,9 +468,9 @@ static int sfe4001_init(struct ef4_nic *efx) if (rc) goto fail_hwmon; - board->ioexp_client = i2c_new_dummy(&board->i2c_adap, PCA9539); - if (!board->ioexp_client) { - rc = -EIO; + board->ioexp_client = i2c_new_dummy_device(&board->i2c_adap, PCA9539); + if (IS_ERR(board->ioexp_client)) { + rc = PTR_ERR(board->ioexp_client); goto fail_hwmon; } diff --git a/drivers/net/ethernet/sfc/falcon/rx.c b/drivers/net/ethernet/sfc/falcon/rx.c index fd850d3d8ec0..05ea3523890a 100644 --- a/drivers/net/ethernet/sfc/falcon/rx.c +++ b/drivers/net/ethernet/sfc/falcon/rx.c @@ -424,7 +424,6 @@ ef4_rx_packet_gro(struct ef4_channel *channel, struct ef4_rx_buffer *rx_buf, unsigned int n_frags, u8 *eh) { struct napi_struct *napi = &channel->napi_str; - gro_result_t gro_result; struct ef4_nic *efx = channel->efx; struct sk_buff *skb; @@ -460,9 +459,7 @@ ef4_rx_packet_gro(struct ef4_channel *channel, struct ef4_rx_buffer *rx_buf, skb_record_rx_queue(skb, channel->rx_queue.core_index); - gro_result = napi_gro_frags(napi); - if (gro_result != GRO_DROP) - channel->irq_mod_score += 2; + napi_gro_frags(napi); } /* Allocate and construct an SKB around page fragments */ diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index d5db045535d3..85ec07f5a674 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -412,7 +412,6 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf, unsigned int n_frags, u8 *eh) { struct napi_struct *napi = &channel->napi_str; - gro_result_t gro_result; struct efx_nic *efx = channel->efx; struct sk_buff *skb; @@ -449,9 +448,7 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf, skb_record_rx_queue(skb, channel->rx_queue.core_index); - gro_result = napi_gro_frags(napi); - if (gro_result != GRO_DROP) - channel->irq_mod_score += 2; + napi_gro_frags(napi); } /* Allocate and construct an SKB around page fragments */ diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 31ec56091a5d..65e81ec1b314 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -274,7 +274,7 @@ static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb, vaddr = 
kmap_atomic(skb_frag_page(f)); - efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + f->page_offset, + efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + skb_frag_off(f), skb_frag_size(f), copy_buf); kunmap_atomic(vaddr); } diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c index 358e66b81926..deb636d653f3 100644 --- a/drivers/net/ethernet/sgi/ioc3-eth.c +++ b/drivers/net/ethernet/sgi/ioc3-eth.c @@ -1,9 +1,5 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Driver for SGI's IOC3 based Ethernet cards as found in the PCI card. +// SPDX-License-Identifier: GPL-2.0 +/* Driver for SGI's IOC3 based Ethernet cards as found in the PCI card. * * Copyright (C) 1999, 2000, 01, 03, 06 Ralf Baechle * Copyright (C) 1995, 1999, 2000, 2001 by Silicon Graphics, Inc. @@ -15,11 +11,8 @@ * * To do: * - * o Handle allocation failures in ioc3_alloc_skb() more gracefully. - * o Handle allocation failures in ioc3_init_rings(). * o Use prefetching for large packets. What is a good lower limit for * prefetching? - * o We're probably allocating a bit too much memory. * o Use hardware checksums. * o Convert to using a IOC3 meta driver. * o Which PHYs might possibly be attached to the IOC3 in real live, @@ -39,10 +32,10 @@ #include <linux/crc32.h> #include <linux/mii.h> #include <linux/in.h> +#include <linux/io.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/udp.h> -#include <linux/dma-mapping.h> #include <linux/gfp.h> #ifdef CONFIG_SERIAL_8250 @@ -55,32 +48,52 @@ #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/skbuff.h> +#include <linux/dma-direct.h> + #include <net/ip.h> #include <asm/byteorder.h> -#include <asm/io.h> #include <asm/pgtable.h> #include <linux/uaccess.h> #include <asm/sn/types.h> #include <asm/sn/ioc3.h> #include <asm/pci/bridge.h> -/* - * 64 RX buffers. This is tunable in the range of 16 <= x < 512. The - * value must be a power of two. +/* Number of RX buffers. This is tunable in the range of 16 <= x < 512. + * The value must be a power of two. */ -#define RX_BUFFS 64 +#define RX_BUFFS 64 +#define RX_RING_ENTRIES 512 /* fixed in hardware */ +#define RX_RING_MASK (RX_RING_ENTRIES - 1) +#define RX_RING_SIZE (RX_RING_ENTRIES * sizeof(u64)) + +/* 128 TX buffers (not tunable) */ +#define TX_RING_ENTRIES 128 +#define TX_RING_MASK (TX_RING_ENTRIES - 1) +#define TX_RING_SIZE (TX_RING_ENTRIES * sizeof(struct ioc3_etxd)) + +/* IOC3 does dma transfers in 128 byte blocks */ +#define IOC3_DMA_XFER_LEN 128UL + +/* Every RX buffer starts with 8 byte descriptor data */ +#define RX_OFFSET (sizeof(struct ioc3_erxbuf) + NET_IP_ALIGN) +#define RX_BUF_SIZE (13 * IOC3_DMA_XFER_LEN) -#define ETCSR_FD ((17<<ETCSR_IPGR2_SHIFT) | (11<<ETCSR_IPGR1_SHIFT) | 21) -#define ETCSR_HD ((21<<ETCSR_IPGR2_SHIFT) | (21<<ETCSR_IPGR1_SHIFT) | 21) +#define ETCSR_FD ((21 << ETCSR_IPGR2_SHIFT) | (21 << ETCSR_IPGR1_SHIFT) | 21) +#define ETCSR_HD ((17 << ETCSR_IPGR2_SHIFT) | (11 << ETCSR_IPGR1_SHIFT) | 21) /* Private per NIC data of the driver. 
*/ struct ioc3_private { - struct ioc3 *regs; + struct ioc3_ethregs *regs; + struct ioc3 *all_regs; + struct device *dma_dev; + u32 *ssram; unsigned long *rxr; /* pointer to receiver ring */ struct ioc3_etxd *txr; - struct sk_buff *rx_skbs[512]; - struct sk_buff *tx_skbs[128]; + dma_addr_t rxr_dma; + dma_addr_t txr_dma; + struct sk_buff *rx_skbs[RX_RING_ENTRIES]; + struct sk_buff *tx_skbs[TX_RING_ENTRIES]; int rx_ci; /* RX consumer index */ int rx_pi; /* RX producer index */ int tx_ci; /* TX consumer index */ @@ -102,190 +115,138 @@ static void ioc3_set_multicast_list(struct net_device *dev); static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev); static void ioc3_timeout(struct net_device *dev); static inline unsigned int ioc3_hash(const unsigned char *addr); +static void ioc3_start(struct ioc3_private *ip); static inline void ioc3_stop(struct ioc3_private *ip); static void ioc3_init(struct net_device *dev); +static int ioc3_alloc_rx_bufs(struct net_device *dev); +static void ioc3_free_rx_bufs(struct ioc3_private *ip); +static inline void ioc3_clean_tx_ring(struct ioc3_private *ip); static const char ioc3_str[] = "IOC3 Ethernet"; static const struct ethtool_ops ioc3_ethtool_ops; -/* We use this to acquire receive skb's that we can DMA directly into. */ - -#define IOC3_CACHELINE 128UL static inline unsigned long aligned_rx_skb_addr(unsigned long addr) { - return (~addr + 1) & (IOC3_CACHELINE - 1UL); + return (~addr + 1) & (IOC3_DMA_XFER_LEN - 1UL); } -static inline struct sk_buff * ioc3_alloc_skb(unsigned long length, - unsigned int gfp_mask) +static inline int ioc3_alloc_skb(struct ioc3_private *ip, struct sk_buff **skb, + struct ioc3_erxbuf **rxb, dma_addr_t *rxb_dma) { - struct sk_buff *skb; + struct sk_buff *new_skb; + dma_addr_t d; + int offset; + + new_skb = alloc_skb(RX_BUF_SIZE + IOC3_DMA_XFER_LEN - 1, GFP_ATOMIC); + if (!new_skb) + return -ENOMEM; + + /* ensure buffer is aligned to IOC3_DMA_XFER_LEN */ + offset = aligned_rx_skb_addr((unsigned long)new_skb->data); + if (offset) + skb_reserve(new_skb, offset); - skb = alloc_skb(length + IOC3_CACHELINE - 1, gfp_mask); - if (likely(skb)) { - int offset = aligned_rx_skb_addr((unsigned long) skb->data); - if (offset) - skb_reserve(skb, offset); + d = dma_map_single(ip->dma_dev, new_skb->data, + RX_BUF_SIZE, DMA_FROM_DEVICE); + + if (dma_mapping_error(ip->dma_dev, d)) { + dev_kfree_skb_any(new_skb); + return -ENOMEM; } + *rxb_dma = d; + *rxb = (struct ioc3_erxbuf *)new_skb->data; + skb_reserve(new_skb, RX_OFFSET); + *skb = new_skb; - return skb; + return 0; } -static inline unsigned long ioc3_map(void *ptr, unsigned long vdev) +#ifdef CONFIG_PCI_XTALK_BRIDGE +static inline unsigned long ioc3_map(dma_addr_t addr, unsigned long attr) { -#ifdef CONFIG_SGI_IP27 - vdev <<= 57; /* Shift to PCI64_ATTR_VIRTUAL */ + return (addr & ~PCI64_ATTR_BAR) | attr; +} - return vdev | (0xaUL << PCI64_ATTR_TARG_SHFT) | PCI64_ATTR_PREF | - ((unsigned long)ptr & TO_PHYS_MASK); +#define ERBAR_VAL (ERBAR_BARRIER_BIT << ERBAR_RXBARR_SHIFT) #else - return virt_to_bus(ptr); -#endif +static inline unsigned long ioc3_map(dma_addr_t addr, unsigned long attr) +{ + return addr; } -/* BEWARE: The IOC3 documentation documents the size of rx buffers as - 1644 while it's actually 1664. This one was nasty to track down ... */ -#define RX_OFFSET 10 -#define RX_BUF_ALLOC_SIZE (1664 + RX_OFFSET + IOC3_CACHELINE) - -/* DMA barrier to separate cached and uncached accesses. 
*/ -#define BARRIER() \ - __asm__("sync" ::: "memory") - +#define ERBAR_VAL 0 +#endif #define IOC3_SIZE 0x100000 -/* - * IOC3 is a big endian device - * - * Unorthodox but makes the users of these macros more readable - the pointer - * to the IOC3's memory mapped registers is expected as struct ioc3 * ioc3 - * in the environment. - */ -#define ioc3_r_mcr() be32_to_cpu(ioc3->mcr) -#define ioc3_w_mcr(v) do { ioc3->mcr = cpu_to_be32(v); } while (0) -#define ioc3_w_gpcr_s(v) do { ioc3->gpcr_s = cpu_to_be32(v); } while (0) -#define ioc3_r_emcr() be32_to_cpu(ioc3->emcr) -#define ioc3_w_emcr(v) do { ioc3->emcr = cpu_to_be32(v); } while (0) -#define ioc3_r_eisr() be32_to_cpu(ioc3->eisr) -#define ioc3_w_eisr(v) do { ioc3->eisr = cpu_to_be32(v); } while (0) -#define ioc3_r_eier() be32_to_cpu(ioc3->eier) -#define ioc3_w_eier(v) do { ioc3->eier = cpu_to_be32(v); } while (0) -#define ioc3_r_ercsr() be32_to_cpu(ioc3->ercsr) -#define ioc3_w_ercsr(v) do { ioc3->ercsr = cpu_to_be32(v); } while (0) -#define ioc3_r_erbr_h() be32_to_cpu(ioc3->erbr_h) -#define ioc3_w_erbr_h(v) do { ioc3->erbr_h = cpu_to_be32(v); } while (0) -#define ioc3_r_erbr_l() be32_to_cpu(ioc3->erbr_l) -#define ioc3_w_erbr_l(v) do { ioc3->erbr_l = cpu_to_be32(v); } while (0) -#define ioc3_r_erbar() be32_to_cpu(ioc3->erbar) -#define ioc3_w_erbar(v) do { ioc3->erbar = cpu_to_be32(v); } while (0) -#define ioc3_r_ercir() be32_to_cpu(ioc3->ercir) -#define ioc3_w_ercir(v) do { ioc3->ercir = cpu_to_be32(v); } while (0) -#define ioc3_r_erpir() be32_to_cpu(ioc3->erpir) -#define ioc3_w_erpir(v) do { ioc3->erpir = cpu_to_be32(v); } while (0) -#define ioc3_r_ertr() be32_to_cpu(ioc3->ertr) -#define ioc3_w_ertr(v) do { ioc3->ertr = cpu_to_be32(v); } while (0) -#define ioc3_r_etcsr() be32_to_cpu(ioc3->etcsr) -#define ioc3_w_etcsr(v) do { ioc3->etcsr = cpu_to_be32(v); } while (0) -#define ioc3_r_ersr() be32_to_cpu(ioc3->ersr) -#define ioc3_w_ersr(v) do { ioc3->ersr = cpu_to_be32(v); } while (0) -#define ioc3_r_etcdc() be32_to_cpu(ioc3->etcdc) -#define ioc3_w_etcdc(v) do { ioc3->etcdc = cpu_to_be32(v); } while (0) -#define ioc3_r_ebir() be32_to_cpu(ioc3->ebir) -#define ioc3_w_ebir(v) do { ioc3->ebir = cpu_to_be32(v); } while (0) -#define ioc3_r_etbr_h() be32_to_cpu(ioc3->etbr_h) -#define ioc3_w_etbr_h(v) do { ioc3->etbr_h = cpu_to_be32(v); } while (0) -#define ioc3_r_etbr_l() be32_to_cpu(ioc3->etbr_l) -#define ioc3_w_etbr_l(v) do { ioc3->etbr_l = cpu_to_be32(v); } while (0) -#define ioc3_r_etcir() be32_to_cpu(ioc3->etcir) -#define ioc3_w_etcir(v) do { ioc3->etcir = cpu_to_be32(v); } while (0) -#define ioc3_r_etpir() be32_to_cpu(ioc3->etpir) -#define ioc3_w_etpir(v) do { ioc3->etpir = cpu_to_be32(v); } while (0) -#define ioc3_r_emar_h() be32_to_cpu(ioc3->emar_h) -#define ioc3_w_emar_h(v) do { ioc3->emar_h = cpu_to_be32(v); } while (0) -#define ioc3_r_emar_l() be32_to_cpu(ioc3->emar_l) -#define ioc3_w_emar_l(v) do { ioc3->emar_l = cpu_to_be32(v); } while (0) -#define ioc3_r_ehar_h() be32_to_cpu(ioc3->ehar_h) -#define ioc3_w_ehar_h(v) do { ioc3->ehar_h = cpu_to_be32(v); } while (0) -#define ioc3_r_ehar_l() be32_to_cpu(ioc3->ehar_l) -#define ioc3_w_ehar_l(v) do { ioc3->ehar_l = cpu_to_be32(v); } while (0) -#define ioc3_r_micr() be32_to_cpu(ioc3->micr) -#define ioc3_w_micr(v) do { ioc3->micr = cpu_to_be32(v); } while (0) -#define ioc3_r_midr_r() be32_to_cpu(ioc3->midr_r) -#define ioc3_w_midr_r(v) do { ioc3->midr_r = cpu_to_be32(v); } while (0) -#define ioc3_r_midr_w() be32_to_cpu(ioc3->midr_w) -#define ioc3_w_midr_w(v) do { ioc3->midr_w = cpu_to_be32(v); } 
while (0) - static inline u32 mcr_pack(u32 pulse, u32 sample) { return (pulse << 10) | (sample << 2); } -static int nic_wait(struct ioc3 *ioc3) +static int nic_wait(u32 __iomem *mcr) { - u32 mcr; + u32 m; - do { - mcr = ioc3_r_mcr(); - } while (!(mcr & 2)); + do { + m = readl(mcr); + } while (!(m & 2)); - return mcr & 1; + return m & 1; } -static int nic_reset(struct ioc3 *ioc3) +static int nic_reset(u32 __iomem *mcr) { - int presence; + int presence; - ioc3_w_mcr(mcr_pack(500, 65)); - presence = nic_wait(ioc3); + writel(mcr_pack(500, 65), mcr); + presence = nic_wait(mcr); - ioc3_w_mcr(mcr_pack(0, 500)); - nic_wait(ioc3); + writel(mcr_pack(0, 500), mcr); + nic_wait(mcr); - return presence; + return presence; } -static inline int nic_read_bit(struct ioc3 *ioc3) +static inline int nic_read_bit(u32 __iomem *mcr) { int result; - ioc3_w_mcr(mcr_pack(6, 13)); - result = nic_wait(ioc3); - ioc3_w_mcr(mcr_pack(0, 100)); - nic_wait(ioc3); + writel(mcr_pack(6, 13), mcr); + result = nic_wait(mcr); + writel(mcr_pack(0, 100), mcr); + nic_wait(mcr); return result; } -static inline void nic_write_bit(struct ioc3 *ioc3, int bit) +static inline void nic_write_bit(u32 __iomem *mcr, int bit) { if (bit) - ioc3_w_mcr(mcr_pack(6, 110)); + writel(mcr_pack(6, 110), mcr); else - ioc3_w_mcr(mcr_pack(80, 30)); + writel(mcr_pack(80, 30), mcr); - nic_wait(ioc3); + nic_wait(mcr); } -/* - * Read a byte from an iButton device +/* Read a byte from an iButton device */ -static u32 nic_read_byte(struct ioc3 *ioc3) +static u32 nic_read_byte(u32 __iomem *mcr) { u32 result = 0; int i; for (i = 0; i < 8; i++) - result = (result >> 1) | (nic_read_bit(ioc3) << 7); + result = (result >> 1) | (nic_read_bit(mcr) << 7); return result; } -/* - * Write a byte to an iButton device +/* Write a byte to an iButton device */ -static void nic_write_byte(struct ioc3 *ioc3, int byte) +static void nic_write_byte(u32 __iomem *mcr, int byte) { int i, bit; @@ -293,26 +254,26 @@ static void nic_write_byte(struct ioc3 *ioc3, int byte) bit = byte & 1; byte >>= 1; - nic_write_bit(ioc3, bit); + nic_write_bit(mcr, bit); } } -static u64 nic_find(struct ioc3 *ioc3, int *last) +static u64 nic_find(u32 __iomem *mcr, int *last) { int a, b, index, disc; u64 address = 0; - nic_reset(ioc3); + nic_reset(mcr); /* Search ROM. */ - nic_write_byte(ioc3, 0xf0); + nic_write_byte(mcr, 0xf0); /* Algorithm from ``Book of iButton Standards''. 
*/ for (index = 0, disc = 0; index < 64; index++) { - a = nic_read_bit(ioc3); - b = nic_read_bit(ioc3); + a = nic_read_bit(mcr); + b = nic_read_bit(mcr); if (a && b) { - printk("NIC search failed (not fatal).\n"); + pr_warn("NIC search failed (not fatal).\n"); *last = 0; return 0; } @@ -323,16 +284,17 @@ static u64 nic_find(struct ioc3 *ioc3, int *last) } else if (index > *last) { address &= ~(1UL << index); disc = index; - } else if ((address & (1UL << index)) == 0) + } else if ((address & (1UL << index)) == 0) { disc = index; - nic_write_bit(ioc3, address & (1UL << index)); + } + nic_write_bit(mcr, address & (1UL << index)); continue; } else { if (a) address |= 1UL << index; else address &= ~(1UL << index); - nic_write_bit(ioc3, a); + nic_write_bit(mcr, a); continue; } } @@ -342,7 +304,7 @@ static u64 nic_find(struct ioc3 *ioc3, int *last) return address; } -static int nic_init(struct ioc3 *ioc3) +static int nic_init(u32 __iomem *mcr) { const char *unknown = "unknown"; const char *type = unknown; @@ -352,7 +314,8 @@ static int nic_init(struct ioc3 *ioc3) while (1) { u64 reg; - reg = nic_find(ioc3, &save); + + reg = nic_find(mcr, &save); switch (reg & 0xff) { case 0x91: @@ -366,12 +329,12 @@ static int nic_init(struct ioc3 *ioc3) continue; } - nic_reset(ioc3); + nic_reset(mcr); /* Match ROM. */ - nic_write_byte(ioc3, 0x55); + nic_write_byte(mcr, 0x55); for (i = 0; i < 8; i++) - nic_write_byte(ioc3, (reg >> (i << 3)) & 0xff); + nic_write_byte(mcr, (reg >> (i << 3)) & 0xff); reg >>= 8; /* Shift out type. */ for (i = 0; i < 6; i++) { @@ -382,52 +345,50 @@ static int nic_init(struct ioc3 *ioc3) break; } - printk("Found %s NIC", type); + pr_info("Found %s NIC", type); if (type != unknown) - printk (" registration number %pM, CRC %02x", serial, crc); - printk(".\n"); + pr_cont(" registration number %pM, CRC %02x", serial, crc); + pr_cont(".\n"); return 0; } -/* - * Read the NIC (Number-In-a-Can) device used to store the MAC address on +/* Read the NIC (Number-In-a-Can) device used to store the MAC address on * SN0 / SN00 nodeboards and PCI cards. */ static void ioc3_get_eaddr_nic(struct ioc3_private *ip) { - struct ioc3 *ioc3 = ip->regs; - u8 nic[14]; + u32 __iomem *mcr = &ip->all_regs->mcr; int tries = 2; /* There may be some problem with the battery? */ + u8 nic[14]; int i; - ioc3_w_gpcr_s(1 << 21); + writel(1 << 21, &ip->all_regs->gpcr_s); while (tries--) { - if (!nic_init(ioc3)) + if (!nic_init(mcr)) break; udelay(500); } if (tries < 0) { - printk("Failed to read MAC address\n"); + pr_err("Failed to read MAC address\n"); return; } /* Read Memory. */ - nic_write_byte(ioc3, 0xf0); - nic_write_byte(ioc3, 0x00); - nic_write_byte(ioc3, 0x00); + nic_write_byte(mcr, 0xf0); + nic_write_byte(mcr, 0x00); + nic_write_byte(mcr, 0x00); for (i = 13; i >= 0; i--) - nic[i] = nic_read_byte(ioc3); + nic[i] = nic_read_byte(mcr); for (i = 2; i < 8; i++) ip->dev->dev_addr[i - 2] = nic[i]; } -/* - * Ok, this is hosed by design. It's necessary to know what machine the +/* Ok, this is hosed by design. It's necessary to know what machine the * NIC is in in order to know how to read the NIC address. We also have * to know if it's a PCI card or a NIC in on the node board ... 
*/ @@ -435,17 +396,21 @@ static void ioc3_get_eaddr(struct ioc3_private *ip) { ioc3_get_eaddr_nic(ip); - printk("Ethernet address is %pM.\n", ip->dev->dev_addr); + pr_info("Ethernet address is %pM.\n", ip->dev->dev_addr); } static void __ioc3_set_mac_address(struct net_device *dev) { struct ioc3_private *ip = netdev_priv(dev); - struct ioc3 *ioc3 = ip->regs; - ioc3_w_emar_h((dev->dev_addr[5] << 8) | dev->dev_addr[4]); - ioc3_w_emar_l((dev->dev_addr[3] << 24) | (dev->dev_addr[2] << 16) | - (dev->dev_addr[1] << 8) | dev->dev_addr[0]); + writel((dev->dev_addr[5] << 8) | + dev->dev_addr[4], + &ip->regs->emar_h); + writel((dev->dev_addr[3] << 24) | + (dev->dev_addr[2] << 16) | + (dev->dev_addr[1] << 8) | + dev->dev_addr[0], + &ip->regs->emar_l); } static int ioc3_set_mac_address(struct net_device *dev, void *addr) @@ -462,31 +427,35 @@ static int ioc3_set_mac_address(struct net_device *dev, void *addr) return 0; } -/* - * Caller must hold the ioc3_lock ever for MII readers. This is also +/* Caller must hold the ioc3_lock ever for MII readers. This is also * used to protect the transmitter side but it's low contention. */ static int ioc3_mdio_read(struct net_device *dev, int phy, int reg) { struct ioc3_private *ip = netdev_priv(dev); - struct ioc3 *ioc3 = ip->regs; + struct ioc3_ethregs *regs = ip->regs; - while (ioc3_r_micr() & MICR_BUSY); - ioc3_w_micr((phy << MICR_PHYADDR_SHIFT) | reg | MICR_READTRIG); - while (ioc3_r_micr() & MICR_BUSY); + while (readl(&regs->micr) & MICR_BUSY) + ; + writel((phy << MICR_PHYADDR_SHIFT) | reg | MICR_READTRIG, + &regs->micr); + while (readl(&regs->micr) & MICR_BUSY) + ; - return ioc3_r_midr_r() & MIDR_DATA_MASK; + return readl(&regs->midr_r) & MIDR_DATA_MASK; } static void ioc3_mdio_write(struct net_device *dev, int phy, int reg, int data) { struct ioc3_private *ip = netdev_priv(dev); - struct ioc3 *ioc3 = ip->regs; - - while (ioc3_r_micr() & MICR_BUSY); - ioc3_w_midr_w(data); - ioc3_w_micr((phy << MICR_PHYADDR_SHIFT) | reg); - while (ioc3_r_micr() & MICR_BUSY); + struct ioc3_ethregs *regs = ip->regs; + + while (readl(&regs->micr) & MICR_BUSY) + ; + writel(data, &regs->midr_w); + writel((phy << MICR_PHYADDR_SHIFT) | reg, &regs->micr); + while (readl(&regs->micr) & MICR_BUSY) + ; } static int ioc3_mii_init(struct ioc3_private *ip); @@ -494,23 +463,22 @@ static int ioc3_mii_init(struct ioc3_private *ip); static struct net_device_stats *ioc3_get_stats(struct net_device *dev) { struct ioc3_private *ip = netdev_priv(dev); - struct ioc3 *ioc3 = ip->regs; + struct ioc3_ethregs *regs = ip->regs; - dev->stats.collisions += (ioc3_r_etcdc() & ETCDC_COLLCNT_MASK); + dev->stats.collisions += readl(&regs->etcdc) & ETCDC_COLLCNT_MASK; return &dev->stats; } -static void ioc3_tcpudp_checksum(struct sk_buff *skb, uint32_t hwsum, int len) +static void ioc3_tcpudp_checksum(struct sk_buff *skb, u32 hwsum, int len) { struct ethhdr *eh = eth_hdr(skb); - uint32_t csum, ehsum; unsigned int proto; - struct iphdr *ih; - uint16_t *ew; unsigned char *cp; + struct iphdr *ih; + u32 csum, ehsum; + u16 *ew; - /* - * Did hardware handle the checksum at all? The cases we can handle + /* Did hardware handle the checksum at all? The cases we can handle * are: * * - TCP and UDP checksums of IPv4 only. 
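The hunks above, together with the accessor-macro removals earlier in this patch, replace the driver's hand-rolled big-endian accessors (ioc3_r_micr()/ioc3_w_micr() and friends, each open-coding be32_to_cpu()/cpu_to_be32()) with generic readl()/writel() calls on a typed register block, delegating byte-order handling to the platform's MMIO accessors. A minimal sketch of the resulting busy-wait MDIO pattern, using a placeholder register layout — example_regs and the EX_* constants below are illustrative, not the real IOC3 definitions:

#include <linux/io.h>
#include <linux/types.h>

/* Placeholder register block; the real driver uses struct ioc3_ethregs. */
struct example_regs {
	u32 micr;	/* MII control register */
	u32 midr_r;	/* MII read-data register */
};

#define EX_MICR_BUSY		0x00010000	/* illustrative busy bit */
#define EX_MICR_READTRIG	0x00020000	/* illustrative read trigger */
#define EX_MICR_PHYADDR_SHIFT	5		/* illustrative field position */
#define EX_MIDR_DATA_MASK	0x0000ffff	/* illustrative data mask */

static u32 example_mdio_read(struct example_regs __iomem *regs, int phy, int reg)
{
	/* Poll the busy bit with readl(); byte order is handled inside
	 * the accessor rather than by explicit be32_to_cpu() as in the
	 * old macro table.
	 */
	while (readl(&regs->micr) & EX_MICR_BUSY)
		;
	writel((phy << EX_MICR_PHYADDR_SHIFT) | reg | EX_MICR_READTRIG,
	       &regs->micr);
	while (readl(&regs->micr) & EX_MICR_BUSY)
		;

	return readl(&regs->midr_r) & EX_MIDR_DATA_MASK;
}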
@@ -526,7 +494,7 @@ static void ioc3_tcpudp_checksum(struct sk_buff *skb, uint32_t hwsum, int len) if (eh->h_proto != htons(ETH_P_IP)) return; - ih = (struct iphdr *) ((char *)eh + ETH_HLEN); + ih = (struct iphdr *)((char *)eh + ETH_HLEN); if (ip_is_fragment(ih)) return; @@ -537,12 +505,12 @@ static void ioc3_tcpudp_checksum(struct sk_buff *skb, uint32_t hwsum, int len) /* Same as tx - compute csum of pseudo header */ csum = hwsum + (ih->tot_len - (ih->ihl << 2)) + - htons((uint16_t)ih->protocol) + + htons((u16)ih->protocol) + (ih->saddr >> 16) + (ih->saddr & 0xffff) + (ih->daddr >> 16) + (ih->daddr & 0xffff); /* Sum up ethernet dest addr, src addr and protocol */ - ew = (uint16_t *) eh; + ew = (u16 *)eh; ehsum = ew[0] + ew[1] + ew[2] + ew[3] + ew[4] + ew[5] + ew[6]; ehsum = (ehsum & 0xffff) + (ehsum >> 16); @@ -551,14 +519,15 @@ static void ioc3_tcpudp_checksum(struct sk_buff *skb, uint32_t hwsum, int len) csum += 0xffff ^ ehsum; /* In the next step we also subtract the 1's complement - checksum of the trailing ethernet CRC. */ + * checksum of the trailing ethernet CRC. + */ cp = (char *)eh + len; /* points at trailing CRC */ if (len & 1) { - csum += 0xffff ^ (uint16_t) ((cp[1] << 8) | cp[0]); - csum += 0xffff ^ (uint16_t) ((cp[3] << 8) | cp[2]); + csum += 0xffff ^ (u16)((cp[1] << 8) | cp[0]); + csum += 0xffff ^ (u16)((cp[3] << 8) | cp[2]); } else { - csum += 0xffff ^ (uint16_t) ((cp[0] << 8) | cp[1]); - csum += 0xffff ^ (uint16_t) ((cp[2] << 8) | cp[3]); + csum += 0xffff ^ (u16)((cp[0] << 8) | cp[1]); + csum += 0xffff ^ (u16)((cp[2] << 8) | cp[3]); } csum = (csum & 0xffff) + (csum >> 16); @@ -572,10 +541,10 @@ static inline void ioc3_rx(struct net_device *dev) { struct ioc3_private *ip = netdev_priv(dev); struct sk_buff *skb, *new_skb; - struct ioc3 *ioc3 = ip->regs; int rx_entry, n_entry, len; struct ioc3_erxbuf *rxb; unsigned long *rxr; + dma_addr_t d; u32 w0, err; rxr = ip->rxr; /* Ring base */ @@ -583,64 +552,67 @@ static inline void ioc3_rx(struct net_device *dev) n_entry = ip->rx_pi; skb = ip->rx_skbs[rx_entry]; - rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET); + rxb = (struct ioc3_erxbuf *)(skb->data - RX_OFFSET); w0 = be32_to_cpu(rxb->w0); while (w0 & ERXBUF_V) { err = be32_to_cpu(rxb->err); /* It's valid ... */ if (err & ERXBUF_GOODPKT) { len = ((w0 >> ERXBUF_BYTECNT_SHIFT) & 0x7ff) - 4; - skb_trim(skb, len); + skb_put(skb, len); skb->protocol = eth_type_trans(skb, dev); - new_skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); - if (!new_skb) { + if (ioc3_alloc_skb(ip, &new_skb, &rxb, &d)) { /* Ouch, drop packet and just recycle packet - to keep the ring filled. */ + * to keep the ring filled. + */ dev->stats.rx_dropped++; new_skb = skb; + d = rxr[rx_entry]; goto next; } if (likely(dev->features & NETIF_F_RXCSUM)) ioc3_tcpudp_checksum(skb, - w0 & ERXBUF_IPCKSUM_MASK, len); + w0 & ERXBUF_IPCKSUM_MASK, + len); + + dma_unmap_single(ip->dma_dev, rxr[rx_entry], + RX_BUF_SIZE, DMA_FROM_DEVICE); netif_rx(skb); ip->rx_skbs[rx_entry] = NULL; /* Poison */ - /* Because we reserve afterwards. */ - skb_put(new_skb, (1664 + RX_OFFSET)); - rxb = (struct ioc3_erxbuf *) new_skb->data; - skb_reserve(new_skb, RX_OFFSET); - dev->stats.rx_packets++; /* Statistics */ dev->stats.rx_bytes += len; } else { /* The frame is invalid and the skb never - reached the network layer so we can just - recycle it. */ + * reached the network layer so we can just + * recycle it. 
+ */ new_skb = skb; + d = rxr[rx_entry]; dev->stats.rx_errors++; } if (err & ERXBUF_CRCERR) /* Statistics */ dev->stats.rx_crc_errors++; if (err & ERXBUF_FRAMERR) dev->stats.rx_frame_errors++; + next: ip->rx_skbs[n_entry] = new_skb; - rxr[n_entry] = cpu_to_be64(ioc3_map(rxb, 1)); + rxr[n_entry] = cpu_to_be64(ioc3_map(d, PCI64_ATTR_BAR)); rxb->w0 = 0; /* Clear valid flag */ - n_entry = (n_entry + 1) & 511; /* Update erpir */ + n_entry = (n_entry + 1) & RX_RING_MASK; /* Update erpir */ /* Now go on to the next ring entry. */ - rx_entry = (rx_entry + 1) & 511; + rx_entry = (rx_entry + 1) & RX_RING_MASK; skb = ip->rx_skbs[rx_entry]; - rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET); + rxb = (struct ioc3_erxbuf *)(skb->data - RX_OFFSET); w0 = be32_to_cpu(rxb->w0); } - ioc3_w_erpir((n_entry << 3) | ERPIR_ARM); + writel((n_entry << 3) | ERPIR_ARM, &ip->regs->erpir); ip->rx_pi = n_entry; ip->rx_ci = rx_entry; } @@ -648,16 +620,16 @@ next: static inline void ioc3_tx(struct net_device *dev) { struct ioc3_private *ip = netdev_priv(dev); + struct ioc3_ethregs *regs = ip->regs; unsigned long packets, bytes; - struct ioc3 *ioc3 = ip->regs; int tx_entry, o_entry; struct sk_buff *skb; u32 etcir; spin_lock(&ip->ioc3_lock); - etcir = ioc3_r_etcir(); + etcir = readl(&regs->etcir); - tx_entry = (etcir >> 7) & 127; + tx_entry = (etcir >> 7) & TX_RING_MASK; o_entry = ip->tx_ci; packets = 0; bytes = 0; @@ -669,25 +641,24 @@ static inline void ioc3_tx(struct net_device *dev) dev_consume_skb_irq(skb); ip->tx_skbs[o_entry] = NULL; - o_entry = (o_entry + 1) & 127; /* Next */ + o_entry = (o_entry + 1) & TX_RING_MASK; /* Next */ - etcir = ioc3_r_etcir(); /* More pkts sent? */ - tx_entry = (etcir >> 7) & 127; + etcir = readl(&regs->etcir); /* More pkts sent? */ + tx_entry = (etcir >> 7) & TX_RING_MASK; } dev->stats.tx_packets += packets; dev->stats.tx_bytes += bytes; ip->txqlen -= packets; - if (ip->txqlen < 128) + if (netif_queue_stopped(dev) && ip->txqlen < TX_RING_ENTRIES) netif_wake_queue(dev); ip->tx_ci = o_entry; spin_unlock(&ip->ioc3_lock); } -/* - * Deal with fatal IOC3 errors. This condition might be caused by a hard or +/* Deal with fatal IOC3 errors. This condition might be caused by a hard or * software problems, so we should try to recover * more gracefully if this ever happens. 
In theory we might be flooded * with such error interrupts if something really goes wrong, so we might @@ -696,25 +667,33 @@ static inline void ioc3_tx(struct net_device *dev) static void ioc3_error(struct net_device *dev, u32 eisr) { struct ioc3_private *ip = netdev_priv(dev); - unsigned char *iface = dev->name; spin_lock(&ip->ioc3_lock); if (eisr & EISR_RXOFLO) - printk(KERN_ERR "%s: RX overflow.\n", iface); + net_err_ratelimited("%s: RX overflow.\n", dev->name); if (eisr & EISR_RXBUFOFLO) - printk(KERN_ERR "%s: RX buffer overflow.\n", iface); + net_err_ratelimited("%s: RX buffer overflow.\n", dev->name); if (eisr & EISR_RXMEMERR) - printk(KERN_ERR "%s: RX PCI error.\n", iface); + net_err_ratelimited("%s: RX PCI error.\n", dev->name); if (eisr & EISR_RXPARERR) - printk(KERN_ERR "%s: RX SSRAM parity error.\n", iface); + net_err_ratelimited("%s: RX SSRAM parity error.\n", dev->name); if (eisr & EISR_TXBUFUFLO) - printk(KERN_ERR "%s: TX buffer underflow.\n", iface); + net_err_ratelimited("%s: TX buffer underflow.\n", dev->name); if (eisr & EISR_TXMEMERR) - printk(KERN_ERR "%s: TX PCI error.\n", iface); + net_err_ratelimited("%s: TX PCI error.\n", dev->name); ioc3_stop(ip); + ioc3_free_rx_bufs(ip); + ioc3_clean_tx_ring(ip); + ioc3_init(dev); + if (ioc3_alloc_rx_bufs(dev)) { + netdev_err(dev, "%s: rx buffer allocation failed\n", __func__); + spin_unlock(&ip->ioc3_lock); + return; + } + ioc3_start(ip); ioc3_mii_init(ip); netif_wake_queue(dev); @@ -723,45 +702,45 @@ static void ioc3_error(struct net_device *dev, u32 eisr) } /* The interrupt handler does all of the Rx thread work and cleans up - after the Tx thread. */ -static irqreturn_t ioc3_interrupt(int irq, void *_dev) + * after the Tx thread. + */ +static irqreturn_t ioc3_interrupt(int irq, void *dev_id) { - struct net_device *dev = (struct net_device *)_dev; - struct ioc3_private *ip = netdev_priv(dev); - struct ioc3 *ioc3 = ip->regs; - const u32 enabled = EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO | - EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO | - EISR_TXEXPLICIT | EISR_TXMEMERR; + struct ioc3_private *ip = netdev_priv(dev_id); + struct ioc3_ethregs *regs = ip->regs; u32 eisr; - eisr = ioc3_r_eisr() & enabled; - - ioc3_w_eisr(eisr); - (void) ioc3_r_eisr(); /* Flush */ + eisr = readl(&regs->eisr); + writel(eisr, &regs->eisr); + readl(&regs->eisr); /* Flush */ if (eisr & (EISR_RXOFLO | EISR_RXBUFOFLO | EISR_RXMEMERR | - EISR_RXPARERR | EISR_TXBUFUFLO | EISR_TXMEMERR)) - ioc3_error(dev, eisr); + EISR_RXPARERR | EISR_TXBUFUFLO | EISR_TXMEMERR)) + ioc3_error(dev_id, eisr); if (eisr & EISR_RXTIMERINT) - ioc3_rx(dev); + ioc3_rx(dev_id); if (eisr & EISR_TXEXPLICIT) - ioc3_tx(dev); + ioc3_tx(dev_id); return IRQ_HANDLED; } static inline void ioc3_setup_duplex(struct ioc3_private *ip) { - struct ioc3 *ioc3 = ip->regs; + struct ioc3_ethregs *regs = ip->regs; + + spin_lock_irq(&ip->ioc3_lock); if (ip->mii.full_duplex) { - ioc3_w_etcsr(ETCSR_FD); + writel(ETCSR_FD, &regs->etcsr); ip->emcr |= EMCR_DUPLEX; } else { - ioc3_w_etcsr(ETCSR_HD); + writel(ETCSR_HD, &regs->etcsr); ip->emcr &= ~EMCR_DUPLEX; } - ioc3_w_emcr(ip->emcr); + writel(ip->emcr, &regs->emcr); + + spin_unlock_irq(&ip->ioc3_lock); } static void ioc3_timer(struct timer_list *t) @@ -772,12 +751,11 @@ static void ioc3_timer(struct timer_list *t) mii_check_media(&ip->mii, 1, 0); ioc3_setup_duplex(ip); - ip->ioc3_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2s */ + ip->ioc3_timer.expires = jiffies + ((12 * HZ) / 10); /* 1.2s */ add_timer(&ip->ioc3_timer); } -/* - * Try to find a PHY. 
There is no apparent relation between the MII addresses +/* Try to find a PHY. There is no apparent relation between the MII addresses * in the SGI documentation and what we find in reality, so we simply probe * for the PHY. It seems IOC3 PHYs usually live on address 31. One of my * onboard IOC3s has the special oddity that probing doesn't seem to find it @@ -786,8 +764,8 @@ static void ioc3_timer(struct timer_list *t) */ static int ioc3_mii_init(struct ioc3_private *ip) { - int i, found = 0, res = 0; int ioc3_phy_workaround = 1; + int i, found = 0, res = 0; u16 word; for (i = 0; i < 32; i++) { @@ -800,9 +778,9 @@ static int ioc3_mii_init(struct ioc3_private *ip) } if (!found) { - if (ioc3_phy_workaround) + if (ioc3_phy_workaround) { i = 31; - else { + } else { ip->mii.phy_id = -1; res = -ENODEV; goto out; @@ -817,27 +795,27 @@ out: static void ioc3_mii_start(struct ioc3_private *ip) { - ip->ioc3_timer.expires = jiffies + (12 * HZ)/10; /* 1.2 sec. */ + ip->ioc3_timer.expires = jiffies + (12 * HZ) / 10; /* 1.2 sec. */ add_timer(&ip->ioc3_timer); } -static inline void ioc3_clean_rx_ring(struct ioc3_private *ip) +static inline void ioc3_tx_unmap(struct ioc3_private *ip, int entry) { - struct sk_buff *skb; - int i; - - for (i = ip->rx_ci; i & 15; i++) { - ip->rx_skbs[ip->rx_pi] = ip->rx_skbs[ip->rx_ci]; - ip->rxr[ip->rx_pi++] = ip->rxr[ip->rx_ci++]; + struct ioc3_etxd *desc; + u32 cmd, bufcnt, len; + + desc = &ip->txr[entry]; + cmd = be32_to_cpu(desc->cmd); + bufcnt = be32_to_cpu(desc->bufcnt); + if (cmd & ETXD_B1V) { + len = (bufcnt & ETXD_B1CNT_MASK) >> ETXD_B1CNT_SHIFT; + dma_unmap_single(ip->dma_dev, be64_to_cpu(desc->p1), + len, DMA_TO_DEVICE); } - ip->rx_pi &= 511; - ip->rx_ci &= 511; - - for (i = ip->rx_ci; i != ip->rx_pi; i = (i+1) & 511) { - struct ioc3_erxbuf *rxb; - skb = ip->rx_skbs[i]; - rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET); - rxb->w0 = 0; + if (cmd & ETXD_B2V) { + len = (bufcnt & ETXD_B2CNT_MASK) >> ETXD_B2CNT_SHIFT; + dma_unmap_single(ip->dma_dev, be64_to_cpu(desc->p2), + len, DMA_TO_DEVICE); } } @@ -846,9 +824,10 @@ static inline void ioc3_clean_tx_ring(struct ioc3_private *ip) struct sk_buff *skb; int i; - for (i=0; i < 128; i++) { + for (i = 0; i < TX_RING_ENTRIES; i++) { skb = ip->tx_skbs[i]; if (skb) { + ioc3_tx_unmap(ip, i); ip->tx_skbs[i] = NULL; dev_kfree_skb_any(skb); } @@ -858,179 +837,137 @@ static inline void ioc3_clean_tx_ring(struct ioc3_private *ip) ip->tx_ci = 0; } -static void ioc3_free_rings(struct ioc3_private *ip) +static void ioc3_free_rx_bufs(struct ioc3_private *ip) { - struct sk_buff *skb; int rx_entry, n_entry; + struct sk_buff *skb; - if (ip->txr) { - ioc3_clean_tx_ring(ip); - free_pages((unsigned long)ip->txr, 2); - ip->txr = NULL; - } - - if (ip->rxr) { - n_entry = ip->rx_ci; - rx_entry = ip->rx_pi; - - while (n_entry != rx_entry) { - skb = ip->rx_skbs[n_entry]; - if (skb) - dev_kfree_skb_any(skb); + n_entry = ip->rx_ci; + rx_entry = ip->rx_pi; - n_entry = (n_entry + 1) & 511; + while (n_entry != rx_entry) { + skb = ip->rx_skbs[n_entry]; + if (skb) { + dma_unmap_single(ip->dma_dev, + be64_to_cpu(ip->rxr[n_entry]), + RX_BUF_SIZE, DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); } - free_page((unsigned long)ip->rxr); - ip->rxr = NULL; + n_entry = (n_entry + 1) & RX_RING_MASK; } } -static void ioc3_alloc_rings(struct net_device *dev) +static int ioc3_alloc_rx_bufs(struct net_device *dev) { struct ioc3_private *ip = netdev_priv(dev); struct ioc3_erxbuf *rxb; - unsigned long *rxr; + dma_addr_t d; int i; - if (ip->rxr == NULL) { - /* Allocate and 
initialize rx ring. 4kb = 512 entries */ - ip->rxr = (unsigned long *) get_zeroed_page(GFP_ATOMIC); - rxr = ip->rxr; - if (!rxr) - printk("ioc3_alloc_rings(): get_zeroed_page() failed!\n"); - - /* Now the rx buffers. The RX ring may be larger but - we only allocate 16 buffers for now. Need to tune - this for performance and memory later. */ - for (i = 0; i < RX_BUFFS; i++) { - struct sk_buff *skb; - - skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); - if (!skb) { - show_free_areas(0, NULL); - continue; - } - - ip->rx_skbs[i] = skb; - - /* Because we reserve afterwards. */ - skb_put(skb, (1664 + RX_OFFSET)); - rxb = (struct ioc3_erxbuf *) skb->data; - rxr[i] = cpu_to_be64(ioc3_map(rxb, 1)); - skb_reserve(skb, RX_OFFSET); - } - ip->rx_ci = 0; - ip->rx_pi = RX_BUFFS; - } + /* Now the rx buffers. The RX ring may be larger but + * we only allocate 16 buffers for now. Need to tune + * this for performance and memory later. + */ + for (i = 0; i < RX_BUFFS; i++) { + if (ioc3_alloc_skb(ip, &ip->rx_skbs[i], &rxb, &d)) + return -ENOMEM; - if (ip->txr == NULL) { - /* Allocate and initialize tx rings. 16kb = 128 bufs. */ - ip->txr = (struct ioc3_etxd *)__get_free_pages(GFP_KERNEL, 2); - if (!ip->txr) - printk("ioc3_alloc_rings(): __get_free_pages() failed!\n"); - ip->tx_pi = 0; - ip->tx_ci = 0; + rxb->w0 = 0; /* Clear valid flag */ + ip->rxr[i] = cpu_to_be64(ioc3_map(d, PCI64_ATTR_BAR)); } -} - -static void ioc3_init_rings(struct net_device *dev) -{ - struct ioc3_private *ip = netdev_priv(dev); - struct ioc3 *ioc3 = ip->regs; - unsigned long ring; - - ioc3_free_rings(ip); - ioc3_alloc_rings(dev); - - ioc3_clean_rx_ring(ip); - ioc3_clean_tx_ring(ip); + ip->rx_ci = 0; + ip->rx_pi = RX_BUFFS; - /* Now the rx ring base, consume & produce registers. */ - ring = ioc3_map(ip->rxr, 0); - ioc3_w_erbr_h(ring >> 32); - ioc3_w_erbr_l(ring & 0xffffffff); - ioc3_w_ercir(ip->rx_ci << 3); - ioc3_w_erpir((ip->rx_pi << 3) | ERPIR_ARM); - - ring = ioc3_map(ip->txr, 0); - - ip->txqlen = 0; /* nothing queued */ - - /* Now the tx ring base, consume & produce registers. 
*/ - ioc3_w_etbr_h(ring >> 32); - ioc3_w_etbr_l(ring & 0xffffffff); - ioc3_w_etpir(ip->tx_pi << 7); - ioc3_w_etcir(ip->tx_ci << 7); - (void) ioc3_r_etcir(); /* Flush */ + return 0; } static inline void ioc3_ssram_disc(struct ioc3_private *ip) { - struct ioc3 *ioc3 = ip->regs; - volatile u32 *ssram0 = &ioc3->ssram[0x0000]; - volatile u32 *ssram1 = &ioc3->ssram[0x4000]; - unsigned int pattern = 0x5555; + struct ioc3_ethregs *regs = ip->regs; + u32 *ssram0 = &ip->ssram[0x0000]; + u32 *ssram1 = &ip->ssram[0x4000]; + u32 pattern = 0x5555; /* Assume the larger size SSRAM and enable parity checking */ - ioc3_w_emcr(ioc3_r_emcr() | (EMCR_BUFSIZ | EMCR_RAMPAR)); + writel(readl(&regs->emcr) | (EMCR_BUFSIZ | EMCR_RAMPAR), &regs->emcr); + readl(&regs->emcr); /* Flush */ - *ssram0 = pattern; - *ssram1 = ~pattern & IOC3_SSRAM_DM; + writel(pattern, ssram0); + writel(~pattern & IOC3_SSRAM_DM, ssram1); - if ((*ssram0 & IOC3_SSRAM_DM) != pattern || - (*ssram1 & IOC3_SSRAM_DM) != (~pattern & IOC3_SSRAM_DM)) { + if ((readl(ssram0) & IOC3_SSRAM_DM) != pattern || + (readl(ssram1) & IOC3_SSRAM_DM) != (~pattern & IOC3_SSRAM_DM)) { /* set ssram size to 64 KB */ - ip->emcr = EMCR_RAMPAR; - ioc3_w_emcr(ioc3_r_emcr() & ~EMCR_BUFSIZ); + ip->emcr |= EMCR_RAMPAR; + writel(readl(&regs->emcr) & ~EMCR_BUFSIZ, &regs->emcr); - } else - ip->emcr = EMCR_BUFSIZ | EMCR_RAMPAR; + } else { + ip->emcr |= EMCR_BUFSIZ | EMCR_RAMPAR; + } } static void ioc3_init(struct net_device *dev) { struct ioc3_private *ip = netdev_priv(dev); - struct ioc3 *ioc3 = ip->regs; + struct ioc3_ethregs *regs = ip->regs; del_timer_sync(&ip->ioc3_timer); /* Kill if running */ - ioc3_w_emcr(EMCR_RST); /* Reset */ - (void) ioc3_r_emcr(); /* Flush WB */ + writel(EMCR_RST, &regs->emcr); /* Reset */ + readl(&regs->emcr); /* Flush WB */ udelay(4); /* Give it time ... */ - ioc3_w_emcr(0); - (void) ioc3_r_emcr(); + writel(0, &regs->emcr); + readl(&regs->emcr); /* Misc registers */ -#ifdef CONFIG_SGI_IP27 - ioc3_w_erbar(PCI64_ATTR_BAR >> 32); /* Barrier on last store */ -#else - ioc3_w_erbar(0); /* Let PCI API get it right */ -#endif - (void) ioc3_r_etcdc(); /* Clear on read */ - ioc3_w_ercsr(15); /* RX low watermark */ - ioc3_w_ertr(0); /* Interrupt immediately */ + writel(ERBAR_VAL, &regs->erbar); + readl(&regs->etcdc); /* Clear on read */ + writel(15, &regs->ercsr); /* RX low watermark */ + writel(0, &regs->ertr); /* Interrupt immediately */ __ioc3_set_mac_address(dev); - ioc3_w_ehar_h(ip->ehar_h); - ioc3_w_ehar_l(ip->ehar_l); - ioc3_w_ersr(42); /* XXX should be random */ + writel(ip->ehar_h, &regs->ehar_h); + writel(ip->ehar_l, &regs->ehar_l); + writel(42, &regs->ersr); /* XXX should be random */ +} - ioc3_init_rings(dev); +static void ioc3_start(struct ioc3_private *ip) +{ + struct ioc3_ethregs *regs = ip->regs; + unsigned long ring; + + /* Now the rx ring base, consume & produce registers. */ + ring = ioc3_map(ip->rxr_dma, PCI64_ATTR_PREC); + writel(ring >> 32, &regs->erbr_h); + writel(ring & 0xffffffff, &regs->erbr_l); + writel(ip->rx_ci << 3, &regs->ercir); + writel((ip->rx_pi << 3) | ERPIR_ARM, &regs->erpir); + + ring = ioc3_map(ip->txr_dma, PCI64_ATTR_PREC); + + ip->txqlen = 0; /* nothing queued */ + + /* Now the tx ring base, consume & produce registers. 
*/ + writel(ring >> 32, &regs->etbr_h); + writel(ring & 0xffffffff, &regs->etbr_l); + writel(ip->tx_pi << 7, &regs->etpir); + writel(ip->tx_ci << 7, &regs->etcir); + readl(&regs->etcir); /* Flush */ ip->emcr |= ((RX_OFFSET / 2) << EMCR_RXOFF_SHIFT) | EMCR_TXDMAEN | - EMCR_TXEN | EMCR_RXDMAEN | EMCR_RXEN | EMCR_PADEN; - ioc3_w_emcr(ip->emcr); - ioc3_w_eier(EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO | - EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO | - EISR_TXEXPLICIT | EISR_TXMEMERR); - (void) ioc3_r_eier(); + EMCR_TXEN | EMCR_RXDMAEN | EMCR_RXEN | EMCR_PADEN; + writel(ip->emcr, &regs->emcr); + writel(EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO | + EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO | + EISR_TXEXPLICIT | EISR_TXMEMERR, &regs->eier); + readl(&regs->eier); } static inline void ioc3_stop(struct ioc3_private *ip) { - struct ioc3 *ioc3 = ip->regs; + struct ioc3_ethregs *regs = ip->regs; - ioc3_w_emcr(0); /* Shutup */ - ioc3_w_eier(0); /* Disable interrupts */ - (void) ioc3_r_eier(); /* Flush */ + writel(0, &regs->emcr); /* Shutup */ + writel(0, &regs->eier); /* Disable interrupts */ + readl(&regs->eier); /* Flush */ } static int ioc3_open(struct net_device *dev) @@ -1038,14 +975,20 @@ static int ioc3_open(struct net_device *dev) struct ioc3_private *ip = netdev_priv(dev); if (request_irq(dev->irq, ioc3_interrupt, IRQF_SHARED, ioc3_str, dev)) { - printk(KERN_ERR "%s: Can't get irq %d\n", dev->name, dev->irq); + netdev_err(dev, "Can't get irq %d\n", dev->irq); return -EAGAIN; } ip->ehar_h = 0; ip->ehar_l = 0; + ioc3_init(dev); + if (ioc3_alloc_rx_bufs(dev)) { + netdev_err(dev, "%s: rx buffer allocation failed\n", __func__); + return -ENOMEM; + } + ioc3_start(ip); ioc3_mii_start(ip); netif_start_queue(dev); @@ -1063,12 +1006,13 @@ static int ioc3_close(struct net_device *dev) ioc3_stop(ip); free_irq(dev->irq, dev); - ioc3_free_rings(ip); + ioc3_free_rx_bufs(ip); + ioc3_clean_tx_ring(ip); + return 0; } -/* - * MENET cards have four IOC3 chips, which are attached to two sets of +/* MENET cards have four IOC3 chips, which are attached to two sets of * PCI slot resources each: the primary connections are on slots * 0..3 and the secondaries are on 4..7 * @@ -1085,7 +1029,7 @@ static int ioc3_adjacent_is_ioc3(struct pci_dev *pdev, int slot) if (dev) { if (dev->vendor == PCI_VENDOR_ID_SGI && - dev->device == PCI_DEVICE_ID_SGI_IOC3) + dev->device == PCI_DEVICE_ID_SGI_IOC3) ret = 1; pci_dev_put(dev); } @@ -1095,15 +1039,14 @@ static int ioc3_is_menet(struct pci_dev *pdev) { - return pdev->bus->parent == NULL && + return !pdev->bus->parent && ioc3_adjacent_is_ioc3(pdev, 0) && ioc3_adjacent_is_ioc3(pdev, 1) && ioc3_adjacent_is_ioc3(pdev, 2); } #ifdef CONFIG_SERIAL_8250 -/* - * Note about serial ports and consoles: +/* Note about serial ports and consoles: * For console output, everyone uses the IOC3 UARTA (offset 0x178) * connected to the master node (look in ip27_setup_console() and * ip27prom_console_write()). 
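ioc3_start() in the hunk above illustrates a common pattern for handing a DMA ring to a device with 32-bit registers: split the 64-bit bus address across a high/low register pair, then read a register back so posted PCI writes reach the device before DMA is enabled. A minimal sketch of that idiom, again with a placeholder register block — example_ring_regs, base_h and base_l are illustrative names, not the real IOC3 layout:

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/types.h>

struct example_ring_regs {
	u32 base_h;	/* ring base address, upper 32 bits */
	u32 base_l;	/* ring base address, lower 32 bits */
};

static void example_program_ring(struct example_ring_regs __iomem *regs,
				 dma_addr_t ring_dma)
{
	/* High word first, matching the order used in ioc3_start(). */
	writel(upper_32_bits(ring_dma), &regs->base_h);
	writel(lower_32_bits(ring_dma), &regs->base_l);

	/* Read back to flush posted writes before enabling DMA. */
	readl(&regs->base_l);
}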
@@ -1140,31 +1083,32 @@ static void ioc3_8250_register(struct ioc3_uartregs __iomem *uart) #define COSMISC_CONSTANT 6 struct uart_8250_port port = { - .port = { + .port = { .irq = 0, .flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF, .iotype = UPIO_MEM, .regshift = 0, .uartclk = (22000000 << 1) / COSMISC_CONSTANT, - .membase = (unsigned char __iomem *) uart, - .mapbase = (unsigned long) uart, - } + .membase = (unsigned char __iomem *)uart, + .mapbase = (unsigned long)uart, + } }; unsigned char lcr; - lcr = uart->iu_lcr; - uart->iu_lcr = lcr | UART_LCR_DLAB; - uart->iu_scr = COSMISC_CONSTANT, - uart->iu_lcr = lcr; - uart->iu_lcr; + lcr = readb(&uart->iu_lcr); + writeb(lcr | UART_LCR_DLAB, &uart->iu_lcr); + writeb(COSMISC_CONSTANT, &uart->iu_scr); + writeb(lcr, &uart->iu_lcr); + readb(&uart->iu_lcr); serial8250_register_8250_port(&port); } static void ioc3_serial_probe(struct pci_dev *pdev, struct ioc3 *ioc3) { - /* - * We need to recognice and treat the fourth MENET serial as it + u32 sio_iec; + + /* We need to recognice and treat the fourth MENET serial as it * does not have an SuperIO chip attached to it, therefore attempting * to access it will result in bus errors. We call something an * MENET if PCI slot 0, 1, 2 and 3 of a master PCI bus all have an IOC3 @@ -1175,33 +1119,34 @@ static void ioc3_serial_probe(struct pci_dev *pdev, struct ioc3 *ioc3) if (ioc3_is_menet(pdev) && PCI_SLOT(pdev->devfn) == 3) return; - /* - * Switch IOC3 to PIO mode. It probably already was but let's be + /* Switch IOC3 to PIO mode. It probably already was but let's be * paranoid */ - ioc3->gpcr_s = GPCR_UARTA_MODESEL | GPCR_UARTB_MODESEL; - ioc3->gpcr_s; - ioc3->gppr_6 = 0; - ioc3->gppr_6; - ioc3->gppr_7 = 0; - ioc3->gppr_7; - ioc3->sscr_a = ioc3->sscr_a & ~SSCR_DMA_EN; - ioc3->sscr_a; - ioc3->sscr_b = ioc3->sscr_b & ~SSCR_DMA_EN; - ioc3->sscr_b; + writel(GPCR_UARTA_MODESEL | GPCR_UARTB_MODESEL, &ioc3->gpcr_s); + readl(&ioc3->gpcr_s); + writel(0, &ioc3->gppr[6]); + readl(&ioc3->gppr[6]); + writel(0, &ioc3->gppr[7]); + readl(&ioc3->gppr[7]); + writel(readl(&ioc3->port_a.sscr) & ~SSCR_DMA_EN, &ioc3->port_a.sscr); + readl(&ioc3->port_a.sscr); + writel(readl(&ioc3->port_b.sscr) & ~SSCR_DMA_EN, &ioc3->port_b.sscr); + readl(&ioc3->port_b.sscr); /* Disable all SA/B interrupts except for SA/B_INT in SIO_IEC. 
*/ - ioc3->sio_iec &= ~ (SIO_IR_SA_TX_MT | SIO_IR_SA_RX_FULL | - SIO_IR_SA_RX_HIGH | SIO_IR_SA_RX_TIMER | - SIO_IR_SA_DELTA_DCD | SIO_IR_SA_DELTA_CTS | - SIO_IR_SA_TX_EXPLICIT | SIO_IR_SA_MEMERR); - ioc3->sio_iec |= SIO_IR_SA_INT; - ioc3->sscr_a = 0; - ioc3->sio_iec &= ~ (SIO_IR_SB_TX_MT | SIO_IR_SB_RX_FULL | - SIO_IR_SB_RX_HIGH | SIO_IR_SB_RX_TIMER | - SIO_IR_SB_DELTA_DCD | SIO_IR_SB_DELTA_CTS | - SIO_IR_SB_TX_EXPLICIT | SIO_IR_SB_MEMERR); - ioc3->sio_iec |= SIO_IR_SB_INT; - ioc3->sscr_b = 0; + sio_iec = readl(&ioc3->sio_iec); + sio_iec &= ~(SIO_IR_SA_TX_MT | SIO_IR_SA_RX_FULL | + SIO_IR_SA_RX_HIGH | SIO_IR_SA_RX_TIMER | + SIO_IR_SA_DELTA_DCD | SIO_IR_SA_DELTA_CTS | + SIO_IR_SA_TX_EXPLICIT | SIO_IR_SA_MEMERR); + sio_iec |= SIO_IR_SA_INT; + sio_iec &= ~(SIO_IR_SB_TX_MT | SIO_IR_SB_RX_FULL | + SIO_IR_SB_RX_HIGH | SIO_IR_SB_RX_TIMER | + SIO_IR_SB_DELTA_DCD | SIO_IR_SB_DELTA_CTS | + SIO_IR_SB_TX_EXPLICIT | SIO_IR_SB_MEMERR); + sio_iec |= SIO_IR_SB_INT; + writel(sio_iec, &ioc3->sio_iec); + writel(0, &ioc3->port_a.sscr); + writel(0, &ioc3->port_b.sscr); ioc3_8250_register(&ioc3->sregs.uarta); ioc3_8250_register(&ioc3->sregs.uartb); @@ -1236,15 +1181,15 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_using_dac = 1; err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); if (err < 0) { - printk(KERN_ERR "%s: Unable to obtain 64 bit DMA " - "for consistent allocations\n", pci_name(pdev)); + pr_err("%s: Unable to obtain 64 bit DMA for consistent allocations\n", + pci_name(pdev)); goto out; } } else { err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { - printk(KERN_ERR "%s: No usable DMA configuration, " - "aborting.\n", pci_name(pdev)); + pr_err("%s: No usable DMA configuration, aborting.\n", + pci_name(pdev)); goto out; } pci_using_dac = 0; @@ -1270,19 +1215,22 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ip = netdev_priv(dev); ip->dev = dev; + ip->dma_dev = &pdev->dev; dev->irq = pdev->irq; ioc3_base = pci_resource_start(pdev, 0); ioc3_size = pci_resource_len(pdev, 0); - ioc3 = (struct ioc3 *) ioremap(ioc3_base, ioc3_size); + ioc3 = (struct ioc3 *)ioremap(ioc3_base, ioc3_size); if (!ioc3) { - printk(KERN_CRIT "ioc3eth(%s): ioremap failed, goodbye.\n", + pr_err("ioc3eth(%s): ioremap failed, goodbye.\n", pci_name(pdev)); err = -ENOMEM; goto out_res; } - ip->regs = ioc3; + ip->regs = &ioc3->eth; + ip->ssram = ioc3->ssram; + ip->all_regs = ioc3; #ifdef CONFIG_SERIAL_8250 ioc3_serial_probe(pdev, ioc3); @@ -1292,6 +1240,26 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) timer_setup(&ip->ioc3_timer, ioc3_timer, 0); ioc3_stop(ip); + + /* Allocate rx ring. 4kb = 512 entries, must be 4kb aligned */ + ip->rxr = dma_direct_alloc_pages(ip->dma_dev, RX_RING_SIZE, + &ip->rxr_dma, GFP_ATOMIC, 0); + if (!ip->rxr) { + pr_err("ioc3-eth: rx ring allocation failed\n"); + err = -ENOMEM; + goto out_stop; + } + + /* Allocate tx rings. 
16kb = 128 bufs, must be 16kb aligned */ + ip->txr = dma_direct_alloc_pages(ip->dma_dev, TX_RING_SIZE, + &ip->txr_dma, + GFP_KERNEL | __GFP_ZERO, 0); + if (!ip->txr) { + pr_err("ioc3-eth: tx ring allocation failed\n"); + err = -ENOMEM; + goto out_stop; + } + ioc3_init(dev); ip->pdev = pdev; @@ -1305,7 +1273,7 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ioc3_mii_init(ip); if (ip->mii.phy_id == -1) { - printk(KERN_CRIT "ioc3-eth(%s): Didn't find a PHY, goodbye.\n", + pr_err("ioc3-eth(%s): Didn't find a PHY, goodbye.\n", pci_name(pdev)); err = -ENODEV; goto out_stop; @@ -1335,24 +1303,27 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) vendor = (sw_physid1 << 12) | (sw_physid2 >> 4); model = (sw_physid2 >> 4) & 0x3f; rev = sw_physid2 & 0xf; - printk(KERN_INFO "%s: Using PHY %d, vendor 0x%x, model %d, " - "rev %d.\n", dev->name, ip->mii.phy_id, vendor, model, rev); - printk(KERN_INFO "%s: IOC3 SSRAM has %d kbyte.\n", dev->name, - ip->emcr & EMCR_BUFSIZ ? 128 : 64); + netdev_info(dev, "Using PHY %d, vendor 0x%x, model %d, rev %d.\n", + ip->mii.phy_id, vendor, model, rev); + netdev_info(dev, "IOC3 SSRAM has %d kbyte.\n", + ip->emcr & EMCR_BUFSIZ ? 128 : 64); return 0; out_stop: - ioc3_stop(ip); del_timer_sync(&ip->ioc3_timer); - ioc3_free_rings(ip); + if (ip->rxr) + dma_direct_free_pages(ip->dma_dev, RX_RING_SIZE, ip->rxr, + ip->rxr_dma, 0); + if (ip->txr) + dma_direct_free_pages(ip->dma_dev, TX_RING_SIZE, ip->txr, + ip->txr_dma, 0); out_res: pci_release_regions(pdev); out_free: free_netdev(dev); out_disable: - /* - * We should call pci_disable_device(pdev); here if the IOC3 wasn't + /* We should call pci_disable_device(pdev); here if the IOC3 wasn't * such a weird device ... */ out: @@ -1363,16 +1334,19 @@ static void ioc3_remove_one(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct ioc3_private *ip = netdev_priv(dev); - struct ioc3 *ioc3 = ip->regs; + + dma_direct_free_pages(ip->dma_dev, RX_RING_SIZE, ip->rxr, + ip->rxr_dma, 0); + dma_direct_free_pages(ip->dma_dev, TX_RING_SIZE, ip->txr, + ip->txr_dma, 0); unregister_netdev(dev); del_timer_sync(&ip->ioc3_timer); - iounmap(ioc3); + iounmap(ip->all_regs); pci_release_regions(pdev); free_netdev(dev); - /* - * We should call pci_disable_device(pdev); here if the IOC3 wasn't + /* We should call pci_disable_device(pdev); here if the IOC3 wasn't * such a weird device ... */ } @@ -1392,16 +1366,14 @@ static struct pci_driver ioc3_driver = { static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev) { - unsigned long data; struct ioc3_private *ip = netdev_priv(dev); - struct ioc3 *ioc3 = ip->regs; - unsigned int len; struct ioc3_etxd *desc; - uint32_t w0 = 0; + unsigned long data; + unsigned int len; int produce; + u32 w0 = 0; - /* - * IOC3 has a fairly simple minded checksumming hardware which simply + /* IOC3 has a fairly simple minded checksumming hardware which simply * adds up the 1's complement checksum for the entire packet and * inserts it at an offset which can be specified in the descriptor * into the transmit packet. This means we have to compensate for the @@ -1412,25 +1384,23 @@ static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev) const struct iphdr *ih = ip_hdr(skb); const int proto = ntohs(ih->protocol); unsigned int csoff; - uint32_t csum, ehsum; - uint16_t *eh; + u32 csum, ehsum; + u16 *eh; /* The MAC header. 
skb->mac seem the logic approach - to find the MAC header - except it's a NULL pointer ... */ - eh = (uint16_t *) skb->data; + * to find the MAC header - except it's a NULL pointer ... + */ + eh = (u16 *)skb->data; /* Sum up dest addr, src addr and protocol */ ehsum = eh[0] + eh[1] + eh[2] + eh[3] + eh[4] + eh[5] + eh[6]; - /* Fold ehsum. can't use csum_fold which negates also ... */ - ehsum = (ehsum & 0xffff) + (ehsum >> 16); - ehsum = (ehsum & 0xffff) + (ehsum >> 16); - /* Skip IP header; it's sum is always zero and was - already filled in by ip_output.c */ + * already filled in by ip_output.c + */ csum = csum_tcpudp_nofold(ih->saddr, ih->daddr, - ih->tot_len - (ih->ihl << 2), - proto, 0xffff ^ ehsum); + ih->tot_len - (ih->ihl << 2), + proto, csum_fold(ehsum)); csum = (csum & 0xffff) + (csum >> 16); /* Fold again */ csum = (csum & 0xffff) + (csum >> 16); @@ -1450,7 +1420,7 @@ static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev) spin_lock_irq(&ip->ioc3_lock); - data = (unsigned long) skb->data; + data = (unsigned long)skb->data; len = skb->len; produce = ip->tx_pi; @@ -1470,47 +1440,78 @@ static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev) unsigned long b2 = (data | 0x3fffUL) + 1UL; unsigned long s1 = b2 - data; unsigned long s2 = data + len - b2; + dma_addr_t d1, d2; desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | - ETXD_B1V | ETXD_B2V | w0); + ETXD_B1V | ETXD_B2V | w0); desc->bufcnt = cpu_to_be32((s1 << ETXD_B1CNT_SHIFT) | - (s2 << ETXD_B2CNT_SHIFT)); - desc->p1 = cpu_to_be64(ioc3_map(skb->data, 1)); - desc->p2 = cpu_to_be64(ioc3_map((void *) b2, 1)); + (s2 << ETXD_B2CNT_SHIFT)); + d1 = dma_map_single(ip->dma_dev, skb->data, s1, DMA_TO_DEVICE); + if (dma_mapping_error(ip->dma_dev, d1)) + goto drop_packet; + d2 = dma_map_single(ip->dma_dev, (void *)b2, s1, DMA_TO_DEVICE); + if (dma_mapping_error(ip->dma_dev, d2)) { + dma_unmap_single(ip->dma_dev, d1, len, DMA_TO_DEVICE); + goto drop_packet; + } + desc->p1 = cpu_to_be64(ioc3_map(d1, PCI64_ATTR_PREF)); + desc->p2 = cpu_to_be64(ioc3_map(d2, PCI64_ATTR_PREF)); } else { + dma_addr_t d; + /* Normal sized packet that doesn't cross a page boundary. */ desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_B1V | w0); desc->bufcnt = cpu_to_be32(len << ETXD_B1CNT_SHIFT); - desc->p1 = cpu_to_be64(ioc3_map(skb->data, 1)); + d = dma_map_single(ip->dma_dev, skb->data, len, DMA_TO_DEVICE); + if (dma_mapping_error(ip->dma_dev, d)) + goto drop_packet; + desc->p1 = cpu_to_be64(ioc3_map(d, PCI64_ATTR_PREF)); } - BARRIER(); + mb(); /* make sure all descriptor changes are visible */ ip->tx_skbs[produce] = skb; /* Remember skb */ - produce = (produce + 1) & 127; + produce = (produce + 1) & TX_RING_MASK; ip->tx_pi = produce; - ioc3_w_etpir(produce << 7); /* Fire ... */ + writel(produce << 7, &ip->regs->etpir); /* Fire ... 
*/ ip->txqlen++; - if (ip->txqlen >= 127) + if (ip->txqlen >= (TX_RING_ENTRIES - 1)) netif_stop_queue(dev); spin_unlock_irq(&ip->ioc3_lock); return NETDEV_TX_OK; + +drop_packet: + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + + spin_unlock_irq(&ip->ioc3_lock); + + return NETDEV_TX_OK; } static void ioc3_timeout(struct net_device *dev) { struct ioc3_private *ip = netdev_priv(dev); - printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name); + netdev_err(dev, "transmit timed out, resetting\n"); spin_lock_irq(&ip->ioc3_lock); ioc3_stop(ip); + ioc3_free_rx_bufs(ip); + ioc3_clean_tx_ring(ip); + ioc3_init(dev); + if (ioc3_alloc_rx_bufs(dev)) { + netdev_err(dev, "%s: rx buffer allocation failed\n", __func__); + spin_unlock_irq(&ip->ioc3_lock); + return; + } + ioc3_start(ip); ioc3_mii_init(ip); ioc3_mii_start(ip); @@ -1519,16 +1520,14 @@ static void ioc3_timeout(struct net_device *dev) netif_wake_queue(dev); } -/* - * Given a multicast ethernet address, this routine calculates the +/* Given a multicast ethernet address, this routine calculates the * address's bit index in the logical address filter mask */ - static inline unsigned int ioc3_hash(const unsigned char *addr) { unsigned int temp = 0; - u32 crc; int bits; + u32 crc; crc = ether_crc_le(ETH_ALEN, addr); @@ -1542,8 +1541,8 @@ static inline unsigned int ioc3_hash(const unsigned char *addr) return temp; } -static void ioc3_get_drvinfo (struct net_device *dev, - struct ethtool_drvinfo *info) +static void ioc3_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) { struct ioc3_private *ip = netdev_priv(dev); @@ -1623,27 +1622,28 @@ static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) static void ioc3_set_multicast_list(struct net_device *dev) { - struct netdev_hw_addr *ha; struct ioc3_private *ip = netdev_priv(dev); - struct ioc3 *ioc3 = ip->regs; + struct ioc3_ethregs *regs = ip->regs; + struct netdev_hw_addr *ha; u64 ehar = 0; - netif_stop_queue(dev); /* Lock out others. */ + spin_lock_irq(&ip->ioc3_lock); if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ ip->emcr |= EMCR_PROMISC; - ioc3_w_emcr(ip->emcr); - (void) ioc3_r_emcr(); + writel(ip->emcr, ®s->emcr); + readl(®s->emcr); } else { ip->emcr &= ~EMCR_PROMISC; - ioc3_w_emcr(ip->emcr); /* Clear promiscuous. */ - (void) ioc3_r_emcr(); + writel(ip->emcr, ®s->emcr); /* Clear promiscuous. */ + readl(®s->emcr); if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) { /* Too many for hashing to make sense or we want all - multicast packets anyway, so skip computing all the - hashes and just accept all packets. */ + * multicast packets anyway, so skip computing all the + * hashes and just accept all packets. + */ ip->ehar_h = 0xffffffff; ip->ehar_l = 0xffffffff; } else { @@ -1653,11 +1653,11 @@ static void ioc3_set_multicast_list(struct net_device *dev) ip->ehar_h = ehar >> 32; ip->ehar_l = ehar & 0xffffffff; } - ioc3_w_ehar_h(ip->ehar_h); - ioc3_w_ehar_l(ip->ehar_l); + writel(ip->ehar_h, ®s->ehar_h); + writel(ip->ehar_l, ®s->ehar_l); } - netif_wake_queue(dev); /* Let us get going again. 
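For context on the ehar_h/ehar_l update visible in the ioc3_set_multicast_list() hunk above: the hunk elides the loop that fills the 64-bit logical address filter, which presumably sets one bit per listed multicast address using ioc3_hash() as the bit index, along these lines (a sketch, not the verbatim driver code):

	u64 ehar = 0;
	struct netdev_hw_addr *ha;

	netdev_for_each_mc_addr(ha, dev)
		ehar |= 1ULL << ioc3_hash(ha->addr);

	ip->ehar_h = ehar >> 32;
	ip->ehar_l = ehar & 0xffffffff;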
*/ + spin_unlock_irq(&ip->ioc3_lock); } module_pci_driver(ioc3_driver); diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c index 00660dd820e2..539bc5db989c 100644 --- a/drivers/net/ethernet/sgi/meth.c +++ b/drivers/net/ethernet/sgi/meth.c @@ -247,8 +247,7 @@ static void meth_free_tx_ring(struct meth_private *priv) /* Remove any pending skb */ for (i = 0; i < TX_RING_ENTRIES; i++) { - if (priv->tx_skbs[i]) - dev_kfree_skb(priv->tx_skbs[i]); + dev_kfree_skb(priv->tx_skbs[i]); priv->tx_skbs[i] = NULL; } dma_free_coherent(&priv->pdev->dev, TX_RING_BUFFER_SIZE, priv->tx_ring, diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index 6e07f5ebacfc..85eaccbbbac1 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c @@ -191,6 +191,8 @@ struct sis900_private { unsigned int tx_full; /* The Tx queue is full. */ u8 host_bridge_rev; u8 chipset_rev; + /* EEPROM data */ + int eeprom_size; }; MODULE_AUTHOR("Jim Huang <cmhuang@sis.com.tw>, Ollie Lho <ollie@sis.com.tw>"); @@ -475,6 +477,8 @@ static int sis900_probe(struct pci_dev *pci_dev, sis_priv->pci_dev = pci_dev; spin_lock_init(&sis_priv->lock); + sis_priv->eeprom_size = 24; + pci_set_drvdata(pci_dev, net_dev); ring_space = pci_alloc_consistent(pci_dev, TX_TOTAL_SIZE, &ring_dma); @@ -2122,6 +2126,68 @@ static void sis900_get_wol(struct net_device *net_dev, struct ethtool_wolinfo *w wol->supported = (WAKE_PHY | WAKE_MAGIC); } +static int sis900_get_eeprom_len(struct net_device *dev) +{ + struct sis900_private *sis_priv = netdev_priv(dev); + + return sis_priv->eeprom_size; +} + +static int sis900_read_eeprom(struct net_device *net_dev, u8 *buf) +{ + struct sis900_private *sis_priv = netdev_priv(net_dev); + void __iomem *ioaddr = sis_priv->ioaddr; + int wait, ret = -EAGAIN; + u16 signature; + u16 *ebuf = (u16 *)buf; + int i; + + if (sis_priv->chipset_rev == SIS96x_900_REV) { + sw32(mear, EEREQ); + for (wait = 0; wait < 2000; wait++) { + if (sr32(mear) & EEGNT) { + /* read 16 bits, and index by 16 bits */ + for (i = 0; i < sis_priv->eeprom_size / 2; i++) + ebuf[i] = (u16)read_eeprom(ioaddr, i); + ret = 0; + break; + } + udelay(1); + } + sw32(mear, EEDONE); + } else { + signature = (u16)read_eeprom(ioaddr, EEPROMSignature); + if (signature != 0xffff && signature != 0x0000) { + /* read 16 bits, and index by 16 bits */ + for (i = 0; i < sis_priv->eeprom_size / 2; i++) + ebuf[i] = (u16)read_eeprom(ioaddr, i); + ret = 0; + } + } + return ret; +} + +#define SIS900_EEPROM_MAGIC 0xBABE +static int sis900_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) +{ + struct sis900_private *sis_priv = netdev_priv(dev); + u8 *eebuf; + int res; + + eebuf = kmalloc(sis_priv->eeprom_size, GFP_KERNEL); + if (!eebuf) + return -ENOMEM; + + eeprom->magic = SIS900_EEPROM_MAGIC; + spin_lock_irq(&sis_priv->lock); + res = sis900_read_eeprom(dev, eebuf); + spin_unlock_irq(&sis_priv->lock); + if (!res) + memcpy(data, eebuf + eeprom->offset, eeprom->len); + kfree(eebuf); + return res; +} + static const struct ethtool_ops sis900_ethtool_ops = { .get_drvinfo = sis900_get_drvinfo, .get_msglevel = sis900_get_msglevel, @@ -2132,6 +2198,8 @@ static const struct ethtool_ops sis900_ethtool_ops = { .set_wol = sis900_set_wol, .get_link_ksettings = sis900_get_link_ksettings, .set_link_ksettings = sis900_set_link_ksettings, + .get_eeprom_len = sis900_get_eeprom_len, + .get_eeprom = sis900_get_eeprom, }; /** diff --git a/drivers/net/ethernet/smsc/smc91x.c 
b/drivers/net/ethernet/smsc/smc91x.c index 601e76ad99a0..3a6761131f4c 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -378,8 +378,7 @@ static void smc_shutdown(struct net_device *dev) pending_skb = lp->pending_tx_skb; lp->pending_tx_skb = NULL; spin_unlock_irq(&lp->lock); - if (pending_skb) - dev_kfree_skb(pending_skb); + dev_kfree_skb(pending_skb); /* and tell the card to stay away from that nasty outside world */ SMC_SELECT_BANK(lp, 0); diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c index 51a7b48db4bc..10d0c3e478ab 100644 --- a/drivers/net/ethernet/socionext/sni_ave.c +++ b/drivers/net/ethernet/socionext/sni_ave.c @@ -1553,7 +1553,6 @@ static int ave_probe(struct platform_device *pdev) struct ave_private *priv; struct net_device *ndev; struct device_node *np; - struct resource *res; const void *mac_addr; void __iomem *base; const char *name; @@ -1573,13 +1572,10 @@ static int ave_probe(struct platform_device *pdev) } irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "IRQ not found\n"); + if (irq < 0) return irq; - } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - base = devm_ioremap_resource(dev, res); + base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) return PTR_ERR(base); diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index 2325b40dff6e..338e25a6374e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -200,6 +200,7 @@ endif config STMMAC_PCI tristate "STMMAC PCI bus support" depends on STMMAC_ETH && PCI + depends on COMMON_CLK ---help--- This selects the platform specific bus support for the stmmac driver. 
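Several conversions in this series (sni_ave above; anarion, dwc-qos and meson below) switch to devm_platform_ioremap_resource(). For reference, the helper is shorthand for exactly the two-step pattern being deleted:

	#include <linux/platform_device.h>

	/* Open-coded equivalent of devm_platform_ioremap_resource(pdev, index). */
	static void __iomem *open_coded_ioremap(struct platform_device *pdev,
						unsigned int index)
	{
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_MEM, index);
		return devm_ioremap_resource(&pdev->dev, res);
	}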
This driver was tested on XLINX XC2V3000 FF1152AMT0221 diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index ed872eed1cab..912bbb6515b2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -75,6 +75,7 @@ struct stmmac_extra_stats { unsigned long rx_missed_cntr; unsigned long rx_overflow_cntr; unsigned long rx_vlan; + unsigned long rx_split_hdr_pkt_n; /* Tx/Rx IRQ error info */ unsigned long tx_undeflow_irq; unsigned long tx_process_stopped_irq; @@ -354,6 +355,13 @@ struct dma_features { unsigned int frpbs; unsigned int frpes; unsigned int addr64; + unsigned int rssen; + unsigned int vlhash; + unsigned int sphen; + unsigned int vlins; + unsigned int dvlan; + unsigned int l3l4fnum; + unsigned int arpoffsel; }; /* GMAC TX FIFO is 8K, Rx FIFO is 16K */ @@ -381,6 +389,16 @@ struct dma_features { #define JUMBO_LEN 9000 +/* Receive Side Scaling */ +#define STMMAC_RSS_HASH_KEY_SIZE 40 +#define STMMAC_RSS_MAX_TABLE_SIZE 256 + +/* VLAN */ +#define STMMAC_VLAN_NONE 0x0 +#define STMMAC_VLAN_REMOVE 0x1 +#define STMMAC_VLAN_INSERT 0x2 +#define STMMAC_VLAN_REPLACE 0x3 + extern const struct stmmac_desc_ops enh_desc_ops; extern const struct stmmac_desc_ops ndesc_ops; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c index 6ce3a7fb41ab..527f93320a5a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c @@ -62,12 +62,10 @@ static void anarion_gmac_exit(struct platform_device *pdev, void *priv) static struct anarion_gmac *anarion_config_dt(struct platform_device *pdev) { int phy_mode; - struct resource *res; void __iomem *ctl_block; struct anarion_gmac *gmac; - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - ctl_block = devm_ioremap_resource(&pdev->dev, res); + ctl_block = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(ctl_block)) { dev_err(&pdev->dev, "Cannot get reset region (%ld)!\n", PTR_ERR(ctl_block)); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c index 3a14cdd01f5f..dd9967aeda22 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c @@ -333,6 +333,9 @@ static void *tegra_eqos_probe(struct platform_device *pdev, usleep_range(2000, 4000); gpiod_set_value(eqos->reset, 0); + /* MDIO bus was already reset just above */ + data->mdio_bus_data->needs_reset = false; + eqos->rst = devm_reset_control_get(&pdev->dev, "eqos"); if (IS_ERR(eqos->rst)) { err = PTR_ERR(eqos->rst); @@ -415,7 +418,6 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev) const struct dwc_eth_dwmac_data *data; struct plat_stmmacenet_data *plat_dat; struct stmmac_resources stmmac_res; - struct resource *res; void *priv; int ret; @@ -428,17 +430,11 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev) * resource initialization is done in the glue logic. 
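The STMMAC_VLAN_* values added to common.h above are the two-bit tag-control encodings later written into the transmit descriptor's VLAN tag-insertion field by the dwmac4/dwxgmac2 hunks further down; for instance, a caller asking for tag insertion on transmit would do something like this sketch:

	/* 0x0 = none, 0x1 = remove, 0x2 = insert, 0x3 = replace */
	dwmac4_set_vlan(desc, STMMAC_VLAN_INSERT);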
*/ stmmac_res.irq = platform_get_irq(pdev, 0); - if (stmmac_res.irq < 0) { - if (stmmac_res.irq != -EPROBE_DEFER) - dev_err(&pdev->dev, - "IRQ configuration information not found\n"); - + if (stmmac_res.irq < 0) return stmmac_res.irq; - } stmmac_res.wol_irq = stmmac_res.irq; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - stmmac_res.addr = devm_ioremap_resource(&pdev->dev, res); + stmmac_res.addr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(stmmac_res.addr)) return PTR_ERR(stmmac_res.addr); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c index 88eb16954627..bbc16b5a410a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c @@ -46,7 +46,6 @@ static int meson6_dwmac_probe(struct platform_device *pdev) struct plat_stmmacenet_data *plat_dat; struct stmmac_resources stmmac_res; struct meson_dwmac *dwmac; - struct resource *res; int ret; ret = stmmac_get_platform_resources(pdev, &stmmac_res); @@ -63,8 +62,7 @@ static int meson6_dwmac_probe(struct platform_device *pdev) goto err_remove_config_dt; } - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - dwmac->reg = devm_ioremap_resource(&pdev->dev, res); + dwmac->reg = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(dwmac->reg)) { ret = PTR_ERR(dwmac->reg); goto err_remove_config_dt; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c index 786ca4a7bf36..9cda29e4b89d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c @@ -308,7 +308,6 @@ static int meson8b_dwmac_probe(struct platform_device *pdev) { struct plat_stmmacenet_data *plat_dat; struct stmmac_resources stmmac_res; - struct resource *res; struct meson8b_dwmac *dwmac; int ret; @@ -332,8 +331,7 @@ static int meson8b_dwmac_probe(struct platform_device *pdev) ret = -EINVAL; goto err_remove_config_dt; } - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - dwmac->regs = devm_ioremap_resource(&pdev->dev, res); + dwmac->regs = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(dwmac->regs)) { ret = PTR_ERR(dwmac->regs); goto err_remove_config_dt; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index c141fe783e87..e0212d2fc2a1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -46,7 +46,6 @@ struct socfpga_dwmac_ops { }; struct socfpga_dwmac { - int interface; u32 reg_offset; u32 reg_shift; struct device *dev; @@ -110,8 +109,6 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device * struct resource res_tse_pcs; struct resource res_sgmii_adapter; - dwmac->interface = of_get_phy_mode(np); - sys_mgr_base_addr = altr_sysmgr_regmap_lookup_by_phandle(np, "altr,sysmgr-syscon"); if (IS_ERR(sys_mgr_base_addr)) { @@ -231,6 +228,14 @@ err_node_put: return ret; } +static int socfpga_get_plat_phymode(struct socfpga_dwmac *dwmac) +{ + struct net_device *ndev = dev_get_drvdata(dwmac->dev); + struct stmmac_priv *priv = netdev_priv(ndev); + + return priv->plat->interface; +} + static int socfpga_set_phy_mode_common(int phymode, u32 *val) { switch (phymode) { @@ -255,7 +260,7 @@ static int socfpga_set_phy_mode_common(int phymode, u32 *val) static int socfpga_gen5_set_phy_mode(struct socfpga_dwmac *dwmac) { struct regmap *sys_mgr_base_addr = 
dwmac->sys_mgr_base_addr; - int phymode = dwmac->interface; + int phymode = socfpga_get_plat_phymode(dwmac); u32 reg_offset = dwmac->reg_offset; u32 reg_shift = dwmac->reg_shift; u32 ctrl, val, module; @@ -314,7 +319,7 @@ static int socfpga_gen5_set_phy_mode(struct socfpga_dwmac *dwmac) static int socfpga_gen10_set_phy_mode(struct socfpga_dwmac *dwmac) { struct regmap *sys_mgr_base_addr = dwmac->sys_mgr_base_addr; - int phymode = dwmac->interface; + int phymode = socfpga_get_plat_phymode(dwmac); u32 reg_offset = dwmac->reg_offset; u32 reg_shift = dwmac->reg_shift; u32 ctrl, val, module; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h index 2ed11a581d80..89a3420eba42 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h @@ -16,7 +16,10 @@ #define GMAC_CONFIG 0x00000000 #define GMAC_PACKET_FILTER 0x00000008 #define GMAC_HASH_TAB(x) (0x10 + (x) * 4) +#define GMAC_VLAN_TAG 0x00000050 +#define GMAC_VLAN_HASH_TABLE 0x00000058 #define GMAC_RX_FLOW_CTRL 0x00000090 +#define GMAC_VLAN_INCL 0x00000060 #define GMAC_QX_TX_FLOW_CTRL(x) (0x70 + x * 4) #define GMAC_TXQ_PRTY_MAP0 0x98 #define GMAC_TXQ_PRTY_MAP1 0x9C @@ -37,6 +40,7 @@ #define GMAC_HW_FEATURE3 0x00000128 #define GMAC_MDIO_ADDR 0x00000200 #define GMAC_MDIO_DATA 0x00000204 +#define GMAC_ARP_ADDR 0x00000210 #define GMAC_ADDR_HIGH(reg) (0x300 + reg * 8) #define GMAC_ADDR_LOW(reg) (0x304 + reg * 8) @@ -62,9 +66,22 @@ #define GMAC_PACKET_FILTER_PM BIT(4) #define GMAC_PACKET_FILTER_PCF BIT(7) #define GMAC_PACKET_FILTER_HPF BIT(10) +#define GMAC_PACKET_FILTER_VTFE BIT(16) #define GMAC_MAX_PERFECT_ADDRESSES 128 +/* MAC VLAN */ +#define GMAC_VLAN_EDVLP BIT(26) +#define GMAC_VLAN_VTHM BIT(25) +#define GMAC_VLAN_DOVLTC BIT(20) +#define GMAC_VLAN_ESVL BIT(18) +#define GMAC_VLAN_ETV BIT(16) +#define GMAC_VLAN_VID GENMASK(15, 0) +#define GMAC_VLAN_VLTI BIT(20) +#define GMAC_VLAN_CSVL BIT(19) +#define GMAC_VLAN_VLC GENMASK(17, 16) +#define GMAC_VLAN_VLC_SHIFT 16 + /* MAC RX Queue Enable */ #define GMAC_RX_QUEUE_CLEAR(queue) ~(GENMASK(1, 0) << ((queue) * 2)) #define GMAC_RX_AV_QUEUE_ENABLE(queue) BIT((queue) * 2) @@ -149,6 +166,9 @@ enum power_event { #define GMAC_DEBUG_RPESTS BIT(0) /* MAC config */ +#define GMAC_CONFIG_ARPEN BIT(31) +#define GMAC_CONFIG_SARC GENMASK(30, 28) +#define GMAC_CONFIG_SARC_SHIFT 28 #define GMAC_CONFIG_IPC BIT(27) #define GMAC_CONFIG_2K BIT(22) #define GMAC_CONFIG_ACS BIT(20) @@ -164,11 +184,13 @@ enum power_event { #define GMAC_CONFIG_RE BIT(0) /* MAC HW features0 bitmap */ +#define GMAC_HW_FEAT_SAVLANINS BIT(27) #define GMAC_HW_FEAT_ADDMAC BIT(18) #define GMAC_HW_FEAT_RXCOESEL BIT(16) #define GMAC_HW_FEAT_TXCOSEL BIT(14) #define GMAC_HW_FEAT_EEESEL BIT(13) #define GMAC_HW_FEAT_TSSEL BIT(12) +#define GMAC_HW_FEAT_ARPOFFSEL BIT(9) #define GMAC_HW_FEAT_MMCSEL BIT(8) #define GMAC_HW_FEAT_MGKSEL BIT(7) #define GMAC_HW_FEAT_RWKSEL BIT(6) @@ -198,6 +220,7 @@ enum power_event { #define GMAC_HW_FEAT_FRPES GENMASK(14, 13) #define GMAC_HW_FEAT_FRPBS GENMASK(12, 11) #define GMAC_HW_FEAT_FRPSEL BIT(10) +#define GMAC_HW_FEAT_DVLAN BIT(5) /* MAC HW ADDR regs */ #define GMAC_HI_DCS GENMASK(18, 16) @@ -352,7 +375,8 @@ enum power_event { /* Default operating mode of the MAC */ #define GMAC_CORE_INIT (GMAC_CONFIG_JD | GMAC_CONFIG_PS | \ - GMAC_CONFIG_BE | GMAC_CONFIG_DCRS) + GMAC_CONFIG_BE | GMAC_CONFIG_DCRS | \ + GMAC_CONFIG_JE) /* To dump the core regs excluding the Address Registers */ #define GMAC_REG_NUM 132 diff --git 
a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index fc9954e4a772..9b4b5f69fc02 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -25,15 +25,9 @@ static void dwmac4_core_init(struct mac_device_info *hw, { void __iomem *ioaddr = hw->pcsr; u32 value = readl(ioaddr + GMAC_CONFIG); - int mtu = dev->mtu; value |= GMAC_CORE_INIT; - if (mtu > 1500) - value |= GMAC_CONFIG_2K; - if (mtu > 2000) - value |= GMAC_CONFIG_JE; - if (hw->ps) { value |= GMAC_CONFIG_TE; @@ -737,6 +731,73 @@ static void dwmac4_set_mac_loopback(void __iomem *ioaddr, bool enable) writel(value, ioaddr + GMAC_CONFIG); } +static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash, + bool is_double) +{ + void __iomem *ioaddr = hw->pcsr; + + writel(hash, ioaddr + GMAC_VLAN_HASH_TABLE); + + if (hash) { + u32 value = GMAC_VLAN_VTHM | GMAC_VLAN_ETV; + if (is_double) { + value |= GMAC_VLAN_EDVLP; + value |= GMAC_VLAN_ESVL; + value |= GMAC_VLAN_DOVLTC; + } + + writel(value, ioaddr + GMAC_VLAN_TAG); + } else { + u32 value = readl(ioaddr + GMAC_VLAN_TAG); + + value &= ~(GMAC_VLAN_VTHM | GMAC_VLAN_ETV); + value &= ~(GMAC_VLAN_EDVLP | GMAC_VLAN_ESVL); + value &= ~GMAC_VLAN_DOVLTC; + value &= ~GMAC_VLAN_VID; + + writel(value, ioaddr + GMAC_VLAN_TAG); + } +} + +static void dwmac4_sarc_configure(void __iomem *ioaddr, int val) +{ + u32 value = readl(ioaddr + GMAC_CONFIG); + + value &= ~GMAC_CONFIG_SARC; + value |= val << GMAC_CONFIG_SARC_SHIFT; + + writel(value, ioaddr + GMAC_CONFIG); +} + +static void dwmac4_enable_vlan(struct mac_device_info *hw, u32 type) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + GMAC_VLAN_INCL); + value |= GMAC_VLAN_VLTI; + value |= GMAC_VLAN_CSVL; /* Only use SVLAN */ + value &= ~GMAC_VLAN_VLC; + value |= (type << GMAC_VLAN_VLC_SHIFT) & GMAC_VLAN_VLC; + writel(value, ioaddr + GMAC_VLAN_INCL); +} + +static void dwmac4_set_arp_offload(struct mac_device_info *hw, bool en, + u32 addr) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + writel(addr, ioaddr + GMAC_ARP_ADDR); + + value = readl(ioaddr + GMAC_CONFIG); + if (en) + value |= GMAC_CONFIG_ARPEN; + else + value &= ~GMAC_CONFIG_ARPEN; + writel(value, ioaddr + GMAC_CONFIG); +} + const struct stmmac_ops dwmac4_ops = { .core_init = dwmac4_core_init, .set_mac = stmmac_set_mac, @@ -767,6 +828,10 @@ const struct stmmac_ops dwmac4_ops = { .debug = dwmac4_debug, .set_filter = dwmac4_set_filter, .set_mac_loopback = dwmac4_set_mac_loopback, + .update_vlan_hash = dwmac4_update_vlan_hash, + .sarc_configure = dwmac4_sarc_configure, + .enable_vlan = dwmac4_enable_vlan, + .set_arp_offload = dwmac4_set_arp_offload, }; const struct stmmac_ops dwmac410_ops = { @@ -799,6 +864,10 @@ const struct stmmac_ops dwmac410_ops = { .debug = dwmac4_debug, .set_filter = dwmac4_set_filter, .set_mac_loopback = dwmac4_set_mac_loopback, + .update_vlan_hash = dwmac4_update_vlan_hash, + .sarc_configure = dwmac4_sarc_configure, + .enable_vlan = dwmac4_enable_vlan, + .set_arp_offload = dwmac4_set_arp_offload, }; const struct stmmac_ops dwmac510_ops = { @@ -836,6 +905,10 @@ const struct stmmac_ops dwmac510_ops = { .rxp_config = dwmac5_rxp_config, .flex_pps_config = dwmac5_flex_pps_config, .set_mac_loopback = dwmac4_set_mac_loopback, + .update_vlan_hash = dwmac4_update_vlan_hash, + .sarc_configure = dwmac4_sarc_configure, + .enable_vlan = dwmac4_enable_vlan, + .set_arp_offload = dwmac4_set_arp_offload, }; int dwmac4_setup(struct 
stmmac_priv *priv) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index dbde23e7e169..15eb1abba91d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c @@ -443,6 +443,13 @@ static void dwmac4_clear(struct dma_desc *p) p->des3 = 0; } +static void dwmac4_set_sarc(struct dma_desc *p, u32 sarc_type) +{ + sarc_type <<= TDES3_SA_INSERT_CTRL_SHIFT; + + p->des3 |= cpu_to_le32(sarc_type & TDES3_SA_INSERT_CTRL_MASK); +} + static int set_16kib_bfsize(int mtu) { int ret = 0; @@ -452,6 +459,39 @@ static int set_16kib_bfsize(int mtu) return ret; } +static void dwmac4_set_vlan_tag(struct dma_desc *p, u16 tag, u16 inner_tag, + u32 inner_type) +{ + p->des0 = 0; + p->des1 = 0; + p->des2 = 0; + p->des3 = 0; + + /* Inner VLAN */ + if (inner_type) { + u32 des = inner_tag << TDES2_IVT_SHIFT; + + des &= TDES2_IVT_MASK; + p->des2 = cpu_to_le32(des); + + des = inner_type << TDES3_IVTIR_SHIFT; + des &= TDES3_IVTIR_MASK; + p->des3 = cpu_to_le32(des | TDES3_IVLTV); + } + + /* Outer VLAN */ + p->des3 |= cpu_to_le32(tag & TDES3_VLAN_TAG); + p->des3 |= cpu_to_le32(TDES3_VLTV); + + p->des3 |= cpu_to_le32(TDES3_CONTEXT_TYPE); +} + +static void dwmac4_set_vlan(struct dma_desc *p, u32 type) +{ + type <<= TDES2_VLAN_TAG_SHIFT; + p->des2 |= cpu_to_le32(type & TDES2_VLAN_TAG_MASK); +} + const struct stmmac_desc_ops dwmac4_desc_ops = { .tx_status = dwmac4_wrback_get_tx_status, .rx_status = dwmac4_wrback_get_rx_status, @@ -476,6 +516,9 @@ const struct stmmac_desc_ops dwmac4_desc_ops = { .get_addr = dwmac4_get_addr, .set_addr = dwmac4_set_addr, .clear = dwmac4_clear, + .set_sarc = dwmac4_set_sarc, + .set_vlan_tag = dwmac4_set_vlan_tag, + .set_vlan = dwmac4_set_vlan, }; const struct stmmac_mode_ops dwmac4_ring_mode_ops = { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h index f58191174287..0d7b3bbcd5a7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h @@ -18,13 +18,21 @@ /* TDES2 (read format) */ #define TDES2_BUFFER1_SIZE_MASK GENMASK(13, 0) #define TDES2_VLAN_TAG_MASK GENMASK(15, 14) +#define TDES2_VLAN_TAG_SHIFT 14 #define TDES2_BUFFER2_SIZE_MASK GENMASK(29, 16) #define TDES2_BUFFER2_SIZE_MASK_SHIFT 16 +#define TDES3_IVTIR_MASK GENMASK(19, 18) +#define TDES3_IVTIR_SHIFT 18 +#define TDES3_IVLTV BIT(17) #define TDES2_TIMESTAMP_ENABLE BIT(30) +#define TDES2_IVT_MASK GENMASK(31, 16) +#define TDES2_IVT_SHIFT 16 #define TDES2_INTERRUPT_ON_COMPLETION BIT(31) /* TDES3 (read format) */ #define TDES3_PACKET_SIZE_MASK GENMASK(14, 0) +#define TDES3_VLAN_TAG GENMASK(15, 0) +#define TDES3_VLTV BIT(16) #define TDES3_CHECKSUM_INSERTION_MASK GENMASK(17, 16) #define TDES3_CHECKSUM_INSERTION_SHIFT 16 #define TDES3_TCP_PKT_PAYLOAD_MASK GENMASK(17, 0) @@ -32,6 +40,7 @@ #define TDES3_HDR_LEN_SHIFT 19 #define TDES3_SLOT_NUMBER_MASK GENMASK(22, 19) #define TDES3_SA_INSERT_CTRL_MASK GENMASK(25, 23) +#define TDES3_SA_INSERT_CTRL_SHIFT 23 #define TDES3_CRC_PAD_CTRL_MASK GENMASK(27, 26) /* TDES3 (write back format) */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c index 3ed5508586ef..68c157979b94 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c @@ -333,7 +333,7 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr, dma_cap->mbps_10_100 = (hw_cap & 
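To make the bit packing in dwmac4_set_vlan_tag() above concrete, here is a worked example with hypothetical tags (outer VID 100, inner VID 200, inner_type = STMMAC_VLAN_INSERT), using the TDES masks from the header hunk; TDES3_CONTEXT_TYPE is BIT(30) in the existing dwmac4_descs.h:

	u32 des2, des3;

	des2 = 200 << TDES2_IVT_SHIFT;				/* 0x00c80000 */
	des3 = (STMMAC_VLAN_INSERT << TDES3_IVTIR_SHIFT)	/* 0x00080000 */
	     | TDES3_IVLTV					/* BIT(17)   */
	     | (100 & TDES3_VLAN_TAG)				/* outer tag */
	     | TDES3_VLTV					/* BIT(16)   */
	     | TDES3_CONTEXT_TYPE;				/* BIT(30)   */
	/* des3 == 0x400b0064 */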
GMAC_HW_FEAT_MIISEL); dma_cap->mbps_1000 = (hw_cap & GMAC_HW_FEAT_GMIISEL) >> 1; dma_cap->half_duplex = (hw_cap & GMAC_HW_FEAT_HDSEL) >> 2; - dma_cap->hash_filter = (hw_cap & GMAC_HW_FEAT_VLHASH) >> 4; + dma_cap->vlhash = (hw_cap & GMAC_HW_FEAT_VLHASH) >> 4; dma_cap->multi_addr = (hw_cap & GMAC_HW_FEAT_ADDMAC) >> 18; dma_cap->pcs = (hw_cap & GMAC_HW_FEAT_PCSSEL) >> 3; dma_cap->sma_mdio = (hw_cap & GMAC_HW_FEAT_SMASEL) >> 5; @@ -348,6 +348,8 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr, /* TX and RX csum */ dma_cap->tx_coe = (hw_cap & GMAC_HW_FEAT_TXCOSEL) >> 14; dma_cap->rx_coe = (hw_cap & GMAC_HW_FEAT_RXCOESEL) >> 16; + dma_cap->vlins = (hw_cap & GMAC_HW_FEAT_SAVLANINS) >> 27; + dma_cap->arpoffsel = (hw_cap & GMAC_HW_FEAT_ARPOFFSEL) >> 9; /* MAC HW feature1 */ hw_cap = readl(ioaddr + GMAC_HW_FEATURE1); @@ -385,6 +387,7 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr, dma_cap->frpes = (hw_cap & GMAC_HW_FEAT_FRPES) >> 13; dma_cap->frpbs = (hw_cap & GMAC_HW_FEAT_FRPBS) >> 11; dma_cap->frpsel = (hw_cap & GMAC_HW_FEAT_FRPSEL) >> 10; + dma_cap->dvlan = (hw_cap & GMAC_HW_FEAT_DVLAN) >> 5; } /* Enable/disable TSO feature and set MSS */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h index 3174b701aa90..5923ca62d793 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h @@ -32,6 +32,9 @@ #define XGMAC_CONFIG_ARPEN BIT(31) #define XGMAC_CONFIG_GPSL GENMASK(29, 16) #define XGMAC_CONFIG_GPSL_SHIFT 16 +#define XGMAC_CONFIG_HDSMS GENMASK(14, 12) +#define XGMAC_CONFIG_HDSMS_SHIFT 12 +#define XGMAC_CONFIG_HDSMS_256 (0x2 << XGMAC_CONFIG_HDSMS_SHIFT) #define XGMAC_CONFIG_S2KP BIT(11) #define XGMAC_CONFIG_LM BIT(10) #define XGMAC_CONFIG_IPC BIT(9) @@ -41,9 +44,12 @@ #define XGMAC_CONFIG_CST BIT(2) #define XGMAC_CONFIG_ACS BIT(1) #define XGMAC_CONFIG_RE BIT(0) -#define XGMAC_CORE_INIT_RX 0 +#define XGMAC_CORE_INIT_RX (XGMAC_CONFIG_GPSLCE | XGMAC_CONFIG_WD | \ + (XGMAC_JUMBO_LEN << XGMAC_CONFIG_GPSL_SHIFT)) #define XGMAC_PACKET_FILTER 0x00000008 #define XGMAC_FILTER_RA BIT(31) +#define XGMAC_FILTER_IPFE BIT(20) +#define XGMAC_FILTER_VTFE BIT(16) #define XGMAC_FILTER_HPF BIT(10) #define XGMAC_FILTER_PCF BIT(7) #define XGMAC_FILTER_PM BIT(4) @@ -51,6 +57,19 @@ #define XGMAC_FILTER_PR BIT(0) #define XGMAC_HASH_TABLE(x) (0x00000010 + (x) * 4) #define XGMAC_MAX_HASH_TABLE 8 +#define XGMAC_VLAN_TAG 0x00000050 +#define XGMAC_VLAN_EDVLP BIT(26) +#define XGMAC_VLAN_VTHM BIT(25) +#define XGMAC_VLAN_DOVLTC BIT(20) +#define XGMAC_VLAN_ESVL BIT(18) +#define XGMAC_VLAN_ETV BIT(16) +#define XGMAC_VLAN_VID GENMASK(15, 0) +#define XGMAC_VLAN_HASH_TABLE 0x00000058 +#define XGMAC_VLAN_INCL 0x00000060 +#define XGMAC_VLAN_VLTI BIT(20) +#define XGMAC_VLAN_CSVL BIT(19) +#define XGMAC_VLAN_VLC GENMASK(17, 16) +#define XGMAC_VLAN_VLC_SHIFT 16 #define XGMAC_RXQ_CTRL0 0x000000a0 #define XGMAC_RXQEN(x) GENMASK((x) * 2 + 1, (x) * 2) #define XGMAC_RXQEN_SHIFT(x) ((x) * 2) @@ -59,6 +78,7 @@ #define XGMAC_PSRQ(x) GENMASK((x) * 8 + 7, (x) * 8) #define XGMAC_PSRQ_SHIFT(x) ((x) * 8) #define XGMAC_INT_STATUS 0x000000b0 +#define XGMAC_LPIIS BIT(5) #define XGMAC_PMTIS BIT(4) #define XGMAC_INT_EN 0x000000b4 #define XGMAC_TSIE BIT(12) @@ -76,19 +96,35 @@ #define XGMAC_RWKPKTEN BIT(2) #define XGMAC_MGKPKTEN BIT(1) #define XGMAC_PWRDWN BIT(0) +#define XGMAC_LPI_CTRL 0x000000d0 +#define XGMAC_TXCGE BIT(21) +#define XGMAC_LPITXA BIT(19) +#define XGMAC_PLS BIT(17) +#define XGMAC_LPITXEN BIT(16) +#define 
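The new dma_cap bits parsed above (vlhash, vlins, arpoffsel, dvlan) exist so the core can gate optional features on what the synthesized IP actually contains; a typical consumer would look like this sketch (the exact feature flag chosen here is illustrative):

	if (priv->dma_cap.vlhash)
		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;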
XGMAC_RLPIEX BIT(3) +#define XGMAC_RLPIEN BIT(2) +#define XGMAC_TLPIEX BIT(1) +#define XGMAC_TLPIEN BIT(0) +#define XGMAC_LPI_TIMER_CTRL 0x000000d4 #define XGMAC_HW_FEATURE0 0x0000011c #define XGMAC_HWFEAT_SAVLANINS BIT(27) #define XGMAC_HWFEAT_RXCOESEL BIT(16) #define XGMAC_HWFEAT_TXCOESEL BIT(14) +#define XGMAC_HWFEAT_EEESEL BIT(13) #define XGMAC_HWFEAT_TSSEL BIT(12) #define XGMAC_HWFEAT_AVSEL BIT(11) #define XGMAC_HWFEAT_RAVSEL BIT(10) #define XGMAC_HWFEAT_ARPOFFSEL BIT(9) +#define XGMAC_HWFEAT_MMCSEL BIT(8) #define XGMAC_HWFEAT_MGKSEL BIT(7) #define XGMAC_HWFEAT_RWKSEL BIT(6) +#define XGMAC_HWFEAT_VLHASH BIT(4) #define XGMAC_HWFEAT_GMIISEL BIT(1) #define XGMAC_HW_FEATURE1 0x00000120 +#define XGMAC_HWFEAT_L3L4FNUM GENMASK(30, 27) +#define XGMAC_HWFEAT_RSSEN BIT(20) #define XGMAC_HWFEAT_TSOEN BIT(18) +#define XGMAC_HWFEAT_SPHEN BIT(17) #define XGMAC_HWFEAT_ADDR64 GENMASK(15, 14) #define XGMAC_HWFEAT_TXFIFOSIZE GENMASK(10, 6) #define XGMAC_HWFEAT_RXFIFOSIZE GENMASK(4, 0) @@ -98,6 +134,16 @@ #define XGMAC_HWFEAT_RXCHCNT GENMASK(15, 12) #define XGMAC_HWFEAT_TXQCNT GENMASK(9, 6) #define XGMAC_HWFEAT_RXQCNT GENMASK(3, 0) +#define XGMAC_HW_FEATURE3 0x00000128 +#define XGMAC_HWFEAT_ASP GENMASK(15, 14) +#define XGMAC_HWFEAT_DVLAN BIT(13) +#define XGMAC_HWFEAT_FRPES GENMASK(12, 11) +#define XGMAC_HWFEAT_FRPPB GENMASK(10, 9) +#define XGMAC_HWFEAT_FRPSEL BIT(3) +#define XGMAC_MAC_DPP_FSM_INT_STATUS 0x00000150 +#define XGMAC_MAC_FSM_CONTROL 0x00000158 +#define XGMAC_PRTYEN BIT(1) +#define XGMAC_TMOUTEN BIT(0) #define XGMAC_MDIO_ADDR 0x00000200 #define XGMAC_MDIO_DATA 0x00000204 #define XGMAC_MDIO_C22P 0x00000220 @@ -107,15 +153,74 @@ #define XGMAC_DCS GENMASK(19, 16) #define XGMAC_DCS_SHIFT 16 #define XGMAC_ADDRx_LOW(x) (0x00000304 + (x) * 0x8) +#define XGMAC_L3L4_ADDR_CTRL 0x00000c00 +#define XGMAC_IDDR GENMASK(15, 8) +#define XGMAC_IDDR_SHIFT 8 +#define XGMAC_IDDR_FNUM 4 +#define XGMAC_TT BIT(1) +#define XGMAC_XB BIT(0) +#define XGMAC_L3L4_DATA 0x00000c04 +#define XGMAC_L3L4_CTRL 0x0 +#define XGMAC_L4DPIM0 BIT(21) +#define XGMAC_L4DPM0 BIT(20) +#define XGMAC_L4SPIM0 BIT(19) +#define XGMAC_L4SPM0 BIT(18) +#define XGMAC_L4PEN0 BIT(16) +#define XGMAC_L3HDBM0 GENMASK(15, 11) +#define XGMAC_L3HSBM0 GENMASK(10, 6) +#define XGMAC_L3DAIM0 BIT(5) +#define XGMAC_L3DAM0 BIT(4) +#define XGMAC_L3SAIM0 BIT(3) +#define XGMAC_L3SAM0 BIT(2) +#define XGMAC_L3PEN0 BIT(0) +#define XGMAC_L4_ADDR 0x1 +#define XGMAC_L4DP0 GENMASK(31, 16) +#define XGMAC_L4DP0_SHIFT 16 +#define XGMAC_L4SP0 GENMASK(15, 0) +#define XGMAC_L3_ADDR0 0x4 +#define XGMAC_L3_ADDR1 0x5 +#define XGMAC_L3_ADDR2 0x6 +#define XMGAC_L3_ADDR3 0x7 #define XGMAC_ARP_ADDR 0x00000c10 +#define XGMAC_RSS_CTRL 0x00000c80 +#define XGMAC_UDP4TE BIT(3) +#define XGMAC_TCP4TE BIT(2) +#define XGMAC_IP2TE BIT(1) +#define XGMAC_RSSE BIT(0) +#define XGMAC_RSS_ADDR 0x00000c88 +#define XGMAC_RSSIA_SHIFT 8 +#define XGMAC_ADDRT BIT(2) +#define XGMAC_CT BIT(1) +#define XGMAC_OB BIT(0) +#define XGMAC_RSS_DATA 0x00000c8c #define XGMAC_TIMESTAMP_STATUS 0x00000d20 #define XGMAC_TXTSC BIT(15) #define XGMAC_TXTIMESTAMP_NSEC 0x00000d30 #define XGMAC_TXTSSTSLO GENMASK(30, 0) #define XGMAC_TXTIMESTAMP_SEC 0x00000d34 +#define XGMAC_PPS_CONTROL 0x00000d70 +#define XGMAC_PPS_MAXIDX(x) ((((x) + 1) * 8) - 1) +#define XGMAC_PPS_MINIDX(x) ((x) * 8) +#define XGMAC_PPSx_MASK(x) \ + GENMASK(XGMAC_PPS_MAXIDX(x), XGMAC_PPS_MINIDX(x)) +#define XGMAC_TRGTMODSELx(x, val) \ + GENMASK(XGMAC_PPS_MAXIDX(x) - 1, XGMAC_PPS_MAXIDX(x) - 2) & \ + ((val) << (XGMAC_PPS_MAXIDX(x) - 2)) +#define 
XGMAC_PPSCMDx(x, val) \ + GENMASK(XGMAC_PPS_MINIDX(x) + 3, XGMAC_PPS_MINIDX(x)) & \ + ((val) << XGMAC_PPS_MINIDX(x)) +#define XGMAC_PPSCMD_START 0x2 +#define XGMAC_PPSCMD_STOP 0x5 +#define XGMAC_PPSEN0 BIT(4) +#define XGMAC_PPSx_TARGET_TIME_SEC(x) (0x00000d80 + (x) * 0x10) +#define XGMAC_PPSx_TARGET_TIME_NSEC(x) (0x00000d84 + (x) * 0x10) +#define XGMAC_TRGTBUSY0 BIT(31) +#define XGMAC_PPSx_INTERVAL(x) (0x00000d88 + (x) * 0x10) +#define XGMAC_PPSx_WIDTH(x) (0x00000d8c + (x) * 0x10) /* MTL Registers */ #define XGMAC_MTL_OPMODE 0x00001000 +#define XGMAC_FRPE BIT(15) #define XGMAC_ETSALG GENMASK(6, 5) #define XGMAC_WRR (0x0 << 5) #define XGMAC_WFQ (0x1 << 5) @@ -124,8 +229,32 @@ #define XGMAC_MTL_INT_STATUS 0x00001020 #define XGMAC_MTL_RXQ_DMA_MAP0 0x00001030 #define XGMAC_MTL_RXQ_DMA_MAP1 0x00001034 -#define XGMAC_QxMDMACH(x) GENMASK((x) * 8 + 3, (x) * 8) +#define XGMAC_QxMDMACH(x) GENMASK((x) * 8 + 7, (x) * 8) #define XGMAC_QxMDMACH_SHIFT(x) ((x) * 8) +#define XGMAC_QDDMACH BIT(7) +#define XGMAC_TC_PRTY_MAP0 0x00001040 +#define XGMAC_TC_PRTY_MAP1 0x00001044 +#define XGMAC_PSTC(x) GENMASK((x) * 8 + 7, (x) * 8) +#define XGMAC_PSTC_SHIFT(x) ((x) * 8) +#define XGMAC_MTL_RXP_CONTROL_STATUS 0x000010a0 +#define XGMAC_RXPI BIT(31) +#define XGMAC_NPE GENMASK(23, 16) +#define XGMAC_NVE GENMASK(7, 0) +#define XGMAC_MTL_RXP_IACC_CTRL_ST 0x000010b0 +#define XGMAC_STARTBUSY BIT(31) +#define XGMAC_WRRDN BIT(16) +#define XGMAC_ADDR GENMASK(9, 0) +#define XGMAC_MTL_RXP_IACC_DATA 0x000010b4 +#define XGMAC_MTL_ECC_CONTROL 0x000010c0 +#define XGMAC_MTL_SAFETY_INT_STATUS 0x000010c4 +#define XGMAC_MEUIS BIT(1) +#define XGMAC_MECIS BIT(0) +#define XGMAC_MTL_ECC_INT_ENABLE 0x000010c8 +#define XGMAC_RPCEIE BIT(12) +#define XGMAC_ECEIE BIT(8) +#define XGMAC_RXCEIE BIT(4) +#define XGMAC_TXCEIE BIT(0) +#define XGMAC_MTL_ECC_INT_STATUS 0x000010cc #define XGMAC_MTL_TXQ_OPMODE(x) (0x00001100 + (0x80 * (x))) #define XGMAC_TQS GENMASK(25, 16) #define XGMAC_TQS_SHIFT 16 @@ -164,6 +293,7 @@ #define XGMAC_RXOVFIS BIT(16) #define XGMAC_ABPSIS BIT(1) #define XGMAC_TXUNFIS BIT(0) +#define XGMAC_MAC_REGSIZE (XGMAC_MTL_QINT_STATUS(15) / 4) /* DMA Registers */ #define XGMAC_DMA_MODE 0x00003000 @@ -190,7 +320,18 @@ #define XGMAC_TDPS GENMASK(29, 0) #define XGMAC_RX_EDMA_CTRL 0x00003044 #define XGMAC_RDPS GENMASK(29, 0) +#define XGMAC_DMA_SAFETY_INT_STATUS 0x00003064 +#define XGMAC_MCSIS BIT(31) +#define XGMAC_MSUIS BIT(29) +#define XGMAC_MSCIS BIT(28) +#define XGMAC_DEUIS BIT(1) +#define XGMAC_DECIS BIT(0) +#define XGMAC_DMA_ECC_INT_ENABLE 0x00003068 +#define XGMAC_DCEIE BIT(1) +#define XGMAC_TCEIE BIT(0) +#define XGMAC_DMA_ECC_INT_STATUS 0x0000306c #define XGMAC_DMA_CH_CONTROL(x) (0x00003100 + (0x80 * (x))) +#define XGMAC_SPH BIT(24) #define XGMAC_PBLx8 BIT(16) #define XGMAC_DMA_CH_TX_CONTROL(x) (0x00003104 + (0x80 * (x))) #define XGMAC_TxPBL GENMASK(21, 16) @@ -230,12 +371,17 @@ #define XGMAC_TBU BIT(2) #define XGMAC_TPS BIT(1) #define XGMAC_TI BIT(0) +#define XGMAC_REGSIZE ((0x0000317c + (0x80 * 15)) / 4) /* Descriptors */ +#define XGMAC_TDES2_IVT GENMASK(31, 16) +#define XGMAC_TDES2_IVT_SHIFT 16 #define XGMAC_TDES2_IOC BIT(31) #define XGMAC_TDES2_TTSE BIT(30) #define XGMAC_TDES2_B2L GENMASK(29, 16) #define XGMAC_TDES2_B2L_SHIFT 16 +#define XGMAC_TDES2_VTIR GENMASK(15, 14) +#define XGMAC_TDES2_VTIR_SHIFT 14 #define XGMAC_TDES2_B1L GENMASK(13, 0) #define XGMAC_TDES3_OWN BIT(31) #define XGMAC_TDES3_CTXT BIT(30) @@ -244,18 +390,33 @@ #define XGMAC_TDES3_CPC GENMASK(27, 26) #define XGMAC_TDES3_CPC_SHIFT 26 #define XGMAC_TDES3_TCMSSV 
BIT(26) +#define XGMAC_TDES3_SAIC GENMASK(25, 23) +#define XGMAC_TDES3_SAIC_SHIFT 23 #define XGMAC_TDES3_THL GENMASK(22, 19) #define XGMAC_TDES3_THL_SHIFT 19 +#define XGMAC_TDES3_IVTIR GENMASK(19, 18) +#define XGMAC_TDES3_IVTIR_SHIFT 18 #define XGMAC_TDES3_TSE BIT(18) +#define XGMAC_TDES3_IVLTV BIT(17) #define XGMAC_TDES3_CIC GENMASK(17, 16) #define XGMAC_TDES3_CIC_SHIFT 16 #define XGMAC_TDES3_TPL GENMASK(17, 0) +#define XGMAC_TDES3_VLTV BIT(16) +#define XGMAC_TDES3_VT GENMASK(15, 0) #define XGMAC_TDES3_FL GENMASK(14, 0) +#define XGMAC_RDES2_HL GENMASK(9, 0) #define XGMAC_RDES3_OWN BIT(31) #define XGMAC_RDES3_CTXT BIT(30) #define XGMAC_RDES3_IOC BIT(30) #define XGMAC_RDES3_LD BIT(28) #define XGMAC_RDES3_CDA BIT(27) +#define XGMAC_RDES3_RSV BIT(26) +#define XGMAC_RDES3_L34T GENMASK(23, 20) +#define XGMAC_RDES3_L34T_SHIFT 20 +#define XGMAC_L34T_IP4TCP 0x1 +#define XGMAC_L34T_IP4UDP 0x2 +#define XGMAC_L34T_IP6TCP 0x9 +#define XGMAC_L34T_IP6UDP 0xA #define XGMAC_RDES3_ES BIT(15) #define XGMAC_RDES3_PL GENMASK(13, 0) #define XGMAC_RDES3_TSD BIT(6) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c index 85c68b7ee8c6..d5173dd02a71 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c @@ -6,14 +6,15 @@ #include <linux/bitrev.h> #include <linux/crc32.h> +#include <linux/iopoll.h> #include "stmmac.h" +#include "stmmac_ptp.h" #include "dwxgmac2.h" static void dwxgmac2_core_init(struct mac_device_info *hw, struct net_device *dev) { void __iomem *ioaddr = hw->pcsr; - int mtu = dev->mtu; u32 tx, rx; tx = readl(ioaddr + XGMAC_TX_CONFIG); @@ -22,16 +23,6 @@ static void dwxgmac2_core_init(struct mac_device_info *hw, tx |= XGMAC_CORE_INIT_TX; rx |= XGMAC_CORE_INIT_RX; - if (mtu >= 9000) { - rx |= XGMAC_CONFIG_GPSLCE; - rx |= XGMAC_JUMBO_LEN << XGMAC_CONFIG_GPSL_SHIFT; - rx |= XGMAC_CONFIG_WD; - } else if (mtu > 2000) { - rx |= XGMAC_CONFIG_JE; - } else if (mtu > 1500) { - rx |= XGMAC_CONFIG_S2KP; - } - if (hw->ps) { tx |= XGMAC_CONFIG_TE; tx &= ~hw->link.speed_mask; @@ -118,6 +109,23 @@ static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio, writel(value, ioaddr + reg); } +static void dwxgmac2_tx_queue_prio(struct mac_device_info *hw, u32 prio, + u32 queue) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value, reg; + + reg = (queue < 4) ? 
XGMAC_TC_PRTY_MAP0 : XGMAC_TC_PRTY_MAP1; + if (queue >= 4) + queue -= 4; + + value = readl(ioaddr + reg); + value &= ~XGMAC_PSTC(queue); + value |= (prio << XGMAC_PSTC_SHIFT(queue)) & XGMAC_PSTC(queue); + + writel(value, ioaddr + reg); +} + static void dwxgmac2_prog_mtl_rx_algorithms(struct mac_device_info *hw, u32 rx_alg) { @@ -144,7 +152,9 @@ static void dwxgmac2_prog_mtl_tx_algorithms(struct mac_device_info *hw, u32 tx_alg) { void __iomem *ioaddr = hw->pcsr; + bool ets = true; u32 value; + int i; value = readl(ioaddr + XGMAC_MTL_OPMODE); value &= ~XGMAC_ETSALG; @@ -160,10 +170,28 @@ static void dwxgmac2_prog_mtl_tx_algorithms(struct mac_device_info *hw, value |= XGMAC_DWRR; break; default: + ets = false; break; } writel(value, ioaddr + XGMAC_MTL_OPMODE); + + /* Set ETS if desired */ + for (i = 0; i < MTL_MAX_TX_QUEUES; i++) { + value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i)); + value &= ~XGMAC_TSA; + if (ets) + value |= XGMAC_ETS; + writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i)); + } +} + +static void dwxgmac2_set_mtl_tx_queue_weight(struct mac_device_info *hw, + u32 weight, u32 queue) +{ + void __iomem *ioaddr = hw->pcsr; + + writel(weight, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue)); } static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue, @@ -200,11 +228,21 @@ static void dwxgmac2_config_cbs(struct mac_device_info *hw, writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue)); } +static void dwxgmac2_dump_regs(struct mac_device_info *hw, u32 *reg_space) +{ + void __iomem *ioaddr = hw->pcsr; + int i; + + for (i = 0; i < XGMAC_MAC_REGSIZE; i++) + reg_space[i] = readl(ioaddr + i * 4); +} + static int dwxgmac2_host_irq_status(struct mac_device_info *hw, struct stmmac_extra_stats *x) { void __iomem *ioaddr = hw->pcsr; u32 stat, en; + int ret = 0; en = readl(ioaddr + XGMAC_INT_EN); stat = readl(ioaddr + XGMAC_INT_STATUS); @@ -216,7 +254,24 @@ static int dwxgmac2_host_irq_status(struct mac_device_info *hw, readl(ioaddr + XGMAC_PMT); } - return 0; + if (stat & XGMAC_LPIIS) { + u32 lpi = readl(ioaddr + XGMAC_LPI_CTRL); + + if (lpi & XGMAC_TLPIEN) { + ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE; + x->irq_tx_path_in_lpi_mode_n++; + } + if (lpi & XGMAC_TLPIEX) { + ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE; + x->irq_tx_path_exit_lpi_mode_n++; + } + if (lpi & XGMAC_RLPIEN) + x->irq_rx_path_in_lpi_mode_n++; + if (lpi & XGMAC_RLPIEX) + x->irq_rx_path_exit_lpi_mode_n++; + } + + return ret; } static int dwxgmac2_host_mtl_irq_status(struct mac_device_info *hw, u32 chan) @@ -309,6 +364,53 @@ static void dwxgmac2_get_umac_addr(struct mac_device_info *hw, addr[5] = (hi_addr >> 8) & 0xff; } +static void dwxgmac2_set_eee_mode(struct mac_device_info *hw, + bool en_tx_lpi_clockgating) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + XGMAC_LPI_CTRL); + + value |= XGMAC_LPITXEN | XGMAC_LPITXA; + if (en_tx_lpi_clockgating) + value |= XGMAC_TXCGE; + + writel(value, ioaddr + XGMAC_LPI_CTRL); +} + +static void dwxgmac2_reset_eee_mode(struct mac_device_info *hw) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + XGMAC_LPI_CTRL); + value &= ~(XGMAC_LPITXEN | XGMAC_LPITXA | XGMAC_TXCGE); + writel(value, ioaddr + XGMAC_LPI_CTRL); +} + +static void dwxgmac2_set_eee_pls(struct mac_device_info *hw, int link) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + XGMAC_LPI_CTRL); + if (link) + value |= XGMAC_PLS; + else + value &= ~XGMAC_PLS; + writel(value, ioaddr + XGMAC_LPI_CTRL); +} + +static void 
dwxgmac2_set_eee_timer(struct mac_device_info *hw, int ls, int tw) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = (tw & 0xffff) | ((ls & 0x3ff) << 16); + writel(value, ioaddr + XGMAC_LPI_TIMER_CTRL); +} + static void dwxgmac2_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits, int mcbitslog2) { @@ -402,36 +504,888 @@ static void dwxgmac2_set_mac_loopback(void __iomem *ioaddr, bool enable) writel(value, ioaddr + XGMAC_RX_CONFIG); } +static int dwxgmac2_rss_write_reg(void __iomem *ioaddr, bool is_key, int idx, + u32 val) +{ + u32 ctrl = 0; + + writel(val, ioaddr + XGMAC_RSS_DATA); + ctrl |= idx << XGMAC_RSSIA_SHIFT; + ctrl |= is_key ? XGMAC_ADDRT : 0x0; + ctrl |= XGMAC_OB; + writel(ctrl, ioaddr + XGMAC_RSS_ADDR); + + return readl_poll_timeout(ioaddr + XGMAC_RSS_ADDR, ctrl, + !(ctrl & XGMAC_OB), 100, 10000); +} + +static int dwxgmac2_rss_configure(struct mac_device_info *hw, + struct stmmac_rss *cfg, u32 num_rxq) +{ + void __iomem *ioaddr = hw->pcsr; + u32 *key = (u32 *)cfg->key; + int i, ret; + u32 value; + + value = readl(ioaddr + XGMAC_RSS_CTRL); + if (!cfg->enable) { + value &= ~XGMAC_RSSE; + writel(value, ioaddr + XGMAC_RSS_CTRL); + return 0; + } + + for (i = 0; i < (sizeof(cfg->key) / sizeof(u32)); i++) { + ret = dwxgmac2_rss_write_reg(ioaddr, true, i, *key++); + if (ret) + return ret; + } + + for (i = 0; i < ARRAY_SIZE(cfg->table); i++) { + ret = dwxgmac2_rss_write_reg(ioaddr, false, i, cfg->table[i]); + if (ret) + return ret; + } + + for (i = 0; i < num_rxq; i++) + dwxgmac2_map_mtl_to_dma(hw, i, XGMAC_QDDMACH); + + value |= XGMAC_UDP4TE | XGMAC_TCP4TE | XGMAC_IP2TE | XGMAC_RSSE; + writel(value, ioaddr + XGMAC_RSS_CTRL); + return 0; +} + +static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash, + bool is_double) +{ + void __iomem *ioaddr = hw->pcsr; + + writel(hash, ioaddr + XGMAC_VLAN_HASH_TABLE); + + if (hash) { + u32 value = readl(ioaddr + XGMAC_PACKET_FILTER); + + value |= XGMAC_FILTER_VTFE; + + writel(value, ioaddr + XGMAC_PACKET_FILTER); + + value = XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV; + if (is_double) { + value |= XGMAC_VLAN_EDVLP; + value |= XGMAC_VLAN_ESVL; + value |= XGMAC_VLAN_DOVLTC; + } + + writel(value, ioaddr + XGMAC_VLAN_TAG); + } else { + u32 value = readl(ioaddr + XGMAC_PACKET_FILTER); + + value &= ~XGMAC_FILTER_VTFE; + + writel(value, ioaddr + XGMAC_PACKET_FILTER); + + value = readl(ioaddr + XGMAC_VLAN_TAG); + + value &= ~(XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV); + value &= ~(XGMAC_VLAN_EDVLP | XGMAC_VLAN_ESVL); + value &= ~XGMAC_VLAN_DOVLTC; + value &= ~XGMAC_VLAN_VID; + + writel(value, ioaddr + XGMAC_VLAN_TAG); + } +} + +struct dwxgmac3_error_desc { + bool valid; + const char *desc; + const char *detailed_desc; +}; + +#define STAT_OFF(field) offsetof(struct stmmac_safety_stats, field) + +static void dwxgmac3_log_error(struct net_device *ndev, u32 value, bool corr, + const char *module_name, + const struct dwxgmac3_error_desc *desc, + unsigned long field_offset, + struct stmmac_safety_stats *stats) +{ + unsigned long loc, mask; + u8 *bptr = (u8 *)stats; + unsigned long *ptr; + + ptr = (unsigned long *)(bptr + field_offset); + + mask = value; + for_each_set_bit(loc, &mask, 32) { + netdev_err(ndev, "Found %s error in %s: '%s: %s'\n", corr ? 
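A plausible way to drive dwxgmac2_rss_configure() above, filling the 40-byte key and 256-entry indirection table sized by the common.h defines (the helper choices are assumptions, though netdev_rss_key_fill() and ethtool_rxfh_indir_default() are standard kernel helpers):

	struct stmmac_rss cfg = { .enable = true };
	int i, ret;

	netdev_rss_key_fill(cfg.key, sizeof(cfg.key));
	for (i = 0; i < ARRAY_SIZE(cfg.table); i++)
		cfg.table[i] = ethtool_rxfh_indir_default(i, num_rxq);

	ret = dwxgmac2_rss_configure(hw, &cfg, num_rxq);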
+ "correctable" : "uncorrectable", module_name, + desc[loc].desc, desc[loc].detailed_desc); + + /* Update counters */ + ptr[loc]++; + } +} + +static const struct dwxgmac3_error_desc dwxgmac3_mac_errors[32]= { + { true, "ATPES", "Application Transmit Interface Parity Check Error" }, + { true, "DPES", "Descriptor Cache Data Path Parity Check Error" }, + { true, "TPES", "TSO Data Path Parity Check Error" }, + { true, "TSOPES", "TSO Header Data Path Parity Check Error" }, + { true, "MTPES", "MTL Data Path Parity Check Error" }, + { true, "MTSPES", "MTL TX Status Data Path Parity Check Error" }, + { true, "MTBUPES", "MAC TBU Data Path Parity Check Error" }, + { true, "MTFCPES", "MAC TFC Data Path Parity Check Error" }, + { true, "ARPES", "Application Receive Interface Data Path Parity Check Error" }, + { true, "MRWCPES", "MTL RWC Data Path Parity Check Error" }, + { true, "MRRCPES", "MTL RCC Data Path Parity Check Error" }, + { true, "CWPES", "CSR Write Data Path Parity Check Error" }, + { true, "ASRPES", "AXI Slave Read Data Path Parity Check Error" }, + { true, "TTES", "TX FSM Timeout Error" }, + { true, "RTES", "RX FSM Timeout Error" }, + { true, "CTES", "CSR FSM Timeout Error" }, + { true, "ATES", "APP FSM Timeout Error" }, + { true, "PTES", "PTP FSM Timeout Error" }, + { false, "UNKNOWN", "Unknown Error" }, /* 18 */ + { false, "UNKNOWN", "Unknown Error" }, /* 19 */ + { false, "UNKNOWN", "Unknown Error" }, /* 20 */ + { true, "MSTTES", "Master Read/Write Timeout Error" }, + { true, "SLVTES", "Slave Read/Write Timeout Error" }, + { true, "ATITES", "Application Timeout on ATI Interface Error" }, + { true, "ARITES", "Application Timeout on ARI Interface Error" }, + { true, "FSMPES", "FSM State Parity Error" }, + { false, "UNKNOWN", "Unknown Error" }, /* 26 */ + { false, "UNKNOWN", "Unknown Error" }, /* 27 */ + { false, "UNKNOWN", "Unknown Error" }, /* 28 */ + { false, "UNKNOWN", "Unknown Error" }, /* 29 */ + { false, "UNKNOWN", "Unknown Error" }, /* 30 */ + { true, "CPI", "Control Register Parity Check Error" }, +}; + +static void dwxgmac3_handle_mac_err(struct net_device *ndev, + void __iomem *ioaddr, bool correctable, + struct stmmac_safety_stats *stats) +{ + u32 value; + + value = readl(ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS); + writel(value, ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS); + + dwxgmac3_log_error(ndev, value, correctable, "MAC", + dwxgmac3_mac_errors, STAT_OFF(mac_errors), stats); +} + +static const struct dwxgmac3_error_desc dwxgmac3_mtl_errors[32]= { + { true, "TXCES", "MTL TX Memory Error" }, + { true, "TXAMS", "MTL TX Memory Address Mismatch Error" }, + { true, "TXUES", "MTL TX Memory Error" }, + { false, "UNKNOWN", "Unknown Error" }, /* 3 */ + { true, "RXCES", "MTL RX Memory Error" }, + { true, "RXAMS", "MTL RX Memory Address Mismatch Error" }, + { true, "RXUES", "MTL RX Memory Error" }, + { false, "UNKNOWN", "Unknown Error" }, /* 7 */ + { true, "ECES", "MTL EST Memory Error" }, + { true, "EAMS", "MTL EST Memory Address Mismatch Error" }, + { true, "EUES", "MTL EST Memory Error" }, + { false, "UNKNOWN", "Unknown Error" }, /* 11 */ + { true, "RPCES", "MTL RX Parser Memory Error" }, + { true, "RPAMS", "MTL RX Parser Memory Address Mismatch Error" }, + { true, "RPUES", "MTL RX Parser Memory Error" }, + { false, "UNKNOWN", "Unknown Error" }, /* 15 */ + { false, "UNKNOWN", "Unknown Error" }, /* 16 */ + { false, "UNKNOWN", "Unknown Error" }, /* 17 */ + { false, "UNKNOWN", "Unknown Error" }, /* 18 */ + { false, "UNKNOWN", "Unknown Error" }, /* 19 */ + { false, "UNKNOWN", "Unknown Error" 
}, /* 20 */ + { false, "UNKNOWN", "Unknown Error" }, /* 21 */ + { false, "UNKNOWN", "Unknown Error" }, /* 22 */ + { false, "UNKNOWN", "Unknown Error" }, /* 23 */ + { false, "UNKNOWN", "Unknown Error" }, /* 24 */ + { false, "UNKNOWN", "Unknown Error" }, /* 25 */ + { false, "UNKNOWN", "Unknown Error" }, /* 26 */ + { false, "UNKNOWN", "Unknown Error" }, /* 27 */ + { false, "UNKNOWN", "Unknown Error" }, /* 28 */ + { false, "UNKNOWN", "Unknown Error" }, /* 29 */ + { false, "UNKNOWN", "Unknown Error" }, /* 30 */ + { false, "UNKNOWN", "Unknown Error" }, /* 31 */ +}; + +static void dwxgmac3_handle_mtl_err(struct net_device *ndev, + void __iomem *ioaddr, bool correctable, + struct stmmac_safety_stats *stats) +{ + u32 value; + + value = readl(ioaddr + XGMAC_MTL_ECC_INT_STATUS); + writel(value, ioaddr + XGMAC_MTL_ECC_INT_STATUS); + + dwxgmac3_log_error(ndev, value, correctable, "MTL", + dwxgmac3_mtl_errors, STAT_OFF(mtl_errors), stats); +} + +static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32]= { + { true, "TCES", "DMA TSO Memory Error" }, + { true, "TAMS", "DMA TSO Memory Address Mismatch Error" }, + { true, "TUES", "DMA TSO Memory Error" }, + { false, "UNKNOWN", "Unknown Error" }, /* 3 */ + { true, "DCES", "DMA DCACHE Memory Error" }, + { true, "DAMS", "DMA DCACHE Address Mismatch Error" }, + { true, "DUES", "DMA DCACHE Memory Error" }, + { false, "UNKNOWN", "Unknown Error" }, /* 7 */ + { false, "UNKNOWN", "Unknown Error" }, /* 8 */ + { false, "UNKNOWN", "Unknown Error" }, /* 9 */ + { false, "UNKNOWN", "Unknown Error" }, /* 10 */ + { false, "UNKNOWN", "Unknown Error" }, /* 11 */ + { false, "UNKNOWN", "Unknown Error" }, /* 12 */ + { false, "UNKNOWN", "Unknown Error" }, /* 13 */ + { false, "UNKNOWN", "Unknown Error" }, /* 14 */ + { false, "UNKNOWN", "Unknown Error" }, /* 15 */ + { false, "UNKNOWN", "Unknown Error" }, /* 16 */ + { false, "UNKNOWN", "Unknown Error" }, /* 17 */ + { false, "UNKNOWN", "Unknown Error" }, /* 18 */ + { false, "UNKNOWN", "Unknown Error" }, /* 19 */ + { false, "UNKNOWN", "Unknown Error" }, /* 20 */ + { false, "UNKNOWN", "Unknown Error" }, /* 21 */ + { false, "UNKNOWN", "Unknown Error" }, /* 22 */ + { false, "UNKNOWN", "Unknown Error" }, /* 23 */ + { false, "UNKNOWN", "Unknown Error" }, /* 24 */ + { false, "UNKNOWN", "Unknown Error" }, /* 25 */ + { false, "UNKNOWN", "Unknown Error" }, /* 26 */ + { false, "UNKNOWN", "Unknown Error" }, /* 27 */ + { false, "UNKNOWN", "Unknown Error" }, /* 28 */ + { false, "UNKNOWN", "Unknown Error" }, /* 29 */ + { false, "UNKNOWN", "Unknown Error" }, /* 30 */ + { false, "UNKNOWN", "Unknown Error" }, /* 31 */ +}; + +static void dwxgmac3_handle_dma_err(struct net_device *ndev, + void __iomem *ioaddr, bool correctable, + struct stmmac_safety_stats *stats) +{ + u32 value; + + value = readl(ioaddr + XGMAC_DMA_ECC_INT_STATUS); + writel(value, ioaddr + XGMAC_DMA_ECC_INT_STATUS); + + dwxgmac3_log_error(ndev, value, correctable, "DMA", + dwxgmac3_dma_errors, STAT_OFF(dma_errors), stats); +} + +static int dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp) +{ + u32 value; + + if (!asp) + return -EINVAL; + + /* 1. Enable Safety Features */ + writel(0x0, ioaddr + XGMAC_MTL_ECC_CONTROL); + + /* 2. 
Enable MTL Safety Interrupts */ + value = readl(ioaddr + XGMAC_MTL_ECC_INT_ENABLE); + value |= XGMAC_RPCEIE; /* RX Parser Memory Correctable Error */ + value |= XGMAC_ECEIE; /* EST Memory Correctable Error */ + value |= XGMAC_RXCEIE; /* RX Memory Correctable Error */ + value |= XGMAC_TXCEIE; /* TX Memory Correctable Error */ + writel(value, ioaddr + XGMAC_MTL_ECC_INT_ENABLE); + + /* 3. Enable DMA Safety Interrupts */ + value = readl(ioaddr + XGMAC_DMA_ECC_INT_ENABLE); + value |= XGMAC_DCEIE; /* Descriptor Cache Memory Correctable Error */ + value |= XGMAC_TCEIE; /* TSO Memory Correctable Error */ + writel(value, ioaddr + XGMAC_DMA_ECC_INT_ENABLE); + + /* Only ECC Protection for External Memory feature is selected */ + if (asp <= 0x1) + return 0; + + /* 4. Enable Parity and Timeout for FSM */ + value = readl(ioaddr + XGMAC_MAC_FSM_CONTROL); + value |= XGMAC_PRTYEN; /* FSM Parity Feature */ + value |= XGMAC_TMOUTEN; /* FSM Timeout Feature */ + writel(value, ioaddr + XGMAC_MAC_FSM_CONTROL); + + return 0; +} + +static int dwxgmac3_safety_feat_irq_status(struct net_device *ndev, + void __iomem *ioaddr, + unsigned int asp, + struct stmmac_safety_stats *stats) +{ + bool err, corr; + u32 mtl, dma; + int ret = 0; + + if (!asp) + return -EINVAL; + + mtl = readl(ioaddr + XGMAC_MTL_SAFETY_INT_STATUS); + dma = readl(ioaddr + XGMAC_DMA_SAFETY_INT_STATUS); + + err = (mtl & XGMAC_MCSIS) || (dma & XGMAC_MCSIS); + corr = false; + if (err) { + dwxgmac3_handle_mac_err(ndev, ioaddr, corr, stats); + ret |= !corr; + } + + err = (mtl & (XGMAC_MEUIS | XGMAC_MECIS)) || + (dma & (XGMAC_MSUIS | XGMAC_MSCIS)); + corr = (mtl & XGMAC_MECIS) || (dma & XGMAC_MSCIS); + if (err) { + dwxgmac3_handle_mtl_err(ndev, ioaddr, corr, stats); + ret |= !corr; + } + + err = dma & (XGMAC_DEUIS | XGMAC_DECIS); + corr = dma & XGMAC_DECIS; + if (err) { + dwxgmac3_handle_dma_err(ndev, ioaddr, corr, stats); + ret |= !corr; + } + + return ret; +} + +static const struct dwxgmac3_error { + const struct dwxgmac3_error_desc *desc; +} dwxgmac3_all_errors[] = { + { dwxgmac3_mac_errors }, + { dwxgmac3_mtl_errors }, + { dwxgmac3_dma_errors }, +}; + +static int dwxgmac3_safety_feat_dump(struct stmmac_safety_stats *stats, + int index, unsigned long *count, + const char **desc) +{ + int module = index / 32, offset = index % 32; + unsigned long *ptr = (unsigned long *)stats; + + if (module >= ARRAY_SIZE(dwxgmac3_all_errors)) + return -EINVAL; + if (!dwxgmac3_all_errors[module].desc[offset].valid) + return -EINVAL; + if (count) + *count = *(ptr + index); + if (desc) + *desc = dwxgmac3_all_errors[module].desc[offset].desc; + return 0; +} + +static int dwxgmac3_rxp_disable(void __iomem *ioaddr) +{ + u32 val = readl(ioaddr + XGMAC_MTL_OPMODE); + + val &= ~XGMAC_FRPE; + writel(val, ioaddr + XGMAC_MTL_OPMODE); + + return 0; +} + +static void dwxgmac3_rxp_enable(void __iomem *ioaddr) +{ + u32 val; + + val = readl(ioaddr + XGMAC_MTL_OPMODE); + val |= XGMAC_FRPE; + writel(val, ioaddr + XGMAC_MTL_OPMODE); +} + +static int dwxgmac3_rxp_update_single_entry(void __iomem *ioaddr, + struct stmmac_tc_entry *entry, + int pos) +{ + int ret, i; + + for (i = 0; i < (sizeof(entry->val) / sizeof(u32)); i++) { + int real_pos = pos * (sizeof(entry->val) / sizeof(u32)) + i; + u32 val; + + /* Wait for ready */ + ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST, + val, !(val & XGMAC_STARTBUSY), 1, 10000); + if (ret) + return ret; + + /* Write data */ + val = *((u32 *)&entry->val + i); + writel(val, ioaddr + XGMAC_MTL_RXP_IACC_DATA); + + /* Write pos */ + val = 
real_pos & XGMAC_ADDR; + writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST); + + /* Write OP */ + val |= XGMAC_WRRDN; + writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST); + + /* Start Write */ + val |= XGMAC_STARTBUSY; + writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST); + + /* Wait for done */ + ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST, + val, !(val & XGMAC_STARTBUSY), 1, 10000); + if (ret) + return ret; + } + + return 0; +} + +static struct stmmac_tc_entry * +dwxgmac3_rxp_get_next_entry(struct stmmac_tc_entry *entries, + unsigned int count, u32 curr_prio) +{ + struct stmmac_tc_entry *entry; + u32 min_prio = ~0x0; + int i, min_prio_idx; + bool found = false; + + for (i = count - 1; i >= 0; i--) { + entry = &entries[i]; + + /* Do not update unused entries */ + if (!entry->in_use) + continue; + /* Do not update already updated entries (i.e. fragments) */ + if (entry->in_hw) + continue; + /* Let last entry be updated last */ + if (entry->is_last) + continue; + /* Do not return fragments */ + if (entry->is_frag) + continue; + /* Check if we already checked this prio */ + if (entry->prio < curr_prio) + continue; + /* Check if this is the minimum prio */ + if (entry->prio < min_prio) { + min_prio = entry->prio; + min_prio_idx = i; + found = true; + } + } + + if (found) + return &entries[min_prio_idx]; + return NULL; +} + +static int dwxgmac3_rxp_config(void __iomem *ioaddr, + struct stmmac_tc_entry *entries, + unsigned int count) +{ + struct stmmac_tc_entry *entry, *frag; + int i, ret, nve = 0; + u32 curr_prio = 0; + u32 old_val, val; + + /* Force disable RX */ + old_val = readl(ioaddr + XGMAC_RX_CONFIG); + val = old_val & ~XGMAC_CONFIG_RE; + writel(val, ioaddr + XGMAC_RX_CONFIG); + + /* Disable RX Parser */ + ret = dwxgmac3_rxp_disable(ioaddr); + if (ret) + goto re_enable; + + /* Set all entries as NOT in HW */ + for (i = 0; i < count; i++) { + entry = &entries[i]; + entry->in_hw = false; + } + + /* Update entries by reverse order */ + while (1) { + entry = dwxgmac3_rxp_get_next_entry(entries, count, curr_prio); + if (!entry) + break; + + curr_prio = entry->prio; + frag = entry->frag_ptr; + + /* Set special fragment requirements */ + if (frag) { + entry->val.af = 0; + entry->val.rf = 0; + entry->val.nc = 1; + entry->val.ok_index = nve + 2; + } + + ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve); + if (ret) + goto re_enable; + + entry->table_pos = nve++; + entry->in_hw = true; + + if (frag && !frag->in_hw) { + ret = dwxgmac3_rxp_update_single_entry(ioaddr, frag, nve); + if (ret) + goto re_enable; + frag->table_pos = nve++; + frag->in_hw = true; + } + } + + if (!nve) + goto re_enable; + + /* Update all pass entry */ + for (i = 0; i < count; i++) { + entry = &entries[i]; + if (!entry->is_last) + continue; + + ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve); + if (ret) + goto re_enable; + + entry->table_pos = nve++; + } + + /* Assume n. of parsable entries == n. 
+
+static int dwxgmac3_rxp_config(void __iomem *ioaddr,
+ struct stmmac_tc_entry *entries,
+ unsigned int count)
+{
+ struct stmmac_tc_entry *entry, *frag;
+ int i, ret, nve = 0;
+ u32 curr_prio = 0;
+ u32 old_val, val;
+
+ /* Force disable RX */
+ old_val = readl(ioaddr + XGMAC_RX_CONFIG);
+ val = old_val & ~XGMAC_CONFIG_RE;
+ writel(val, ioaddr + XGMAC_RX_CONFIG);
+
+ /* Disable RX Parser */
+ ret = dwxgmac3_rxp_disable(ioaddr);
+ if (ret)
+ goto re_enable;
+
+ /* Set all entries as NOT in HW */
+ for (i = 0; i < count; i++) {
+ entry = &entries[i];
+ entry->in_hw = false;
+ }
+
+ /* Update entries in reverse order */
+ while (1) {
+ entry = dwxgmac3_rxp_get_next_entry(entries, count, curr_prio);
+ if (!entry)
+ break;
+
+ curr_prio = entry->prio;
+ frag = entry->frag_ptr;
+
+ /* Set special fragment requirements */
+ if (frag) {
+ entry->val.af = 0;
+ entry->val.rf = 0;
+ entry->val.nc = 1;
+ entry->val.ok_index = nve + 2;
+ }
+
+ ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
+ if (ret)
+ goto re_enable;
+
+ entry->table_pos = nve++;
+ entry->in_hw = true;
+
+ if (frag && !frag->in_hw) {
+ ret = dwxgmac3_rxp_update_single_entry(ioaddr, frag, nve);
+ if (ret)
+ goto re_enable;
+ frag->table_pos = nve++;
+ frag->in_hw = true;
+ }
+ }
+
+ if (!nve)
+ goto re_enable;
+
+ /* Update the all-pass entry */
+ for (i = 0; i < count; i++) {
+ entry = &entries[i];
+ if (!entry->is_last)
+ continue;
+
+ ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
+ if (ret)
+ goto re_enable;
+
+ entry->table_pos = nve++;
+ }
+
+ /* Assume n. of parsable entries == n. of valid entries */
+ val = (nve << 16) & XGMAC_NPE;
+ val |= nve & XGMAC_NVE;
+ writel(val, ioaddr + XGMAC_MTL_RXP_CONTROL_STATUS);
+
+ /* Enable RX Parser */
+ dwxgmac3_rxp_enable(ioaddr);
+
+re_enable:
+ /* Re-enable RX */
+ writel(old_val, ioaddr + XGMAC_RX_CONFIG);
+ return ret;
+}
+
+static int dwxgmac2_get_mac_tx_timestamp(struct mac_device_info *hw, u64 *ts)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ if (readl_poll_timeout_atomic(ioaddr + XGMAC_TIMESTAMP_STATUS,
+ value, value & XGMAC_TXTSC, 100, 10000))
+ return -EBUSY;
+
+ *ts = readl(ioaddr + XGMAC_TXTIMESTAMP_NSEC) & XGMAC_TXTSSTSLO;
+ *ts += readl(ioaddr + XGMAC_TXTIMESTAMP_SEC) * 1000000000ULL;
+ return 0;
+}
+
+static int dwxgmac2_flex_pps_config(void __iomem *ioaddr, int index,
+ struct stmmac_pps_cfg *cfg, bool enable,
+ u32 sub_second_inc, u32 systime_flags)
+{
+ u32 tnsec = readl(ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));
+ u32 val = readl(ioaddr + XGMAC_PPS_CONTROL);
+ u64 period;
+
+ if (!cfg->available)
+ return -EINVAL;
+ if (tnsec & XGMAC_TRGTBUSY0)
+ return -EBUSY;
+ if (!sub_second_inc || !systime_flags)
+ return -EINVAL;
+
+ val &= ~XGMAC_PPSx_MASK(index);
+
+ if (!enable) {
+ val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_STOP);
+ writel(val, ioaddr + XGMAC_PPS_CONTROL);
+ return 0;
+ }
+
+ val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_START);
+ val |= XGMAC_TRGTMODSELx(index, XGMAC_PPSCMD_START);
+ val |= XGMAC_PPSEN0;
+
+ writel(cfg->start.tv_sec, ioaddr + XGMAC_PPSx_TARGET_TIME_SEC(index));
+
+ if (!(systime_flags & PTP_TCR_TSCTRLSSR))
+ cfg->start.tv_nsec = (cfg->start.tv_nsec * 1000) / 465;
+ writel(cfg->start.tv_nsec, ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));
+
+ period = cfg->period.tv_sec * 1000000000;
+ period += cfg->period.tv_nsec;
+
+ do_div(period, sub_second_inc);
+
+ if (period <= 1)
+ return -EINVAL;
+
+ writel(period - 1, ioaddr + XGMAC_PPSx_INTERVAL(index));
+
+ period >>= 1;
+ if (period <= 1)
+ return -EINVAL;
+
+ writel(period - 1, ioaddr + XGMAC_PPSx_WIDTH(index));
+
+ /* Finally, activate it */
+ writel(val, ioaddr + XGMAC_PPS_CONTROL);
+ return 0;
+}
+
+static void dwxgmac2_sarc_configure(void __iomem *ioaddr, int val)
+{
+ u32 value = readl(ioaddr + XGMAC_TX_CONFIG);
+
+ value &= ~XGMAC_CONFIG_SARC;
+ value |= val << XGMAC_CONFIG_SARC_SHIFT;
+
+ writel(value, ioaddr + XGMAC_TX_CONFIG);
+}
+
+static void dwxgmac2_enable_vlan(struct mac_device_info *hw, u32 type)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_VLAN_INCL);
+ value |= XGMAC_VLAN_VLTI;
+ value |= XGMAC_VLAN_CSVL; /* Only use SVLAN */
+ value &= ~XGMAC_VLAN_VLC;
+ value |= (type << XGMAC_VLAN_VLC_SHIFT) & XGMAC_VLAN_VLC;
+ writel(value, ioaddr + XGMAC_VLAN_INCL);
+}
+
+static int dwxgmac2_filter_wait(struct mac_device_info *hw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ if (readl_poll_timeout(ioaddr + XGMAC_L3L4_ADDR_CTRL, value,
+ !(value & XGMAC_XB), 100, 10000))
+ return -EBUSY;
+ return 0;
+}
+
+static int dwxgmac2_filter_read(struct mac_device_info *hw, u32 filter_no,
+ u8 reg, u32 *data)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+ int ret;
+
+ ret = dwxgmac2_filter_wait(hw);
+ if (ret)
+ return ret;
+
+ value = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT;
+ value |= XGMAC_TT | XGMAC_XB;
+ writel(value, ioaddr + XGMAC_L3L4_ADDR_CTRL);
+
+ ret = dwxgmac2_filter_wait(hw);
+ if (ret)
+ return ret;
+
+ *data = readl(ioaddr + XGMAC_L3L4_DATA);
+ return 0;
+}
+
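
dwxgmac2_filter_read() above, and dwxgmac2_filter_write() below, drive a classic indirect-access handshake: wait for XB (busy) to clear, program the filter number and register through the IDDR field together with TT (transfer type) and XB, then wait again before touching the data register. A stand-alone sketch of the same handshake against a stubbed register file; the TOY_* bit layout is invented purely for illustration:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_XB   (1U << 0)	/* transfer busy   */
#define TOY_TT   (1U << 1)	/* 1 = read access */

static uint32_t toy_ctrl, toy_data, toy_filters[8][16];

/* Pretend-hardware: complete any pending transfer instantly. */
static void toy_hw_step(void)
{
	if (toy_ctrl & TOY_XB) {
		unsigned int fnum = (toy_ctrl >> 8) & 0x7, reg = (toy_ctrl >> 4) & 0xf;

		if (toy_ctrl & TOY_TT)
			toy_data = toy_filters[fnum][reg];
		else
			toy_filters[fnum][reg] = toy_data;
		toy_ctrl &= ~TOY_XB;	/* transfer done */
	}
}

static uint32_t toy_filter_read(unsigned int fnum, unsigned int reg)
{
	while (toy_ctrl & TOY_XB)	/* wait-ready, as dwxgmac2_filter_wait() */
		toy_hw_step();
	toy_ctrl = (fnum << 8) | (reg << 4) | TOY_TT | TOY_XB;
	while (toy_ctrl & TOY_XB)	/* wait-done before sampling data */
		toy_hw_step();
	return toy_data;
}

int main(void)
{
	toy_filters[2][3] = 0xdeadbeef;
	printf("0x%" PRIx32 "\n", toy_filter_read(2, 3));	/* 0xdeadbeef */
	return 0;
}
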
+static int dwxgmac2_filter_write(struct mac_device_info *hw, u32 filter_no,
+ u8 reg, u32 data)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+ int ret;
+
+ ret = dwxgmac2_filter_wait(hw);
+ if (ret)
+ return ret;
+
+ writel(data, ioaddr + XGMAC_L3L4_DATA);
+
+ value = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT;
+ value |= XGMAC_XB;
+ writel(value, ioaddr + XGMAC_L3L4_ADDR_CTRL);
+
+ return dwxgmac2_filter_wait(hw);
+}
+
+static int dwxgmac2_config_l3_filter(struct mac_device_info *hw, u32 filter_no,
+ bool en, bool ipv6, bool sa, bool inv,
+ u32 match)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+ int ret;
+
+ value = readl(ioaddr + XGMAC_PACKET_FILTER);
+ value |= XGMAC_FILTER_IPFE;
+ writel(value, ioaddr + XGMAC_PACKET_FILTER);
+
+ ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value);
+ if (ret)
+ return ret;
+
+ /* For IPv6, the SA and DA filters cannot both be active */
+ if (ipv6) {
+ value |= XGMAC_L3PEN0;
+ value &= ~(XGMAC_L3SAM0 | XGMAC_L3SAIM0);
+ value &= ~(XGMAC_L3DAM0 | XGMAC_L3DAIM0);
+ if (sa) {
+ value |= XGMAC_L3SAM0;
+ if (inv)
+ value |= XGMAC_L3SAIM0;
+ } else {
+ value |= XGMAC_L3DAM0;
+ if (inv)
+ value |= XGMAC_L3DAIM0;
+ }
+ } else {
+ value &= ~XGMAC_L3PEN0;
+ if (sa) {
+ value |= XGMAC_L3SAM0;
+ if (inv)
+ value |= XGMAC_L3SAIM0;
+ } else {
+ value |= XGMAC_L3DAM0;
+ if (inv)
+ value |= XGMAC_L3DAIM0;
+ }
+ }
+
+ ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value);
+ if (ret)
+ return ret;
+
+ if (sa) {
+ ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3_ADDR0, match);
+ if (ret)
+ return ret;
+ } else {
+ ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3_ADDR1, match);
+ if (ret)
+ return ret;
+ }
+
+ if (!en)
+ return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0);
+
+ return 0;
+}
+
+static int dwxgmac2_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
+ bool en, bool udp, bool sa, bool inv,
+ u32 match)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+ int ret;
+
+ value = readl(ioaddr + XGMAC_PACKET_FILTER);
+ value |= XGMAC_FILTER_IPFE;
+ writel(value, ioaddr + XGMAC_PACKET_FILTER);
+
+ ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value);
+ if (ret)
+ return ret;
+
+ if (udp) {
+ value |= XGMAC_L4PEN0;
+ } else {
+ value &= ~XGMAC_L4PEN0;
+ }
+
+ value &= ~(XGMAC_L4SPM0 | XGMAC_L4SPIM0);
+ value &= ~(XGMAC_L4DPM0 | XGMAC_L4DPIM0);
+ if (sa) {
+ value |= XGMAC_L4SPM0;
+ if (inv)
+ value |= XGMAC_L4SPIM0;
+ } else {
+ value |= XGMAC_L4DPM0;
+ if (inv)
+ value |= XGMAC_L4DPIM0;
+ }
+
+ ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value);
+ if (ret)
+ return ret;
+
+ if (sa) {
+ value = match & XGMAC_L4SP0;
+
+ ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value);
+ if (ret)
+ return ret;
+ } else {
+ value = (match << XGMAC_L4DP0_SHIFT) & XGMAC_L4DP0;
+
+ ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value);
+ if (ret)
+ return ret;
+ }
+
+ if (!en)
+ return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0);
+
+ return 0;
+}
+
+static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en,
+ u32 addr)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ writel(addr, ioaddr + XGMAC_ARP_ADDR);
+
+ value = readl(ioaddr + XGMAC_RX_CONFIG);
+ if (en)
+ value |= XGMAC_CONFIG_ARPEN;
+ else
+ value &= ~XGMAC_CONFIG_ARPEN;
+ writel(value, ioaddr + XGMAC_RX_CONFIG);
+}
+
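
The helpers above are wired into the ops table below and are only reached through the dispatch macros added later in hwif.h (stmmac_config_l3_filter() and friends), which degrade to -EINVAL when a core leaves the callback slot NULL. A hedged user-space miniature of that dispatch pattern, with invented toy_* names:

#include <errno.h>
#include <stdio.h>

struct toy_ops {
	int (*config_l3_filter)(unsigned int filter_no, int en);
};

/* Like stmmac_do_callback(): call if implemented, else -EINVAL. */
#define toy_do_callback(ops, cb, ...) \
	((ops)->cb ? (ops)->cb(__VA_ARGS__) : -EINVAL)

static int toy_l3(unsigned int filter_no, int en)
{
	printf("filter %u %s\n", filter_no, en ? "on" : "off");
	return 0;
}

int main(void)
{
	struct toy_ops with = { .config_l3_filter = toy_l3 }, without = { 0 };

	toy_do_callback(&with, config_l3_filter, 0, 1);	/* prints */
	/* A NULL slot quietly reports "not supported". */
	return toy_do_callback(&without, config_l3_filter, 0, 1) == -EINVAL ? 0 : 1;
}
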
 const struct stmmac_ops dwxgmac210_ops = {
 .core_init = dwxgmac2_core_init,
 .set_mac = dwxgmac2_set_mac,
 .rx_ipc = dwxgmac2_rx_ipc,
 .rx_queue_enable = dwxgmac2_rx_queue_enable,
 .rx_queue_prio = dwxgmac2_rx_queue_prio,
- .tx_queue_prio = NULL,
+ .tx_queue_prio = dwxgmac2_tx_queue_prio,
 .rx_queue_routing = NULL,
 .prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
 .prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
- .set_mtl_tx_queue_weight = NULL,
+ .set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
 .map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
 .config_cbs = dwxgmac2_config_cbs,
- .dump_regs = NULL,
+ .dump_regs = dwxgmac2_dump_regs,
 .host_irq_status = dwxgmac2_host_irq_status,
 .host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
 .flow_ctrl = dwxgmac2_flow_ctrl,
 .pmt = dwxgmac2_pmt,
 .set_umac_addr = dwxgmac2_set_umac_addr,
 .get_umac_addr = dwxgmac2_get_umac_addr,
- .set_eee_mode = NULL,
- .reset_eee_mode = NULL,
- .set_eee_timer = NULL,
- .set_eee_pls = NULL,
+ .set_eee_mode = dwxgmac2_set_eee_mode,
+ .reset_eee_mode = dwxgmac2_reset_eee_mode,
+ .set_eee_timer = dwxgmac2_set_eee_timer,
+ .set_eee_pls = dwxgmac2_set_eee_pls,
 .pcs_ctrl_ane = NULL,
 .pcs_rane = NULL,
 .pcs_get_adv_lp = NULL,
 .debug = NULL,
 .set_filter = dwxgmac2_set_filter,
+ .safety_feat_config = dwxgmac3_safety_feat_config,
+ .safety_feat_irq_status = dwxgmac3_safety_feat_irq_status,
+ .safety_feat_dump = dwxgmac3_safety_feat_dump,
 .set_mac_loopback = dwxgmac2_set_mac_loopback,
+ .rss_configure = dwxgmac2_rss_configure,
+ .update_vlan_hash = dwxgmac2_update_vlan_hash,
+ .rxp_config = dwxgmac3_rxp_config,
+ .get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
+ .flex_pps_config = dwxgmac2_flex_pps_config,
+ .sarc_configure = dwxgmac2_sarc_configure,
+ .enable_vlan = dwxgmac2_enable_vlan,
+ .config_l3_filter = dwxgmac2_config_l3_filter,
+ .config_l4_filter = dwxgmac2_config_l4_filter,
+ .set_arp_offload = dwxgmac2_set_arp_offload,
 };
 int dwxgmac2_setup(struct stmmac_priv *priv)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
index c4c45402b8f8..ae48154f933c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
@@ -26,16 +26,17 @@ static int dwxgmac2_get_rx_status(void *data, struct stmmac_extra_stats *x,
 struct dma_desc *p)
 {
 unsigned int rdes3 = le32_to_cpu(p->des3);
- int ret = good_frame;
 if (unlikely(rdes3 & XGMAC_RDES3_OWN))
 return dma_own;
+ if (unlikely(rdes3 & XGMAC_RDES3_CTXT))
+ return discard_frame;
 if (likely(!(rdes3 & XGMAC_RDES3_LD)))
+ return rx_not_ls;
+ if (unlikely((rdes3 & XGMAC_RDES3_ES) && (rdes3 & XGMAC_RDES3_LD)))
 return discard_frame;
- if (unlikely(rdes3 & XGMAC_RDES3_ES))
- ret = discard_frame;
- return ret;
+ return good_frame;
 }
 static int dwxgmac2_get_tx_len(struct dma_desc *p)
@@ -55,7 +56,7 @@ static void dwxgmac2_set_tx_owner(struct dma_desc *p)
 static void dwxgmac2_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
 {
- p->des3 = cpu_to_le32(XGMAC_RDES3_OWN);
+ p->des3 |= cpu_to_le32(XGMAC_RDES3_OWN);
 if (!disable_rx_ic)
 p->des3 |= cpu_to_le32(XGMAC_RDES3_IOC);
@@ -98,11 +99,17 @@ static int dwxgmac2_rx_check_timestamp(void *desc)
 unsigned int rdes3 = le32_to_cpu(p->des3);
 bool desc_valid, ts_valid;
+ dma_rmb();
+
 desc_valid = !(rdes3 & XGMAC_RDES3_OWN) && (rdes3 & XGMAC_RDES3_CTXT);
 ts_valid = !(rdes3 & XGMAC_RDES3_TSD) && (rdes3 & XGMAC_RDES3_TSA);
- if (likely(desc_valid && ts_valid))
+ if (likely(desc_valid && ts_valid)) {
+ if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff))
+ return -EINVAL;
 return 0;
+ }
+
 return -EINVAL;
 }
@@ -113,13 +120,10 @@ static int dwxgmac2_get_rx_timestamp_status(void *desc, void *next_desc,
 unsigned int rdes3 =
le32_to_cpu(p->des3); int ret = -EBUSY; - if (likely(rdes3 & XGMAC_RDES3_CDA)) { + if (likely(rdes3 & XGMAC_RDES3_CDA)) ret = dwxgmac2_rx_check_timestamp(next_desc); - if (ret) - return ret; - } - return ret; + return !ret; } static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic, @@ -144,7 +148,7 @@ static void dwxgmac2_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, p->des2 |= cpu_to_le32(len & XGMAC_TDES2_B1L); - tdes3 = tot_pkt_len & XGMAC_TDES3_FL; + tdes3 |= tot_pkt_len & XGMAC_TDES3_FL; if (is_fs) tdes3 |= XGMAC_TDES3_FD; else @@ -254,6 +258,86 @@ static void dwxgmac2_clear(struct dma_desc *p) p->des3 = 0; } +static int dwxgmac2_get_rx_hash(struct dma_desc *p, u32 *hash, + enum pkt_hash_types *type) +{ + unsigned int rdes3 = le32_to_cpu(p->des3); + u32 ptype; + + if (rdes3 & XGMAC_RDES3_RSV) { + ptype = (rdes3 & XGMAC_RDES3_L34T) >> XGMAC_RDES3_L34T_SHIFT; + + switch (ptype) { + case XGMAC_L34T_IP4TCP: + case XGMAC_L34T_IP4UDP: + case XGMAC_L34T_IP6TCP: + case XGMAC_L34T_IP6UDP: + *type = PKT_HASH_TYPE_L4; + break; + default: + *type = PKT_HASH_TYPE_L3; + break; + } + + *hash = le32_to_cpu(p->des1); + return 0; + } + + return -EINVAL; +} + +static int dwxgmac2_get_rx_header_len(struct dma_desc *p, unsigned int *len) +{ + *len = le32_to_cpu(p->des2) & XGMAC_RDES2_HL; + return 0; +} + +static void dwxgmac2_set_sec_addr(struct dma_desc *p, dma_addr_t addr) +{ + p->des2 = cpu_to_le32(lower_32_bits(addr)); + p->des3 = cpu_to_le32(upper_32_bits(addr)); +} + +static void dwxgmac2_set_sarc(struct dma_desc *p, u32 sarc_type) +{ + sarc_type <<= XGMAC_TDES3_SAIC_SHIFT; + + p->des3 |= cpu_to_le32(sarc_type & XGMAC_TDES3_SAIC); +} + +static void dwxgmac2_set_vlan_tag(struct dma_desc *p, u16 tag, u16 inner_tag, + u32 inner_type) +{ + p->des0 = 0; + p->des1 = 0; + p->des2 = 0; + p->des3 = 0; + + /* Inner VLAN */ + if (inner_type) { + u32 des = inner_tag << XGMAC_TDES2_IVT_SHIFT; + + des &= XGMAC_TDES2_IVT; + p->des2 = cpu_to_le32(des); + + des = inner_type << XGMAC_TDES3_IVTIR_SHIFT; + des &= XGMAC_TDES3_IVTIR; + p->des3 = cpu_to_le32(des | XGMAC_TDES3_IVLTV); + } + + /* Outer VLAN */ + p->des3 |= cpu_to_le32(tag & XGMAC_TDES3_VT); + p->des3 |= cpu_to_le32(XGMAC_TDES3_VLTV); + + p->des3 |= cpu_to_le32(XGMAC_TDES3_CTXT); +} + +static void dwxgmac2_set_vlan(struct dma_desc *p, u32 type) +{ + type <<= XGMAC_TDES2_VTIR_SHIFT; + p->des2 |= cpu_to_le32(type & XGMAC_TDES2_VTIR); +} + const struct stmmac_desc_ops dwxgmac210_desc_ops = { .tx_status = dwxgmac2_get_tx_status, .rx_status = dwxgmac2_get_rx_status, @@ -277,4 +361,10 @@ const struct stmmac_desc_ops dwxgmac210_desc_ops = { .get_addr = dwxgmac2_get_addr, .set_addr = dwxgmac2_set_addr, .clear = dwxgmac2_clear, + .get_rx_hash = dwxgmac2_get_rx_hash, + .get_rx_header_len = dwxgmac2_get_rx_header_len, + .set_sec_addr = dwxgmac2_set_sec_addr, + .set_sarc = dwxgmac2_set_sarc, + .set_vlan_tag = dwxgmac2_set_vlan_tag, + .set_vlan = dwxgmac2_set_vlan, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c index a4f236e3593e..53c4a40d8386 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c @@ -128,6 +128,14 @@ static void dwxgmac2_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi) writel(XGMAC_RDPS, ioaddr + XGMAC_RX_EDMA_CTRL); } +static void dwxgmac2_dma_dump_regs(void __iomem *ioaddr, u32 *reg_space) +{ + int i; + + for (i = (XGMAC_DMA_MODE / 4); i < XGMAC_REGSIZE; i++) + reg_space[i] = 
readl(ioaddr + i * 4); +} + static void dwxgmac2_dma_rx_mode(void __iomem *ioaddr, int mode, u32 channel, int fifosz, u8 qmode) { @@ -314,6 +322,10 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr, /* ABNORMAL interrupts */ if (unlikely(intr_status & XGMAC_AIS)) { + if (unlikely(intr_status & XGMAC_RBU)) { + x->rx_buf_unav_irq++; + ret |= handle_rx; + } if (unlikely(intr_status & XGMAC_TPS)) { x->tx_process_stopped_irq++; ret |= tx_hard_error; @@ -351,18 +363,26 @@ static void dwxgmac2_get_hw_feature(void __iomem *ioaddr, /* MAC HW feature 0 */ hw_cap = readl(ioaddr + XGMAC_HW_FEATURE0); + dma_cap->vlins = (hw_cap & XGMAC_HWFEAT_SAVLANINS) >> 27; dma_cap->rx_coe = (hw_cap & XGMAC_HWFEAT_RXCOESEL) >> 16; dma_cap->tx_coe = (hw_cap & XGMAC_HWFEAT_TXCOESEL) >> 14; + dma_cap->eee = (hw_cap & XGMAC_HWFEAT_EEESEL) >> 13; dma_cap->atime_stamp = (hw_cap & XGMAC_HWFEAT_TSSEL) >> 12; dma_cap->av = (hw_cap & XGMAC_HWFEAT_AVSEL) >> 11; - dma_cap->av &= (hw_cap & XGMAC_HWFEAT_RAVSEL) >> 10; + dma_cap->av &= !(hw_cap & XGMAC_HWFEAT_RAVSEL) >> 10; + dma_cap->arpoffsel = (hw_cap & XGMAC_HWFEAT_ARPOFFSEL) >> 9; + dma_cap->rmon = (hw_cap & XGMAC_HWFEAT_MMCSEL) >> 8; dma_cap->pmt_magic_frame = (hw_cap & XGMAC_HWFEAT_MGKSEL) >> 7; dma_cap->pmt_remote_wake_up = (hw_cap & XGMAC_HWFEAT_RWKSEL) >> 6; + dma_cap->vlhash = (hw_cap & XGMAC_HWFEAT_VLHASH) >> 4; dma_cap->mbps_1000 = (hw_cap & XGMAC_HWFEAT_GMIISEL) >> 1; /* MAC HW feature 1 */ hw_cap = readl(ioaddr + XGMAC_HW_FEATURE1); + dma_cap->l3l4fnum = (hw_cap & XGMAC_HWFEAT_L3L4FNUM) >> 27; + dma_cap->rssen = (hw_cap & XGMAC_HWFEAT_RSSEN) >> 20; dma_cap->tsoen = (hw_cap & XGMAC_HWFEAT_TSOEN) >> 18; + dma_cap->sphen = (hw_cap & XGMAC_HWFEAT_SPHEN) >> 17; dma_cap->addr64 = (hw_cap & XGMAC_HWFEAT_ADDR64) >> 14; switch (dma_cap->addr64) { @@ -396,6 +416,14 @@ static void dwxgmac2_get_hw_feature(void __iomem *ioaddr, ((hw_cap & XGMAC_HWFEAT_TXQCNT) >> 6) + 1; dma_cap->number_rx_queues = ((hw_cap & XGMAC_HWFEAT_RXQCNT) >> 0) + 1; + + /* MAC HW feature 3 */ + hw_cap = readl(ioaddr + XGMAC_HW_FEATURE3); + dma_cap->asp = (hw_cap & XGMAC_HWFEAT_ASP) >> 14; + dma_cap->dvlan = (hw_cap & XGMAC_HWFEAT_DVLAN) >> 13; + dma_cap->frpes = (hw_cap & XGMAC_HWFEAT_FRPES) >> 11; + dma_cap->frpbs = (hw_cap & XGMAC_HWFEAT_FRPPB) >> 9; + dma_cap->frpsel = (hw_cap & XGMAC_HWFEAT_FRPSEL) >> 3; } static void dwxgmac2_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 nchan) @@ -462,6 +490,22 @@ static void dwxgmac2_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan) writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan)); } +static void dwxgmac2_enable_sph(void __iomem *ioaddr, bool en, u32 chan) +{ + u32 value = readl(ioaddr + XGMAC_RX_CONFIG); + + value &= ~XGMAC_CONFIG_HDSMS; + value |= XGMAC_CONFIG_HDSMS_256; /* Segment max 256 bytes */ + writel(value, ioaddr + XGMAC_RX_CONFIG); + + value = readl(ioaddr + XGMAC_DMA_CH_CONTROL(chan)); + if (en) + value |= XGMAC_SPH; + else + value &= ~XGMAC_SPH; + writel(value, ioaddr + XGMAC_DMA_CH_CONTROL(chan)); +} + const struct stmmac_dma_ops dwxgmac210_dma_ops = { .reset = dwxgmac2_dma_reset, .init = dwxgmac2_dma_init, @@ -469,7 +513,7 @@ const struct stmmac_dma_ops dwxgmac210_dma_ops = { .init_rx_chan = dwxgmac2_dma_init_rx_chan, .init_tx_chan = dwxgmac2_dma_init_tx_chan, .axi = dwxgmac2_dma_axi, - .dump_regs = NULL, + .dump_regs = dwxgmac2_dma_dump_regs, .dma_rx_mode = dwxgmac2_dma_rx_mode, .dma_tx_mode = dwxgmac2_dma_tx_mode, .enable_dma_irq = dwxgmac2_enable_dma_irq, @@ -488,4 +532,5 @@ const struct stmmac_dma_ops dwxgmac210_dma_ops = { 
.enable_tso = dwxgmac2_enable_tso, .qmode = dwxgmac2_qmode, .set_bfsize = dwxgmac2_set_bfsize, + .enable_sph = dwxgmac2_enable_sph, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c index 6c61b753b55e..3af2e5015245 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.c +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c @@ -201,7 +201,7 @@ static const struct stmmac_hwif_entry { .min_id = DWXGMAC_CORE_2_10, .regs = { .ptp_off = PTP_XGMAC_OFFSET, - .mmc_off = 0, + .mmc_off = MMC_XGMAC_OFFSET, }, .desc = &dwxgmac210_desc_ops, .dma = &dwxgmac210_dma_ops, @@ -209,7 +209,7 @@ static const struct stmmac_hwif_entry { .hwtimestamp = &stmmac_ptp, .mode = NULL, .tc = &dwmac510_tc_ops, - .mmc = NULL, + .mmc = &dwxgmac_mmc_ops, .setup = dwxgmac2_setup, .quirks = NULL, }, diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index 278c0dbec9d9..ddb851d99618 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -86,6 +86,15 @@ struct stmmac_desc_ops { void (*set_addr)(struct dma_desc *p, dma_addr_t addr); /* clear descriptor */ void (*clear)(struct dma_desc *p); + /* RSS */ + int (*get_rx_hash)(struct dma_desc *p, u32 *hash, + enum pkt_hash_types *type); + int (*get_rx_header_len)(struct dma_desc *p, unsigned int *len); + void (*set_sec_addr)(struct dma_desc *p, dma_addr_t addr); + void (*set_sarc)(struct dma_desc *p, u32 sarc_type); + void (*set_vlan_tag)(struct dma_desc *p, u16 tag, u16 inner_tag, + u32 inner_type); + void (*set_vlan)(struct dma_desc *p, u32 type); }; #define stmmac_init_rx_desc(__priv, __args...) \ @@ -136,6 +145,18 @@ struct stmmac_desc_ops { stmmac_do_void_callback(__priv, desc, set_addr, __args) #define stmmac_clear_desc(__priv, __args...) \ stmmac_do_void_callback(__priv, desc, clear, __args) +#define stmmac_get_rx_hash(__priv, __args...) \ + stmmac_do_callback(__priv, desc, get_rx_hash, __args) +#define stmmac_get_rx_header_len(__priv, __args...) \ + stmmac_do_callback(__priv, desc, get_rx_header_len, __args) +#define stmmac_set_desc_sec_addr(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, set_sec_addr, __args) +#define stmmac_set_desc_sarc(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, set_sarc, __args) +#define stmmac_set_desc_vlan_tag(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, set_vlan_tag, __args) +#define stmmac_set_desc_vlan(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, set_vlan, __args) struct stmmac_dma_cfg; struct dma_features; @@ -186,6 +207,7 @@ struct stmmac_dma_ops { void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan); void (*qmode)(void __iomem *ioaddr, u32 channel, u8 qmode); void (*set_bfsize)(void __iomem *ioaddr, int bfsize, u32 chan); + void (*enable_sph)(void __iomem *ioaddr, bool en, u32 chan); }; #define stmmac_reset(__priv, __args...) \ @@ -242,6 +264,8 @@ struct stmmac_dma_ops { stmmac_do_void_callback(__priv, dma, qmode, __args) #define stmmac_set_dma_bfsize(__priv, __args...) \ stmmac_do_void_callback(__priv, dma, set_bfsize, __args) +#define stmmac_enable_sph(__priv, __args...) 
\ + stmmac_do_void_callback(__priv, dma, enable_sph, __args) struct mac_device_info; struct net_device; @@ -249,6 +273,7 @@ struct rgmii_adv; struct stmmac_safety_stats; struct stmmac_tc_entry; struct stmmac_pps_cfg; +struct stmmac_rss; /* Helpers to program the MAC core */ struct stmmac_ops { @@ -327,6 +352,25 @@ struct stmmac_ops { u32 sub_second_inc, u32 systime_flags); /* Loopback for selftests */ void (*set_mac_loopback)(void __iomem *ioaddr, bool enable); + /* RSS */ + int (*rss_configure)(struct mac_device_info *hw, + struct stmmac_rss *cfg, u32 num_rxq); + /* VLAN */ + void (*update_vlan_hash)(struct mac_device_info *hw, u32 hash, + bool is_double); + void (*enable_vlan)(struct mac_device_info *hw, u32 type); + /* TX Timestamp */ + int (*get_mac_tx_timestamp)(struct mac_device_info *hw, u64 *ts); + /* Source Address Insertion / Replacement */ + void (*sarc_configure)(void __iomem *ioaddr, int val); + /* Filtering */ + int (*config_l3_filter)(struct mac_device_info *hw, u32 filter_no, + bool en, bool ipv6, bool sa, bool inv, + u32 match); + int (*config_l4_filter)(struct mac_device_info *hw, u32 filter_no, + bool en, bool udp, bool sa, bool inv, + u32 match); + void (*set_arp_offload)(struct mac_device_info *hw, bool en, u32 addr); }; #define stmmac_core_init(__priv, __args...) \ @@ -397,6 +441,22 @@ struct stmmac_ops { stmmac_do_callback(__priv, mac, flex_pps_config, __args) #define stmmac_set_mac_loopback(__priv, __args...) \ stmmac_do_void_callback(__priv, mac, set_mac_loopback, __args) +#define stmmac_rss_configure(__priv, __args...) \ + stmmac_do_callback(__priv, mac, rss_configure, __args) +#define stmmac_update_vlan_hash(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, update_vlan_hash, __args) +#define stmmac_enable_vlan(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, enable_vlan, __args) +#define stmmac_get_mac_tx_timestamp(__priv, __args...) \ + stmmac_do_callback(__priv, mac, get_mac_tx_timestamp, __args) +#define stmmac_sarc_configure(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, sarc_configure, __args) +#define stmmac_config_l3_filter(__priv, __args...) \ + stmmac_do_callback(__priv, mac, config_l3_filter, __args) +#define stmmac_config_l4_filter(__priv, __args...) \ + stmmac_do_callback(__priv, mac, config_l4_filter, __args) +#define stmmac_set_arp_offload(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, set_arp_offload, __args) /* PTP and HW Timer helpers */ struct stmmac_hwtimestamp { @@ -453,6 +513,7 @@ struct stmmac_mode_ops { struct stmmac_priv; struct tc_cls_u32_offload; struct tc_cbs_qopt_offload; +struct flow_cls_offload; struct stmmac_tc_ops { int (*init)(struct stmmac_priv *priv); @@ -460,6 +521,8 @@ struct stmmac_tc_ops { struct tc_cls_u32_offload *cls); int (*setup_cbs)(struct stmmac_priv *priv, struct tc_cbs_qopt_offload *qopt); + int (*setup_cls)(struct stmmac_priv *priv, + struct flow_cls_offload *cls); }; #define stmmac_tc_init(__priv, __args...) \ @@ -468,6 +531,8 @@ struct stmmac_tc_ops { stmmac_do_callback(__priv, tc, setup_cls_u32, __args) #define stmmac_tc_setup_cbs(__priv, __args...) \ stmmac_do_callback(__priv, tc, setup_cbs, __args) +#define stmmac_tc_setup_cls(__priv, __args...) 
\ + stmmac_do_callback(__priv, tc, setup_cls, __args) struct stmmac_counters; @@ -503,6 +568,7 @@ extern const struct stmmac_ops dwxgmac210_ops; extern const struct stmmac_dma_ops dwxgmac210_dma_ops; extern const struct stmmac_desc_ops dwxgmac210_desc_ops; extern const struct stmmac_mmc_ops dwmac_mmc_ops; +extern const struct stmmac_mmc_ops dwxgmac_mmc_ops; #define GMAC_VERSION 0x00000020 /* GMAC CORE Version */ #define GMAC4_VERSION 0x00000110 /* GMAC4+ CORE Version */ diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h index 3587ceb9faf5..a0c05925883e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/mmc.h +++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h @@ -24,6 +24,7 @@ #define MMC_GMAC4_OFFSET 0x700 #define MMC_GMAC3_X_OFFSET 0x100 +#define MMC_XGMAC_OFFSET 0x800 struct stmmac_counters { unsigned int mmc_tx_octetcount_gb; @@ -116,6 +117,14 @@ struct stmmac_counters { unsigned int mmc_rx_tcp_err_octets; unsigned int mmc_rx_icmp_gd_octets; unsigned int mmc_rx_icmp_err_octets; + + /* FPE */ + unsigned int mmc_tx_fpe_fragment_cntr; + unsigned int mmc_tx_hold_req_cntr; + unsigned int mmc_rx_packet_assembly_err_cntr; + unsigned int mmc_rx_packet_smd_err_cntr; + unsigned int mmc_rx_packet_assembly_ok_cntr; + unsigned int mmc_rx_fpe_fragment_cntr; }; #endif /* __MMC_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c index a471db6d7b11..a223584f5f9a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c @@ -119,6 +119,64 @@ #define MMC_RX_ICMP_GD_OCTETS 0x180 #define MMC_RX_ICMP_ERR_OCTETS 0x184 +/* XGMAC MMC Registers */ +#define MMC_XGMAC_TX_OCTET_GB 0x14 +#define MMC_XGMAC_TX_PKT_GB 0x1c +#define MMC_XGMAC_TX_BROAD_PKT_G 0x24 +#define MMC_XGMAC_TX_MULTI_PKT_G 0x2c +#define MMC_XGMAC_TX_64OCT_GB 0x34 +#define MMC_XGMAC_TX_65OCT_GB 0x3c +#define MMC_XGMAC_TX_128OCT_GB 0x44 +#define MMC_XGMAC_TX_256OCT_GB 0x4c +#define MMC_XGMAC_TX_512OCT_GB 0x54 +#define MMC_XGMAC_TX_1024OCT_GB 0x5c +#define MMC_XGMAC_TX_UNI_PKT_GB 0x64 +#define MMC_XGMAC_TX_MULTI_PKT_GB 0x6c +#define MMC_XGMAC_TX_BROAD_PKT_GB 0x74 +#define MMC_XGMAC_TX_UNDER 0x7c +#define MMC_XGMAC_TX_OCTET_G 0x84 +#define MMC_XGMAC_TX_PKT_G 0x8c +#define MMC_XGMAC_TX_PAUSE 0x94 +#define MMC_XGMAC_TX_VLAN_PKT_G 0x9c +#define MMC_XGMAC_TX_LPI_USEC 0xa4 +#define MMC_XGMAC_TX_LPI_TRAN 0xa8 + +#define MMC_XGMAC_RX_PKT_GB 0x100 +#define MMC_XGMAC_RX_OCTET_GB 0x108 +#define MMC_XGMAC_RX_OCTET_G 0x110 +#define MMC_XGMAC_RX_BROAD_PKT_G 0x118 +#define MMC_XGMAC_RX_MULTI_PKT_G 0x120 +#define MMC_XGMAC_RX_CRC_ERR 0x128 +#define MMC_XGMAC_RX_RUNT_ERR 0x130 +#define MMC_XGMAC_RX_JABBER_ERR 0x134 +#define MMC_XGMAC_RX_UNDER 0x138 +#define MMC_XGMAC_RX_OVER 0x13c +#define MMC_XGMAC_RX_64OCT_GB 0x140 +#define MMC_XGMAC_RX_65OCT_GB 0x148 +#define MMC_XGMAC_RX_128OCT_GB 0x150 +#define MMC_XGMAC_RX_256OCT_GB 0x158 +#define MMC_XGMAC_RX_512OCT_GB 0x160 +#define MMC_XGMAC_RX_1024OCT_GB 0x168 +#define MMC_XGMAC_RX_UNI_PKT_G 0x170 +#define MMC_XGMAC_RX_LENGTH_ERR 0x178 +#define MMC_XGMAC_RX_RANGE 0x180 +#define MMC_XGMAC_RX_PAUSE 0x188 +#define MMC_XGMAC_RX_FIFOOVER_PKT 0x190 +#define MMC_XGMAC_RX_VLAN_PKT_GB 0x198 +#define MMC_XGMAC_RX_WATCHDOG_ERR 0x1a0 +#define MMC_XGMAC_RX_LPI_USEC 0x1a4 +#define MMC_XGMAC_RX_LPI_TRAN 0x1a8 +#define MMC_XGMAC_RX_DISCARD_PKT_GB 0x1ac +#define MMC_XGMAC_RX_DISCARD_OCT_GB 0x1b4 +#define MMC_XGMAC_RX_ALIGN_ERR_PKT 0x1bc + +#define MMC_XGMAC_TX_FPE_FRAG 0x208 
+#define MMC_XGMAC_TX_HOLD_REQ 0x20c
+#define MMC_XGMAC_RX_PKT_ASSEMBLY_ERR 0x228
+#define MMC_XGMAC_RX_PKT_SMD_ERR 0x22c
+#define MMC_XGMAC_RX_PKT_ASSEMBLY_OK 0x230
+#define MMC_XGMAC_RX_FPE_FRAG 0x234
+
 static void dwmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode)
 {
 u32 value = readl(mmcaddr + MMC_CNTRL);
@@ -263,3 +321,137 @@ const struct stmmac_mmc_ops dwmac_mmc_ops = {
 .intr_all_mask = dwmac_mmc_intr_all_mask,
 .read = dwmac_mmc_read,
 };
+
+static void dwxgmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode)
+{
+ u32 value = readl(mmcaddr + MMC_CNTRL);
+
+ value |= (mode & 0x3F);
+
+ writel(value, mmcaddr + MMC_CNTRL);
+}
+
+static void dwxgmac_mmc_intr_all_mask(void __iomem *mmcaddr)
+{
+ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_INTR_MASK);
+ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_TX_INTR_MASK);
+}
+
+static void dwxgmac_read_mmc_reg(void __iomem *addr, u32 reg, u32 *dest)
+{
+ u64 tmp = 0;
+
+ tmp += readl(addr + reg);
+ tmp += ((u64)readl(addr + reg + 0x4)) << 32;
+ if (tmp > GENMASK(31, 0))
+ *dest = ~0x0;
+ else
+ *dest = *dest + tmp;
+}
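
Because the XGMAC MMC counters are reset-on-read, the helper above folds the two 32-bit snapshot halves into a 64-bit value, accumulates it into the running struct field, and pins the field to all-ones once the hardware value no longer fits in 32 bits. The same logic in plain, runnable C; no MMIO, names invented for illustration:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static void toy_accumulate(uint32_t lo, uint32_t hi, uint32_t *dest)
{
	uint64_t tmp = (uint64_t)hi << 32 | lo;

	if (tmp > UINT32_MAX)		/* like the GENMASK(31, 0) check */
		*dest = ~0U;		/* saturate the 32-bit field */
	else
		*dest += (uint32_t)tmp;	/* accumulate reset-on-read value */
}

int main(void)
{
	uint32_t counter = 10;

	toy_accumulate(100, 0, &counter);
	printf("%" PRIu32 "\n", counter);	/* 110 */
	toy_accumulate(0, 1, &counter);		/* high word set -> saturate */
	printf("%" PRIu32 "\n", counter);	/* 4294967295 */
	return 0;
}
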
+
+/* This reads the MAC core counters (if actually supported).
+ * By default the MMC core is programmed to reset each
+ * counter after a read. So all the fields of the mmc struct
+ * have to be incremented.
+ */
+static void dwxgmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
+{
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_OCTET_GB,
+ &mmc->mmc_tx_octetcount_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_PKT_GB,
+ &mmc->mmc_tx_framecount_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_BROAD_PKT_G,
+ &mmc->mmc_tx_broadcastframe_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_MULTI_PKT_G,
+ &mmc->mmc_tx_multicastframe_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_64OCT_GB,
+ &mmc->mmc_tx_64_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_65OCT_GB,
+ &mmc->mmc_tx_65_to_127_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_128OCT_GB,
+ &mmc->mmc_tx_128_to_255_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_256OCT_GB,
+ &mmc->mmc_tx_256_to_511_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_512OCT_GB,
+ &mmc->mmc_tx_512_to_1023_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_1024OCT_GB,
+ &mmc->mmc_tx_1024_to_max_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_UNI_PKT_GB,
+ &mmc->mmc_tx_unicast_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_MULTI_PKT_GB,
+ &mmc->mmc_tx_multicast_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_BROAD_PKT_GB,
+ &mmc->mmc_tx_broadcast_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_UNDER,
+ &mmc->mmc_tx_underflow_error);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_OCTET_G,
+ &mmc->mmc_tx_octetcount_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_PKT_G,
+ &mmc->mmc_tx_framecount_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_PAUSE,
+ &mmc->mmc_tx_pause_frame);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_VLAN_PKT_G,
+ &mmc->mmc_tx_vlan_frame_g);
+
+ /* MMC RX counter registers */
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_PKT_GB,
+ &mmc->mmc_rx_framecount_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_OCTET_GB,
+ &mmc->mmc_rx_octetcount_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_OCTET_G,
+ &mmc->mmc_rx_octetcount_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_BROAD_PKT_G,
+ &mmc->mmc_rx_broadcastframe_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_MULTI_PKT_G,
+ &mmc->mmc_rx_multicastframe_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_CRC_ERR,
+ &mmc->mmc_rx_crc_error);
+ mmc->mmc_rx_run_error += readl(mmcaddr + MMC_XGMAC_RX_RUNT_ERR);
+ mmc->mmc_rx_jabber_error += readl(mmcaddr + MMC_XGMAC_RX_JABBER_ERR);
+ mmc->mmc_rx_undersize_g += readl(mmcaddr + MMC_XGMAC_RX_UNDER);
+ mmc->mmc_rx_oversize_g += readl(mmcaddr + MMC_XGMAC_RX_OVER);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_64OCT_GB,
+ &mmc->mmc_rx_64_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_65OCT_GB,
+ &mmc->mmc_rx_65_to_127_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_128OCT_GB,
+ &mmc->mmc_rx_128_to_255_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_256OCT_GB,
+ &mmc->mmc_rx_256_to_511_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_512OCT_GB,
+ &mmc->mmc_rx_512_to_1023_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_1024OCT_GB,
+ &mmc->mmc_rx_1024_to_max_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_UNI_PKT_G,
+ &mmc->mmc_rx_unicast_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_LENGTH_ERR,
+ &mmc->mmc_rx_length_error);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_RANGE,
+ &mmc->mmc_rx_autofrangetype);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_PAUSE,
+ &mmc->mmc_rx_pause_frames);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_FIFOOVER_PKT,
+ &mmc->mmc_rx_fifo_overflow);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_VLAN_PKT_GB,
+ &mmc->mmc_rx_vlan_frames_gb);
+ mmc->mmc_rx_watchdog_error += readl(mmcaddr + MMC_XGMAC_RX_WATCHDOG_ERR);
+
+ mmc->mmc_tx_fpe_fragment_cntr += readl(mmcaddr + MMC_XGMAC_TX_FPE_FRAG);
+ mmc->mmc_tx_hold_req_cntr += readl(mmcaddr + MMC_XGMAC_TX_HOLD_REQ);
+ mmc->mmc_rx_packet_assembly_err_cntr +=
+ readl(mmcaddr + MMC_XGMAC_RX_PKT_ASSEMBLY_ERR);
+ mmc->mmc_rx_packet_smd_err_cntr +=
+ readl(mmcaddr + MMC_XGMAC_RX_PKT_SMD_ERR);
+ mmc->mmc_rx_packet_assembly_ok_cntr +=
+ readl(mmcaddr + MMC_XGMAC_RX_PKT_ASSEMBLY_OK);
+ mmc->mmc_rx_fpe_fragment_cntr +=
+ readl(mmcaddr + MMC_XGMAC_RX_FPE_FRAG);
+}
+
+const struct stmmac_mmc_ops dwxgmac_mmc_ops = {
+ .ctrl = dwxgmac_mmc_ctrl,
+ .intr_all_mask = dwxgmac_mmc_intr_all_mask,
+ .read = dwxgmac_mmc_read,
+};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 5cd966c154f3..d993fc7e82c3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -13,6 +13,7 @@
 #define DRV_MODULE_VERSION "Jan_2016"
 #include <linux/clk.h>
+#include <linux/if_vlan.h>
 #include <linux/stmmac.h>
 #include <linux/phylink.h>
 #include <linux/pci.h>
@@ -57,7 +58,9 @@ struct stmmac_tx_queue {
 struct stmmac_rx_buffer {
 struct page *page;
+ struct page *sec_page;
 dma_addr_t addr;
+ dma_addr_t sec_addr;
 };
 struct stmmac_rx_queue {
@@ -73,6 +76,12 @@ struct stmmac_rx_queue {
 u32 rx_zeroc_thresh;
 dma_addr_t dma_rx_phy;
 u32 rx_tail_addr;
+ unsigned int state_saved;
+ struct {
+ struct sk_buff *skb;
+ unsigned int len;
+ unsigned int error;
+ } state;
 };
 struct stmmac_channel {
@@ -113,6 +122,22 @@ struct stmmac_pps_cfg {
 struct timespec64 period;
 };
+struct stmmac_rss {
+ int enable;
+ u8 key[STMMAC_RSS_HASH_KEY_SIZE];
+ u32 table[STMMAC_RSS_MAX_TABLE_SIZE];
+};
+
+#define STMMAC_FLOW_ACTION_DROP BIT(0)
+struct stmmac_flow_entry {
+ unsigned long cookie;
+ unsigned long action;
+ u8 ip_proto;
+ int in_use;
+ int idx;
+ int is_l4;
+};
+
 struct stmmac_priv {
 /* Frequently used values are kept adjacent for cache effect */
 u32 tx_coal_frames;
@@ -123,6 +148,8 @@ struct stmmac_priv {
 int hwts_tx_en;
 bool tx_path_in_lpi_mode;
 bool tso;
+ int sph;
+ u32
sarc_type; unsigned int dma_buf_sz; unsigned int rx_copybreak; @@ -185,11 +212,10 @@ struct stmmac_priv { spinlock_t ptp_lock; void __iomem *mmcaddr; void __iomem *ptpaddr; + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; #ifdef CONFIG_DEBUG_FS struct dentry *dbgfs_dir; - struct dentry *dbgfs_rings_status; - struct dentry *dbgfs_dma_cap; #endif unsigned long state; @@ -200,9 +226,14 @@ struct stmmac_priv { unsigned int tc_entries_max; unsigned int tc_off_max; struct stmmac_tc_entry *tc_entries; + unsigned int flow_entries_max; + struct stmmac_flow_entry *flow_entries; /* Pulse Per Second output */ struct stmmac_pps_cfg pps[STMMAC_PPS_MAX]; + + /* Receive Side Scaling */ + struct stmmac_rss rss; }; enum stmmac_state { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 6efb66820d4c..1a768837ca72 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -18,10 +18,12 @@ #include "stmmac.h" #include "dwmac_dma.h" +#include "dwxgmac2.h" #define REG_SPACE_SIZE 0x1060 #define MAC100_ETHTOOL_NAME "st_mac100" #define GMAC_ETHTOOL_NAME "st_gmac" +#define XGMAC_ETHTOOL_NAME "st_xgmac" #define ETHTOOL_DMA_OFFSET 55 @@ -65,6 +67,7 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = { STMMAC_STAT(rx_missed_cntr), STMMAC_STAT(rx_overflow_cntr), STMMAC_STAT(rx_vlan), + STMMAC_STAT(rx_split_hdr_pkt_n), /* Tx/Rx IRQ error info */ STMMAC_STAT(tx_undeflow_irq), STMMAC_STAT(tx_process_stopped_irq), @@ -243,6 +246,12 @@ static const struct stmmac_stats stmmac_mmc[] = { STMMAC_MMC_STAT(mmc_rx_tcp_err_octets), STMMAC_MMC_STAT(mmc_rx_icmp_gd_octets), STMMAC_MMC_STAT(mmc_rx_icmp_err_octets), + STMMAC_MMC_STAT(mmc_tx_fpe_fragment_cntr), + STMMAC_MMC_STAT(mmc_tx_hold_req_cntr), + STMMAC_MMC_STAT(mmc_rx_packet_assembly_err_cntr), + STMMAC_MMC_STAT(mmc_rx_packet_smd_err_cntr), + STMMAC_MMC_STAT(mmc_rx_packet_assembly_ok_cntr), + STMMAC_MMC_STAT(mmc_rx_fpe_fragment_cntr), }; #define STMMAC_MMC_STATS_LEN ARRAY_SIZE(stmmac_mmc) @@ -253,6 +262,8 @@ static void stmmac_ethtool_getdrvinfo(struct net_device *dev, if (priv->plat->has_gmac || priv->plat->has_gmac4) strlcpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver)); + else if (priv->plat->has_xgmac) + strlcpy(info->driver, XGMAC_ETHTOOL_NAME, sizeof(info->driver)); else strlcpy(info->driver, MAC100_ETHTOOL_NAME, sizeof(info->driver)); @@ -398,23 +409,28 @@ static int stmmac_check_if_running(struct net_device *dev) static int stmmac_ethtool_get_regs_len(struct net_device *dev) { + struct stmmac_priv *priv = netdev_priv(dev); + + if (priv->plat->has_xgmac) + return XGMAC_REGSIZE * 4; return REG_SPACE_SIZE; } static void stmmac_ethtool_gregs(struct net_device *dev, struct ethtool_regs *regs, void *space) { - u32 *reg_space = (u32 *) space; - struct stmmac_priv *priv = netdev_priv(dev); - - memset(reg_space, 0x0, REG_SPACE_SIZE); + u32 *reg_space = (u32 *) space; stmmac_dump_mac_regs(priv, priv->hw, reg_space); stmmac_dump_dma_regs(priv, priv->ioaddr, reg_space); - /* Copy DMA registers to where ethtool expects them */ - memcpy(®_space[ETHTOOL_DMA_OFFSET], ®_space[DMA_BUS_MODE / 4], - NUM_DWMAC1000_DMA_REGS * 4); + + if (!priv->plat->has_xgmac) { + /* Copy DMA registers to where ethtool expects them */ + memcpy(®_space[ETHTOOL_DMA_OFFSET], + ®_space[DMA_BUS_MODE / 4], + NUM_DWMAC1000_DMA_REGS * 4); + } } static int stmmac_nway_reset(struct net_device *dev) @@ -730,8 +746,15 @@ static int 
stmmac_set_coalesce(struct net_device *dev, (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval)) return -EOPNOTSUPP; - if (ec->rx_coalesce_usecs == 0) - return -EINVAL; + if (priv->use_riwt && (ec->rx_coalesce_usecs > 0)) { + rx_riwt = stmmac_usec2riwt(ec->rx_coalesce_usecs, priv); + + if ((rx_riwt > MAX_DMA_RIWT) || (rx_riwt < MIN_DMA_RIWT)) + return -EINVAL; + + priv->rx_riwt = rx_riwt; + stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt); + } if ((ec->tx_coalesce_usecs == 0) && (ec->tx_max_coalesced_frames == 0)) @@ -741,23 +764,83 @@ static int stmmac_set_coalesce(struct net_device *dev, (ec->tx_max_coalesced_frames > STMMAC_TX_MAX_FRAMES)) return -EINVAL; - rx_riwt = stmmac_usec2riwt(ec->rx_coalesce_usecs, priv); - - if ((rx_riwt > MAX_DMA_RIWT) || (rx_riwt < MIN_DMA_RIWT)) - return -EINVAL; - else if (!priv->use_riwt) - return -EOPNOTSUPP; - /* Only copy relevant parameters, ignore all others. */ priv->tx_coal_frames = ec->tx_max_coalesced_frames; priv->tx_coal_timer = ec->tx_coalesce_usecs; priv->rx_coal_frames = ec->rx_max_coalesced_frames; - priv->rx_riwt = rx_riwt; - stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt); + return 0; +} + +static int stmmac_get_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *rxnfc, u32 *rule_locs) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + switch (rxnfc->cmd) { + case ETHTOOL_GRXRINGS: + rxnfc->data = priv->plat->rx_queues_to_use; + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static u32 stmmac_get_rxfh_key_size(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + return sizeof(priv->rss.key); +} + +static u32 stmmac_get_rxfh_indir_size(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + return ARRAY_SIZE(priv->rss.table); +} + +static int stmmac_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int i; + + if (indir) { + for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) + indir[i] = priv->rss.table[i]; + } + + if (key) + memcpy(key, priv->rss.key, sizeof(priv->rss.key)); + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; return 0; } +static int stmmac_set_rxfh(struct net_device *dev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int i; + + if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && (hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + + if (indir) { + for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) + priv->rss.table[i] = indir[i]; + } + + if (key) + memcpy(priv->rss.key, key, sizeof(priv->rss.key)); + + return stmmac_rss_configure(priv, priv->hw, &priv->rss, + priv->plat->rx_queues_to_use); +} + static int stmmac_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) { @@ -849,6 +932,11 @@ static const struct ethtool_ops stmmac_ethtool_ops = { .get_eee = stmmac_ethtool_op_get_eee, .set_eee = stmmac_ethtool_op_set_eee, .get_sset_count = stmmac_get_sset_count, + .get_rxnfc = stmmac_get_rxnfc, + .get_rxfh_key_size = stmmac_get_rxfh_key_size, + .get_rxfh_indir_size = stmmac_get_rxfh_indir_size, + .get_rxfh = stmmac_get_rxfh, + .set_rxfh = stmmac_set_rxfh, .get_ts_info = stmmac_get_ts_info, .get_coalesce = stmmac_get_coalesce, .set_coalesce = stmmac_set_coalesce, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index fd54c7c87485..a6cb2aa60e64 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ 
b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -105,7 +105,7 @@ MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode"); static irqreturn_t stmmac_interrupt(int irq, void *dev_id); #ifdef CONFIG_DEBUG_FS -static int stmmac_init_fs(struct net_device *dev); +static void stmmac_init_fs(struct net_device *dev); static void stmmac_exit_fs(struct net_device *dev); #endif @@ -432,6 +432,7 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, struct sk_buff *skb) { struct skb_shared_hwtstamps shhwtstamp; + bool found = false; u64 ns = 0; if (!priv->hwts_tx_en) @@ -443,9 +444,13 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, /* check tx tstamp status */ if (stmmac_get_tx_timestamp_status(priv, p)) { - /* get the valid tstamp */ stmmac_get_timestamp(priv, p, priv->adv_ts, &ns); + found = true; + } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) { + found = true; + } + if (found) { memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); shhwtstamp.hwtstamp = ns_to_ktime(ns); @@ -453,8 +458,6 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, /* pass tstamp to stack */ skb_tstamp_tx(skb, &shhwtstamp); } - - return; } /* stmmac_get_rx_hwtstamp - get HW RX timestamps @@ -828,15 +831,22 @@ static void stmmac_validate(struct phylink_config *config, phylink_set(mask, 1000baseT_Full); phylink_set(mask, 1000baseX_Full); } else if (priv->plat->has_xgmac) { - phylink_set(mac_supported, 2500baseT_Full); - phylink_set(mac_supported, 5000baseT_Full); - phylink_set(mac_supported, 10000baseSR_Full); - phylink_set(mac_supported, 10000baseLR_Full); - phylink_set(mac_supported, 10000baseER_Full); - phylink_set(mac_supported, 10000baseLRM_Full); - phylink_set(mac_supported, 10000baseT_Full); - phylink_set(mac_supported, 10000baseKX4_Full); - phylink_set(mac_supported, 10000baseKR_Full); + if (!max_speed || (max_speed >= 2500)) { + phylink_set(mac_supported, 2500baseT_Full); + phylink_set(mac_supported, 2500baseX_Full); + } + if (!max_speed || (max_speed >= 5000)) { + phylink_set(mac_supported, 5000baseT_Full); + } + if (!max_speed || (max_speed >= 10000)) { + phylink_set(mac_supported, 10000baseSR_Full); + phylink_set(mac_supported, 10000baseLR_Full); + phylink_set(mac_supported, 10000baseER_Full); + phylink_set(mac_supported, 10000baseLRM_Full); + phylink_set(mac_supported, 10000baseT_Full); + phylink_set(mac_supported, 10000baseKX4_Full); + phylink_set(mac_supported, 10000baseKR_Full); + } } /* Half-Duplex can only work with single queue */ @@ -1026,7 +1036,7 @@ static int stmmac_init_phy(struct net_device *dev) static int stmmac_phy_setup(struct stmmac_priv *priv) { struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node); - int mode = priv->plat->interface; + int mode = priv->plat->phy_interface; struct phylink *phylink; priv->phylink_config.dev = &priv->dev->dev; @@ -1198,6 +1208,17 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, if (!buf->page) return -ENOMEM; + if (priv->sph) { + buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool); + if (!buf->sec_page) + return -ENOMEM; + + buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); + stmmac_set_desc_sec_addr(priv, p, buf->sec_addr); + } else { + buf->sec_page = NULL; + } + buf->addr = page_pool_get_dma_addr(buf->page); stmmac_set_desc_addr(priv, p, buf->addr); if (priv->dma_buf_sz == BUF_SIZE_16KiB) @@ -1220,6 +1241,10 @@ static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i) if (buf->page) 
page_pool_put_page(rx_q->page_pool, buf->page, false); buf->page = NULL; + + if (buf->sec_page) + page_pool_put_page(rx_q->page_pool, buf->sec_page, false); + buf->sec_page = NULL; } /** @@ -2417,6 +2442,22 @@ static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv) } } +static void stmmac_mac_config_rss(struct stmmac_priv *priv) +{ + if (!priv->dma_cap.rssen || !priv->plat->rss_en) { + priv->rss.enable = false; + return; + } + + if (priv->dev->features & NETIF_F_RXHASH) + priv->rss.enable = true; + else + priv->rss.enable = false; + + stmmac_rss_configure(priv, priv->hw, &priv->rss, + priv->plat->rx_queues_to_use); +} + /** * stmmac_mtl_configuration - Configure MTL * @priv: driver private structure @@ -2461,6 +2502,10 @@ static void stmmac_mtl_configuration(struct stmmac_priv *priv) /* Set RX routing */ if (rx_queues_count > 1) stmmac_mac_config_rx_queues_routing(priv); + + /* Receive Side Scaling */ + if (rx_queues_count > 1) + stmmac_mac_config_rss(priv); } static void stmmac_safety_feat_configuration(struct stmmac_priv *priv) @@ -2573,6 +2618,16 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) stmmac_enable_tso(priv, priv->ioaddr, 1, chan); } + /* Enable Split Header */ + if (priv->sph && priv->hw->rx_csum) { + for (chan = 0; chan < rx_cnt; chan++) + stmmac_enable_sph(priv, priv->ioaddr, 1, chan); + } + + /* VLAN Tag Insertion */ + if (priv->dma_cap.vlins) + stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT); + /* Start the ball rolling... */ stmmac_start_all_dma(priv); @@ -2750,6 +2805,33 @@ static int stmmac_release(struct net_device *dev) return 0; } +static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb, + struct stmmac_tx_queue *tx_q) +{ + u16 tag = 0x0, inner_tag = 0x0; + u32 inner_type = 0x0; + struct dma_desc *p; + + if (!priv->dma_cap.vlins) + return false; + if (!skb_vlan_tag_present(skb)) + return false; + if (skb->vlan_proto == htons(ETH_P_8021AD)) { + inner_tag = skb_vlan_tag_get(skb); + inner_type = STMMAC_VLAN_INSERT; + } + + tag = skb_vlan_tag_get(skb); + + p = tx_q->dma_tx + tx_q->cur_tx; + if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type)) + return false; + + stmmac_set_tx_owner(priv, p); + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); + return true; +} + /** * stmmac_tso_allocator - close entry point of the driver * @priv: driver private structure @@ -2829,12 +2911,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) struct stmmac_priv *priv = netdev_priv(dev); int nfrags = skb_shinfo(skb)->nr_frags; u32 queue = skb_get_queue_mapping(skb); - unsigned int first_entry; struct stmmac_tx_queue *tx_q; + unsigned int first_entry; int tmp_pay_len = 0; u32 pay_len, mss; u8 proto_hdr_len; dma_addr_t des; + bool has_vlan; int i; tx_q = &priv->tx_queue[queue]; @@ -2876,12 +2959,18 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) skb->data_len); } + /* Check if VLAN can be inserted by HW */ + has_vlan = stmmac_vlan_insert(priv, skb, tx_q); + first_entry = tx_q->cur_tx; WARN_ON(tx_q->tx_skbuff[first_entry]); desc = tx_q->dma_tx + first_entry; first = desc; + if (has_vlan) + stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT); + /* first descriptor: fill Headers on Buf1 */ des = dma_map_single(priv->device, skb->data, skb_headlen(skb), DMA_TO_DEVICE); @@ -2960,6 +3049,9 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) priv->xstats.tx_set_ic_bit++; } + if (priv->sarc_type) + 
stmmac_set_desc_sarc(priv, first, priv->sarc_type); + skb_tx_timestamp(skb); if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && @@ -3038,6 +3130,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) unsigned int first_entry; unsigned int enh_desc; dma_addr_t des; + bool has_vlan; int entry; tx_q = &priv->tx_queue[queue]; @@ -3063,6 +3156,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_BUSY; } + /* Check if VLAN can be inserted by HW */ + has_vlan = stmmac_vlan_insert(priv, skb, tx_q); + entry = tx_q->cur_tx; first_entry = entry; WARN_ON(tx_q->tx_skbuff[first_entry]); @@ -3076,6 +3172,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) first = desc; + if (has_vlan) + stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT); + enh_desc = priv->plat->enh_desc; /* To program the descriptors according to the size of the frame */ if (enh_desc) @@ -3173,6 +3272,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) priv->xstats.tx_set_ic_bit++; } + if (priv->sarc_type) + stmmac_set_desc_sarc(priv, first, priv->sarc_type); + skb_tx_timestamp(skb); /* Ready to fill the first descriptor and set the OWN bit w/o any @@ -3292,6 +3394,17 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) break; } + if (priv->sph && !buf->sec_page) { + buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool); + if (!buf->sec_page) + break; + + buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); + + dma_sync_single_for_device(priv->device, buf->sec_addr, + len, DMA_FROM_DEVICE); + } + buf->addr = page_pool_get_dma_addr(buf->page); /* Sync whole allocation to device. This will invalidate old @@ -3301,10 +3414,13 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) DMA_FROM_DEVICE); stmmac_set_desc_addr(priv, p, buf->addr); + stmmac_set_desc_sec_addr(priv, p, buf->sec_addr); stmmac_refill_desc3(priv, rx_q, p); rx_q->rx_count_frames++; - rx_q->rx_count_frames %= priv->rx_coal_frames; + rx_q->rx_count_frames += priv->rx_coal_frames; + if (rx_q->rx_count_frames > priv->rx_coal_frames) + rx_q->rx_count_frames = 0; use_rx_wd = priv->use_riwt && rx_q->rx_count_frames; dma_wmb(); @@ -3330,9 +3446,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) { struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; struct stmmac_channel *ch = &priv->channel[queue]; + unsigned int count = 0, error = 0, len = 0; + int status = 0, coe = priv->hw->rx_csum; unsigned int next_entry = rx_q->cur_rx; - int coe = priv->hw->rx_csum; - unsigned int count = 0; + struct sk_buff *skb = NULL; if (netif_msg_rx_status(priv)) { void *rx_head; @@ -3346,10 +3463,30 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true); } while (count < limit) { + unsigned int hlen = 0, prev_len = 0; + enum pkt_hash_types hash_type; struct stmmac_rx_buffer *buf; struct dma_desc *np, *p; - int entry, status; + unsigned int sec_len; + int entry; + u32 hash; + + if (!count && rx_q->state_saved) { + skb = rx_q->state.skb; + error = rx_q->state.error; + len = rx_q->state.len; + } else { + rx_q->state_saved = false; + skb = NULL; + error = 0; + len = 0; + } + if (count >= limit) + break; + +read_again: + sec_len = 0; entry = next_entry; buf = &rx_q->buf_pool[entry]; @@ -3376,34 +3513,33 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) np = rx_q->dma_rx + next_entry; prefetch(np); + 
prefetch(page_address(buf->page)); if (priv->extend_desc) stmmac_rx_extended_status(priv, &priv->dev->stats, &priv->xstats, rx_q->dma_erx + entry); if (unlikely(status == discard_frame)) { page_pool_recycle_direct(rx_q->page_pool, buf->page); - priv->dev->stats.rx_errors++; buf->page = NULL; - } else { - struct sk_buff *skb; - int frame_len; - unsigned int des; + error = 1; + if (!priv->hwts_rx_en) + priv->dev->stats.rx_errors++; + } - stmmac_get_desc_addr(priv, p, &des); - frame_len = stmmac_get_rx_frame_len(priv, p, coe); + if (unlikely(error && (status & rx_not_ls))) + goto read_again; + if (unlikely(error)) { + dev_kfree_skb(skb); + continue; + } - /* If frame length is greater than skb buffer size - * (preallocated during init) then the packet is - * ignored - */ - if (frame_len > priv->dma_buf_sz) { - if (net_ratelimit()) - netdev_err(priv->dev, - "len %d larger than size (%d)\n", - frame_len, priv->dma_buf_sz); - priv->dev->stats.rx_length_errors++; - continue; - } + /* Buffer is good. Go on. */ + + if (likely(status & rx_not_ls)) { + len += priv->dma_buf_sz; + } else { + prev_len = len; + len = stmmac_get_rx_frame_len(priv, p, coe); /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 * Type frames (LLC/LLC-SNAP) @@ -3414,53 +3550,97 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) */ if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) || unlikely(status != llc_snap)) - frame_len -= ETH_FCS_LEN; + len -= ETH_FCS_LEN; + } + + if (!skb) { + int ret = stmmac_get_rx_header_len(priv, p, &hlen); - if (netif_msg_rx_status(priv)) { - netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n", - p, entry, des); - netdev_dbg(priv->dev, "frame size %d, COE: %d\n", - frame_len, status); + if (priv->sph && !ret && (hlen > 0)) { + sec_len = len; + if (!(status & rx_not_ls)) + sec_len = sec_len - hlen; + len = hlen; + + prefetch(page_address(buf->sec_page)); + priv->xstats.rx_split_hdr_pkt_n++; } - skb = netdev_alloc_skb_ip_align(priv->dev, frame_len); - if (unlikely(!skb)) { + skb = napi_alloc_skb(&ch->rx_napi, len); + if (!skb) { priv->dev->stats.rx_dropped++; continue; } - dma_sync_single_for_cpu(priv->device, buf->addr, - frame_len, DMA_FROM_DEVICE); + dma_sync_single_for_cpu(priv->device, buf->addr, len, + DMA_FROM_DEVICE); skb_copy_to_linear_data(skb, page_address(buf->page), - frame_len); - skb_put(skb, frame_len); + len); + skb_put(skb, len); - if (netif_msg_pktdata(priv)) { - netdev_dbg(priv->dev, "frame received (%dbytes)", - frame_len); - print_pkt(skb->data, frame_len); - } - - stmmac_get_rx_hwtstamp(priv, p, np, skb); + /* Data payload copied into SKB, page ready for recycle */ + page_pool_recycle_direct(rx_q->page_pool, buf->page); + buf->page = NULL; + } else { + unsigned int buf_len = len - prev_len; - stmmac_rx_vlan(priv->dev, skb); + if (likely(status & rx_not_ls)) + buf_len = priv->dma_buf_sz; - skb->protocol = eth_type_trans(skb, priv->dev); + dma_sync_single_for_cpu(priv->device, buf->addr, + buf_len, DMA_FROM_DEVICE); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + buf->page, 0, buf_len, + priv->dma_buf_sz); - if (unlikely(!coe)) - skb_checksum_none_assert(skb); - else - skb->ip_summed = CHECKSUM_UNNECESSARY; + /* Data payload appended into SKB */ + page_pool_release_page(rx_q->page_pool, buf->page); + buf->page = NULL; + } - napi_gro_receive(&ch->rx_napi, skb); + if (sec_len > 0) { + dma_sync_single_for_cpu(priv->device, buf->sec_addr, + sec_len, DMA_FROM_DEVICE); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + buf->sec_page, 0, sec_len, + 
priv->dma_buf_sz); - /* Data payload copied into SKB, page ready for recycle */ - page_pool_recycle_direct(rx_q->page_pool, buf->page); - buf->page = NULL; + len += sec_len; - priv->dev->stats.rx_packets++; - priv->dev->stats.rx_bytes += frame_len; + /* Data payload appended into SKB */ + page_pool_release_page(rx_q->page_pool, buf->sec_page); + buf->sec_page = NULL; } + + if (likely(status & rx_not_ls)) + goto read_again; + + /* Got entire packet into SKB. Finish it. */ + + stmmac_get_rx_hwtstamp(priv, p, np, skb); + stmmac_rx_vlan(priv->dev, skb); + skb->protocol = eth_type_trans(skb, priv->dev); + + if (unlikely(!coe)) + skb_checksum_none_assert(skb); + else + skb->ip_summed = CHECKSUM_UNNECESSARY; + + if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type)) + skb_set_hash(skb, hash, hash_type); + + skb_record_rx_queue(skb, queue); + napi_gro_receive(&ch->rx_napi, skb); + + priv->dev->stats.rx_packets++; + priv->dev->stats.rx_bytes += len; + } + + if (status & rx_not_ls) { + rx_q->state_saved = true; + rx_q->state.skb = skb; + rx_q->state.error = error; + rx_q->state.len = len; } stmmac_rx_refill(priv, queue); @@ -3606,6 +3786,8 @@ static int stmmac_set_features(struct net_device *netdev, netdev_features_t features) { struct stmmac_priv *priv = netdev_priv(netdev); + bool sph_en; + u32 chan; /* Keep the COE Type in case of csum is supporting */ if (features & NETIF_F_RXCSUM) @@ -3617,6 +3799,10 @@ static int stmmac_set_features(struct net_device *netdev, */ stmmac_rx_ipc(priv, priv->hw); + sph_en = (priv->hw->rx_csum > 0) && priv->sph; + for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++) + stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); + return 0; } @@ -3755,12 +3941,17 @@ static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data, struct stmmac_priv *priv = cb_priv; int ret = -EOPNOTSUPP; + if (!tc_cls_can_offload_and_chain0(priv->dev, type_data)) + return ret; + stmmac_disable_all_queues(priv); switch (type) { case TC_SETUP_CLSU32: - if (tc_cls_can_offload_and_chain0(priv->dev, type_data)) - ret = stmmac_tc_setup_cls_u32(priv, priv, type_data); + ret = stmmac_tc_setup_cls_u32(priv, priv, type_data); + break; + case TC_SETUP_CLSFLOWER: + ret = stmmac_tc_setup_cls(priv, priv, type_data); break; default: break; @@ -3962,54 +4153,102 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v) } DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap); -static int stmmac_init_fs(struct net_device *dev) +static void stmmac_init_fs(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); /* Create per netdev entries */ priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir); - if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) { - netdev_err(priv->dev, "ERROR failed to create debugfs directory\n"); + /* Entry to report DMA RX/TX rings */ + debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev, + &stmmac_rings_status_fops); - return -ENOMEM; + /* Entry to report the DMA HW features */ + debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev, + &stmmac_dma_cap_fops); +} + +static void stmmac_exit_fs(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + debugfs_remove_recursive(priv->dbgfs_dir); +} +#endif /* CONFIG_DEBUG_FS */ + +static u32 stmmac_vid_crc32_le(__le16 vid_le) +{ + unsigned char *data = (unsigned char *)&vid_le; + unsigned char data_byte = 0; + u32 crc = ~0x0; + u32 temp = 0; + int i, bits; + + bits = get_bitmask_order(VLAN_VID_MASK); + for (i = 0; i < bits; i++) { + if ((i % 8) == 0) + data_byte = 
data[i / 8]; + + temp = ((crc & 1) ^ data_byte) & 1; + crc >>= 1; + data_byte >>= 1; + + if (temp) + crc ^= 0xedb88320; } - /* Entry to report DMA RX/TX rings */ - priv->dbgfs_rings_status = - debugfs_create_file("descriptors_status", 0444, - priv->dbgfs_dir, dev, - &stmmac_rings_status_fops); + return crc; +} - if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) { - netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n"); - debugfs_remove_recursive(priv->dbgfs_dir); +static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double) +{ + u32 crc, hash = 0; + u16 vid; - return -ENOMEM; + for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) { + __le16 vid_le = cpu_to_le16(vid); + crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28; + hash |= (1 << crc); } - /* Entry to report the DMA HW features */ - priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444, - priv->dbgfs_dir, - dev, &stmmac_dma_cap_fops); + return stmmac_update_vlan_hash(priv, priv->hw, hash, is_double); +} + +static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid) +{ + struct stmmac_priv *priv = netdev_priv(ndev); + bool is_double = false; + int ret; - if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) { - netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n"); - debugfs_remove_recursive(priv->dbgfs_dir); + if (!priv->dma_cap.vlhash) + return -EOPNOTSUPP; + if (be16_to_cpu(proto) == ETH_P_8021AD) + is_double = true; - return -ENOMEM; + set_bit(vid, priv->active_vlans); + ret = stmmac_vlan_update(priv, is_double); + if (ret) { + clear_bit(vid, priv->active_vlans); + return ret; } - return 0; + return ret; } -static void stmmac_exit_fs(struct net_device *dev) +static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid) { - struct stmmac_priv *priv = netdev_priv(dev); + struct stmmac_priv *priv = netdev_priv(ndev); + bool is_double = false; - debugfs_remove_recursive(priv->dbgfs_dir); + if (!priv->dma_cap.vlhash) + return -EOPNOTSUPP; + if (be16_to_cpu(proto) == ETH_P_8021AD) + is_double = true; + + clear_bit(vid, priv->active_vlans); + return stmmac_vlan_update(priv, is_double); } -#endif /* CONFIG_DEBUG_FS */ static const struct net_device_ops stmmac_netdev_ops = { .ndo_open = stmmac_open, @@ -4027,6 +4266,8 @@ static const struct net_device_ops stmmac_netdev_ops = { .ndo_poll_controller = stmmac_poll_controller, #endif .ndo_set_mac_address = stmmac_set_mac_address, + .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid, }; static void stmmac_reset_subtask(struct stmmac_priv *priv) @@ -4175,8 +4416,8 @@ int stmmac_dvr_probe(struct device *device, { struct net_device *ndev = NULL; struct stmmac_priv *priv; - u32 queue, maxq; - int ret = 0; + u32 queue, rxq, maxq; + int i, ret = 0; ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv), MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES); @@ -4259,6 +4500,12 @@ int stmmac_dvr_probe(struct device *device, dev_info(priv->device, "TSO feature enabled\n"); } + if (priv->dma_cap.sphen) { + ndev->hw_features |= NETIF_F_GRO; + priv->sph = true; + dev_info(priv->device, "SPH feature enabled\n"); + } + if (priv->dma_cap.addr64) { ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(priv->dma_cap.addr64)); @@ -4281,15 +4528,33 @@ int stmmac_dvr_probe(struct device *device, #ifdef STMMAC_VLAN_TAG_USED /* Both mac100 and gmac support receive VLAN tag detection */ ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX; + if 
(priv->dma_cap.vlhash) { + ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER; + } + if (priv->dma_cap.vlins) { + ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; + if (priv->dma_cap.dvlan) + ndev->features |= NETIF_F_HW_VLAN_STAG_TX; + } #endif priv->msg_enable = netif_msg_init(debug, default_msg_level); + /* Initialize RSS */ + rxq = priv->plat->rx_queues_to_use; + netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key)); + for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) + priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq); + + if (priv->dma_cap.rssen && priv->plat->rss_en) + ndev->features |= NETIF_F_RXHASH; + /* MTU range: 46 - hw-specific max */ ndev->min_mtu = ETH_ZLEN - ETH_HLEN; - if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00)) - ndev->max_mtu = JUMBO_LEN; - else if (priv->plat->has_xgmac) + if (priv->plat->has_xgmac) ndev->max_mtu = XGMAC_JUMBO_LEN; + else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00)) + ndev->max_mtu = JUMBO_LEN; else ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN); /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu @@ -4368,10 +4633,7 @@ int stmmac_dvr_probe(struct device *device, } #ifdef CONFIG_DEBUG_FS - ret = stmmac_init_fs(ndev); - if (ret < 0) - netdev_warn(priv->dev, "%s: failed debugFS registration\n", - __func__); + stmmac_init_fs(ndev); #endif return ret; @@ -4451,10 +4713,12 @@ int stmmac_suspend(struct device *dev) if (!ndev || !netif_running(ndev)) return 0; - phylink_stop(priv->phylink); - mutex_lock(&priv->lock); + rtnl_lock(); + phylink_stop(priv->phylink); + rtnl_unlock(); + netif_device_detach(ndev); stmmac_stop_all_queues(priv); @@ -4558,9 +4822,11 @@ int stmmac_resume(struct device *dev) stmmac_start_all_queues(priv); - mutex_unlock(&priv->lock); - + rtnl_lock(); phylink_start(priv->phylink); + rtnl_unlock(); + + mutex_unlock(&priv->lock); return 0; } @@ -4617,16 +4883,8 @@ static int __init stmmac_init(void) { #ifdef CONFIG_DEBUG_FS /* Create debugfs main directory if it doesn't exist yet */ - if (!stmmac_fs_dir) { + if (!stmmac_fs_dir) stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL); - - if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) { - pr_err("ERROR %s, debugfs create directory failed\n", - STMMAC_RESOURCE_NAME); - - return -ENOMEM; - } - } #endif return 0; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index 4304c1abc5d1..40c42637ad75 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -348,7 +348,9 @@ int stmmac_mdio_register(struct net_device *ndev) max_addr = PHY_MAX_ADDR; } - new_bus->reset = &stmmac_mdio_reset; + if (mdio_bus_data->needs_reset) + new_bus->reset = &stmmac_mdio_reset; + snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x", new_bus->name, priv->plat->bus_id); new_bus->priv = ndev; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index 86f9c07a38cf..292045f4581f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c @@ -9,6 +9,7 @@ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> *******************************************************************************/ +#include <linux/clk-provider.h> #include <linux/pci.h> #include <linux/dmi.h> @@ -63,6 +64,7 @@ static void common_default_data(struct plat_stmmacenet_data *plat) plat->has_gmac = 
1; plat->force_sf_dma_mode = 1; + plat->mdio_bus_data->needs_reset = true; plat->mdio_bus_data->phy_mask = 0; /* Set default value for multicast hash bins */ @@ -107,6 +109,166 @@ static const struct stmmac_pci_info stmmac_pci_info = { .setup = stmmac_default_data, }; +static int intel_mgbe_common_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + int i; + + plat->clk_csr = 5; + plat->has_gmac = 0; + plat->has_gmac4 = 1; + plat->force_sf_dma_mode = 0; + plat->tso_en = 1; + + plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP; + + for (i = 0; i < plat->rx_queues_to_use; i++) { + plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB; + plat->rx_queues_cfg[i].chan = i; + + /* Disable Priority config by default */ + plat->rx_queues_cfg[i].use_prio = false; + + /* Disable RX queues routing by default */ + plat->rx_queues_cfg[i].pkt_route = 0x0; + } + + for (i = 0; i < plat->tx_queues_to_use; i++) { + plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB; + + /* Disable Priority config by default */ + plat->tx_queues_cfg[i].use_prio = false; + } + + /* FIFO size is 4096 bytes for 1 tx/rx queue */ + plat->tx_fifo_size = plat->tx_queues_to_use * 4096; + plat->rx_fifo_size = plat->rx_queues_to_use * 4096; + + plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR; + plat->tx_queues_cfg[0].weight = 0x09; + plat->tx_queues_cfg[1].weight = 0x0A; + plat->tx_queues_cfg[2].weight = 0x0B; + plat->tx_queues_cfg[3].weight = 0x0C; + plat->tx_queues_cfg[4].weight = 0x0D; + plat->tx_queues_cfg[5].weight = 0x0E; + plat->tx_queues_cfg[6].weight = 0x0F; + plat->tx_queues_cfg[7].weight = 0x10; + + plat->mdio_bus_data->phy_mask = 0; + + plat->dma_cfg->pbl = 32; + plat->dma_cfg->pblx8 = true; + plat->dma_cfg->fixed_burst = 0; + plat->dma_cfg->mixed_burst = 0; + plat->dma_cfg->aal = 0; + + plat->axi = devm_kzalloc(&pdev->dev, sizeof(*plat->axi), + GFP_KERNEL); + if (!plat->axi) + return -ENOMEM; + + plat->axi->axi_lpi_en = 0; + plat->axi->axi_xit_frm = 0; + plat->axi->axi_wr_osr_lmt = 1; + plat->axi->axi_rd_osr_lmt = 1; + plat->axi->axi_blen[0] = 4; + plat->axi->axi_blen[1] = 8; + plat->axi->axi_blen[2] = 16; + + plat->ptp_max_adj = plat->clk_ptp_rate; + + /* Set system clock */ + plat->stmmac_clk = clk_register_fixed_rate(&pdev->dev, + "stmmac-clk", NULL, 0, + plat->clk_ptp_rate); + + if (IS_ERR(plat->stmmac_clk)) { + dev_warn(&pdev->dev, "Fail to register stmmac-clk\n"); + plat->stmmac_clk = NULL; + } + clk_prepare_enable(plat->stmmac_clk); + + /* Set default value for multicast hash bins */ + plat->multicast_filter_bins = HASH_TABLE_SIZE; + + /* Set default value for unicast filter entries */ + plat->unicast_filter_entries = 1; + + /* Set the maxmtu to a default of JUMBO_LEN */ + plat->maxmtu = JUMBO_LEN; + + return 0; +} + +static int ehl_common_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + int ret; + + plat->rx_queues_to_use = 8; + plat->tx_queues_to_use = 8; + plat->clk_ptp_rate = 200000000; + ret = intel_mgbe_common_data(pdev, plat); + if (ret) + return ret; + + return 0; +} + +static int ehl_sgmii_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + plat->bus_id = 1; + plat->phy_addr = 0; + plat->interface = PHY_INTERFACE_MODE_SGMII; + return ehl_common_data(pdev, plat); +} + +static struct stmmac_pci_info ehl_sgmii1g_pci_info = { + .setup = ehl_sgmii_data, +}; + +static int ehl_rgmii_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + plat->bus_id = 1; + plat->phy_addr = 0; + plat->interface = PHY_INTERFACE_MODE_RGMII; + return ehl_common_data(pdev, 
plat); +} + +static struct stmmac_pci_info ehl_rgmii1g_pci_info = { + .setup = ehl_rgmii_data, +}; + +static int tgl_common_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + int ret; + + plat->rx_queues_to_use = 6; + plat->tx_queues_to_use = 4; + plat->clk_ptp_rate = 200000000; + ret = intel_mgbe_common_data(pdev, plat); + if (ret) + return ret; + + return 0; +} + +static int tgl_sgmii_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + plat->bus_id = 1; + plat->phy_addr = 0; + plat->interface = PHY_INTERFACE_MODE_SGMII; + return tgl_common_data(pdev, plat); +} + +static struct stmmac_pci_info tgl_sgmii1g_pci_info = { + .setup = tgl_sgmii_data, +}; + static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = { { .func = 6, @@ -213,6 +375,75 @@ static const struct stmmac_pci_info quark_pci_info = { .setup = quark_default_data, }; +static int snps_gmac5_default_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + int i; + + plat->clk_csr = 5; + plat->has_gmac4 = 1; + plat->force_sf_dma_mode = 1; + plat->tso_en = 1; + plat->pmt = 1; + + plat->mdio_bus_data->phy_mask = 0; + + /* Set default value for multicast hash bins */ + plat->multicast_filter_bins = HASH_TABLE_SIZE; + + /* Set default value for unicast filter entries */ + plat->unicast_filter_entries = 1; + + /* Set the maxmtu to a default of JUMBO_LEN */ + plat->maxmtu = JUMBO_LEN; + + /* Set default number of RX and TX queues to use */ + plat->tx_queues_to_use = 4; + plat->rx_queues_to_use = 4; + + plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR; + for (i = 0; i < plat->tx_queues_to_use; i++) { + plat->tx_queues_cfg[i].use_prio = false; + plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB; + plat->tx_queues_cfg[i].weight = 25; + } + + plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP; + for (i = 0; i < plat->rx_queues_to_use; i++) { + plat->rx_queues_cfg[i].use_prio = false; + plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB; + plat->rx_queues_cfg[i].pkt_route = 0x0; + plat->rx_queues_cfg[i].chan = i; + } + + plat->bus_id = 1; + plat->phy_addr = -1; + plat->interface = PHY_INTERFACE_MODE_GMII; + + plat->dma_cfg->pbl = 32; + plat->dma_cfg->pblx8 = true; + + /* Axi Configuration */ + plat->axi = devm_kzalloc(&pdev->dev, sizeof(*plat->axi), GFP_KERNEL); + if (!plat->axi) + return -ENOMEM; + + plat->axi->axi_wr_osr_lmt = 31; + plat->axi->axi_rd_osr_lmt = 31; + + plat->axi->axi_fb = false; + plat->axi->axi_blen[0] = 4; + plat->axi->axi_blen[1] = 8; + plat->axi->axi_blen[2] = 16; + plat->axi->axi_blen[3] = 32; + + return 0; +} + +static const struct stmmac_pci_info snps_gmac5_pci_info = { + .setup = snps_gmac5_default_data, +}; + /** * stmmac_pci_probe * @@ -292,10 +523,15 @@ static int stmmac_pci_probe(struct pci_dev *pdev, */ static void stmmac_pci_remove(struct pci_dev *pdev) { + struct net_device *ndev = dev_get_drvdata(&pdev->dev); + struct stmmac_priv *priv = netdev_priv(ndev); int i; stmmac_dvr_remove(&pdev->dev); + if (priv->plat->stmmac_clk) + clk_unregister_fixed_rate(priv->plat->stmmac_clk); + for (i = 0; i <= PCI_STD_RESOURCE_END; i++) { if (pci_resource_len(pdev, i) == 0) continue; @@ -348,6 +584,10 @@ static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume); #define STMMAC_QUARK_ID 0x0937 #define STMMAC_DEVICE_ID 0x1108 +#define STMMAC_EHL_RGMII1G_ID 0x4b30 +#define STMMAC_EHL_SGMII1G_ID 0x4b31 +#define STMMAC_TGL_SGMII1G_ID 0xa0ac +#define STMMAC_GMAC5_ID 0x7102 #define STMMAC_DEVICE(vendor_id, dev_id, info) { \ PCI_VDEVICE(vendor_id, dev_id), \ 
@@ -358,6 +598,10 @@ static const struct pci_device_id stmmac_id_table[] = { STMMAC_DEVICE(STMMAC, STMMAC_DEVICE_ID, stmmac_pci_info), STMMAC_DEVICE(STMICRO, PCI_DEVICE_ID_STMICRO_MAC, stmmac_pci_info), STMMAC_DEVICE(INTEL, STMMAC_QUARK_ID, quark_pci_info), + STMMAC_DEVICE(INTEL, STMMAC_EHL_RGMII1G_ID, ehl_rgmii1g_pci_info), + STMMAC_DEVICE(INTEL, STMMAC_EHL_SGMII1G_ID, ehl_sgmii1g_pci_info), + STMMAC_DEVICE(INTEL, STMMAC_TGL_SGMII1G_ID, tgl_sgmii1g_pci_info), + STMMAC_DEVICE(SYNOPSYS, STMMAC_GMAC5_ID, snps_gmac5_pci_info), {} }; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 154daf4d1072..170c3a052b14 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -23,6 +23,7 @@ /** * dwmac1000_validate_mcast_bins - validates the number of Multicast filter bins + * @dev: struct device of the platform device * @mcast_bins: Multicast filtering bins * Description: * this function validates the number of Multicast filtering bins specified @@ -33,7 +34,7 @@ * invalid and will cause the filtering algorithm to use Multicast * promiscuous mode. */ -static int dwmac1000_validate_mcast_bins(int mcast_bins) +static int dwmac1000_validate_mcast_bins(struct device *dev, int mcast_bins) { int x = mcast_bins; @@ -44,8 +45,8 @@ static int dwmac1000_validate_mcast_bins(int mcast_bins) break; default: x = 0; - pr_info("Hash table entries set to unexpected value %d", - mcast_bins); + dev_info(dev, "Hash table entries set to unexpected value %d\n", + mcast_bins); break; } return x; @@ -53,6 +54,7 @@ static int dwmac1000_validate_mcast_bins(int mcast_bins) /** * dwmac1000_validate_ucast_entries - validate the Unicast address entries + * @dev: struct device of the platform device * @ucast_entries: number of Unicast address entries * Description: * This function validates the number of Unicast address entries supported @@ -62,7 +64,8 @@ static int dwmac1000_validate_mcast_bins(int mcast_bins) * selected, and defaults to 1 Unicast address if an unsupported * configuration is selected. */ -static int dwmac1000_validate_ucast_entries(int ucast_entries) +static int dwmac1000_validate_ucast_entries(struct device *dev, + int ucast_entries) { int x = ucast_entries; @@ -73,8 +76,8 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries) break; default: x = 1; - pr_info("Unicast table entries set to unexpected value %d\n", - ucast_entries); + dev_info(dev, "Unicast table entries set to unexpected value %d\n", + ucast_entries); break; } return x; @@ -342,14 +345,46 @@ static int stmmac_dt_phy(struct plat_stmmacenet_data *plat, mdio = true; } - if (mdio) + if (mdio) { plat->mdio_bus_data = devm_kzalloc(dev, sizeof(struct stmmac_mdio_bus_data), GFP_KERNEL); + if (!plat->mdio_bus_data) + return -ENOMEM; + + plat->mdio_bus_data->needs_reset = true; + } + return 0; } /** + * stmmac_of_get_mac_mode - retrieves the interface of the MAC + * @np: device-tree node + * Description: + * Similar to `of_get_phy_mode()`, this function will retrieve (from + * the device-tree) the interface mode on the MAC side. This assumes + * that there is a mode converter in-between the MAC & PHY + * (e.g. GMII-to-RGMII). 
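The helper documented in the comment above (its body follows) works like of_get_phy_mode(), but reads a separate "mac-mode" device-tree property so that a passive mode converter sitting between MAC and PHY can be described: fetch the string, then compare it case-insensitively against the known interface-mode names. A minimal user-space sketch of that lookup logic — the mode table here is an illustrative subset, not the kernel's phy_modes() table:

    #include <errno.h>
    #include <stdio.h>
    #include <strings.h>

    /* Illustrative subset of interface-mode names; the kernel instead
     * iterates phy_modes(i) for i < PHY_INTERFACE_MODE_MAX. */
    static const char *const mode_names[] = {
            "mii", "gmii", "sgmii", "rgmii", "rgmii-id",
    };

    /* Return the matching mode index, or -ENODEV as the driver does. */
    static int lookup_mac_mode(const char *pm)
    {
            size_t i;

            for (i = 0; i < sizeof(mode_names) / sizeof(mode_names[0]); i++) {
                    if (!strcasecmp(pm, mode_names[i]))
                            return (int)i;
            }
            return -ENODEV;
    }

    int main(void)
    {
            printf("SGMII -> %d\n", lookup_mac_mode("SGMII")); /* 2 */
            printf("xyzzy -> %d\n", lookup_mac_mode("xyzzy")); /* -ENODEV */
            return 0;
    }
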
+ */ +static int stmmac_of_get_mac_mode(struct device_node *np) +{ + const char *pm; + int err, i; + + err = of_property_read_string(np, "mac-mode", &pm); + if (err < 0) + return err; + + for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++) { + if (!strcasecmp(pm, phy_modes(i))) + return i; + } + + return -ENODEV; +} + +/** * stmmac_probe_config_dt - parse device-tree driver parameters * @pdev: platform_device structure * @mac: MAC address to use @@ -377,7 +412,13 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) *mac = NULL; } - plat->interface = of_get_phy_mode(np); + plat->phy_interface = of_get_phy_mode(np); + if (plat->phy_interface < 0) + return ERR_PTR(plat->phy_interface); + + plat->interface = stmmac_of_get_mac_mode(np); + if (plat->interface < 0) + plat->interface = plat->phy_interface; /* Some wrapper drivers still rely on phy_node. Let's save it while * they are not converted to phylink. */ @@ -457,9 +498,9 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) of_property_read_u32(np, "snps,perfect-filter-entries", &plat->unicast_filter_entries); plat->unicast_filter_entries = dwmac1000_validate_ucast_entries( - plat->unicast_filter_entries); + &pdev->dev, plat->unicast_filter_entries); plat->multicast_filter_bins = dwmac1000_validate_mcast_bins( - plat->multicast_filter_bins); + &pdev->dev, plat->multicast_filter_bins); plat->has_gmac = 1; plat->pmt = 1; } @@ -508,7 +549,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode"); if (plat->force_thresh_dma_mode) { plat->force_sf_dma_mode = 0; - pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set."); + dev_warn(&pdev->dev, + "force_sf_dma_mode is ignored if force_thresh_dma_mode is set.\n"); } of_property_read_u32(np, "snps,ps-speed", &plat->mac_port_sel_speed); @@ -522,13 +564,15 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) } /* clock setup */ - plat->stmmac_clk = devm_clk_get(&pdev->dev, - STMMAC_RESOURCE_NAME); - if (IS_ERR(plat->stmmac_clk)) { - dev_warn(&pdev->dev, "Cannot get CSR clock\n"); - plat->stmmac_clk = NULL; + if (!of_device_is_compatible(np, "snps,dwc-qos-ethernet-4.10")) { + plat->stmmac_clk = devm_clk_get(&pdev->dev, + STMMAC_RESOURCE_NAME); + if (IS_ERR(plat->stmmac_clk)) { + dev_warn(&pdev->dev, "Cannot get CSR clock\n"); + plat->stmmac_clk = NULL; + } + clk_prepare_enable(plat->stmmac_clk); } - clk_prepare_enable(plat->stmmac_clk); plat->pclk = devm_clk_get(&pdev->dev, "pclk"); if (IS_ERR(plat->pclk)) { @@ -609,13 +653,8 @@ int stmmac_get_platform_resources(struct platform_device *pdev, * probe if needed before we went too far with resource allocation. */ stmmac_res->irq = platform_get_irq_byname(pdev, "macirq"); - if (stmmac_res->irq < 0) { - if (stmmac_res->irq != -EPROBE_DEFER) { - dev_err(&pdev->dev, - "MAC IRQ configuration information not found\n"); - } + if (stmmac_res->irq < 0) return stmmac_res->irq; - } /* On some platforms e.g. 
SPEAr the wake up irq differs from the mac irq * The external wake up irq can be passed through the platform code diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index c48224973a37..173493db038c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -194,6 +194,9 @@ void stmmac_ptp_register(struct stmmac_priv *priv) priv->pps[i].available = true; } + if (priv->plat->ptp_max_adj) + stmmac_ptp_clock_ops.max_adj = priv->plat->ptp_max_adj; + stmmac_ptp_clock_ops.n_per_out = priv->dma_cap.pps_out_num; spin_lock_init(&priv->ptp_lock); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c index a97b1ea76438..c56e89e1ae56 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c @@ -11,8 +11,10 @@ #include <linux/ip.h> #include <linux/phy.h> #include <linux/udp.h> +#include <net/pkt_cls.h> #include <net/tcp.h> #include <net/udp.h> +#include <net/tc_act/tc_gact.h> #include "stmmac.h" struct stmmachdr { @@ -41,8 +43,11 @@ struct stmmac_packet_attrs { int dont_wait; int timeout; int size; + int max_size; int remove_sa; u8 id; + int sarc; + u16 queue_mapping; }; static u8 stmmac_test_next_id; @@ -70,12 +75,14 @@ static struct sk_buff *stmmac_test_get_udp_skb(struct stmmac_priv *priv, else size += sizeof(struct udphdr); - skb = netdev_alloc_skb(priv->dev, size); + if (attr->max_size && (attr->max_size > size)) + size = attr->max_size; + + skb = netdev_alloc_skb_ip_align(priv->dev, size); if (!skb) return NULL; prefetchw(skb->data); - skb_reserve(skb, NET_IP_ALIGN); if (attr->vlan > 1) ehdr = skb_push(skb, ETH_HLEN + 8); @@ -144,6 +151,9 @@ static struct sk_buff *stmmac_test_get_udp_skb(struct stmmac_priv *priv, uhdr->source = htons(attr->sport); uhdr->dest = htons(attr->dport); uhdr->len = htons(sizeof(*shdr) + sizeof(*uhdr) + attr->size); + if (attr->max_size) + uhdr->len = htons(attr->max_size - + (sizeof(*ihdr) + sizeof(*ehdr))); uhdr->check = 0; } @@ -159,9 +169,13 @@ static struct sk_buff *stmmac_test_get_udp_skb(struct stmmac_priv *priv, iplen += sizeof(*thdr); else iplen += sizeof(*uhdr); + + if (attr->max_size) + iplen = attr->max_size - sizeof(*ehdr); + ihdr->tot_len = htons(iplen); ihdr->frag_off = 0; - ihdr->saddr = 0; + ihdr->saddr = htonl(attr->ip_src); ihdr->daddr = htonl(attr->ip_dst); ihdr->tos = 0; ihdr->id = 0; @@ -175,6 +189,8 @@ static struct sk_buff *stmmac_test_get_udp_skb(struct stmmac_priv *priv, if (attr->size) skb_put(skb, attr->size); + if (attr->max_size && (attr->max_size > skb->len)) + skb_put(skb, attr->max_size - skb->len); skb->csum = 0; skb->ip_summed = CHECKSUM_PARTIAL; @@ -193,6 +209,24 @@ static struct sk_buff *stmmac_test_get_udp_skb(struct stmmac_priv *priv, return skb; } +static struct sk_buff *stmmac_test_get_arp_skb(struct stmmac_priv *priv, + struct stmmac_packet_attrs *attr) +{ + __be32 ip_src = htonl(attr->ip_src); + __be32 ip_dst = htonl(attr->ip_dst); + struct sk_buff *skb = NULL; + + skb = arp_create(ARPOP_REQUEST, ETH_P_ARP, ip_dst, priv->dev, ip_src, + NULL, attr->src, attr->dst); + if (!skb) + return NULL; + + skb->pkt_type = PACKET_HOST; + skb->dev = priv->dev; + + return skb; +} + struct stmmac_test_priv { struct stmmac_packet_attrs *packet; struct packet_type pt; @@ -228,8 +262,11 @@ static int stmmac_test_loopback_validate(struct sk_buff *skb, if (!ether_addr_equal(ehdr->h_dest, 
tpriv->packet->dst)) goto out; } - if (tpriv->packet->src) { - if (!ether_addr_equal(ehdr->h_source, orig_ndev->dev_addr)) + if (tpriv->packet->sarc) { + if (!ether_addr_equal(ehdr->h_source, ehdr->h_dest)) + goto out; + } else if (tpriv->packet->src) { + if (!ether_addr_equal(ehdr->h_source, tpriv->packet->src)) goto out; } @@ -290,7 +327,9 @@ static int __stmmac_test_loopback(struct stmmac_priv *priv, tpriv->pt.dev = priv->dev; tpriv->pt.af_packet_priv = tpriv; tpriv->packet = attr; - dev_add_pack(&tpriv->pt); + + if (!attr->dont_wait) + dev_add_pack(&tpriv->pt); skb = stmmac_test_get_udp_skb(priv, attr); if (!skb) { @@ -298,7 +337,7 @@ static int __stmmac_test_loopback(struct stmmac_priv *priv, goto cleanup; } - skb_set_queue_mapping(skb, 0); + skb_set_queue_mapping(skb, attr->queue_mapping); ret = dev_queue_xmit(skb); if (ret) goto cleanup; @@ -310,10 +349,11 @@ static int __stmmac_test_loopback(struct stmmac_priv *priv, attr->timeout = STMMAC_LB_TIMEOUT; wait_for_completion_timeout(&tpriv->comp, attr->timeout); - ret = !tpriv->ok; + ret = tpriv->ok ? 0 : -ETIMEDOUT; cleanup: - dev_remove_pack(&tpriv->pt); + if (!attr->dont_wait) + dev_remove_pack(&tpriv->pt); kfree(tpriv); return ret; } @@ -471,7 +511,7 @@ static int stmmac_test_hfilt(struct stmmac_priv *priv) /* Shall NOT receive packet */ ret = __stmmac_test_loopback(priv, &attr); - ret = !ret; + ret = ret ? 0 : -EINVAL; cleanup: dev_mc_del(priv->dev, gd_addr); @@ -503,7 +543,7 @@ static int stmmac_test_pfilt(struct stmmac_priv *priv) /* Shall NOT receive packet */ ret = __stmmac_test_loopback(priv, &attr); - ret = !ret; + ret = ret ? 0 : -EINVAL; cleanup: dev_uc_del(priv->dev, gd_addr); @@ -553,7 +593,7 @@ static int stmmac_test_mcfilt(struct stmmac_priv *priv) /* Shall NOT receive packet */ ret = __stmmac_test_loopback(priv, &attr); - ret = !ret; + ret = ret ? 0 : -EINVAL; cleanup: dev_uc_del(priv->dev, uc_addr); @@ -591,7 +631,7 @@ static int stmmac_test_ucfilt(struct stmmac_priv *priv) /* Shall NOT receive packet */ ret = __stmmac_test_loopback(priv, &attr); - ret = !ret; + ret = ret ? 0 : -EINVAL; cleanup: dev_mc_del(priv->dev, mc_addr); @@ -682,15 +722,21 @@ static int stmmac_test_flowctrl(struct stmmac_priv *priv) for (i = 0; i < rx_cnt; i++) { struct stmmac_channel *ch = &priv->channel[i]; + u32 tail; + tail = priv->rx_queue[i].dma_rx_phy + + (DMA_RX_SIZE * sizeof(struct dma_desc)); + + stmmac_set_rx_tail_ptr(priv, priv->ioaddr, tail, i); stmmac_start_rx(priv, priv->ioaddr, i); + local_bh_disable(); napi_reschedule(&ch->rx_napi); local_bh_enable(); } wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT); - ret = !tpriv->ok; + ret = tpriv->ok ? 0 : -ETIMEDOUT; cleanup: dev_mc_del(priv->dev, paddr); @@ -700,6 +746,901 @@ cleanup: return ret; } +static int stmmac_test_rss(struct stmmac_priv *priv) +{ + struct stmmac_packet_attrs attr = { }; + + if (!priv->dma_cap.rssen || !priv->rss.enable) + return -EOPNOTSUPP; + + attr.dst = priv->dev->dev_addr; + attr.exp_hash = true; + attr.sport = 0x321; + attr.dport = 0x123; + + return __stmmac_test_loopback(priv, &attr); +} + +static int stmmac_test_vlan_validate(struct sk_buff *skb, + struct net_device *ndev, + struct packet_type *pt, + struct net_device *orig_ndev) +{ + struct stmmac_test_priv *tpriv = pt->af_packet_priv; + struct stmmachdr *shdr; + struct ethhdr *ehdr; + struct udphdr *uhdr; + struct iphdr *ihdr; + u16 proto; + + proto = tpriv->double_vlan ? 
ETH_P_8021AD : ETH_P_8021Q; + + skb = skb_unshare(skb, GFP_ATOMIC); + if (!skb) + goto out; + + if (skb_linearize(skb)) + goto out; + if (skb_headlen(skb) < (STMMAC_TEST_PKT_SIZE - ETH_HLEN)) + goto out; + if (tpriv->vlan_id) { + if (skb->vlan_proto != htons(proto)) + goto out; + if (skb->vlan_tci != tpriv->vlan_id) + goto out; + } + + ehdr = (struct ethhdr *)skb_mac_header(skb); + if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst)) + goto out; + + ihdr = ip_hdr(skb); + if (tpriv->double_vlan) + ihdr = (struct iphdr *)(skb_network_header(skb) + 4); + if (ihdr->protocol != IPPROTO_UDP) + goto out; + + uhdr = (struct udphdr *)((u8 *)ihdr + 4 * ihdr->ihl); + if (uhdr->dest != htons(tpriv->packet->dport)) + goto out; + + shdr = (struct stmmachdr *)((u8 *)uhdr + sizeof(*uhdr)); + if (shdr->magic != cpu_to_be64(STMMAC_TEST_PKT_MAGIC)) + goto out; + + tpriv->ok = true; + complete(&tpriv->comp); + +out: + kfree_skb(skb); + return 0; +} + +static int stmmac_test_vlanfilt(struct stmmac_priv *priv) +{ + struct stmmac_packet_attrs attr = { }; + struct stmmac_test_priv *tpriv; + struct sk_buff *skb = NULL; + int ret = 0, i; + + if (!priv->dma_cap.vlhash) + return -EOPNOTSUPP; + + tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL); + if (!tpriv) + return -ENOMEM; + + tpriv->ok = false; + init_completion(&tpriv->comp); + + tpriv->pt.type = htons(ETH_P_IP); + tpriv->pt.func = stmmac_test_vlan_validate; + tpriv->pt.dev = priv->dev; + tpriv->pt.af_packet_priv = tpriv; + tpriv->packet = &attr; + + /* + * As we use HASH filtering, false positives may appear. This is a + * specially chosen ID so that adjacent IDs (+4) have different + * HASH values. + */ + tpriv->vlan_id = 0x123; + dev_add_pack(&tpriv->pt); + + ret = vlan_vid_add(priv->dev, htons(ETH_P_8021Q), tpriv->vlan_id); + if (ret) + goto cleanup; + + for (i = 0; i < 4; i++) { + attr.vlan = 1; + attr.vlan_id_out = tpriv->vlan_id + i; + attr.dst = priv->dev->dev_addr; + attr.sport = 9; + attr.dport = 9; + + skb = stmmac_test_get_udp_skb(priv, &attr); + if (!skb) { + ret = -ENOMEM; + goto vlan_del; + } + + skb_set_queue_mapping(skb, 0); + ret = dev_queue_xmit(skb); + if (ret) + goto vlan_del; + + wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT); + ret = tpriv->ok ? 0 : -ETIMEDOUT; + if (ret && !i) { + goto vlan_del; + } else if (!ret && i) { + ret = -EINVAL; + goto vlan_del; + } else { + ret = 0; + } + + tpriv->ok = false; + } + +vlan_del: + vlan_vid_del(priv->dev, htons(ETH_P_8021Q), tpriv->vlan_id); +cleanup: + dev_remove_pack(&tpriv->pt); + kfree(tpriv); + return ret; +} + +static int stmmac_test_dvlanfilt(struct stmmac_priv *priv) +{ + struct stmmac_packet_attrs attr = { }; + struct stmmac_test_priv *tpriv; + struct sk_buff *skb = NULL; + int ret = 0, i; + + if (!priv->dma_cap.vlhash) + return -EOPNOTSUPP; + + tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL); + if (!tpriv) + return -ENOMEM; + + tpriv->ok = false; + tpriv->double_vlan = true; + init_completion(&tpriv->comp); + + tpriv->pt.type = htons(ETH_P_8021Q); + tpriv->pt.func = stmmac_test_vlan_validate; + tpriv->pt.dev = priv->dev; + tpriv->pt.af_packet_priv = tpriv; + tpriv->packet = &attr; + + /* + * As we use HASH filtering, false positives may appear. This is a + * specially chosen ID so that adjacent IDs (+4) have different + * HASH values. 
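The "specially chosen ID" in the comment above falls out of the hash scheme added earlier in this diff: stmmac_vid_crc32_le() runs a bit-serial CRC-32 (polynomial 0xedb88320) over the low 12 bits of the little-endian VID, and stmmac_vlan_update() maps bitrev32(~crc) >> 28 onto one of 16 hash bins. A self-contained sketch of that bin computation, assuming a little-endian host so the uint16_t layout matches cpu_to_le16():

    #include <stdint.h>
    #include <stdio.h>

    /* Bit-serial CRC-32 over the low 12 bits of the VID, mirroring
     * stmmac_vid_crc32_le(); 12 == get_bitmask_order(VLAN_VID_MASK). */
    static uint32_t vid_crc32_le(uint16_t vid_le)
    {
            const uint8_t *data = (const uint8_t *)&vid_le;
            uint32_t crc = ~0u;
            uint8_t byte = 0;
            int i;

            for (i = 0; i < 12; i++) {
                    if ((i % 8) == 0)
                            byte = data[i / 8];
                    if ((crc ^ byte) & 1)
                            crc = (crc >> 1) ^ 0xedb88320;
                    else
                            crc >>= 1;
                    byte >>= 1;
            }
            return crc;
    }

    static uint32_t bitrev32(uint32_t x)
    {
            uint32_t r = 0;
            int i;

            for (i = 0; i < 32; i++)
                    r |= ((x >> i) & 1u) << (31 - i);
            return r;
    }

    int main(void)
    {
            uint16_t vid;

            /* Per the comment above, VID 0x123 is chosen so that the
             * next IDs land in other bins, making hash false
             * positives visible to the selftest. */
            for (vid = 0x123; vid < 0x123 + 4; vid++)
                    printf("vid 0x%03x -> bin %u\n", vid,
                           bitrev32(~vid_crc32_le(vid)) >> 28);
            return 0;
    }
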
+ */ + tpriv->vlan_id = 0x123; + dev_add_pack(&tpriv->pt); + + ret = vlan_vid_add(priv->dev, htons(ETH_P_8021AD), tpriv->vlan_id); + if (ret) + goto cleanup; + + for (i = 0; i < 4; i++) { + attr.vlan = 2; + attr.vlan_id_out = tpriv->vlan_id + i; + attr.dst = priv->dev->dev_addr; + attr.sport = 9; + attr.dport = 9; + + skb = stmmac_test_get_udp_skb(priv, &attr); + if (!skb) { + ret = -ENOMEM; + goto vlan_del; + } + + skb_set_queue_mapping(skb, 0); + ret = dev_queue_xmit(skb); + if (ret) + goto vlan_del; + + wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT); + ret = tpriv->ok ? 0 : -ETIMEDOUT; + if (ret && !i) { + goto vlan_del; + } else if (!ret && i) { + ret = -EINVAL; + goto vlan_del; + } else { + ret = 0; + } + + tpriv->ok = false; + } + +vlan_del: + vlan_vid_del(priv->dev, htons(ETH_P_8021AD), tpriv->vlan_id); +cleanup: + dev_remove_pack(&tpriv->pt); + kfree(tpriv); + return ret; +} + +#ifdef CONFIG_NET_CLS_ACT +static int stmmac_test_rxp(struct stmmac_priv *priv) +{ + unsigned char addr[ETH_ALEN] = {0xde, 0xad, 0xbe, 0xef, 0x00, 0x00}; + struct tc_cls_u32_offload cls_u32 = { }; + struct stmmac_packet_attrs attr = { }; + struct tc_action **actions, *act; + struct tc_u32_sel *sel; + struct tcf_exts *exts; + int ret, i, nk = 1; + + if (!tc_can_offload(priv->dev)) + return -EOPNOTSUPP; + if (!priv->dma_cap.frpsel) + return -EOPNOTSUPP; + + sel = kzalloc(sizeof(*sel) + nk * sizeof(struct tc_u32_key), GFP_KERNEL); + if (!sel) + return -ENOMEM; + + exts = kzalloc(sizeof(*exts), GFP_KERNEL); + if (!exts) { + ret = -ENOMEM; + goto cleanup_sel; + } + + actions = kzalloc(nk * sizeof(*actions), GFP_KERNEL); + if (!actions) { + ret = -ENOMEM; + goto cleanup_exts; + } + + act = kzalloc(nk * sizeof(*act), GFP_KERNEL); + if (!act) { + ret = -ENOMEM; + goto cleanup_actions; + } + + cls_u32.command = TC_CLSU32_NEW_KNODE; + cls_u32.common.chain_index = 0; + cls_u32.common.protocol = htons(ETH_P_ALL); + cls_u32.knode.exts = exts; + cls_u32.knode.sel = sel; + cls_u32.knode.handle = 0x123; + + exts->nr_actions = nk; + exts->actions = actions; + for (i = 0; i < nk; i++) { + struct tcf_gact *gact = to_gact(&act[i]); + + actions[i] = &act[i]; + gact->tcf_action = TC_ACT_SHOT; + } + + sel->nkeys = nk; + sel->offshift = 0; + sel->keys[0].off = 6; + sel->keys[0].val = htonl(0xdeadbeef); + sel->keys[0].mask = ~0x0; + + ret = stmmac_tc_setup_cls_u32(priv, priv, &cls_u32); + if (ret) + goto cleanup_act; + + attr.dst = priv->dev->dev_addr; + attr.src = addr; + + ret = __stmmac_test_loopback(priv, &attr); + ret = ret ? 
0 : -EINVAL; /* Shall NOT receive packet */ + + cls_u32.command = TC_CLSU32_DELETE_KNODE; + stmmac_tc_setup_cls_u32(priv, priv, &cls_u32); + +cleanup_act: + kfree(act); +cleanup_actions: + kfree(actions); +cleanup_exts: + kfree(exts); +cleanup_sel: + kfree(sel); + return ret; +} +#else +static int stmmac_test_rxp(struct stmmac_priv *priv) +{ + return -EOPNOTSUPP; +} +#endif + +static int stmmac_test_desc_sai(struct stmmac_priv *priv) +{ + unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; + struct stmmac_packet_attrs attr = { }; + int ret; + + if (!priv->dma_cap.vlins) + return -EOPNOTSUPP; + + attr.remove_sa = true; + attr.sarc = true; + attr.src = src; + attr.dst = priv->dev->dev_addr; + + priv->sarc_type = 0x1; + + ret = __stmmac_test_loopback(priv, &attr); + + priv->sarc_type = 0x0; + return ret; +} + +static int stmmac_test_desc_sar(struct stmmac_priv *priv) +{ + unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; + struct stmmac_packet_attrs attr = { }; + int ret; + + if (!priv->dma_cap.vlins) + return -EOPNOTSUPP; + + attr.sarc = true; + attr.src = src; + attr.dst = priv->dev->dev_addr; + + priv->sarc_type = 0x2; + + ret = __stmmac_test_loopback(priv, &attr); + + priv->sarc_type = 0x0; + return ret; +} + +static int stmmac_test_reg_sai(struct stmmac_priv *priv) +{ + unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; + struct stmmac_packet_attrs attr = { }; + int ret; + + if (!priv->dma_cap.vlins) + return -EOPNOTSUPP; + + attr.remove_sa = true; + attr.sarc = true; + attr.src = src; + attr.dst = priv->dev->dev_addr; + + if (stmmac_sarc_configure(priv, priv->ioaddr, 0x2)) + return -EOPNOTSUPP; + + ret = __stmmac_test_loopback(priv, &attr); + + stmmac_sarc_configure(priv, priv->ioaddr, 0x0); + return ret; +} + +static int stmmac_test_reg_sar(struct stmmac_priv *priv) +{ + unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; + struct stmmac_packet_attrs attr = { }; + int ret; + + if (!priv->dma_cap.vlins) + return -EOPNOTSUPP; + + attr.sarc = true; + attr.src = src; + attr.dst = priv->dev->dev_addr; + + if (stmmac_sarc_configure(priv, priv->ioaddr, 0x3)) + return -EOPNOTSUPP; + + ret = __stmmac_test_loopback(priv, &attr); + + stmmac_sarc_configure(priv, priv->ioaddr, 0x0); + return ret; +} + +static int stmmac_test_vlanoff_common(struct stmmac_priv *priv, bool svlan) +{ + struct stmmac_packet_attrs attr = { }; + struct stmmac_test_priv *tpriv; + struct sk_buff *skb = NULL; + int ret = 0; + u16 proto; + + if (!priv->dma_cap.vlins) + return -EOPNOTSUPP; + + tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL); + if (!tpriv) + return -ENOMEM; + + proto = svlan ? ETH_P_8021AD : ETH_P_8021Q; + + tpriv->ok = false; + tpriv->double_vlan = svlan; + init_completion(&tpriv->comp); + + tpriv->pt.type = svlan ? htons(ETH_P_8021Q) : htons(ETH_P_IP); + tpriv->pt.func = stmmac_test_vlan_validate; + tpriv->pt.dev = priv->dev; + tpriv->pt.af_packet_priv = tpriv; + tpriv->packet = &attr; + tpriv->vlan_id = 0x123; + dev_add_pack(&tpriv->pt); + + ret = vlan_vid_add(priv->dev, htons(proto), tpriv->vlan_id); + if (ret) + goto cleanup; + + attr.dst = priv->dev->dev_addr; + + skb = stmmac_test_get_udp_skb(priv, &attr); + if (!skb) { + ret = -ENOMEM; + goto vlan_del; + } + + __vlan_hwaccel_put_tag(skb, htons(proto), tpriv->vlan_id); + skb->protocol = htons(proto); + + skb_set_queue_mapping(skb, 0); + ret = dev_queue_xmit(skb); + if (ret) + goto vlan_del; + + wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT); + ret = tpriv->ok ? 
0 : -ETIMEDOUT; + +vlan_del: + vlan_vid_del(priv->dev, htons(proto), tpriv->vlan_id); +cleanup: + dev_remove_pack(&tpriv->pt); + kfree(tpriv); + return ret; +} + +static int stmmac_test_vlanoff(struct stmmac_priv *priv) +{ + return stmmac_test_vlanoff_common(priv, false); +} + +static int stmmac_test_svlanoff(struct stmmac_priv *priv) +{ + if (!priv->dma_cap.dvlan) + return -EOPNOTSUPP; + return stmmac_test_vlanoff_common(priv, true); +} + +#ifdef CONFIG_NET_CLS_ACT +static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src, + u32 dst_mask, u32 src_mask) +{ + struct flow_dissector_key_ipv4_addrs key, mask; + unsigned long dummy_cookie = 0xdeadbeef; + struct stmmac_packet_attrs attr = { }; + struct flow_dissector *dissector; + struct flow_cls_offload *cls; + struct flow_rule *rule; + int ret; + + if (!tc_can_offload(priv->dev)) + return -EOPNOTSUPP; + if (!priv->dma_cap.l3l4fnum) + return -EOPNOTSUPP; + if (priv->rss.enable) { + struct stmmac_rss rss = { .enable = false, }; + + stmmac_rss_configure(priv, priv->hw, &rss, + priv->plat->rx_queues_to_use); + } + + dissector = kzalloc(sizeof(*dissector), GFP_KERNEL); + if (!dissector) { + ret = -ENOMEM; + goto cleanup_rss; + } + + dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_IPV4_ADDRS); + dissector->offset[FLOW_DISSECTOR_KEY_IPV4_ADDRS] = 0; + + cls = kzalloc(sizeof(*cls), GFP_KERNEL); + if (!cls) { + ret = -ENOMEM; + goto cleanup_dissector; + } + + cls->common.chain_index = 0; + cls->command = FLOW_CLS_REPLACE; + cls->cookie = dummy_cookie; + + rule = kzalloc(struct_size(rule, action.entries, 1), GFP_KERNEL); + if (!rule) { + ret = -ENOMEM; + goto cleanup_cls; + } + + rule->match.dissector = dissector; + rule->match.key = (void *)&key; + rule->match.mask = (void *)&mask; + + key.src = htonl(src); + key.dst = htonl(dst); + mask.src = src_mask; + mask.dst = dst_mask; + + cls->rule = rule; + + rule->action.entries[0].id = FLOW_ACTION_DROP; + rule->action.num_entries = 1; + + attr.dst = priv->dev->dev_addr; + attr.ip_dst = dst; + attr.ip_src = src; + + /* Shall receive packet */ + ret = __stmmac_test_loopback(priv, &attr); + if (ret) + goto cleanup_rule; + + ret = stmmac_tc_setup_cls(priv, priv, cls); + if (ret) + goto cleanup_rule; + + /* Shall NOT receive packet */ + ret = __stmmac_test_loopback(priv, &attr); + ret = ret ? 
0 : -EINVAL; + + cls->command = FLOW_CLS_DESTROY; + stmmac_tc_setup_cls(priv, priv, cls); +cleanup_rule: + kfree(rule); +cleanup_cls: + kfree(cls); +cleanup_dissector: + kfree(dissector); +cleanup_rss: + if (priv->rss.enable) { + stmmac_rss_configure(priv, priv->hw, &priv->rss, + priv->plat->rx_queues_to_use); + } + + return ret; +} +#else +static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src, + u32 dst_mask, u32 src_mask) +{ + return -EOPNOTSUPP; +} +#endif + +static int stmmac_test_l3filt_da(struct stmmac_priv *priv) +{ + u32 addr = 0x10203040; + + return __stmmac_test_l3filt(priv, addr, 0, ~0, 0); +} + +static int stmmac_test_l3filt_sa(struct stmmac_priv *priv) +{ + u32 addr = 0x10203040; + + return __stmmac_test_l3filt(priv, 0, addr, 0, ~0); +} + +#ifdef CONFIG_NET_CLS_ACT +static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src, + u32 dst_mask, u32 src_mask, bool udp) +{ + struct { + struct flow_dissector_key_basic bkey; + struct flow_dissector_key_ports key; + } __aligned(BITS_PER_LONG / 8) keys; + struct { + struct flow_dissector_key_basic bmask; + struct flow_dissector_key_ports mask; + } __aligned(BITS_PER_LONG / 8) masks; + unsigned long dummy_cookie = 0xdeadbeef; + struct stmmac_packet_attrs attr = { }; + struct flow_dissector *dissector; + struct flow_cls_offload *cls; + struct flow_rule *rule; + int ret; + + if (!tc_can_offload(priv->dev)) + return -EOPNOTSUPP; + if (!priv->dma_cap.l3l4fnum) + return -EOPNOTSUPP; + if (priv->rss.enable) { + struct stmmac_rss rss = { .enable = false, }; + + stmmac_rss_configure(priv, priv->hw, &rss, + priv->plat->rx_queues_to_use); + } + + dissector = kzalloc(sizeof(*dissector), GFP_KERNEL); + if (!dissector) { + ret = -ENOMEM; + goto cleanup_rss; + } + + dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_BASIC); + dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_PORTS); + dissector->offset[FLOW_DISSECTOR_KEY_BASIC] = 0; + dissector->offset[FLOW_DISSECTOR_KEY_PORTS] = offsetof(typeof(keys), key); + + cls = kzalloc(sizeof(*cls), GFP_KERNEL); + if (!cls) { + ret = -ENOMEM; + goto cleanup_dissector; + } + + cls->common.chain_index = 0; + cls->command = FLOW_CLS_REPLACE; + cls->cookie = dummy_cookie; + + rule = kzalloc(struct_size(rule, action.entries, 1), GFP_KERNEL); + if (!rule) { + ret = -ENOMEM; + goto cleanup_cls; + } + + rule->match.dissector = dissector; + rule->match.key = (void *)&keys; + rule->match.mask = (void *)&masks; + + keys.bkey.ip_proto = udp ? IPPROTO_UDP : IPPROTO_TCP; + keys.key.src = htons(src); + keys.key.dst = htons(dst); + masks.mask.src = src_mask; + masks.mask.dst = dst_mask; + + cls->rule = rule; + + rule->action.entries[0].id = FLOW_ACTION_DROP; + rule->action.num_entries = 1; + + attr.dst = priv->dev->dev_addr; + attr.tcp = !udp; + attr.sport = src; + attr.dport = dst; + attr.ip_dst = 0; + + /* Shall receive packet */ + ret = __stmmac_test_loopback(priv, &attr); + if (ret) + goto cleanup_rule; + + ret = stmmac_tc_setup_cls(priv, priv, cls); + if (ret) + goto cleanup_rule; + + /* Shall NOT receive packet */ + ret = __stmmac_test_loopback(priv, &attr); + ret = ret ? 
0 : -EINVAL; + + cls->command = FLOW_CLS_DESTROY; + stmmac_tc_setup_cls(priv, priv, cls); +cleanup_rule: + kfree(rule); +cleanup_cls: + kfree(cls); +cleanup_dissector: + kfree(dissector); +cleanup_rss: + if (priv->rss.enable) { + stmmac_rss_configure(priv, priv->hw, &priv->rss, + priv->plat->rx_queues_to_use); + } + + return ret; +} +#else +static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src, + u32 dst_mask, u32 src_mask, bool udp) +{ + return -EOPNOTSUPP; +} +#endif + +static int stmmac_test_l4filt_da_tcp(struct stmmac_priv *priv) +{ + u16 dummy_port = 0x123; + + return __stmmac_test_l4filt(priv, dummy_port, 0, ~0, 0, false); +} + +static int stmmac_test_l4filt_sa_tcp(struct stmmac_priv *priv) +{ + u16 dummy_port = 0x123; + + return __stmmac_test_l4filt(priv, 0, dummy_port, 0, ~0, false); +} + +static int stmmac_test_l4filt_da_udp(struct stmmac_priv *priv) +{ + u16 dummy_port = 0x123; + + return __stmmac_test_l4filt(priv, dummy_port, 0, ~0, 0, true); +} + +static int stmmac_test_l4filt_sa_udp(struct stmmac_priv *priv) +{ + u16 dummy_port = 0x123; + + return __stmmac_test_l4filt(priv, 0, dummy_port, 0, ~0, true); +} + +static int stmmac_test_arp_validate(struct sk_buff *skb, + struct net_device *ndev, + struct packet_type *pt, + struct net_device *orig_ndev) +{ + struct stmmac_test_priv *tpriv = pt->af_packet_priv; + struct ethhdr *ehdr; + struct arphdr *ahdr; + + ehdr = (struct ethhdr *)skb_mac_header(skb); + if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->src)) + goto out; + + ahdr = arp_hdr(skb); + if (ahdr->ar_op != htons(ARPOP_REPLY)) + goto out; + + tpriv->ok = true; + complete(&tpriv->comp); +out: + kfree_skb(skb); + return 0; +} + +static int stmmac_test_arpoffload(struct stmmac_priv *priv) +{ + unsigned char src[ETH_ALEN] = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06}; + unsigned char dst[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + struct stmmac_packet_attrs attr = { }; + struct stmmac_test_priv *tpriv; + struct sk_buff *skb = NULL; + u32 ip_addr = 0xdeadcafe; + u32 ip_src = 0xdeadbeef; + int ret; + + if (!priv->dma_cap.arpoffsel) + return -EOPNOTSUPP; + + tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL); + if (!tpriv) + return -ENOMEM; + + tpriv->ok = false; + init_completion(&tpriv->comp); + + tpriv->pt.type = htons(ETH_P_ARP); + tpriv->pt.func = stmmac_test_arp_validate; + tpriv->pt.dev = priv->dev; + tpriv->pt.af_packet_priv = tpriv; + tpriv->packet = &attr; + dev_add_pack(&tpriv->pt); + + attr.src = src; + attr.ip_src = ip_src; + attr.dst = dst; + attr.ip_dst = ip_addr; + + skb = stmmac_test_get_arp_skb(priv, &attr); + if (!skb) { + ret = -ENOMEM; + goto cleanup; + } + + ret = stmmac_set_arp_offload(priv, priv->hw, true, ip_addr); + if (ret) + goto cleanup; + + ret = dev_set_promiscuity(priv->dev, 1); + if (ret) + goto cleanup; + + skb_set_queue_mapping(skb, 0); + ret = dev_queue_xmit(skb); + if (ret) + goto cleanup_promisc; + + wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT); + ret = tpriv->ok ? 
0 : -ETIMEDOUT; + +cleanup_promisc: + dev_set_promiscuity(priv->dev, -1); +cleanup: + stmmac_set_arp_offload(priv, priv->hw, false, 0x0); + dev_remove_pack(&tpriv->pt); + kfree(tpriv); + return ret; +} + +static int __stmmac_test_jumbo(struct stmmac_priv *priv, u16 queue) +{ + struct stmmac_packet_attrs attr = { }; + int size = priv->dma_buf_sz; + + /* Only XGMAC has SW support for multiple RX descs in same packet */ + if (priv->plat->has_xgmac) + size = priv->dev->max_mtu; + + attr.dst = priv->dev->dev_addr; + attr.max_size = size - ETH_FCS_LEN; + attr.queue_mapping = queue; + + return __stmmac_test_loopback(priv, &attr); +} + +static int stmmac_test_jumbo(struct stmmac_priv *priv) +{ + return __stmmac_test_jumbo(priv, 0); +} + +static int stmmac_test_mjumbo(struct stmmac_priv *priv) +{ + u32 chan, tx_cnt = priv->plat->tx_queues_to_use; + int ret; + + if (tx_cnt <= 1) + return -EOPNOTSUPP; + + for (chan = 0; chan < tx_cnt; chan++) { + ret = __stmmac_test_jumbo(priv, chan); + if (ret) + return ret; + } + + return 0; +} + +static int stmmac_test_sph(struct stmmac_priv *priv) +{ + unsigned long cnt_end, cnt_start = priv->xstats.rx_split_hdr_pkt_n; + struct stmmac_packet_attrs attr = { }; + int ret; + + if (!priv->sph) + return -EOPNOTSUPP; + + /* Check for UDP first */ + attr.dst = priv->dev->dev_addr; + attr.tcp = false; + + ret = __stmmac_test_loopback(priv, &attr); + if (ret) + return ret; + + cnt_end = priv->xstats.rx_split_hdr_pkt_n; + if (cnt_end <= cnt_start) + return -EINVAL; + + /* Check for TCP now */ + cnt_start = cnt_end; + + attr.dst = priv->dev->dev_addr; + attr.tcp = true; + + ret = __stmmac_test_loopback(priv, &attr); + if (ret) + return ret; + + cnt_end = priv->xstats.rx_split_hdr_pkt_n; + if (cnt_end <= cnt_start) + return -EINVAL; + + return 0; +} + #define STMMAC_LOOPBACK_NONE 0 #define STMMAC_LOOPBACK_MAC 1 #define STMMAC_LOOPBACK_PHY 2 @@ -745,6 +1686,86 @@ static const struct stmmac_test { .name = "Flow Control ", .lb = STMMAC_LOOPBACK_PHY, .fn = stmmac_test_flowctrl, + }, { + .name = "RSS ", + .lb = STMMAC_LOOPBACK_PHY, + .fn = stmmac_test_rss, + }, { + .name = "VLAN Filtering ", + .lb = STMMAC_LOOPBACK_PHY, + .fn = stmmac_test_vlanfilt, + }, { + .name = "Double VLAN Filtering", + .lb = STMMAC_LOOPBACK_PHY, + .fn = stmmac_test_dvlanfilt, + }, { + .name = "Flexible RX Parser ", + .lb = STMMAC_LOOPBACK_PHY, + .fn = stmmac_test_rxp, + }, { + .name = "SA Insertion (desc) ", + .lb = STMMAC_LOOPBACK_PHY, + .fn = stmmac_test_desc_sai, + }, { + .name = "SA Replacement (desc)", + .lb = STMMAC_LOOPBACK_PHY, + .fn = stmmac_test_desc_sar, + }, { + .name = "SA Insertion (reg) ", + .lb = STMMAC_LOOPBACK_PHY, + .fn = stmmac_test_reg_sai, + }, { + .name = "SA Replacement (reg)", + .lb = STMMAC_LOOPBACK_PHY, + .fn = stmmac_test_reg_sar, + }, { + .name = "VLAN TX Insertion ", + .lb = STMMAC_LOOPBACK_PHY, + .fn = stmmac_test_vlanoff, + }, { + .name = "SVLAN TX Insertion ", + .lb = STMMAC_LOOPBACK_PHY, + .fn = stmmac_test_svlanoff, + }, { + .name = "L3 DA Filtering ", + .lb = STMMAC_LOOPBACK_PHY, + .fn = stmmac_test_l3filt_da, + }, { + .name = "L3 SA Filtering ", + .lb = STMMAC_LOOPBACK_PHY, + .fn = stmmac_test_l3filt_sa, + }, { + .name = "L4 DA TCP Filtering ", + .lb = STMMAC_LOOPBACK_PHY, + .fn = stmmac_test_l4filt_da_tcp, + }, { + .name = "L4 SA TCP Filtering ", + .lb = STMMAC_LOOPBACK_PHY, + .fn = stmmac_test_l4filt_sa_tcp, + }, { + .name = "L4 DA UDP Filtering ", + .lb = STMMAC_LOOPBACK_PHY, + .fn = stmmac_test_l4filt_da_udp, + }, { + .name = "L4 SA UDP Filtering ", + .lb = 
STMMAC_LOOPBACK_PHY, + .fn = stmmac_test_l4filt_sa_udp, + }, { + .name = "ARP Offload ", + .lb = STMMAC_LOOPBACK_PHY, + .fn = stmmac_test_arpoffload, + }, { + .name = "Jumbo Frame ", + .lb = STMMAC_LOOPBACK_PHY, + .fn = stmmac_test_jumbo, + }, { + .name = "Multichannel Jumbo ", + .lb = STMMAC_LOOPBACK_PHY, + .fn = stmmac_test_mjumbo, + }, { + .name = "Split Header ", + .lb = STMMAC_LOOPBACK_PHY, + .fn = stmmac_test_sph, }, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 6c305b6ecad0..e231098061b6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -242,9 +242,27 @@ static int tc_init(struct stmmac_priv *priv) { struct dma_features *dma_cap = &priv->dma_cap; unsigned int count; + int i; + + if (dma_cap->l3l4fnum) { + priv->flow_entries_max = dma_cap->l3l4fnum; + priv->flow_entries = devm_kcalloc(priv->device, + dma_cap->l3l4fnum, + sizeof(*priv->flow_entries), + GFP_KERNEL); + if (!priv->flow_entries) + return -ENOMEM; + for (i = 0; i < priv->flow_entries_max; i++) + priv->flow_entries[i].idx = i; + + dev_info(priv->device, "Enabled Flow TC (entries=%d)\n", + priv->flow_entries_max); + } + + /* Fail silently as we can still use remaining features, e.g. CBS */ if (!dma_cap->frpsel) - return -EINVAL; + return 0; switch (dma_cap->frpbs) { case 0x0: @@ -349,8 +367,235 @@ static int tc_setup_cbs(struct stmmac_priv *priv, return 0; } +static int tc_parse_flow_actions(struct stmmac_priv *priv, + struct flow_action *action, + struct stmmac_flow_entry *entry) +{ + struct flow_action_entry *act; + int i; + + if (!flow_action_has_entries(action)) + return -EINVAL; + + flow_action_for_each(i, act, action) { + switch (act->id) { + case FLOW_ACTION_DROP: + entry->action |= STMMAC_FLOW_ACTION_DROP; + return 0; + default: + break; + } + } + + /* Nothing to do, maybe inverse filter ? 
*/ + return 0; +} + +static int tc_add_basic_flow(struct stmmac_priv *priv, + struct flow_cls_offload *cls, + struct stmmac_flow_entry *entry) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct flow_dissector *dissector = rule->match.dissector; + struct flow_match_basic match; + + /* Nothing to do here */ + if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) + return -EINVAL; + + flow_rule_match_basic(rule, &match); + entry->ip_proto = match.key->ip_proto; + return 0; +} + +static int tc_add_ip4_flow(struct stmmac_priv *priv, + struct flow_cls_offload *cls, + struct stmmac_flow_entry *entry) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct flow_dissector *dissector = rule->match.dissector; + bool inv = entry->action & STMMAC_FLOW_ACTION_DROP; + struct flow_match_ipv4_addrs match; + u32 hw_match; + int ret; + + /* Nothing to do here */ + if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) + return -EINVAL; + + flow_rule_match_ipv4_addrs(rule, &match); + hw_match = ntohl(match.key->src) & ntohl(match.mask->src); + if (hw_match) { + ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, true, + false, true, inv, hw_match); + if (ret) + return ret; + } + + hw_match = ntohl(match.key->dst) & ntohl(match.mask->dst); + if (hw_match) { + ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, true, + false, false, inv, hw_match); + if (ret) + return ret; + } + + return 0; +} + +static int tc_add_ports_flow(struct stmmac_priv *priv, + struct flow_cls_offload *cls, + struct stmmac_flow_entry *entry) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct flow_dissector *dissector = rule->match.dissector; + bool inv = entry->action & STMMAC_FLOW_ACTION_DROP; + struct flow_match_ports match; + u32 hw_match; + bool is_udp; + int ret; + + /* Nothing to do here */ + if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS)) + return -EINVAL; + + switch (entry->ip_proto) { + case IPPROTO_TCP: + is_udp = false; + break; + case IPPROTO_UDP: + is_udp = true; + break; + default: + return -EINVAL; + } + + flow_rule_match_ports(rule, &match); + + hw_match = ntohs(match.key->src) & ntohs(match.mask->src); + if (hw_match) { + ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, true, + is_udp, true, inv, hw_match); + if (ret) + return ret; + } + + hw_match = ntohs(match.key->dst) & ntohs(match.mask->dst); + if (hw_match) { + ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, true, + is_udp, false, inv, hw_match); + if (ret) + return ret; + } + + entry->is_l4 = true; + return 0; +} + +static struct stmmac_flow_entry *tc_find_flow(struct stmmac_priv *priv, + struct flow_cls_offload *cls, + bool get_free) +{ + int i; + + for (i = 0; i < priv->flow_entries_max; i++) { + struct stmmac_flow_entry *entry = &priv->flow_entries[i]; + + if (entry->cookie == cls->cookie) + return entry; + if (get_free && (entry->in_use == false)) + return entry; + } + + return NULL; +} + +struct { + int (*fn)(struct stmmac_priv *priv, struct flow_cls_offload *cls, + struct stmmac_flow_entry *entry); +} tc_flow_parsers[] = { + { .fn = tc_add_basic_flow }, + { .fn = tc_add_ip4_flow }, + { .fn = tc_add_ports_flow }, +}; + +static int tc_add_flow(struct stmmac_priv *priv, + struct flow_cls_offload *cls) +{ + struct stmmac_flow_entry *entry = tc_find_flow(priv, cls, false); + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + int i, ret; + + if (!entry) { + entry = tc_find_flow(priv, cls, true); + if (!entry) + return -ENOENT; + } + 
+ ret = tc_parse_flow_actions(priv, &rule->action, entry); + if (ret) + return ret; + + for (i = 0; i < ARRAY_SIZE(tc_flow_parsers); i++) { + ret = tc_flow_parsers[i].fn(priv, cls, entry); + if (!ret) { + entry->in_use = true; + continue; + } + } + + if (!entry->in_use) + return -EINVAL; + + entry->cookie = cls->cookie; + return 0; +} + +static int tc_del_flow(struct stmmac_priv *priv, + struct flow_cls_offload *cls) +{ + struct stmmac_flow_entry *entry = tc_find_flow(priv, cls, false); + int ret; + + if (!entry || !entry->in_use) + return -ENOENT; + + if (entry->is_l4) { + ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, false, + false, false, false, 0); + } else { + ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, false, + false, false, false, 0); + } + + entry->in_use = false; + entry->cookie = 0; + entry->is_l4 = false; + return ret; +} + +static int tc_setup_cls(struct stmmac_priv *priv, + struct flow_cls_offload *cls) +{ + int ret = 0; + + switch (cls->command) { + case FLOW_CLS_REPLACE: + ret = tc_add_flow(priv, cls); + break; + case FLOW_CLS_DESTROY: + ret = tc_del_flow(priv, cls); + break; + default: + return -EOPNOTSUPP; + } + + return ret; +} + const struct stmmac_tc_ops dwmac510_tc_ops = { .init = tc_init, .setup_cls_u32 = tc_setup_cls_u32, .setup_cbs = tc_setup_cbs, + .setup_cls = tc_setup_cls, }; diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 6fc05c106afc..c91876f8c536 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -2034,7 +2034,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc, __skb_frag_set_page(frag, page->buffer); __skb_frag_ref(frag); - frag->page_offset = off; + skb_frag_off_set(frag, off); skb_frag_size_set(frag, hlen - swivel); /* any more data? 
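Stepping back to the stmmac_tc.c hunk just above: tc_add_flow() drives the tc_flow_parsers[] table as a tolerant parser chain. Each parser owns one dissector key, returns -EINVAL when the rule does not carry that key, and the entry is only programmed (marked in_use) once at least one parser has matched. A compact user-space sketch of the pattern, with illustrative names:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct flow { bool has_ip4, has_ports; };
    struct entry { bool in_use; };

    /* Each parser claims one key and reports -EINVAL when the rule
     * does not carry it, as tc_add_ip4_flow()/tc_add_ports_flow() do. */
    static int parse_ip4(const struct flow *f, struct entry *e)
    {
            (void)e;
            return f->has_ip4 ? 0 : -EINVAL;
    }

    static int parse_ports(const struct flow *f, struct entry *e)
    {
            (void)e;
            return f->has_ports ? 0 : -EINVAL;
    }

    static int (*const parsers[])(const struct flow *, struct entry *) = {
            parse_ip4, parse_ports,
    };

    /* Accept the rule if at least one parser matched. */
    static int add_flow(const struct flow *f, struct entry *e)
    {
            size_t i;

            for (i = 0; i < sizeof(parsers) / sizeof(parsers[0]); i++) {
                    if (!parsers[i](f, e))
                            e->in_use = true;
            }
            return e->in_use ? 0 : -EINVAL;
    }

    int main(void)
    {
            struct flow f = { .has_ports = true };
            struct entry e = { false };

            printf("add_flow -> %d, in_use = %d\n", add_flow(&f, &e), e.in_use);
            return 0;
    }
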
*/ @@ -2058,7 +2058,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc, __skb_frag_set_page(frag, page->buffer); __skb_frag_ref(frag); - frag->page_offset = 0; + skb_frag_off_set(frag, 0); skb_frag_size_set(frag, hlen); RX_USED_ADD(page, hlen + cp->crc_size); } @@ -2816,7 +2816,7 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring, mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len, DMA_TO_DEVICE); - tabort = cas_calc_tabort(cp, fragp->page_offset, len); + tabort = cas_calc_tabort(cp, skb_frag_off(fragp), len); if (unlikely(tabort)) { void *addr; @@ -2827,7 +2827,7 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring, addr = cas_page_map(skb_frag_page(fragp)); memcpy(tx_tiny_buf(cp, ring, entry), - addr + fragp->page_offset + len - tabort, + addr + skb_frag_off(fragp) + len - tabort, tabort); cas_page_unmap(addr); mapping = tx_tiny_map(cp, ring, entry, tentry); diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 0bc5863bffeb..f5fd1f3c07cc 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -6695,7 +6695,7 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb, len = skb_frag_size(frag); mapping = np->ops->map_page(np->device, skb_frag_page(frag), - frag->page_offset, len, + skb_frag_off(frag), len, DMA_TO_DEVICE); rp->tx_buffs[prod].skb = NULL; diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c index baa3088b475c..8b94d9ad9e2b 100644 --- a/drivers/net/ethernet/sun/sunvnet_common.c +++ b/drivers/net/ethernet/sun/sunvnet_common.c @@ -1088,7 +1088,7 @@ static inline int vnet_skb_map(struct ldc_channel *lp, struct sk_buff *skb, vaddr = kmap_atomic(skb_frag_page(f)); blen = skb_frag_size(f); blen += 8 - (blen & 7); - err = ldc_map_single(lp, vaddr + f->page_offset, + err = ldc_map_single(lp, vaddr + skb_frag_off(f), blen, cookies + nc, ncookies - nc, map_perm); kunmap_atomic(vaddr); @@ -1124,7 +1124,7 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies) for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *f = &skb_shinfo(skb)->frags[i]; - docopy |= f->page_offset & 7; + docopy |= skb_frag_off(f) & 7; } if (((unsigned long)skb->data & 7) != VNET_PACKET_SKIP || skb_tailroom(skb) < pad || @@ -1532,8 +1532,7 @@ out_dropped: else if (port) del_timer(&port->clean_timer); rcu_read_unlock(); - if (skb) - dev_kfree_skb(skb); + dev_kfree_skb(skb); vnet_free_skbs(freeskbs); dev->stats.tx_dropped++; return NETDEV_TX_OK; diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c index 031cf9c3435a..8c4195a9a2cc 100644 --- a/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c +++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c @@ -503,7 +503,7 @@ static int xlgmac_map_tx_skb(struct xlgmac_channel *channel, struct xlgmac_desc_data *desc_data; unsigned int offset, datalen, len; struct xlgmac_pkt_info *pkt_info; - struct skb_frag_struct *frag; + skb_frag_t *frag; unsigned int tso, vlan; dma_addr_t skb_dma; unsigned int i; diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c index 1f8e9601592a..a1f5a1e61040 100644 --- a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c +++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c @@ -116,7 +116,7 @@ static void xlgmac_prep_tx_pkt(struct xlgmac_pdata *pdata, struct sk_buff *skb, struct xlgmac_pkt_info *pkt_info) { - struct skb_frag_struct *frag; + skb_frag_t *frag; 
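The skb_frag_off()/skb_frag_off_set() and skb_frag_t conversions running through the cassini, niu, sunvnet, xlgmac and tehuti hunks are one mechanical pattern: drivers stop dereferencing frag->page_offset directly so the fragment layout can change underneath (this series prepared skb_frag_t to be unified with struct bio_vec) without another tree-wide sweep. A toy illustration of that accessor discipline — frag_t, frag_off() and frag_off_set() are stand-ins here; the real accessors live in include/linux/skbuff.h:

    #include <stdio.h>

    /* Stand-in for skb_frag_t; the field is private to the accessors,
     * so its name and position are free to change. */
    typedef struct {
            unsigned int bv_offset;
            unsigned int bv_len;
    } frag_t;

    static inline unsigned int frag_off(const frag_t *f)
    {
            return f->bv_offset;
    }

    static inline void frag_off_set(frag_t *f, unsigned int off)
    {
            f->bv_offset = off;
    }

    int main(void)
    {
            frag_t f = { 0, 0 };

            /* Callers write frag_off_set(&f, off) instead of poking
             * f.bv_offset, mirroring skb_frag_off_set() above. */
            frag_off_set(&f, 64);
            printf("offset = %u\n", frag_off(&f));
            return 0;
    }
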
unsigned int context_desc; unsigned int len; unsigned int i; diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index 5d6960fe3309..0f8a924fc60c 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -1501,7 +1501,7 @@ bdx_tx_map_skb(struct bdx_priv *priv, struct sk_buff *skb, bdx_tx_db_inc_wptr(db); for (i = 0; i < nr_frags; i++) { - const struct skb_frag_struct *frag; + const skb_frag_t *frag; frag = &skb_shinfo(skb)->frags[i]; db->wptr->len = skb_frag_size(frag); diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index a46b8b2e44e1..f298d714efd6 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -2764,7 +2764,7 @@ static int cpsw_probe(struct platform_device *pdev) struct net_device *ndev; struct cpsw_priv *priv; void __iomem *ss_regs; - struct resource *res, *ss_res; + struct resource *ss_res; struct gpio_descs *mode; const struct soc_device_attribute *soc; struct cpsw_common *cpsw; @@ -2799,8 +2799,7 @@ static int cpsw_probe(struct platform_device *pdev) return PTR_ERR(ss_regs); cpsw->regs = ss_regs; - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - cpsw->wr_regs = devm_ioremap_resource(dev, res); + cpsw->wr_regs = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(cpsw->wr_regs)) return PTR_ERR(cpsw->wr_regs); diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 642843945031..1b2702f74455 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1116,7 +1116,7 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp) for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; struct page *page = skb_frag_page(frag); - u32 page_offset = frag->page_offset; + u32 page_offset = skb_frag_off(frag); u32 buf_len = skb_frag_size(frag); dma_addr_t desc_dma; u32 desc_dma_32; diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index 0f346761a2b2..538e70810d3d 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -2311,11 +2311,9 @@ spider_net_alloc_card(void) { struct net_device *netdev; struct spider_net_card *card; - size_t alloc_size; - alloc_size = sizeof(struct spider_net_card) + - (tx_descriptors + rx_descriptors) * sizeof(struct spider_net_descr); - netdev = alloc_etherdev(alloc_size); + netdev = alloc_etherdev(struct_size(card, darray, + tx_descriptors + rx_descriptors)); if (!netdev) return NULL; diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index ab55416a10fa..ed12dbd156f0 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -1127,15 +1127,13 @@ static int rhine_init_one_platform(struct platform_device *pdev) const struct of_device_id *match; const u32 *quirks; int irq; - struct resource *res; void __iomem *ioaddr; match = of_match_device(rhine_of_tbl, &pdev->dev); if (!match) return -EINVAL; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ioaddr = devm_ioremap_resource(&pdev->dev, res); + ioaddr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ioaddr)) return PTR_ERR(ioaddr); diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index cb2ea8facd8d..3ab24fdccd3b 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -1345,7 +1345,7 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[], 
info->key.u.ipv4.dst = nla_get_in_addr(data[IFLA_GENEVE_REMOTE]); - if (IN_MULTICAST(ntohl(info->key.u.ipv4.dst))) { + if (ipv4_is_multicast(info->key.u.ipv4.dst)) { NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE], "Remote IPv4 address cannot be Multicast"); return -EINVAL; diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c index 9303aeb2595f..4476491b58f9 100644 --- a/drivers/net/hamradio/baycom_epp.c +++ b/drivers/net/hamradio/baycom_epp.c @@ -961,8 +961,7 @@ static int epp_close(struct net_device *dev) parport_write_control(pp, 0); /* reset the adapter */ parport_release(bc->pdev); parport_unregister_device(bc->pdev); - if (bc->skb) - dev_kfree_skb(bc->skb); + dev_kfree_skb(bc->skb); bc->skb = NULL; printk(KERN_INFO "%s: close epp at iobase 0x%lx irq %u\n", bc_drvname, dev->base_addr, dev->irq); diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c index c6f83e0df0a3..df495b5595f5 100644 --- a/drivers/net/hamradio/hdlcdrv.c +++ b/drivers/net/hamradio/hdlcdrv.c @@ -475,8 +475,7 @@ static int hdlcdrv_close(struct net_device *dev) if (s->ops && s->ops->close) i = s->ops->close(dev); - if (s->skb) - dev_kfree_skb(s->skb); + dev_kfree_skb(s->skb); s->skb = NULL; s->opened = 0; return i; diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index 442018ccd65e..c5bfa19ddb93 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c @@ -25,6 +25,7 @@ #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/jiffies.h> +#include <linux/refcount.h> #include <net/ax25.h> @@ -70,7 +71,7 @@ struct mkiss { #define CRC_MODE_FLEX_TEST 3 #define CRC_MODE_SMACK_TEST 4 - atomic_t refcnt; + refcount_t refcnt; struct completion dead; }; @@ -668,7 +669,7 @@ static struct mkiss *mkiss_get(struct tty_struct *tty) read_lock(&disc_data_lock); ax = tty->disc_data; if (ax) - atomic_inc(&ax->refcnt); + refcount_inc(&ax->refcnt); read_unlock(&disc_data_lock); return ax; @@ -676,7 +677,7 @@ static struct mkiss *mkiss_get(struct tty_struct *tty) static void mkiss_put(struct mkiss *ax) { - if (atomic_dec_and_test(&ax->refcnt)) + if (refcount_dec_and_test(&ax->refcnt)) complete(&ax->dead); } @@ -704,7 +705,7 @@ static int mkiss_open(struct tty_struct *tty) ax->dev = dev; spin_lock_init(&ax->buflock); - atomic_set(&ax->refcnt, 1); + refcount_set(&ax->refcnt, 1); init_completion(&ax->dead); ax->tty = tty; @@ -784,7 +785,7 @@ static void mkiss_close(struct tty_struct *tty) * We have now ensured that nobody can start using ap from now on, but * we have to wait for all existing users to finish. 
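 *
 * (Illustrative recap of the refcount_t lifecycle these hunks switch to,
 * using the surrounding mkiss code:
 *
 *	refcount_set(&ax->refcnt, 1);		   open: owner's reference
 *	refcount_inc(&ax->refcnt);		   mkiss_get(): one per user
 *	if (refcount_dec_and_test(&ax->refcnt))	   mkiss_put() and close
 *		complete(&ax->dead);		   last reference signals us
 *
 * Unlike the plain atomic_t it replaces, refcount_t saturates and warns
 * on overflow and on increment-from-zero.)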
*/ - if (!atomic_dec_and_test(&ax->refcnt)) + if (!refcount_dec_and_test(&ax->refcnt)) wait_for_completion(&ax->dead); /* * Halt the transmit queue so that a new transmit cannot scribble diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index ecc9af050387..670ef682f268 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -822,7 +822,7 @@ struct nvsp_message { #define NETVSC_SUPPORTED_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | \ NETIF_F_TSO | NETIF_F_IPV6_CSUM | \ - NETIF_F_TSO6 | NETIF_F_LRO) + NETIF_F_TSO6 | NETIF_F_LRO | NETIF_F_SG) #define VRSS_SEND_TAB_SIZE 16 /* must be power of 2 */ #define VRSS_CHANNEL_MAX 64 diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index e8fce6d715ef..39dddcd8b3cb 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -435,7 +435,7 @@ static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb, skb_frag_t *frag = skb_shinfo(skb)->frags + i; slots_used += fill_pg_buf(skb_frag_page(frag), - frag->page_offset, + skb_frag_off(frag), skb_frag_size(frag), &pb[slots_used]); } return slots_used; @@ -449,7 +449,7 @@ static int count_skb_frag_slots(struct sk_buff *skb) for (i = 0; i < frags; i++) { skb_frag_t *frag = skb_shinfo(skb)->frags + i; unsigned long size = skb_frag_size(frag); - unsigned long offset = frag->page_offset; + unsigned long offset = skb_frag_off(frag); /* Skip unused frames from start of page */ offset &= ~PAGE_MASK; @@ -1785,13 +1785,15 @@ static int netvsc_set_features(struct net_device *ndev, netdev_features_t change = features ^ ndev->features; struct net_device_context *ndevctx = netdev_priv(ndev); struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); + struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev); struct ndis_offload_params offloads; + int ret = 0; if (!nvdev || nvdev->destroy) return -ENODEV; if (!(change & NETIF_F_LRO)) - return 0; + goto syncvf; memset(&offloads, 0, sizeof(struct ndis_offload_params)); @@ -1803,7 +1805,19 @@ static int netvsc_set_features(struct net_device *ndev, offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED; } - return rndis_filter_set_offload_params(ndev, nvdev, &offloads); + ret = rndis_filter_set_offload_params(ndev, nvdev, &offloads); + + if (ret) + features ^= NETIF_F_LRO; + +syncvf: + if (!vf_netdev) + return ret; + + vf_netdev->wanted_features = features; + netdev_update_features(vf_netdev); + + return ret; } static u32 netvsc_get_msglevel(struct net_device *ndev) @@ -2181,6 +2195,10 @@ static int netvsc_register_vf(struct net_device *vf_netdev) dev_hold(vf_netdev); rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev); + + vf_netdev->wanted_features = ndev->features; + netdev_update_features(vf_netdev); + return NOTIFY_OK; } @@ -2313,8 +2331,8 @@ static int netvsc_probe(struct hv_device *dev, /* hw_features computed in rndis_netdev_set_hwcaps() */ net->features = net->hw_features | - NETIF_F_HIGHDMA | NETIF_F_SG | - NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; + NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; net->vlan_features = net->features; netdev_lockdep_set_classes(net); diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 317dbe9356c2..abaf8156d19d 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -1207,6 +1207,7 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device, /* Compute tx offload settings based 
on hw capabilities */ net->hw_features |= NETIF_F_RXCSUM; + net->hw_features |= NETIF_F_SG; if ((hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_ALL_TCP4) == NDIS_TXCSUM_ALL_TCP4) { /* Can checksum TCP */ diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c index c9392d70e639..5a37514e4234 100644 --- a/drivers/net/ieee802154/adf7242.c +++ b/drivers/net/ieee802154/adf7242.c @@ -1158,23 +1158,16 @@ static int adf7242_stats_show(struct seq_file *file, void *offset) return 0; } -static int adf7242_debugfs_init(struct adf7242_local *lp) +static void adf7242_debugfs_init(struct adf7242_local *lp) { char debugfs_dir_name[DNAME_INLINE_LEN + 1] = "adf7242-"; - struct dentry *stats; strncat(debugfs_dir_name, dev_name(&lp->spi->dev), DNAME_INLINE_LEN); lp->debugfs_root = debugfs_create_dir(debugfs_dir_name, NULL); - if (IS_ERR_OR_NULL(lp->debugfs_root)) - return PTR_ERR_OR_ZERO(lp->debugfs_root); - stats = debugfs_create_devm_seqfile(&lp->spi->dev, "status", - lp->debugfs_root, - adf7242_stats_show); - return PTR_ERR_OR_ZERO(stats); - - return 0; + debugfs_create_devm_seqfile(&lp->spi->dev, "status", lp->debugfs_root, + adf7242_stats_show); } static const s32 adf7242_powers[] = { diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index 595cf7e2a651..7d67f41387f5 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -1626,24 +1626,16 @@ static int at86rf230_stats_show(struct seq_file *file, void *offset) } DEFINE_SHOW_ATTRIBUTE(at86rf230_stats); -static int at86rf230_debugfs_init(struct at86rf230_local *lp) +static void at86rf230_debugfs_init(struct at86rf230_local *lp) { char debugfs_dir_name[DNAME_INLINE_LEN + 1] = "at86rf230-"; - struct dentry *stats; strncat(debugfs_dir_name, dev_name(&lp->spi->dev), DNAME_INLINE_LEN); at86rf230_debugfs_root = debugfs_create_dir(debugfs_dir_name, NULL); - if (!at86rf230_debugfs_root) - return -ENOMEM; - - stats = debugfs_create_file("trac_stats", 0444, - at86rf230_debugfs_root, lp, - &at86rf230_stats_fops); - if (!stats) - return -ENOMEM; - return 0; + debugfs_create_file("trac_stats", 0444, at86rf230_debugfs_root, lp, + &at86rf230_stats_fops); } static void at86rf230_debugfs_remove(void) @@ -1651,7 +1643,7 @@ static void at86rf230_debugfs_remove(void) debugfs_remove_recursive(at86rf230_debugfs_root); } #else -static int at86rf230_debugfs_init(struct at86rf230_local *lp) { return 0; } +static void at86rf230_debugfs_init(struct at86rf230_local *lp) { } static void at86rf230_debugfs_remove(void) { } #endif @@ -1751,9 +1743,7 @@ static int at86rf230_probe(struct spi_device *spi) /* going into sleep by default */ at86rf230_sleep(lp); - rc = at86rf230_debugfs_init(lp); - if (rc) - goto free_dev; + at86rf230_debugfs_init(lp); rc = ieee802154_register_hw(lp->hw); if (rc) diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index b188fce3f641..11402dc347db 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -3019,14 +3019,7 @@ static int ca8210_test_interface_init(struct ca8210_priv *priv) priv, &test_int_fops ); - if (IS_ERR(test->ca8210_dfs_spi_int)) { - dev_err( - &priv->spi->dev, - "Error %ld when creating debugfs node\n", - PTR_ERR(test->ca8210_dfs_spi_int) - ); - return PTR_ERR(test->ca8210_dfs_spi_int); - } + debugfs_create_symlink("ca8210", NULL, node_name); init_waitqueue_head(&test->readq); return kfifo_alloc( diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 
1c96bed5a7c4..887bbba4631e 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -126,6 +126,7 @@ static int ipvlan_init(struct net_device *dev) (phy_dev->state & IPVLAN_STATE_MASK); dev->features = phy_dev->features & IPVLAN_FEATURES; dev->features |= NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED; + dev->hw_enc_features |= dev->features; dev->gso_max_size = phy_dev->gso_max_size; dev->gso_max_segs = phy_dev->gso_max_segs; dev->hard_header_len = phy_dev->hard_header_len; diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c index bcc40a236624..56576d4f34a5 100644 --- a/drivers/net/netdevsim/dev.c +++ b/drivers/net/netdevsim/dev.c @@ -17,16 +17,60 @@ #include <linux/debugfs.h> #include <linux/device.h> +#include <linux/etherdevice.h> +#include <linux/inet.h> +#include <linux/jiffies.h> +#include <linux/kernel.h> #include <linux/list.h> #include <linux/mutex.h> #include <linux/random.h> #include <linux/rtnetlink.h> +#include <linux/workqueue.h> #include <net/devlink.h> +#include <net/ip.h> +#include <uapi/linux/devlink.h> +#include <uapi/linux/ip.h> +#include <uapi/linux/udp.h> #include "netdevsim.h" static struct dentry *nsim_dev_ddir; +#define NSIM_DEV_DUMMY_REGION_SIZE (1024 * 32) + +static ssize_t nsim_dev_take_snapshot_write(struct file *file, + const char __user *data, + size_t count, loff_t *ppos) +{ + struct nsim_dev *nsim_dev = file->private_data; + void *dummy_data; + int err; + u32 id; + + dummy_data = kmalloc(NSIM_DEV_DUMMY_REGION_SIZE, GFP_KERNEL); + if (!dummy_data) + return -ENOMEM; + + get_random_bytes(dummy_data, NSIM_DEV_DUMMY_REGION_SIZE); + + id = devlink_region_shapshot_id_get(priv_to_devlink(nsim_dev)); + err = devlink_region_snapshot_create(nsim_dev->dummy_region, + dummy_data, id, kfree); + if (err) { + pr_err("Failed to create region snapshot\n"); + kfree(dummy_data); + return err; + } + + return count; +} + +static const struct file_operations nsim_dev_take_snapshot_fops = { + .open = simple_open, + .write = nsim_dev_take_snapshot_write, + .llseek = generic_file_llseek, +}; + static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev) { char dev_ddir_name[16]; @@ -40,6 +84,12 @@ static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev) return PTR_ERR_OR_ZERO(nsim_dev->ports_ddir) ?: -EINVAL; debugfs_create_bool("fw_update_status", 0600, nsim_dev->ddir, &nsim_dev->fw_update_status); + debugfs_create_u32("max_macs", 0600, nsim_dev->ddir, + &nsim_dev->max_macs); + debugfs_create_bool("test1", 0600, nsim_dev->ddir, + &nsim_dev->test1); + debugfs_create_file("take_snapshot", 0200, nsim_dev->ddir, nsim_dev, + &nsim_dev_take_snapshot_fops); return 0; } @@ -193,8 +243,295 @@ out: return err; } -static int nsim_dev_reload(struct devlink *devlink, - struct netlink_ext_ack *extack) +enum nsim_devlink_param_id { + NSIM_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, + NSIM_DEVLINK_PARAM_ID_TEST1, +}; + +static const struct devlink_param nsim_devlink_params[] = { + DEVLINK_PARAM_GENERIC(MAX_MACS, + BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), + NULL, NULL, NULL), + DEVLINK_PARAM_DRIVER(NSIM_DEVLINK_PARAM_ID_TEST1, + "test1", DEVLINK_PARAM_TYPE_BOOL, + BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), + NULL, NULL, NULL), +}; + +static void nsim_devlink_set_params_init_values(struct nsim_dev *nsim_dev, + struct devlink *devlink) +{ + union devlink_param_value value; + + value.vu32 = nsim_dev->max_macs; + devlink_param_driverinit_value_set(devlink, + DEVLINK_PARAM_GENERIC_ID_MAX_MACS, + value); + value.vbool = nsim_dev->test1; + 
devlink_param_driverinit_value_set(devlink, + NSIM_DEVLINK_PARAM_ID_TEST1, + value); +} + +static void nsim_devlink_param_load_driverinit_values(struct devlink *devlink) +{ + struct nsim_dev *nsim_dev = devlink_priv(devlink); + union devlink_param_value saved_value; + int err; + + err = devlink_param_driverinit_value_get(devlink, + DEVLINK_PARAM_GENERIC_ID_MAX_MACS, + &saved_value); + if (!err) + nsim_dev->max_macs = saved_value.vu32; + err = devlink_param_driverinit_value_get(devlink, + NSIM_DEVLINK_PARAM_ID_TEST1, + &saved_value); + if (!err) + nsim_dev->test1 = saved_value.vbool; +} + +#define NSIM_DEV_DUMMY_REGION_SNAPSHOT_MAX 16 + +static int nsim_dev_dummy_region_init(struct nsim_dev *nsim_dev, + struct devlink *devlink) +{ + nsim_dev->dummy_region = + devlink_region_create(devlink, "dummy", + NSIM_DEV_DUMMY_REGION_SNAPSHOT_MAX, + NSIM_DEV_DUMMY_REGION_SIZE); + return PTR_ERR_OR_ZERO(nsim_dev->dummy_region); +} + +static void nsim_dev_dummy_region_exit(struct nsim_dev *nsim_dev) +{ + devlink_region_destroy(nsim_dev->dummy_region); +} + +struct nsim_trap_item { + void *trap_ctx; + enum devlink_trap_action action; +}; + +struct nsim_trap_data { + struct delayed_work trap_report_dw; + struct nsim_trap_item *trap_items_arr; + struct nsim_dev *nsim_dev; + spinlock_t trap_lock; /* Protects trap_items_arr */ +}; + +/* All driver-specific traps must be documented in + * Documentation/networking/devlink-trap-netdevsim.rst + */ +enum { + NSIM_TRAP_ID_BASE = DEVLINK_TRAP_GENERIC_ID_MAX, + NSIM_TRAP_ID_FID_MISS, +}; + +#define NSIM_TRAP_NAME_FID_MISS "fid_miss" + +#define NSIM_TRAP_METADATA DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT + +#define NSIM_TRAP_DROP(_id, _group_id) \ + DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \ + DEVLINK_TRAP_GROUP_GENERIC(_group_id), \ + NSIM_TRAP_METADATA) +#define NSIM_TRAP_EXCEPTION(_id, _group_id) \ + DEVLINK_TRAP_GENERIC(EXCEPTION, TRAP, _id, \ + DEVLINK_TRAP_GROUP_GENERIC(_group_id), \ + NSIM_TRAP_METADATA) +#define NSIM_TRAP_DRIVER_EXCEPTION(_id, _group_id) \ + DEVLINK_TRAP_DRIVER(EXCEPTION, TRAP, NSIM_TRAP_ID_##_id, \ + NSIM_TRAP_NAME_##_id, \ + DEVLINK_TRAP_GROUP_GENERIC(_group_id), \ + NSIM_TRAP_METADATA) + +static const struct devlink_trap nsim_traps_arr[] = { + NSIM_TRAP_DROP(SMAC_MC, L2_DROPS), + NSIM_TRAP_DROP(VLAN_TAG_MISMATCH, L2_DROPS), + NSIM_TRAP_DROP(INGRESS_VLAN_FILTER, L2_DROPS), + NSIM_TRAP_DROP(INGRESS_STP_FILTER, L2_DROPS), + NSIM_TRAP_DROP(EMPTY_TX_LIST, L2_DROPS), + NSIM_TRAP_DROP(PORT_LOOPBACK_FILTER, L2_DROPS), + NSIM_TRAP_DRIVER_EXCEPTION(FID_MISS, L2_DROPS), + NSIM_TRAP_DROP(BLACKHOLE_ROUTE, L3_DROPS), + NSIM_TRAP_EXCEPTION(TTL_ERROR, L3_DROPS), + NSIM_TRAP_DROP(TAIL_DROP, BUFFER_DROPS), +}; + +#define NSIM_TRAP_L4_DATA_LEN 100 + +static struct sk_buff *nsim_dev_trap_skb_build(void) +{ + int tot_len, data_len = NSIM_TRAP_L4_DATA_LEN; + struct sk_buff *skb; + struct udphdr *udph; + struct ethhdr *eth; + struct iphdr *iph; + + skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC); + if (!skb) + return NULL; + tot_len = sizeof(struct iphdr) + sizeof(struct udphdr) + data_len; + + skb_reset_mac_header(skb); + eth = skb_put(skb, sizeof(struct ethhdr)); + eth_random_addr(eth->h_dest); + eth_random_addr(eth->h_source); + eth->h_proto = htons(ETH_P_IP); + skb->protocol = htons(ETH_P_IP); + + skb_set_network_header(skb, skb->len); + iph = skb_put(skb, sizeof(struct iphdr)); + iph->protocol = IPPROTO_UDP; + iph->saddr = in_aton("192.0.2.1"); + iph->daddr = in_aton("198.51.100.1"); + iph->version = 0x4; + iph->frag_off = 0; + iph->ihl = 0x5; + iph->tot_len = 
htons(tot_len); + iph->ttl = 100; + iph->check = 0; + iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); + + skb_set_transport_header(skb, skb->len); + udph = skb_put_zero(skb, sizeof(struct udphdr) + data_len); + get_random_bytes(&udph->source, sizeof(u16)); + get_random_bytes(&udph->dest, sizeof(u16)); + udph->len = htons(sizeof(struct udphdr) + data_len); + + return skb; +} + +static void nsim_dev_trap_report(struct nsim_dev_port *nsim_dev_port) +{ + struct nsim_dev *nsim_dev = nsim_dev_port->ns->nsim_dev; + struct devlink *devlink = priv_to_devlink(nsim_dev); + struct nsim_trap_data *nsim_trap_data; + int i; + + nsim_trap_data = nsim_dev->trap_data; + + spin_lock(&nsim_trap_data->trap_lock); + for (i = 0; i < ARRAY_SIZE(nsim_traps_arr); i++) { + struct nsim_trap_item *nsim_trap_item; + struct sk_buff *skb; + + nsim_trap_item = &nsim_trap_data->trap_items_arr[i]; + if (nsim_trap_item->action == DEVLINK_TRAP_ACTION_DROP) + continue; + + skb = nsim_dev_trap_skb_build(); + if (!skb) + continue; + skb->dev = nsim_dev_port->ns->netdev; + + /* Trapped packets are usually passed to devlink in softIRQ, + * but in this case they are generated in a workqueue. Disable + * softIRQs to prevent lockdep from complaining about + * "inconsistent lock state". + */ + local_bh_disable(); + devlink_trap_report(devlink, skb, nsim_trap_item->trap_ctx, + &nsim_dev_port->devlink_port); + local_bh_enable(); + consume_skb(skb); + } + spin_unlock(&nsim_trap_data->trap_lock); +} + +#define NSIM_TRAP_REPORT_INTERVAL_MS 100 + +static void nsim_dev_trap_report_work(struct work_struct *work) +{ + struct nsim_trap_data *nsim_trap_data; + struct nsim_dev_port *nsim_dev_port; + struct nsim_dev *nsim_dev; + + nsim_trap_data = container_of(work, struct nsim_trap_data, + trap_report_dw.work); + nsim_dev = nsim_trap_data->nsim_dev; + + /* For each running port and enabled packet trap, generate a UDP + * packet with a random 5-tuple and report it. + */ + mutex_lock(&nsim_dev->port_list_lock); + list_for_each_entry(nsim_dev_port, &nsim_dev->port_list, list) { + if (!netif_running(nsim_dev_port->ns->netdev)) + continue; + + nsim_dev_trap_report(nsim_dev_port); + } + mutex_unlock(&nsim_dev->port_list_lock); + + schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw, + msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS)); +} + +static int nsim_dev_traps_init(struct devlink *devlink) +{ + struct nsim_dev *nsim_dev = devlink_priv(devlink); + struct nsim_trap_data *nsim_trap_data; + int err; + + nsim_trap_data = kzalloc(sizeof(*nsim_trap_data), GFP_KERNEL); + if (!nsim_trap_data) + return -ENOMEM; + + nsim_trap_data->trap_items_arr = kcalloc(ARRAY_SIZE(nsim_traps_arr), + sizeof(struct nsim_trap_item), + GFP_KERNEL); + if (!nsim_trap_data->trap_items_arr) { + err = -ENOMEM; + goto err_trap_data_free; + } + + /* The lock is used to protect the action state of the registered + * traps. The value is written by the user and read in delayed work + * when iterating over all the traps. 
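+	 *
+	 * (Sketch of the two sides that meet on this lock, per the code in
+	 * this file: the devlink ->trap_action_set() callback below does
+	 *
+	 *	spin_lock(&nsim_dev->trap_data->trap_lock);
+	 *	nsim_trap_item->action = action;
+	 *	spin_unlock(&nsim_dev->trap_data->trap_lock);
+	 *
+	 * while nsim_dev_trap_report() above holds the same lock around its
+	 * walk of trap_items_arr.)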
+ */ + spin_lock_init(&nsim_trap_data->trap_lock); + nsim_trap_data->nsim_dev = nsim_dev; + nsim_dev->trap_data = nsim_trap_data; + + err = devlink_traps_register(devlink, nsim_traps_arr, + ARRAY_SIZE(nsim_traps_arr), NULL); + if (err) + goto err_trap_items_free; + + INIT_DELAYED_WORK(&nsim_dev->trap_data->trap_report_dw, + nsim_dev_trap_report_work); + schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw, + msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS)); + + return 0; + +err_trap_items_free: + kfree(nsim_trap_data->trap_items_arr); +err_trap_data_free: + kfree(nsim_trap_data); + return err; +} + +static void nsim_dev_traps_exit(struct devlink *devlink) +{ + struct nsim_dev *nsim_dev = devlink_priv(devlink); + + cancel_delayed_work_sync(&nsim_dev->trap_data->trap_report_dw); + devlink_traps_unregister(devlink, nsim_traps_arr, + ARRAY_SIZE(nsim_traps_arr)); + kfree(nsim_dev->trap_data->trap_items_arr); + kfree(nsim_dev->trap_data); +} + +static int nsim_dev_reload_down(struct devlink *devlink, + struct netlink_ext_ack *extack) +{ + return 0; +} + +static int nsim_dev_reload_up(struct devlink *devlink, + struct netlink_ext_ack *extack) { enum nsim_resource_id res_ids[] = { NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES, @@ -214,6 +551,7 @@ static int nsim_dev_reload(struct devlink *devlink, return err; } } + nsim_devlink_param_load_driverinit_values(devlink); return 0; } @@ -258,11 +596,67 @@ static int nsim_dev_flash_update(struct devlink *devlink, const char *file_name, return 0; } +static struct nsim_trap_item * +nsim_dev_trap_item_lookup(struct nsim_dev *nsim_dev, u16 trap_id) +{ + struct nsim_trap_data *nsim_trap_data = nsim_dev->trap_data; + int i; + + for (i = 0; i < ARRAY_SIZE(nsim_traps_arr); i++) { + if (nsim_traps_arr[i].id == trap_id) + return &nsim_trap_data->trap_items_arr[i]; + } + + return NULL; +} + +static int nsim_dev_devlink_trap_init(struct devlink *devlink, + const struct devlink_trap *trap, + void *trap_ctx) +{ + struct nsim_dev *nsim_dev = devlink_priv(devlink); + struct nsim_trap_item *nsim_trap_item; + + nsim_trap_item = nsim_dev_trap_item_lookup(nsim_dev, trap->id); + if (WARN_ON(!nsim_trap_item)) + return -ENOENT; + + nsim_trap_item->trap_ctx = trap_ctx; + nsim_trap_item->action = trap->init_action; + + return 0; +} + +static int +nsim_dev_devlink_trap_action_set(struct devlink *devlink, + const struct devlink_trap *trap, + enum devlink_trap_action action) +{ + struct nsim_dev *nsim_dev = devlink_priv(devlink); + struct nsim_trap_item *nsim_trap_item; + + nsim_trap_item = nsim_dev_trap_item_lookup(nsim_dev, trap->id); + if (WARN_ON(!nsim_trap_item)) + return -ENOENT; + + spin_lock(&nsim_dev->trap_data->trap_lock); + nsim_trap_item->action = action; + spin_unlock(&nsim_dev->trap_data->trap_lock); + + return 0; +} + static const struct devlink_ops nsim_dev_devlink_ops = { - .reload = nsim_dev_reload, + .reload_down = nsim_dev_reload_down, + .reload_up = nsim_dev_reload_up, .flash_update = nsim_dev_flash_update, + .trap_init = nsim_dev_devlink_trap_init, + .trap_action_set = nsim_dev_devlink_trap_action_set, }; +#define NSIM_DEV_MAX_MACS_DEFAULT 32 +#define NSIM_DEV_TEST1_DEFAULT true + static struct nsim_dev * nsim_dev_create(struct nsim_bus_dev *nsim_bus_dev, unsigned int port_count) { @@ -280,6 +674,8 @@ nsim_dev_create(struct nsim_bus_dev *nsim_bus_dev, unsigned int port_count) INIT_LIST_HEAD(&nsim_dev->port_list); mutex_init(&nsim_dev->port_list_lock); nsim_dev->fw_update_status = true; + nsim_dev->max_macs = NSIM_DEV_MAX_MACS_DEFAULT; + 
nsim_dev->test1 = NSIM_DEV_TEST1_DEFAULT; err = nsim_dev_resources_register(devlink); if (err) @@ -289,18 +685,40 @@ nsim_dev_create(struct nsim_bus_dev *nsim_bus_dev, unsigned int port_count) if (err) goto err_resources_unregister; - err = nsim_dev_debugfs_init(nsim_dev); + err = devlink_params_register(devlink, nsim_devlink_params, + ARRAY_SIZE(nsim_devlink_params)); if (err) goto err_dl_unregister; + nsim_devlink_set_params_init_values(nsim_dev, devlink); + + err = nsim_dev_dummy_region_init(nsim_dev, devlink); + if (err) + goto err_params_unregister; + + err = nsim_dev_traps_init(devlink); + if (err) + goto err_dummy_region_exit; + + err = nsim_dev_debugfs_init(nsim_dev); + if (err) + goto err_traps_exit; err = nsim_bpf_dev_init(nsim_dev); if (err) goto err_debugfs_exit; + devlink_params_publish(devlink); return nsim_dev; err_debugfs_exit: nsim_dev_debugfs_exit(nsim_dev); +err_traps_exit: + nsim_dev_traps_exit(devlink); +err_dummy_region_exit: + nsim_dev_dummy_region_exit(nsim_dev); +err_params_unregister: + devlink_params_unregister(devlink, nsim_devlink_params, + ARRAY_SIZE(nsim_devlink_params)); err_dl_unregister: devlink_unregister(devlink); err_resources_unregister: @@ -316,6 +734,10 @@ static void nsim_dev_destroy(struct nsim_dev *nsim_dev) nsim_bpf_dev_exit(nsim_dev); nsim_dev_debugfs_exit(nsim_dev); + nsim_dev_traps_exit(devlink); + nsim_dev_dummy_region_exit(nsim_dev); + devlink_params_unregister(devlink, nsim_devlink_params, + ARRAY_SIZE(nsim_devlink_params)); devlink_unregister(devlink); devlink_resources_unregister(devlink, NULL); mutex_destroy(&nsim_dev->port_list_lock); diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h index 9404637d34b7..66bf13765ad0 100644 --- a/drivers/net/netdevsim/netdevsim.h +++ b/drivers/net/netdevsim/netdevsim.h @@ -145,6 +145,7 @@ struct nsim_dev_port { struct nsim_dev { struct nsim_bus_dev *nsim_bus_dev; struct nsim_fib_data *fib_data; + struct nsim_trap_data *trap_data; struct dentry *ddir; struct dentry *ports_ddir; struct bpf_offload_dev *bpf_dev; @@ -158,6 +159,9 @@ struct nsim_dev { struct list_head port_list; struct mutex port_list_lock; /* protects port list */ bool fw_update_status; + u32 max_macs; + bool test1; + struct devlink_region *dummy_region; }; int nsim_dev_init(void); diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 20f14c5fbb7e..03be30cde552 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -21,6 +21,19 @@ config MDIO_BUS if MDIO_BUS +config MDIO_ASPEED + tristate "ASPEED MDIO bus controller" + depends on ARCH_ASPEED || COMPILE_TEST + depends on OF_MDIO && HAS_IOMEM + help + This module provides a driver for the independent MDIO bus + controllers found in the ASPEED AST2600 SoC. This is a driver for the + third revision of the ASPEED MDIO register interface - the first two + revisions are the "old" and "new" interfaces found in the AST2400 and + AST2500, embedded in the MAC. For legacy reasons, FTGMAC100 driver + continues to drive the embedded MDIO controller for the AST2400 and + AST2500 SoCs, so say N if AST2600 support is not required. 
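The register interface this help text describes is implemented by drivers/net/phy/mdio-aspeed.c, added later in this diff. Condensed from that driver (illustrative, not a complete function; ctx, addr, regnum, rc, data and val stand in for the driver's locals), a Clause 22 read is a single fire-and-poll sequence:

	u32 ctrl = ASPEED_MDIO_CTRL_FIRE
		 | FIELD_PREP(ASPEED_MDIO_CTRL_ST, ASPEED_MDIO_CTRL_ST_C22)
		 | FIELD_PREP(ASPEED_MDIO_CTRL_OP, MDIO_C22_OP_READ)
		 | FIELD_PREP(ASPEED_MDIO_CTRL_PHYAD, addr)
		 | FIELD_PREP(ASPEED_MDIO_CTRL_REGAD, regnum);

	iowrite32(ctrl, ctx->base + ASPEED_MDIO_CTRL);
	rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_DATA, data,
				data & ASPEED_MDIO_DATA_IDLE,
				ASPEED_MDIO_INTERVAL_US,
				ASPEED_MDIO_TIMEOUT_US);
	val = FIELD_GET(ASPEED_MDIO_DATA_MIIRDATA, data);

Clause 45 is not implemented; the driver returns -EOPNOTSUPP when MII_ADDR_C45 is set in regnum.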
+ config MDIO_BCM_IPROC tristate "Broadcom iProc MDIO bus controller" depends on ARCH_BCM_IPROC || COMPILE_TEST @@ -159,8 +172,8 @@ config MDIO_MSCC_MIIM config MDIO_OCTEON tristate "Octeon and some ThunderX SOCs MDIO buses" - depends on 64BIT - depends on HAS_IOMEM && OF_MDIO + depends on (64BIT && OF_MDIO) || COMPILE_TEST + depends on HAS_IOMEM select MDIO_CAVIUM help This module provides a driver for the Octeon and ThunderX MDIO @@ -244,6 +257,15 @@ config SFP depends on HWMON || HWMON=n select MDIO_I2C +config ADIN_PHY + tristate "Analog Devices Industrial Ethernet PHYs" + help + Adds support for the Analog Devices Industrial Ethernet PHYs. + Currently supports the following: + - ADIN1200 - Robust, Industrial, Low Power 10/100 Ethernet PHY + - ADIN1300 - Robust, Industrial, Low Latency 10/100/1000 Gigabit + Ethernet PHY + config AMD_PHY tristate "AMD PHYs" ---help--- diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 839acb292c38..a03437e091f3 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -22,6 +22,7 @@ libphy-$(CONFIG_LED_TRIGGER_PHY) += phy_led_triggers.o obj-$(CONFIG_PHYLINK) += phylink.o obj-$(CONFIG_PHYLIB) += libphy.o +obj-$(CONFIG_MDIO_ASPEED) += mdio-aspeed.o obj-$(CONFIG_MDIO_BCM_IPROC) += mdio-bcm-iproc.o obj-$(CONFIG_MDIO_BCM_UNIMAC) += mdio-bcm-unimac.o obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o @@ -46,6 +47,7 @@ obj-$(CONFIG_SFP) += sfp.o sfp-obj-$(CONFIG_SFP) += sfp-bus.o obj-y += $(sfp-obj-y) $(sfp-obj-m) +obj-$(CONFIG_ADIN_PHY) += adin.o obj-$(CONFIG_AMD_PHY) += amd.o aquantia-objs += aquantia_main.o ifdef CONFIG_HWMON diff --git a/drivers/net/phy/adin.c b/drivers/net/phy/adin.c new file mode 100644 index 000000000000..cf5a391c93e6 --- /dev/null +++ b/drivers/net/phy/adin.c @@ -0,0 +1,781 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Driver for Analog Devices Industrial Ethernet PHYs + * + * Copyright 2019 Analog Devices Inc. 
+ */ +#include <linux/kernel.h> +#include <linux/bitfield.h> +#include <linux/delay.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/mii.h> +#include <linux/phy.h> +#include <linux/property.h> + +#define PHY_ID_ADIN1200 0x0283bc20 +#define PHY_ID_ADIN1300 0x0283bc30 + +#define ADIN1300_MII_EXT_REG_PTR 0x0010 +#define ADIN1300_MII_EXT_REG_DATA 0x0011 + +#define ADIN1300_PHY_CTRL1 0x0012 +#define ADIN1300_AUTO_MDI_EN BIT(10) +#define ADIN1300_MAN_MDIX_EN BIT(9) + +#define ADIN1300_RX_ERR_CNT 0x0014 + +#define ADIN1300_PHY_CTRL_STATUS2 0x0015 +#define ADIN1300_NRG_PD_EN BIT(3) +#define ADIN1300_NRG_PD_TX_EN BIT(2) +#define ADIN1300_NRG_PD_STATUS BIT(1) + +#define ADIN1300_PHY_CTRL2 0x0016 +#define ADIN1300_DOWNSPEED_AN_100_EN BIT(11) +#define ADIN1300_DOWNSPEED_AN_10_EN BIT(10) +#define ADIN1300_GROUP_MDIO_EN BIT(6) +#define ADIN1300_DOWNSPEEDS_EN \ + (ADIN1300_DOWNSPEED_AN_100_EN | ADIN1300_DOWNSPEED_AN_10_EN) + +#define ADIN1300_PHY_CTRL3 0x0017 +#define ADIN1300_LINKING_EN BIT(13) +#define ADIN1300_DOWNSPEED_RETRIES_MSK GENMASK(12, 10) + +#define ADIN1300_INT_MASK_REG 0x0018 +#define ADIN1300_INT_MDIO_SYNC_EN BIT(9) +#define ADIN1300_INT_ANEG_STAT_CHNG_EN BIT(8) +#define ADIN1300_INT_ANEG_PAGE_RX_EN BIT(6) +#define ADIN1300_INT_IDLE_ERR_CNT_EN BIT(5) +#define ADIN1300_INT_MAC_FIFO_OU_EN BIT(4) +#define ADIN1300_INT_RX_STAT_CHNG_EN BIT(3) +#define ADIN1300_INT_LINK_STAT_CHNG_EN BIT(2) +#define ADIN1300_INT_SPEED_CHNG_EN BIT(1) +#define ADIN1300_INT_HW_IRQ_EN BIT(0) +#define ADIN1300_INT_MASK_EN \ + (ADIN1300_INT_LINK_STAT_CHNG_EN | ADIN1300_INT_HW_IRQ_EN) +#define ADIN1300_INT_STATUS_REG 0x0019 + +#define ADIN1300_PHY_STATUS1 0x001a +#define ADIN1300_PAIR_01_SWAP BIT(11) + +/* EEE register addresses, accessible via Clause 22 access using + * ADIN1300_MII_EXT_REG_PTR & ADIN1300_MII_EXT_REG_DATA. + * The bit-fields are the same as specified by IEEE for EEE. 
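+ *
+ * (Illustrative access sequence, matching adin_read_mmd() further down:
+ * reading the EEE advertisement register, MMD 7 register 60 in Clause 45
+ * terms, becomes two Clause 22 transactions:
+ *
+ *	__mdiobus_write(bus, addr, ADIN1300_MII_EXT_REG_PTR,
+ *			ADIN1300_EEE_ADV_REG);
+ *	val = __mdiobus_read(bus, addr, ADIN1300_MII_EXT_REG_DATA);
+ * )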
+ */ +#define ADIN1300_EEE_CAP_REG 0x8000 +#define ADIN1300_EEE_ADV_REG 0x8001 +#define ADIN1300_EEE_LPABLE_REG 0x8002 +#define ADIN1300_CLOCK_STOP_REG 0x9400 +#define ADIN1300_LPI_WAKE_ERR_CNT_REG 0xa000 + +#define ADIN1300_GE_SOFT_RESET_REG 0xff0c +#define ADIN1300_GE_SOFT_RESET BIT(0) + +#define ADIN1300_GE_RGMII_CFG_REG 0xff23 +#define ADIN1300_GE_RGMII_RX_MSK GENMASK(8, 6) +#define ADIN1300_GE_RGMII_RX_SEL(x) \ + FIELD_PREP(ADIN1300_GE_RGMII_RX_MSK, x) +#define ADIN1300_GE_RGMII_GTX_MSK GENMASK(5, 3) +#define ADIN1300_GE_RGMII_GTX_SEL(x) \ + FIELD_PREP(ADIN1300_GE_RGMII_GTX_MSK, x) +#define ADIN1300_GE_RGMII_RXID_EN BIT(2) +#define ADIN1300_GE_RGMII_TXID_EN BIT(1) +#define ADIN1300_GE_RGMII_EN BIT(0) + +/* RGMII internal delay settings for rx and tx for ADIN1300 */ +#define ADIN1300_RGMII_1_60_NS 0x0001 +#define ADIN1300_RGMII_1_80_NS 0x0002 +#define ADIN1300_RGMII_2_00_NS 0x0000 +#define ADIN1300_RGMII_2_20_NS 0x0006 +#define ADIN1300_RGMII_2_40_NS 0x0007 + +#define ADIN1300_GE_RMII_CFG_REG 0xff24 +#define ADIN1300_GE_RMII_FIFO_DEPTH_MSK GENMASK(6, 4) +#define ADIN1300_GE_RMII_FIFO_DEPTH_SEL(x) \ + FIELD_PREP(ADIN1300_GE_RMII_FIFO_DEPTH_MSK, x) +#define ADIN1300_GE_RMII_EN BIT(0) + +/* RMII fifo depth values */ +#define ADIN1300_RMII_4_BITS 0x0000 +#define ADIN1300_RMII_8_BITS 0x0001 +#define ADIN1300_RMII_12_BITS 0x0002 +#define ADIN1300_RMII_16_BITS 0x0003 +#define ADIN1300_RMII_20_BITS 0x0004 +#define ADIN1300_RMII_24_BITS 0x0005 + +/** + * struct adin_cfg_reg_map - map a config value to a register value + * @cfg: value in device configuration + * @reg: value in the register + */ +struct adin_cfg_reg_map { + int cfg; + int reg; +}; + +static const struct adin_cfg_reg_map adin_rgmii_delays[] = { + { 1600, ADIN1300_RGMII_1_60_NS }, + { 1800, ADIN1300_RGMII_1_80_NS }, + { 2000, ADIN1300_RGMII_2_00_NS }, + { 2200, ADIN1300_RGMII_2_20_NS }, + { 2400, ADIN1300_RGMII_2_40_NS }, + { }, +}; + +static const struct adin_cfg_reg_map adin_rmii_fifo_depths[] = { + { 4, ADIN1300_RMII_4_BITS }, + { 8, ADIN1300_RMII_8_BITS }, + { 12, ADIN1300_RMII_12_BITS }, + { 16, ADIN1300_RMII_16_BITS }, + { 20, ADIN1300_RMII_20_BITS }, + { 24, ADIN1300_RMII_24_BITS }, + { }, +}; + +/** + * struct adin_clause45_mmd_map - map to convert Clause 45 regs to Clause 22 + * @devad: device address used in Clause 45 access + * @cl45_regnum: register address defined by Clause 45 + * @adin_regnum: equivalent register address accessible via Clause 22 + */ +struct adin_clause45_mmd_map { + int devad; + u16 cl45_regnum; + u16 adin_regnum; +}; + +static struct adin_clause45_mmd_map adin_clause45_mmd_map[] = { + { MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE, ADIN1300_EEE_CAP_REG }, + { MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, ADIN1300_EEE_LPABLE_REG }, + { MDIO_MMD_AN, MDIO_AN_EEE_ADV, ADIN1300_EEE_ADV_REG }, + { MDIO_MMD_PCS, MDIO_CTRL1, ADIN1300_CLOCK_STOP_REG }, + { MDIO_MMD_PCS, MDIO_PCS_EEE_WK_ERR, ADIN1300_LPI_WAKE_ERR_CNT_REG }, +}; + +struct adin_hw_stat { + const char *string; + u16 reg1; + u16 reg2; +}; + +static struct adin_hw_stat adin_hw_stats[] = { + { "total_frames_checked_count", 0x940A, 0x940B }, /* hi + lo */ + { "length_error_frames_count", 0x940C }, + { "alignment_error_frames_count", 0x940D }, + { "symbol_error_count", 0x940E }, + { "oversized_frames_count", 0x940F }, + { "undersized_frames_count", 0x9410 }, + { "odd_nibble_frames_count", 0x9411 }, + { "odd_preamble_packet_count", 0x9412 }, + { "dribble_bits_frames_count", 0x9413 }, + { "false_carrier_events_count", 0x9414 }, +}; + +/** + * struct adin_priv - ADIN PHY driver 
private data + * @stats: statistic counters for the PHY + */ +struct adin_priv { + u64 stats[ARRAY_SIZE(adin_hw_stats)]; +}; + +static int adin_lookup_reg_value(const struct adin_cfg_reg_map *tbl, int cfg) +{ + size_t i; + + for (i = 0; tbl[i].cfg; i++) { + if (tbl[i].cfg == cfg) + return tbl[i].reg; + } + + return -EINVAL; +} + +static u32 adin_get_reg_value(struct phy_device *phydev, + const char *prop_name, + const struct adin_cfg_reg_map *tbl, + u32 dflt) +{ + struct device *dev = &phydev->mdio.dev; + u32 val; + int rc; + + if (device_property_read_u32(dev, prop_name, &val)) + return dflt; + + rc = adin_lookup_reg_value(tbl, val); + if (rc < 0) { + phydev_warn(phydev, + "Unsupported value %u for %s using default (%u)\n", + val, prop_name, dflt); + return dflt; + } + + return rc; +} + +static int adin_config_rgmii_mode(struct phy_device *phydev) +{ + u32 val; + int reg; + + if (!phy_interface_is_rgmii(phydev)) + return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, + ADIN1300_GE_RGMII_CFG_REG, + ADIN1300_GE_RGMII_EN); + + reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, ADIN1300_GE_RGMII_CFG_REG); + if (reg < 0) + return reg; + + reg |= ADIN1300_GE_RGMII_EN; + + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || + phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) { + reg |= ADIN1300_GE_RGMII_RXID_EN; + + val = adin_get_reg_value(phydev, "adi,rx-internal-delay-ps", + adin_rgmii_delays, + ADIN1300_RGMII_2_00_NS); + reg &= ~ADIN1300_GE_RGMII_RX_MSK; + reg |= ADIN1300_GE_RGMII_RX_SEL(val); + } else { + reg &= ~ADIN1300_GE_RGMII_RXID_EN; + } + + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || + phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) { + reg |= ADIN1300_GE_RGMII_TXID_EN; + + val = adin_get_reg_value(phydev, "adi,tx-internal-delay-ps", + adin_rgmii_delays, + ADIN1300_RGMII_2_00_NS); + reg &= ~ADIN1300_GE_RGMII_GTX_MSK; + reg |= ADIN1300_GE_RGMII_GTX_SEL(val); + } else { + reg &= ~ADIN1300_GE_RGMII_TXID_EN; + } + + return phy_write_mmd(phydev, MDIO_MMD_VEND1, + ADIN1300_GE_RGMII_CFG_REG, reg); +} + +static int adin_config_rmii_mode(struct phy_device *phydev) +{ + u32 val; + int reg; + + if (phydev->interface != PHY_INTERFACE_MODE_RMII) + return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, + ADIN1300_GE_RMII_CFG_REG, + ADIN1300_GE_RMII_EN); + + reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, ADIN1300_GE_RMII_CFG_REG); + if (reg < 0) + return reg; + + reg |= ADIN1300_GE_RMII_EN; + + val = adin_get_reg_value(phydev, "adi,fifo-depth-bits", + adin_rmii_fifo_depths, + ADIN1300_RMII_8_BITS); + + reg &= ~ADIN1300_GE_RMII_FIFO_DEPTH_MSK; + reg |= ADIN1300_GE_RMII_FIFO_DEPTH_SEL(val); + + return phy_write_mmd(phydev, MDIO_MMD_VEND1, + ADIN1300_GE_RMII_CFG_REG, reg); +} + +static int adin_get_downshift(struct phy_device *phydev, u8 *data) +{ + int val, cnt, enable; + + val = phy_read(phydev, ADIN1300_PHY_CTRL2); + if (val < 0) + return val; + + cnt = phy_read(phydev, ADIN1300_PHY_CTRL3); + if (cnt < 0) + return cnt; + + enable = FIELD_GET(ADIN1300_DOWNSPEEDS_EN, val); + cnt = FIELD_GET(ADIN1300_DOWNSPEED_RETRIES_MSK, cnt); + + *data = (enable && cnt) ? 
cnt : DOWNSHIFT_DEV_DISABLE; + + return 0; +} + +static int adin_set_downshift(struct phy_device *phydev, u8 cnt) +{ + u16 val; + int rc; + + if (cnt == DOWNSHIFT_DEV_DISABLE) + return phy_clear_bits(phydev, ADIN1300_PHY_CTRL2, + ADIN1300_DOWNSPEEDS_EN); + + if (cnt > 7) + return -E2BIG; + + val = FIELD_PREP(ADIN1300_DOWNSPEED_RETRIES_MSK, cnt); + val |= ADIN1300_LINKING_EN; + + rc = phy_modify(phydev, ADIN1300_PHY_CTRL3, + ADIN1300_LINKING_EN | ADIN1300_DOWNSPEED_RETRIES_MSK, + val); + if (rc < 0) + return rc; + + return phy_set_bits(phydev, ADIN1300_PHY_CTRL2, + ADIN1300_DOWNSPEEDS_EN); +} + +static int adin_get_edpd(struct phy_device *phydev, u16 *tx_interval) +{ + int val; + + val = phy_read(phydev, ADIN1300_PHY_CTRL_STATUS2); + if (val < 0) + return val; + + if (ADIN1300_NRG_PD_EN & val) { + if (val & ADIN1300_NRG_PD_TX_EN) + /* default is 1 second */ + *tx_interval = ETHTOOL_PHY_EDPD_DFLT_TX_MSECS; + else + *tx_interval = ETHTOOL_PHY_EDPD_NO_TX; + } else { + *tx_interval = ETHTOOL_PHY_EDPD_DISABLE; + } + + return 0; +} + +static int adin_set_edpd(struct phy_device *phydev, u16 tx_interval) +{ + u16 val; + + if (tx_interval == ETHTOOL_PHY_EDPD_DISABLE) + return phy_clear_bits(phydev, ADIN1300_PHY_CTRL_STATUS2, + (ADIN1300_NRG_PD_EN | ADIN1300_NRG_PD_TX_EN)); + + val = ADIN1300_NRG_PD_EN; + + switch (tx_interval) { + case 1000: /* 1 second */ + /* fallthrough */ + case ETHTOOL_PHY_EDPD_DFLT_TX_MSECS: + val |= ADIN1300_NRG_PD_TX_EN; + /* fallthrough */ + case ETHTOOL_PHY_EDPD_NO_TX: + break; + default: + return -EINVAL; + } + + return phy_modify(phydev, ADIN1300_PHY_CTRL_STATUS2, + (ADIN1300_NRG_PD_EN | ADIN1300_NRG_PD_TX_EN), + val); +} + +static int adin_get_tunable(struct phy_device *phydev, + struct ethtool_tunable *tuna, void *data) +{ + switch (tuna->id) { + case ETHTOOL_PHY_DOWNSHIFT: + return adin_get_downshift(phydev, data); + case ETHTOOL_PHY_EDPD: + return adin_get_edpd(phydev, data); + default: + return -EOPNOTSUPP; + } +} + +static int adin_set_tunable(struct phy_device *phydev, + struct ethtool_tunable *tuna, const void *data) +{ + switch (tuna->id) { + case ETHTOOL_PHY_DOWNSHIFT: + return adin_set_downshift(phydev, *(const u8 *)data); + case ETHTOOL_PHY_EDPD: + return adin_set_edpd(phydev, *(const u16 *)data); + default: + return -EOPNOTSUPP; + } +} + +static int adin_config_init(struct phy_device *phydev) +{ + int rc; + + phydev->mdix_ctrl = ETH_TP_MDI_AUTO; + + rc = adin_config_rgmii_mode(phydev); + if (rc < 0) + return rc; + + rc = adin_config_rmii_mode(phydev); + if (rc < 0) + return rc; + + rc = adin_set_downshift(phydev, 4); + if (rc < 0) + return rc; + + rc = adin_set_edpd(phydev, ETHTOOL_PHY_EDPD_DFLT_TX_MSECS); + if (rc < 0) + return rc; + + phydev_dbg(phydev, "PHY is using mode '%s'\n", + phy_modes(phydev->interface)); + + return 0; +} + +static int adin_phy_ack_intr(struct phy_device *phydev) +{ + /* Clear pending interrupts */ + int rc = phy_read(phydev, ADIN1300_INT_STATUS_REG); + + return rc < 0 ? 
rc : 0; +} + +static int adin_phy_config_intr(struct phy_device *phydev) +{ + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) + return phy_set_bits(phydev, ADIN1300_INT_MASK_REG, + ADIN1300_INT_MASK_EN); + + return phy_clear_bits(phydev, ADIN1300_INT_MASK_REG, + ADIN1300_INT_MASK_EN); +} + +static int adin_cl45_to_adin_reg(struct phy_device *phydev, int devad, + u16 cl45_regnum) +{ + struct adin_clause45_mmd_map *m; + int i; + + if (devad == MDIO_MMD_VEND1) + return cl45_regnum; + + for (i = 0; i < ARRAY_SIZE(adin_clause45_mmd_map); i++) { + m = &adin_clause45_mmd_map[i]; + if (m->devad == devad && m->cl45_regnum == cl45_regnum) + return m->adin_regnum; + } + + phydev_err(phydev, + "No translation available for devad: %d reg: %04x\n", + devad, cl45_regnum); + + return -EINVAL; +} + +static int adin_read_mmd(struct phy_device *phydev, int devad, u16 regnum) +{ + struct mii_bus *bus = phydev->mdio.bus; + int phy_addr = phydev->mdio.addr; + int adin_regnum; + int err; + + adin_regnum = adin_cl45_to_adin_reg(phydev, devad, regnum); + if (adin_regnum < 0) + return adin_regnum; + + err = __mdiobus_write(bus, phy_addr, ADIN1300_MII_EXT_REG_PTR, + adin_regnum); + if (err) + return err; + + return __mdiobus_read(bus, phy_addr, ADIN1300_MII_EXT_REG_DATA); +} + +static int adin_write_mmd(struct phy_device *phydev, int devad, u16 regnum, + u16 val) +{ + struct mii_bus *bus = phydev->mdio.bus; + int phy_addr = phydev->mdio.addr; + int adin_regnum; + int err; + + adin_regnum = adin_cl45_to_adin_reg(phydev, devad, regnum); + if (adin_regnum < 0) + return adin_regnum; + + err = __mdiobus_write(bus, phy_addr, ADIN1300_MII_EXT_REG_PTR, + adin_regnum); + if (err) + return err; + + return __mdiobus_write(bus, phy_addr, ADIN1300_MII_EXT_REG_DATA, val); +} + +static int adin_config_mdix(struct phy_device *phydev) +{ + bool auto_en, mdix_en; + int reg; + + mdix_en = false; + auto_en = false; + switch (phydev->mdix_ctrl) { + case ETH_TP_MDI: + break; + case ETH_TP_MDI_X: + mdix_en = true; + break; + case ETH_TP_MDI_AUTO: + auto_en = true; + break; + default: + return -EINVAL; + } + + reg = phy_read(phydev, ADIN1300_PHY_CTRL1); + if (reg < 0) + return reg; + + if (mdix_en) + reg |= ADIN1300_MAN_MDIX_EN; + else + reg &= ~ADIN1300_MAN_MDIX_EN; + + if (auto_en) + reg |= ADIN1300_AUTO_MDI_EN; + else + reg &= ~ADIN1300_AUTO_MDI_EN; + + return phy_write(phydev, ADIN1300_PHY_CTRL1, reg); +} + +static int adin_config_aneg(struct phy_device *phydev) +{ + int ret; + + ret = adin_config_mdix(phydev); + if (ret) + return ret; + + return genphy_config_aneg(phydev); +} + +static int adin_mdix_update(struct phy_device *phydev) +{ + bool auto_en, mdix_en; + bool swapped; + int reg; + + reg = phy_read(phydev, ADIN1300_PHY_CTRL1); + if (reg < 0) + return reg; + + auto_en = !!(reg & ADIN1300_AUTO_MDI_EN); + mdix_en = !!(reg & ADIN1300_MAN_MDIX_EN); + + /* If MDI/MDIX is forced, just read it from the control reg */ + if (!auto_en) { + if (mdix_en) + phydev->mdix = ETH_TP_MDI_X; + else + phydev->mdix = ETH_TP_MDI; + return 0; + } + + /* + * Otherwise, we need to deduce it from the PHY status 1 reg. + * When Auto-MDI is enabled, the ADIN1300_MAN_MDIX_EN bit implies + * a preference for MDIX when it is set. 
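+ *
+ * (For reference, the resulting behaviour in auto-MDI mode, tabulated
+ * from the code below; illustrative only:
+ *
+ *	MAN_MDIX_EN	PAIR_01_SWAP	reported mdix
+ *	0		0		ETH_TP_MDI
+ *	0		1		ETH_TP_MDI_X
+ *	1		0		ETH_TP_MDI_X
+ *	1		1		ETH_TP_MDI
+ * )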
+ */ + reg = phy_read(phydev, ADIN1300_PHY_STATUS1); + if (reg < 0) + return reg; + + swapped = !!(reg & ADIN1300_PAIR_01_SWAP); + + if (mdix_en != swapped) + phydev->mdix = ETH_TP_MDI_X; + else + phydev->mdix = ETH_TP_MDI; + + return 0; +} + +static int adin_read_status(struct phy_device *phydev) +{ + int ret; + + ret = adin_mdix_update(phydev); + if (ret < 0) + return ret; + + return genphy_read_status(phydev); +} + +static int adin_soft_reset(struct phy_device *phydev) +{ + int rc; + + /* The reset bit is self-clearing, set it and wait */ + rc = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, + ADIN1300_GE_SOFT_RESET_REG, + ADIN1300_GE_SOFT_RESET); + if (rc < 0) + return rc; + + msleep(10); + + /* If we get a read error something may be wrong */ + rc = phy_read_mmd(phydev, MDIO_MMD_VEND1, + ADIN1300_GE_SOFT_RESET_REG); + + return rc < 0 ? rc : 0; +} + +static int adin_get_sset_count(struct phy_device *phydev) +{ + return ARRAY_SIZE(adin_hw_stats); +} + +static void adin_get_strings(struct phy_device *phydev, u8 *data) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(adin_hw_stats); i++) { + strlcpy(&data[i * ETH_GSTRING_LEN], + adin_hw_stats[i].string, ETH_GSTRING_LEN); + } +} + +static int adin_read_mmd_stat_regs(struct phy_device *phydev, + struct adin_hw_stat *stat, + u32 *val) +{ + int ret; + + ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, stat->reg1); + if (ret < 0) + return ret; + + *val = (ret & 0xffff); + + if (stat->reg2 == 0) + return 0; + + ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, stat->reg2); + if (ret < 0) + return ret; + + *val <<= 16; + *val |= (ret & 0xffff); + + return 0; +} + +static u64 adin_get_stat(struct phy_device *phydev, int i) +{ + struct adin_hw_stat *stat = &adin_hw_stats[i]; + struct adin_priv *priv = phydev->priv; + u32 val; + int ret; + + if (stat->reg1 > 0x1f) { + ret = adin_read_mmd_stat_regs(phydev, stat, &val); + if (ret < 0) + return (u64)(~0); + } else { + ret = phy_read(phydev, stat->reg1); + if (ret < 0) + return (u64)(~0); + val = (ret & 0xffff); + } + + priv->stats[i] += val; + + return priv->stats[i]; +} + +static void adin_get_stats(struct phy_device *phydev, + struct ethtool_stats *stats, u64 *data) +{ + int i, rc; + + /* latch copies of all the frame-checker counters */ + rc = phy_read(phydev, ADIN1300_RX_ERR_CNT); + if (rc < 0) + return; + + for (i = 0; i < ARRAY_SIZE(adin_hw_stats); i++) + data[i] = adin_get_stat(phydev, i); +} + +static int adin_probe(struct phy_device *phydev) +{ + struct device *dev = &phydev->mdio.dev; + struct adin_priv *priv; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + phydev->priv = priv; + + return 0; +} + +static struct phy_driver adin_driver[] = { + { + PHY_ID_MATCH_MODEL(PHY_ID_ADIN1200), + .name = "ADIN1200", + .probe = adin_probe, + .config_init = adin_config_init, + .soft_reset = adin_soft_reset, + .config_aneg = adin_config_aneg, + .read_status = adin_read_status, + .get_tunable = adin_get_tunable, + .set_tunable = adin_set_tunable, + .ack_interrupt = adin_phy_ack_intr, + .config_intr = adin_phy_config_intr, + .get_sset_count = adin_get_sset_count, + .get_strings = adin_get_strings, + .get_stats = adin_get_stats, + .resume = genphy_resume, + .suspend = genphy_suspend, + .read_mmd = adin_read_mmd, + .write_mmd = adin_write_mmd, + }, + { + PHY_ID_MATCH_MODEL(PHY_ID_ADIN1300), + .name = "ADIN1300", + .probe = adin_probe, + .config_init = adin_config_init, + .soft_reset = adin_soft_reset, + .config_aneg = adin_config_aneg, + .read_status = adin_read_status, + .get_tunable = 
adin_get_tunable, + .set_tunable = adin_set_tunable, + .ack_interrupt = adin_phy_ack_intr, + .config_intr = adin_phy_config_intr, + .get_sset_count = adin_get_sset_count, + .get_strings = adin_get_strings, + .get_stats = adin_get_stats, + .resume = genphy_resume, + .suspend = genphy_suspend, + .read_mmd = adin_read_mmd, + .write_mmd = adin_write_mmd, + }, +}; + +module_phy_driver(adin_driver); + +static struct mdio_device_id __maybe_unused adin_tbl[] = { + { PHY_ID_MATCH_MODEL(PHY_ID_ADIN1200) }, + { PHY_ID_MATCH_MODEL(PHY_ID_ADIN1300) }, + { } +}; + +MODULE_DEVICE_TABLE(mdio, adin_tbl); +MODULE_DESCRIPTION("Analog Devices Industrial Ethernet PHY driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index 6ad8b1c63c34..2aa7b2e60046 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -249,10 +249,6 @@ static int at803x_config_init(struct phy_device *phydev) { int ret; - ret = genphy_config_init(phydev); - if (ret < 0) - return ret; - /* The RX and TX delay default is: * after HW reset: RX delay enabled and TX delay disabled * after SW reset: RX delay enabled, while TX delay retains the diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c index 7ed4760fb155..8a4b1d167ce2 100644 --- a/drivers/net/phy/dp83822.c +++ b/drivers/net/phy/dp83822.c @@ -254,13 +254,8 @@ static int dp83822_config_intr(struct phy_device *phydev) static int dp83822_config_init(struct phy_device *phydev) { - int err; int value; - err = genphy_config_init(phydev); - if (err < 0) - return err; - value = DP83822_WOL_MAGIC_EN | DP83822_WOL_SECURE_ON | DP83822_WOL_EN; return phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG, diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c index 6f9bc7d91f17..54c7c1b44e4d 100644 --- a/drivers/net/phy/dp83848.c +++ b/drivers/net/phy/dp83848.c @@ -68,13 +68,8 @@ static int dp83848_config_intr(struct phy_device *phydev) static int dp83848_config_init(struct phy_device *phydev) { - int err; int val; - err = genphy_config_init(phydev); - if (err < 0) - return err; - /* DP83620 always reports Auto Negotiation Ability on BMSR. 
Instead, * we check the initial value of the BMCR Auto negotiation enable bit */ @@ -113,13 +108,13 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl); static struct phy_driver dp83848_driver[] = { DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY", - genphy_config_init), + NULL), DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY", - genphy_config_init), + NULL), DP83848_PHY_DRIVER(TI_DP83620_PHY_ID, "TI DP83620 10/100 Mbps PHY", dp83848_config_init), DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY", - genphy_config_init), + NULL), }; module_phy_driver(dp83848_driver); diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index 1f1ecee0ee2f..37fceaf9fa10 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -37,6 +37,7 @@ #define DP83867_STRAP_STS2 0x006f #define DP83867_RGMIIDCTL 0x0086 #define DP83867_IO_MUX_CFG 0x0170 +#define DP83867_SGMIICTL 0x00D3 #define DP83867_10M_SGMII_CFG 0x016F #define DP83867_10M_SGMII_RATE_ADAPT_MASK BIT(7) @@ -61,6 +62,9 @@ #define DP83867_RGMII_TX_CLK_DELAY_EN BIT(1) #define DP83867_RGMII_RX_CLK_DELAY_EN BIT(0) +/* SGMIICTL bits */ +#define DP83867_SGMII_TYPE BIT(14) + /* STRAP_STS1 bits */ #define DP83867_STRAP_STS1_RESERVED BIT(11) @@ -109,6 +113,7 @@ struct dp83867_private { bool rxctrl_strap_quirk; bool set_clk_output; u32 clk_output_sel; + bool sgmii_ref_clk_en; }; static int dp83867_ack_interrupt(struct phy_device *phydev) @@ -197,6 +202,9 @@ static int dp83867_of_init(struct phy_device *phydev) dp83867->rxctrl_strap_quirk = of_property_read_bool(of_node, "ti,dp83867-rxctrl-strap-quirk"); + dp83867->sgmii_ref_clk_en = of_property_read_bool(of_node, + "ti,sgmii-ref-clock-output-enable"); + /* Existing behavior was to use default pin strapping delay in rgmii * mode, but rgmii should have meant no delay. Warn existing users. */ @@ -389,6 +397,17 @@ static int dp83867_config_init(struct phy_device *phydev) if (ret) return ret; + + val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_SGMIICTL); + /* SGMII type is set to 4-wire mode by default. + * If the appropriate property is placed in the dts (see above), + * switch on 6-wire mode. + */ + if (dp83867->sgmii_ref_clk_en) + val |= DP83867_SGMII_TYPE; + else + val &= ~DP83867_SGMII_TYPE; + phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_SGMIICTL, val); } /* Enable Interrupt output INT_OE in CFG3 register */ diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c index ac27da16824d..06f08832ebcd 100644 --- a/drivers/net/phy/dp83tc811.c +++ b/drivers/net/phy/dp83tc811.c @@ -277,10 +277,6 @@ static int dp83811_config_init(struct phy_device *phydev) { int value, err; - err = genphy_config_init(phydev); - if (err < 0) - return err; - value = phy_read(phydev, MII_DP83811_SGMII_CTRL); if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { err = phy_write(phydev, MII_DP83811_SGMII_CTRL, diff --git a/drivers/net/phy/mdio-aspeed.c b/drivers/net/phy/mdio-aspeed.c new file mode 100644 index 000000000000..cad820568f75 --- /dev/null +++ b/drivers/net/phy/mdio-aspeed.c @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Copyright (C) 2019 IBM Corp. 
*/ + +#include <linux/bitfield.h> +#include <linux/delay.h> +#include <linux/iopoll.h> +#include <linux/mdio.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_mdio.h> +#include <linux/phy.h> +#include <linux/platform_device.h> + +#define DRV_NAME "mdio-aspeed" + +#define ASPEED_MDIO_CTRL 0x0 +#define ASPEED_MDIO_CTRL_FIRE BIT(31) +#define ASPEED_MDIO_CTRL_ST BIT(28) +#define ASPEED_MDIO_CTRL_ST_C45 0 +#define ASPEED_MDIO_CTRL_ST_C22 1 +#define ASPEED_MDIO_CTRL_OP GENMASK(27, 26) +#define MDIO_C22_OP_WRITE 0b01 +#define MDIO_C22_OP_READ 0b10 +#define ASPEED_MDIO_CTRL_PHYAD GENMASK(25, 21) +#define ASPEED_MDIO_CTRL_REGAD GENMASK(20, 16) +#define ASPEED_MDIO_CTRL_MIIWDATA GENMASK(15, 0) + +#define ASPEED_MDIO_DATA 0x4 +#define ASPEED_MDIO_DATA_MDC_THRES GENMASK(31, 24) +#define ASPEED_MDIO_DATA_MDIO_EDGE BIT(23) +#define ASPEED_MDIO_DATA_MDIO_LATCH GENMASK(22, 20) +#define ASPEED_MDIO_DATA_IDLE BIT(16) +#define ASPEED_MDIO_DATA_MIIRDATA GENMASK(15, 0) + +#define ASPEED_MDIO_INTERVAL_US 100 +#define ASPEED_MDIO_TIMEOUT_US (ASPEED_MDIO_INTERVAL_US * 10) + +struct aspeed_mdio { + void __iomem *base; +}; + +static int aspeed_mdio_read(struct mii_bus *bus, int addr, int regnum) +{ + struct aspeed_mdio *ctx = bus->priv; + u32 ctrl; + u32 data; + int rc; + + dev_dbg(&bus->dev, "%s: addr: %d, regnum: %d\n", __func__, addr, + regnum); + + /* Just clause 22 for the moment */ + if (regnum & MII_ADDR_C45) + return -EOPNOTSUPP; + + ctrl = ASPEED_MDIO_CTRL_FIRE + | FIELD_PREP(ASPEED_MDIO_CTRL_ST, ASPEED_MDIO_CTRL_ST_C22) + | FIELD_PREP(ASPEED_MDIO_CTRL_OP, MDIO_C22_OP_READ) + | FIELD_PREP(ASPEED_MDIO_CTRL_PHYAD, addr) + | FIELD_PREP(ASPEED_MDIO_CTRL_REGAD, regnum); + + iowrite32(ctrl, ctx->base + ASPEED_MDIO_CTRL); + + rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_DATA, data, + data & ASPEED_MDIO_DATA_IDLE, + ASPEED_MDIO_INTERVAL_US, + ASPEED_MDIO_TIMEOUT_US); + if (rc < 0) + return rc; + + return FIELD_GET(ASPEED_MDIO_DATA_MIIRDATA, data); +} + +static int aspeed_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val) +{ + struct aspeed_mdio *ctx = bus->priv; + u32 ctrl; + + dev_dbg(&bus->dev, "%s: addr: %d, regnum: %d, val: 0x%x\n", + __func__, addr, regnum, val); + + /* Just clause 22 for the moment */ + if (regnum & MII_ADDR_C45) + return -EOPNOTSUPP; + + ctrl = ASPEED_MDIO_CTRL_FIRE + | FIELD_PREP(ASPEED_MDIO_CTRL_ST, ASPEED_MDIO_CTRL_ST_C22) + | FIELD_PREP(ASPEED_MDIO_CTRL_OP, MDIO_C22_OP_WRITE) + | FIELD_PREP(ASPEED_MDIO_CTRL_PHYAD, addr) + | FIELD_PREP(ASPEED_MDIO_CTRL_REGAD, regnum) + | FIELD_PREP(ASPEED_MDIO_CTRL_MIIWDATA, val); + + iowrite32(ctrl, ctx->base + ASPEED_MDIO_CTRL); + + return readl_poll_timeout(ctx->base + ASPEED_MDIO_CTRL, ctrl, + !(ctrl & ASPEED_MDIO_CTRL_FIRE), + ASPEED_MDIO_INTERVAL_US, + ASPEED_MDIO_TIMEOUT_US); +} + +static int aspeed_mdio_probe(struct platform_device *pdev) +{ + struct aspeed_mdio *ctx; + struct mii_bus *bus; + int rc; + + bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*ctx)); + if (!bus) + return -ENOMEM; + + ctx = bus->priv; + ctx->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(ctx->base)) + return PTR_ERR(ctx->base); + + bus->name = DRV_NAME; + snprintf(bus->id, MII_BUS_ID_SIZE, "%s%d", pdev->name, pdev->id); + bus->parent = &pdev->dev; + bus->read = aspeed_mdio_read; + bus->write = aspeed_mdio_write; + + rc = of_mdiobus_register(bus, pdev->dev.of_node); + if (rc) { + dev_err(&pdev->dev, "Cannot register MDIO bus!\n"); + return rc; + } + + platform_set_drvdata(pdev, bus); + + return 0; +} + +static int 
aspeed_mdio_remove(struct platform_device *pdev) +{ + mdiobus_unregister(platform_get_drvdata(pdev)); + + return 0; +} + +static const struct of_device_id aspeed_mdio_of_match[] = { + { .compatible = "aspeed,ast2600-mdio", }, + { }, +}; + +static struct platform_driver aspeed_mdio_driver = { + .driver = { + .name = DRV_NAME, + .of_match_table = aspeed_mdio_of_match, + }, + .probe = aspeed_mdio_probe, + .remove = aspeed_mdio_remove, +}; + +module_platform_driver(aspeed_mdio_driver); + +MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/phy/mdio-bcm-iproc.c b/drivers/net/phy/mdio-bcm-iproc.c index 7d0f388d8db8..7e9975d25066 100644 --- a/drivers/net/phy/mdio-bcm-iproc.c +++ b/drivers/net/phy/mdio-bcm-iproc.c @@ -123,15 +123,13 @@ static int iproc_mdio_probe(struct platform_device *pdev) { struct iproc_mdio_priv *priv; struct mii_bus *bus; - struct resource *res; int rc; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->base = devm_ioremap_resource(&pdev->dev, res); + priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) { dev_err(&pdev->dev, "failed to ioremap register\n"); return PTR_ERR(priv->base); diff --git a/drivers/net/phy/mdio-cavium.h b/drivers/net/phy/mdio-cavium.h index ed5f9bb5448d..b7f89ad27465 100644 --- a/drivers/net/phy/mdio-cavium.h +++ b/drivers/net/phy/mdio-cavium.h @@ -108,6 +108,8 @@ static inline u64 oct_mdio_readq(u64 addr) return cvmx_read_csr(addr); } #else +#include <linux/io-64-nonatomic-lo-hi.h> + #define oct_mdio_writeq(val, addr) writeq(val, (void *)addr) #define oct_mdio_readq(addr) readq((void *)addr) #endif diff --git a/drivers/net/phy/mdio-hisi-femac.c b/drivers/net/phy/mdio-hisi-femac.c index 287f3ccf1da1..f231c2fbb1de 100644 --- a/drivers/net/phy/mdio-hisi-femac.c +++ b/drivers/net/phy/mdio-hisi-femac.c @@ -74,7 +74,6 @@ static int hisi_femac_mdio_probe(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node; struct mii_bus *bus; struct hisi_femac_mdio_data *data; - struct resource *res; int ret; bus = mdiobus_alloc_size(sizeof(*data)); @@ -88,8 +87,7 @@ static int hisi_femac_mdio_probe(struct platform_device *pdev) bus->parent = &pdev->dev; data = bus->priv; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - data->membase = devm_ioremap_resource(&pdev->dev, res); + data->membase = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(data->membase)) { ret = PTR_ERR(data->membase); goto err_out_free_mdiobus; diff --git a/drivers/net/phy/mdio-moxart.c b/drivers/net/phy/mdio-moxart.c index af3910fe8ec7..2d16fc4173c1 100644 --- a/drivers/net/phy/mdio-moxart.c +++ b/drivers/net/phy/mdio-moxart.c @@ -113,7 +113,6 @@ static int moxart_mdio_probe(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node; struct mii_bus *bus; struct moxart_mdio_data *data; - struct resource *res; int ret, i; bus = mdiobus_alloc_size(sizeof(*data)); @@ -138,8 +137,7 @@ static int moxart_mdio_probe(struct platform_device *pdev) bus->irq[i] = PHY_IGNORE_INTERRUPT; data = bus->priv; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - data->base = devm_ioremap_resource(&pdev->dev, res); + data->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(data->base)) { ret = PTR_ERR(data->base); goto err_out_free_mdiobus; diff --git a/drivers/net/phy/mdio-mux-meson-g12a.c b/drivers/net/phy/mdio-mux-meson-g12a.c index 6644762ff2ab..7a9ad54582e1 100644 --- 
a/drivers/net/phy/mdio-mux-meson-g12a.c +++ b/drivers/net/phy/mdio-mux-meson-g12a.c @@ -302,7 +302,6 @@ static int g12a_mdio_mux_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct g12a_mdio_mux *priv; - struct resource *res; int ret; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); @@ -311,8 +310,7 @@ static int g12a_mdio_mux_probe(struct platform_device *pdev) platform_set_drvdata(pdev, priv); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->regs = devm_ioremap_resource(dev, res); + priv->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->regs)) return PTR_ERR(priv->regs); diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c index 20ffd8fb79ce..58d6504495e0 100644 --- a/drivers/net/phy/mdio-sun4i.c +++ b/drivers/net/phy/mdio-sun4i.c @@ -92,7 +92,6 @@ static int sun4i_mdio_probe(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node; struct mii_bus *bus; struct sun4i_mdio_data *data; - struct resource *res; int ret; bus = mdiobus_alloc_size(sizeof(*data)); @@ -106,8 +105,7 @@ static int sun4i_mdio_probe(struct platform_device *pdev) bus->parent = &pdev->dev; data = bus->priv; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - data->membase = devm_ioremap_resource(&pdev->dev, res); + data->membase = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(data->membase)) { ret = PTR_ERR(data->membase); goto err_out_free_mdiobus; diff --git a/drivers/net/phy/mdio-xgene.c b/drivers/net/phy/mdio-xgene.c index 717cc2a056e8..34990eaa3298 100644 --- a/drivers/net/phy/mdio-xgene.c +++ b/drivers/net/phy/mdio-xgene.c @@ -328,7 +328,6 @@ static int xgene_mdio_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct mii_bus *mdio_bus; const struct of_device_id *of_id; - struct resource *res; struct xgene_mdio_pdata *pdata; void __iomem *csr_base; int mdio_id = 0, ret = 0; @@ -355,8 +354,7 @@ static int xgene_mdio_probe(struct platform_device *pdev) pdata->mdio_id = mdio_id; pdata->dev = dev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - csr_base = devm_ioremap_resource(dev, res); + csr_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(csr_base)) return PTR_ERR(csr_base); pdata->mac_csr_addr = csr_base; diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index ce940871331e..2e29ab841b4d 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -42,21 +42,17 @@ static int mdiobus_register_gpiod(struct mdio_device *mdiodev) { - struct gpio_desc *gpiod = NULL; + int error; /* Deassert the optional reset signal */ - if (mdiodev->dev.of_node) - gpiod = fwnode_get_named_gpiod(&mdiodev->dev.of_node->fwnode, - "reset-gpios", 0, GPIOD_OUT_LOW, - "PHY reset"); - if (IS_ERR(gpiod)) { - if (PTR_ERR(gpiod) == -ENOENT || PTR_ERR(gpiod) == -ENOSYS) - gpiod = NULL; - else - return PTR_ERR(gpiod); - } - - mdiodev->reset_gpio = gpiod; + mdiodev->reset_gpio = gpiod_get_optional(&mdiodev->dev, + "reset", GPIOD_OUT_LOW); + error = PTR_ERR_OR_ZERO(mdiodev->reset_gpio); + if (error) + return error; + + if (mdiodev->reset_gpio) + gpiod_set_consumer_name(mdiodev->reset_gpio, "PHY reset"); return 0; } diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c index fa80d6dce8ee..e8f2ca625837 100644 --- a/drivers/net/phy/meson-gxl.c +++ b/drivers/net/phy/meson-gxl.c @@ -136,7 +136,7 @@ static int meson_gxl_config_init(struct phy_device *phydev) if (ret) return ret; - return genphy_config_init(phydev); + return 0; } /* This function is provided 
to cope with the possible failures of this phy diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c index eb1b3287fe08..a644e8e5071c 100644 --- a/drivers/net/phy/microchip.c +++ b/drivers/net/phy/microchip.c @@ -305,7 +305,6 @@ static int lan88xx_config_init(struct phy_device *phydev) { int val; - genphy_config_init(phydev); /*Zerodetect delay enable */ val = phy_read_mmd(phydev, MDIO_MMD_PCS, PHY_ARDENNES_MMD_DEV_3_PHY_CFG); diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c index 3d09b471632c..001def4509c2 100644 --- a/drivers/net/phy/microchip_t1.c +++ b/drivers/net/phy/microchip_t1.c @@ -48,7 +48,6 @@ static struct phy_driver microchip_t1_phy_driver[] = { .features = PHY_BASIC_T1_FEATURES, - .config_init = genphy_config_init, .config_aneg = genphy_config_aneg, .ack_interrupt = lan87xx_phy_ack_interrupt, diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c index 645d354ffb48..7ada1fd9ca71 100644 --- a/drivers/net/phy/mscc.c +++ b/drivers/net/phy/mscc.c @@ -1725,7 +1725,7 @@ static int vsc8584_config_init(struct phy_device *phydev) return ret; } - return genphy_config_init(phydev); + return 0; err: mutex_unlock(&phydev->mdio.bus->mdio_lock); @@ -1767,7 +1767,7 @@ static int vsc85xx_config_init(struct phy_device *phydev) return rc; } - return genphy_config_init(phydev); + return 0; } static int vsc8584_did_interrupt(struct phy_device *phydev) diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c index 16667fbac8bf..369903d9b6ec 100644 --- a/drivers/net/phy/phy-core.c +++ b/drivers/net/phy/phy-core.c @@ -207,14 +207,14 @@ size_t phy_speeds(unsigned int *speeds, size_t size, return count; } -static int __set_phy_supported(struct phy_device *phydev, u32 max_speed) +static int __set_linkmode_max_speed(u32 max_speed, unsigned long *addr) { const struct phy_setting *p; int i; for (i = 0, p = settings; i < ARRAY_SIZE(settings); i++, p++) { if (p->speed > max_speed) - linkmode_clear_bit(p->bit, phydev->supported); + linkmode_clear_bit(p->bit, addr); else break; } @@ -222,6 +222,11 @@ static int __set_phy_supported(struct phy_device *phydev, u32 max_speed) return 0; } +static int __set_phy_supported(struct phy_device *phydev, u32 max_speed) +{ + return __set_linkmode_max_speed(max_speed, phydev->supported); +} + int phy_set_max_speed(struct phy_device *phydev, u32 max_speed) { int err; @@ -310,6 +315,34 @@ void phy_resolve_aneg_linkmode(struct phy_device *phydev) } EXPORT_SYMBOL_GPL(phy_resolve_aneg_linkmode); +static int phy_resolve_min_speed(struct phy_device *phydev, bool fdx_only) +{ + __ETHTOOL_DECLARE_LINK_MODE_MASK(common); + int i = ARRAY_SIZE(settings); + + linkmode_and(common, phydev->lp_advertising, phydev->advertising); + + while (--i >= 0) { + if (test_bit(settings[i].bit, common)) { + if (fdx_only && settings[i].duplex != DUPLEX_FULL) + continue; + return settings[i].speed; + } + } + + return SPEED_UNKNOWN; +} + +int phy_speed_down_core(struct phy_device *phydev) +{ + int min_common_speed = phy_resolve_min_speed(phydev, true); + + if (min_common_speed == SPEED_UNKNOWN) + return -EINVAL; + + return __set_linkmode_max_speed(min_common_speed, phydev->advertising); +} + static void mmd_phy_indirect(struct mii_bus *bus, int phy_addr, int devad, u16 regnum) { @@ -783,24 +816,43 @@ int phy_write_paged(struct phy_device *phydev, int page, u32 regnum, u16 val) EXPORT_SYMBOL(phy_write_paged); /** - * phy_modify_paged() - Convenience function for modifying a paged register + * phy_modify_paged_changed() - Function for modifying 
a paged register * @phydev: a pointer to a &struct phy_device * @page: the page for the phy * @regnum: register number * @mask: bit mask of bits to clear * @set: bit mask of bits to set * - * Same rules as for phy_read() and phy_write(). + * Returns negative errno, 0 if there was no change, and 1 in case of change */ -int phy_modify_paged(struct phy_device *phydev, int page, u32 regnum, - u16 mask, u16 set) +int phy_modify_paged_changed(struct phy_device *phydev, int page, u32 regnum, + u16 mask, u16 set) { int ret = 0, oldpage; oldpage = phy_select_page(phydev, page); if (oldpage >= 0) - ret = __phy_modify(phydev, regnum, mask, set); + ret = __phy_modify_changed(phydev, regnum, mask, set); return phy_restore_page(phydev, oldpage, ret); } +EXPORT_SYMBOL(phy_modify_paged_changed); + +/** + * phy_modify_paged() - Convenience function for modifying a paged register + * @phydev: a pointer to a &struct phy_device + * @page: the page for the phy + * @regnum: register number + * @mask: bit mask of bits to clear + * @set: bit mask of bits to set + * + * Same rules as for phy_read() and phy_write(). + */ +int phy_modify_paged(struct phy_device *phydev, int page, u32 regnum, + u16 mask, u16 set) +{ + int ret = phy_modify_paged_changed(phydev, page, regnum, mask, set); + + return ret < 0 ? ret : 0; +} EXPORT_SYMBOL(phy_modify_paged); diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 6b0f89369b46..7c92afd36bbe 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -525,6 +525,12 @@ static int phy_check_link_status(struct phy_device *phydev) WARN_ON(!mutex_is_locked(&phydev->lock)); + /* Keep previous state if loopback is enabled because some PHYs + * report that Link is Down when loopback is enabled. + */ + if (phydev->loopback_enabled) + return 0; + err = phy_read_status(phydev); if (err) return err; @@ -608,38 +614,21 @@ static int phy_poll_aneg_done(struct phy_device *phydev) */ int phy_speed_down(struct phy_device *phydev, bool sync) { - __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_old); - __ETHTOOL_DECLARE_LINK_MODE_MASK(adv); + __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_tmp); int ret; if (phydev->autoneg != AUTONEG_ENABLE) return 0; - linkmode_copy(adv_old, phydev->advertising); - linkmode_copy(adv, phydev->lp_advertising); - linkmode_and(adv, adv, phydev->supported); - - if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, adv) || - linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, adv)) { - linkmode_clear_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, - phydev->advertising); - linkmode_clear_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, - phydev->advertising); - linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, - phydev->advertising); - linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, - phydev->advertising); - } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, - adv) || - linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, - adv)) { - linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, - phydev->advertising); - linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, - phydev->advertising); - } + linkmode_copy(adv_tmp, phydev->advertising); + + ret = phy_speed_down_core(phydev); + if (ret) + return ret; + + linkmode_copy(phydev->adv_old, adv_tmp); - if (linkmode_equal(phydev->advertising, adv_old)) + if (linkmode_equal(phydev->advertising, adv_tmp)) return 0; ret = phy_config_aneg(phydev); @@ -658,30 +647,19 @@ EXPORT_SYMBOL_GPL(phy_speed_down); */ int phy_speed_up(struct phy_device *phydev) { - __ETHTOOL_DECLARE_LINK_MODE_MASK(all_speeds) = { 0, 
}; - __ETHTOOL_DECLARE_LINK_MODE_MASK(not_speeds); - __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); - __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_old); - __ETHTOOL_DECLARE_LINK_MODE_MASK(speeds); - - linkmode_copy(adv_old, phydev->advertising); + __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_tmp); if (phydev->autoneg != AUTONEG_ENABLE) return 0; - linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, all_speeds); - linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, all_speeds); - linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, all_speeds); - linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, all_speeds); - linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, all_speeds); - linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, all_speeds); + if (linkmode_empty(phydev->adv_old)) + return 0; - linkmode_andnot(not_speeds, adv_old, all_speeds); - linkmode_copy(supported, phydev->supported); - linkmode_and(speeds, supported, all_speeds); - linkmode_or(phydev->advertising, not_speeds, speeds); + linkmode_copy(adv_tmp, phydev->advertising); + linkmode_copy(phydev->advertising, phydev->adv_old); + linkmode_zero(phydev->adv_old); - if (linkmode_equal(phydev->advertising, adv_old)) + if (linkmode_equal(phydev->advertising, adv_tmp)) return 0; return phy_config_aneg(phydev); @@ -939,8 +917,8 @@ void phy_state_machine(struct work_struct *work) if (phydev->link) { phydev->link = 0; phy_link_down(phydev, true); - do_suspend = true; } + do_suspend = true; break; } diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 27ebc2c6c2d0..d347ddcac45b 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1564,24 +1564,20 @@ EXPORT_SYMBOL(phy_reset_after_clk_enable); */ static int genphy_config_advert(struct phy_device *phydev) { - u32 advertise; - int bmsr, adv; - int err, changed = 0; + int err, bmsr, changed = 0; + u32 adv; /* Only allow advertising what this PHY supports */ linkmode_and(phydev->advertising, phydev->advertising, phydev->supported); - if (!ethtool_convert_link_mode_to_legacy_u32(&advertise, - phydev->advertising)) - phydev_warn(phydev, "PHY advertising (%*pb) more modes than genphy supports, some modes not advertised.\n", - __ETHTOOL_LINK_MODE_MASK_NBITS, - phydev->advertising); + + adv = linkmode_adv_to_mii_adv_t(phydev->advertising); /* Setup standard advertisement */ err = phy_modify_changed(phydev, MII_ADVERTISE, ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM, - ethtool_adv_to_mii_adv_t(advertise)); + adv); if (err < 0) return err; if (err > 0) @@ -1598,13 +1594,7 @@ static int genphy_config_advert(struct phy_device *phydev) if (!(bmsr & BMSR_ESTATEN)) return changed; - /* Configure gigabit if it's supported */ - adv = 0; - if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, - phydev->supported) || - linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, - phydev->supported)) - adv = ethtool_adv_to_mii_ctrl1000_t(advertise); + adv = linkmode_adv_to_mii_ctrl1000_t(phydev->advertising); err = phy_modify_changed(phydev, MII_CTRL1000, ADVERTISE_1000FULL | ADVERTISE_1000HALF, @@ -1681,18 +1671,20 @@ int genphy_restart_aneg(struct phy_device *phydev) EXPORT_SYMBOL(genphy_restart_aneg); /** - * genphy_config_aneg - restart auto-negotiation or write BMCR + * __genphy_config_aneg - restart auto-negotiation or write BMCR * @phydev: target phy_device struct + * @changed: whether autoneg is requested * * Description: If auto-negotiation is enabled, we configure the * advertising, and then restart auto-negotiation. 
If it is not * enabled, then we write the BMCR. */ -int genphy_config_aneg(struct phy_device *phydev) +int __genphy_config_aneg(struct phy_device *phydev, bool changed) { - int err, changed; + int err; - changed = genphy_config_eee_advert(phydev); + if (genphy_config_eee_advert(phydev)) + changed = true; if (AUTONEG_ENABLE != phydev->autoneg) return genphy_setup_forced(phydev); @@ -1700,10 +1692,10 @@ int genphy_config_aneg(struct phy_device *phydev) err = genphy_config_advert(phydev); if (err < 0) /* error */ return err; + else if (err) + changed = true; - changed |= err; - - if (changed == 0) { + if (!changed) { /* Advertisement hasn't changed, but maybe aneg was never on to * begin with? Or maybe phy was isolated? */ @@ -1713,18 +1705,15 @@ int genphy_config_aneg(struct phy_device *phydev) return ctl; if (!(ctl & BMCR_ANENABLE) || (ctl & BMCR_ISOLATE)) - changed = 1; /* do restart aneg */ + changed = true; /* do restart aneg */ } /* Only restart aneg if we are advertising something different * than we were before. */ - if (changed > 0) - return genphy_restart_aneg(phydev); - - return 0; + return changed ? genphy_restart_aneg(phydev) : 0; } -EXPORT_SYMBOL(genphy_config_aneg); +EXPORT_SYMBOL(__genphy_config_aneg); /** * genphy_aneg_done - return auto-negotiation status @@ -1805,7 +1794,7 @@ EXPORT_SYMBOL(genphy_update_link); */ int genphy_read_status(struct phy_device *phydev) { - int adv, lpa, lpagb, err, old_link = phydev->link; + int lpa, lpagb, err, old_link = phydev->link; /* Update the link, but return if there was an error */ err = genphy_update_link(phydev); @@ -1821,19 +1810,18 @@ int genphy_read_status(struct phy_device *phydev) phydev->pause = 0; phydev->asym_pause = 0; - linkmode_zero(phydev->lp_advertising); - if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) { if (phydev->is_gigabit_capable) { lpagb = phy_read(phydev, MII_STAT1000); if (lpagb < 0) return lpagb; - adv = phy_read(phydev, MII_CTRL1000); - if (adv < 0) - return adv; - if (lpagb & LPA_1000MSFAIL) { + int adv = phy_read(phydev, MII_CTRL1000); + + if (adv < 0) + return adv; + if (adv & CTL1000_ENABLE_MASTER) phydev_err(phydev, "Master/Slave resolution failed, maybe conflicting manual settings?\n"); else @@ -1907,57 +1895,6 @@ int genphy_soft_reset(struct phy_device *phydev) } EXPORT_SYMBOL(genphy_soft_reset); -int genphy_config_init(struct phy_device *phydev) -{ - int val; - __ETHTOOL_DECLARE_LINK_MODE_MASK(features) = { 0, }; - - linkmode_set_bit_array(phy_basic_ports_array, - ARRAY_SIZE(phy_basic_ports_array), - features); - linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, features); - linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, features); - - /* Do we support autonegotiation? 
*/ - val = phy_read(phydev, MII_BMSR); - if (val < 0) - return val; - - if (val & BMSR_ANEGCAPABLE) - linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, features); - - if (val & BMSR_100FULL) - linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, features); - if (val & BMSR_100HALF) - linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, features); - if (val & BMSR_10FULL) - linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, features); - if (val & BMSR_10HALF) - linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, features); - - if (val & BMSR_ESTATEN) { - val = phy_read(phydev, MII_ESTATUS); - if (val < 0) - return val; - - if (val & ESTATUS_1000_TFULL) - linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, - features); - if (val & ESTATUS_1000_THALF) - linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, - features); - if (val & ESTATUS_1000_XFULL) - linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, - features); - } - - linkmode_and(phydev->supported, phydev->supported, features); - linkmode_and(phydev->advertising, phydev->advertising, features); - - return 0; -} -EXPORT_SYMBOL(genphy_config_init); - /** * genphy_read_abilities - read PHY abilities from Clause 22 registers * @phydev: target phy_device struct diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index a669945eb829..677c45985338 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -39,6 +39,16 @@ #define RTL8366RB_POWER_SAVE 0x15 #define RTL8366RB_POWER_SAVE_ON BIT(12) +#define RTL_SUPPORTS_5000FULL BIT(14) +#define RTL_SUPPORTS_2500FULL BIT(13) +#define RTL_SUPPORTS_10000FULL BIT(0) +#define RTL_ADV_2500FULL BIT(7) +#define RTL_LPADV_10000FULL BIT(11) +#define RTL_LPADV_5000FULL BIT(6) +#define RTL_LPADV_2500FULL BIT(5) + +#define RTL_GENERIC_PHYID 0x001cc800 + MODULE_DESCRIPTION("Realtek PHY driver"); MODULE_AUTHOR("Johnson Leung"); MODULE_LICENSE("GPL"); @@ -256,6 +266,166 @@ static int rtl8366rb_config_init(struct phy_device *phydev) return ret; } +static int rtlgen_read_mmd(struct phy_device *phydev, int devnum, u16 regnum) +{ + int ret; + + if (devnum == MDIO_MMD_PCS && regnum == MDIO_PCS_EEE_ABLE) { + rtl821x_write_page(phydev, 0xa5c); + ret = __phy_read(phydev, 0x12); + rtl821x_write_page(phydev, 0); + } else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV) { + rtl821x_write_page(phydev, 0xa5d); + ret = __phy_read(phydev, 0x10); + rtl821x_write_page(phydev, 0); + } else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_LPABLE) { + rtl821x_write_page(phydev, 0xa5d); + ret = __phy_read(phydev, 0x11); + rtl821x_write_page(phydev, 0); + } else { + ret = -EOPNOTSUPP; + } + + return ret; +} + +static int rtlgen_write_mmd(struct phy_device *phydev, int devnum, u16 regnum, + u16 val) +{ + int ret; + + if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV) { + rtl821x_write_page(phydev, 0xa5d); + ret = __phy_write(phydev, 0x10, val); + rtl821x_write_page(phydev, 0); + } else { + ret = -EOPNOTSUPP; + } + + return ret; +} + +static int rtl8125_read_mmd(struct phy_device *phydev, int devnum, u16 regnum) +{ + int ret = rtlgen_read_mmd(phydev, devnum, regnum); + + if (ret != -EOPNOTSUPP) + return ret; + + if (devnum == MDIO_MMD_PCS && regnum == MDIO_PCS_EEE_ABLE2) { + rtl821x_write_page(phydev, 0xa6e); + ret = __phy_read(phydev, 0x16); + rtl821x_write_page(phydev, 0); + } else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV2) { + rtl821x_write_page(phydev, 0xa6d); + ret = __phy_read(phydev, 0x12); + rtl821x_write_page(phydev, 0); + } else if (devnum == MDIO_MMD_AN && 
regnum == MDIO_AN_EEE_LPABLE2) { + rtl821x_write_page(phydev, 0xa6d); + ret = __phy_read(phydev, 0x10); + rtl821x_write_page(phydev, 0); + } + + return ret; +} + +static int rtl8125_write_mmd(struct phy_device *phydev, int devnum, u16 regnum, + u16 val) +{ + int ret = rtlgen_write_mmd(phydev, devnum, regnum, val); + + if (ret != -EOPNOTSUPP) + return ret; + + if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV2) { + rtl821x_write_page(phydev, 0xa6d); + ret = __phy_write(phydev, 0x12, val); + rtl821x_write_page(phydev, 0); + } + + return ret; +} + +static int rtl8125_get_features(struct phy_device *phydev) +{ + int val; + + val = phy_read_paged(phydev, 0xa61, 0x13); + if (val < 0) + return val; + + linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, + phydev->supported, val & RTL_SUPPORTS_2500FULL); + linkmode_mod_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT, + phydev->supported, val & RTL_SUPPORTS_5000FULL); + linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, + phydev->supported, val & RTL_SUPPORTS_10000FULL); + + return genphy_read_abilities(phydev); +} + +static int rtl8125_config_aneg(struct phy_device *phydev) +{ + int ret = 0; + + if (phydev->autoneg == AUTONEG_ENABLE) { + u16 adv2500 = 0; + + if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, + phydev->advertising)) + adv2500 = RTL_ADV_2500FULL; + + ret = phy_modify_paged_changed(phydev, 0xa5d, 0x12, + RTL_ADV_2500FULL, adv2500); + if (ret < 0) + return ret; + } + + return __genphy_config_aneg(phydev, ret); +} + +static int rtl8125_read_status(struct phy_device *phydev) +{ + if (phydev->autoneg == AUTONEG_ENABLE) { + int lpadv = phy_read_paged(phydev, 0xa5d, 0x13); + + if (lpadv < 0) + return lpadv; + + linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, + phydev->lp_advertising, lpadv & RTL_LPADV_10000FULL); + linkmode_mod_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT, + phydev->lp_advertising, lpadv & RTL_LPADV_5000FULL); + linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, + phydev->lp_advertising, lpadv & RTL_LPADV_2500FULL); + } + + return genphy_read_status(phydev); +} + +static bool rtlgen_supports_2_5gbps(struct phy_device *phydev) +{ + int val; + + phy_write(phydev, RTL821x_PAGE_SELECT, 0xa61); + val = phy_read(phydev, 0x13); + phy_write(phydev, RTL821x_PAGE_SELECT, 0); + + return val >= 0 && val & RTL_SUPPORTS_2500FULL; +} + +static int rtlgen_match_phy_device(struct phy_device *phydev) +{ + return phydev->phy_id == RTL_GENERIC_PHYID && + !rtlgen_supports_2_5gbps(phydev); +} + +static int rtl8125_match_phy_device(struct phy_device *phydev) +{ + return phydev->phy_id == RTL_GENERIC_PHYID && + rtlgen_supports_2_5gbps(phydev); +} + static struct phy_driver realtek_drvs[] = { { PHY_ID_MATCH_EXACT(0x00008201), @@ -326,12 +496,26 @@ static struct phy_driver realtek_drvs[] = { .read_page = rtl821x_read_page, .write_page = rtl821x_write_page, }, { - PHY_ID_MATCH_EXACT(0x001cc800), - .name = "Generic Realtek PHY", + .name = "Generic FE-GE Realtek PHY", + .match_phy_device = rtlgen_match_phy_device, + .suspend = genphy_suspend, + .resume = genphy_resume, + .read_page = rtl821x_read_page, + .write_page = rtl821x_write_page, + .read_mmd = rtlgen_read_mmd, + .write_mmd = rtlgen_write_mmd, + }, { + .name = "RTL8125 2.5Gbps internal", + .match_phy_device = rtl8125_match_phy_device, + .get_features = rtl8125_get_features, + .config_aneg = rtl8125_config_aneg, + .read_status = rtl8125_read_status, .suspend = genphy_suspend, .resume = genphy_resume, .read_page = rtl821x_read_page, .write_page = rtl821x_write_page, + 
.read_mmd = rtl8125_read_mmd, + .write_mmd = rtl8125_write_mmd, }, { PHY_ID_MATCH_EXACT(0x001cc961), .name = "RTL8366RB Gigabit Ethernet", diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index e36c04c26866..272d5773573e 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -429,6 +429,7 @@ static umode_t sfp_hwmon_is_visible(const void *data, return 0; /* fall through */ case hwmon_temp_input: + case hwmon_temp_label: return 0444; default: return 0; @@ -447,6 +448,7 @@ static umode_t sfp_hwmon_is_visible(const void *data, return 0; /* fall through */ case hwmon_in_input: + case hwmon_in_label: return 0444; default: return 0; @@ -465,6 +467,7 @@ static umode_t sfp_hwmon_is_visible(const void *data, return 0; /* fall through */ case hwmon_curr_input: + case hwmon_curr_label: return 0444; default: return 0; @@ -492,6 +495,7 @@ static umode_t sfp_hwmon_is_visible(const void *data, return 0; /* fall through */ case hwmon_power_input: + case hwmon_power_label: return 0444; default: return 0; @@ -987,9 +991,63 @@ static int sfp_hwmon_read(struct device *dev, enum hwmon_sensor_types type, } } +static const char *const sfp_hwmon_power_labels[] = { + "TX_power", + "RX_power", +}; + +static int sfp_hwmon_read_string(struct device *dev, + enum hwmon_sensor_types type, + u32 attr, int channel, const char **str) +{ + switch (type) { + case hwmon_curr: + switch (attr) { + case hwmon_curr_label: + *str = "bias"; + return 0; + default: + return -EOPNOTSUPP; + } + break; + case hwmon_temp: + switch (attr) { + case hwmon_temp_label: + *str = "temperature"; + return 0; + default: + return -EOPNOTSUPP; + } + break; + case hwmon_in: + switch (attr) { + case hwmon_in_label: + *str = "VCC"; + return 0; + default: + return -EOPNOTSUPP; + } + break; + case hwmon_power: + switch (attr) { + case hwmon_power_label: + *str = sfp_hwmon_power_labels[channel]; + return 0; + default: + return -EOPNOTSUPP; + } + break; + default: + return -EOPNOTSUPP; + } + + return -EOPNOTSUPP; +} + static const struct hwmon_ops sfp_hwmon_ops = { .is_visible = sfp_hwmon_is_visible, .read = sfp_hwmon_read, + .read_string = sfp_hwmon_read_string, }; static u32 sfp_hwmon_chip_config[] = { @@ -1007,7 +1065,8 @@ static u32 sfp_hwmon_temp_config[] = { HWMON_T_MAX | HWMON_T_MIN | HWMON_T_MAX_ALARM | HWMON_T_MIN_ALARM | HWMON_T_CRIT | HWMON_T_LCRIT | - HWMON_T_CRIT_ALARM | HWMON_T_LCRIT_ALARM, + HWMON_T_CRIT_ALARM | HWMON_T_LCRIT_ALARM | + HWMON_T_LABEL, 0, }; @@ -1021,7 +1080,8 @@ static u32 sfp_hwmon_vcc_config[] = { HWMON_I_MAX | HWMON_I_MIN | HWMON_I_MAX_ALARM | HWMON_I_MIN_ALARM | HWMON_I_CRIT | HWMON_I_LCRIT | - HWMON_I_CRIT_ALARM | HWMON_I_LCRIT_ALARM, + HWMON_I_CRIT_ALARM | HWMON_I_LCRIT_ALARM | + HWMON_I_LABEL, 0, }; @@ -1035,7 +1095,8 @@ static u32 sfp_hwmon_bias_config[] = { HWMON_C_MAX | HWMON_C_MIN | HWMON_C_MAX_ALARM | HWMON_C_MIN_ALARM | HWMON_C_CRIT | HWMON_C_LCRIT | - HWMON_C_CRIT_ALARM | HWMON_C_LCRIT_ALARM, + HWMON_C_CRIT_ALARM | HWMON_C_LCRIT_ALARM | + HWMON_C_LABEL, 0, }; @@ -1050,13 +1111,15 @@ static u32 sfp_hwmon_power_config[] = { HWMON_P_MAX | HWMON_P_MIN | HWMON_P_MAX_ALARM | HWMON_P_MIN_ALARM | HWMON_P_CRIT | HWMON_P_LCRIT | - HWMON_P_CRIT_ALARM | HWMON_P_LCRIT_ALARM, + HWMON_P_CRIT_ALARM | HWMON_P_LCRIT_ALARM | + HWMON_P_LABEL, /* Receive power */ HWMON_P_INPUT | HWMON_P_MAX | HWMON_P_MIN | HWMON_P_MAX_ALARM | HWMON_P_MIN_ALARM | HWMON_P_CRIT | HWMON_P_LCRIT | - HWMON_P_CRIT_ALARM | HWMON_P_LCRIT_ALARM, + HWMON_P_CRIT_ALARM | HWMON_P_LCRIT_ALARM | + HWMON_P_LABEL, 0, }; diff --git 
a/drivers/net/phy/swphy.c b/drivers/net/phy/swphy.c index dad22481d9c1..53c214a22b95 100644 --- a/drivers/net/phy/swphy.c +++ b/drivers/net/phy/swphy.c @@ -22,6 +22,7 @@ struct swmii_regs { u16 bmsr; u16 lpa; u16 lpagb; + u16 estat; }; enum { @@ -48,6 +49,7 @@ static const struct swmii_regs speed[] = { [SWMII_SPEED_1000] = { .bmsr = BMSR_ESTATEN, .lpagb = LPA_1000FULL | LPA_1000HALF, + .estat = ESTATUS_1000_TFULL | ESTATUS_1000_THALF, }, }; @@ -56,11 +58,13 @@ static const struct swmii_regs duplex[] = { .bmsr = BMSR_ESTATEN | BMSR_100HALF, .lpa = LPA_10HALF | LPA_100HALF, .lpagb = LPA_1000HALF, + .estat = ESTATUS_1000_THALF, }, [SWMII_DUPLEX_FULL] = { .bmsr = BMSR_ESTATEN | BMSR_100FULL, .lpa = LPA_10FULL | LPA_100FULL, .lpagb = LPA_1000FULL, + .estat = ESTATUS_1000_TFULL, }, }; @@ -112,6 +116,7 @@ int swphy_read_reg(int reg, const struct fixed_phy_status *state) { int speed_index, duplex_index; u16 bmsr = BMSR_ANEGCAPABLE; + u16 estat = 0; u16 lpagb = 0; u16 lpa = 0; @@ -125,6 +130,7 @@ int swphy_read_reg(int reg, const struct fixed_phy_status *state) duplex_index = state->duplex ? SWMII_DUPLEX_FULL : SWMII_DUPLEX_HALF; bmsr |= speed[speed_index].bmsr & duplex[duplex_index].bmsr; + estat |= speed[speed_index].estat & duplex[duplex_index].estat; if (state->link) { bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE; @@ -151,6 +157,8 @@ int swphy_read_reg(int reg, const struct fixed_phy_status *state) return lpa; case MII_STAT1000: return lpagb; + case MII_ESTATUS: + return estat; /* * We do not support emulating Clause 45 over Clause 22 register diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c index 43691b1acfd9..bb680352708a 100644 --- a/drivers/net/phy/vitesse.c +++ b/drivers/net/phy/vitesse.c @@ -197,7 +197,7 @@ static int vsc738x_config_init(struct phy_device *phydev) vsc73xx_config_init(phydev); - return genphy_config_init(phydev); + return 0; } static int vsc739x_config_init(struct phy_device *phydev) @@ -229,7 +229,7 @@ static int vsc739x_config_init(struct phy_device *phydev) vsc73xx_config_init(phydev); - return genphy_config_init(phydev); + return 0; } static int vsc73xx_config_aneg(struct phy_device *phydev) @@ -267,7 +267,7 @@ static int vsc8601_config_init(struct phy_device *phydev) if (ret < 0) return ret; - return genphy_config_init(phydev); + return 0; } static int vsc824x_ack_interrupt(struct phy_device *phydev) diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c index 2d1449345959..151c2a3f0b3a 100644 --- a/drivers/net/phy/xilinx_gmii2rgmii.c +++ b/drivers/net/phy/xilinx_gmii2rgmii.c @@ -29,7 +29,7 @@ struct gmii2rgmii { static int xgmiitorgmii_read_status(struct phy_device *phydev) { - struct gmii2rgmii *priv = phydev->priv; + struct gmii2rgmii *priv = mdiodev_get_drvdata(&phydev->mdio); struct mii_bus *bus = priv->mdio->bus; int addr = priv->mdio->addr; u16 val = 0; @@ -90,7 +90,7 @@ static int xgmiitorgmii_probe(struct mdio_device *mdiodev) memcpy(&priv->conv_phy_drv, priv->phy_dev->drv, sizeof(struct phy_driver)); priv->conv_phy_drv.read_status = xgmiitorgmii_read_status; - priv->phy_dev->priv = priv; + mdiodev_set_drvdata(&priv->phy_dev->mdio, priv); priv->phy_dev->drv = &priv->conv_phy_drv; return 0; diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c index ea90db3c7705..58a69f830d29 100644 --- a/drivers/net/slip/slhc.c +++ b/drivers/net/slip/slhc.c @@ -91,8 +91,8 @@ static unsigned short pull16(unsigned char **cpp); struct slcompress * slhc_init(int rslots, int tslots) { - register short i; - register struct 
cstate *ts; + short i; + struct cstate *ts; struct slcompress *comp; if (rslots < 0 || rslots > 255 || tslots < 0 || tslots > 255) @@ -206,7 +206,7 @@ pull16(unsigned char **cpp) static long decode(unsigned char **cpp) { - register int x; + int x; x = *(*cpp)++; if(x == 0){ @@ -227,14 +227,14 @@ int slhc_compress(struct slcompress *comp, unsigned char *icp, int isize, unsigned char *ocp, unsigned char **cpp, int compress_cid) { - register struct cstate *ocs = &(comp->tstate[comp->xmit_oldest]); - register struct cstate *lcs = ocs; - register struct cstate *cs = lcs->next; - register unsigned long deltaS, deltaA; - register short changes = 0; + struct cstate *ocs = &(comp->tstate[comp->xmit_oldest]); + struct cstate *lcs = ocs; + struct cstate *cs = lcs->next; + unsigned long deltaS, deltaA; + short changes = 0; int hlen; unsigned char new_seq[16]; - register unsigned char *cp = new_seq; + unsigned char *cp = new_seq; struct iphdr *ip; struct tcphdr *th, *oth; __sum16 csum; @@ -486,11 +486,11 @@ uncompressed: int slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize) { - register int changes; + int changes; long x; - register struct tcphdr *thp; - register struct iphdr *ip; - register struct cstate *cs; + struct tcphdr *thp; + struct iphdr *ip; + struct cstate *cs; int len, hdrlen; unsigned char *cp = icp; @@ -543,7 +543,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize) switch(changes & SPECIALS_MASK){ case SPECIAL_I: /* Echoed terminal traffic */ { - register short i; + short i; i = ntohs(ip->tot_len) - hdrlen; thp->ack_seq = htonl( ntohl(thp->ack_seq) + i); thp->seq = htonl( ntohl(thp->seq) + i); @@ -637,7 +637,7 @@ bad: int slhc_remember(struct slcompress *comp, unsigned char *icp, int isize) { - register struct cstate *cs; + struct cstate *cs; unsigned ihl; unsigned char index; diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c index fcf31335a8b6..dacb4f680fd4 100644 --- a/drivers/net/thunderbolt.c +++ b/drivers/net/thunderbolt.c @@ -1005,7 +1005,7 @@ static void *tbnet_kmap_frag(struct sk_buff *skb, unsigned int frag_num, const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num]; *len = skb_frag_size(frag); - return kmap_atomic(skb_frag_page(frag)) + frag->page_offset; + return kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag); } static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb, diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c index b39ee714fb01..e39f41efda3e 100644 --- a/drivers/net/usb/asix_common.c +++ b/drivers/net/usb/asix_common.c @@ -221,6 +221,7 @@ struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, int tailroom = skb_tailroom(skb); u32 packet_len; u32 padbytes = 0xffff0000; + void *ptr; padlen = ((skb->len + 4) & (dev->maxpacket - 1)) ? 
0 : 4; @@ -256,13 +257,11 @@ struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, } packet_len = ((skb->len ^ 0x0000ffff) << 16) + skb->len; - skb_push(skb, 4); - cpu_to_le32s(&packet_len); - skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len)); + ptr = skb_push(skb, 4); + put_unaligned_le32(packet_len, ptr); if (padlen) { - cpu_to_le32s(&padbytes); - memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes)); + put_unaligned_le32(padbytes, skb_tail_pointer(skb)); skb_put(skb, sizeof(padbytes)); } diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index 0bc457ba8574..daa54486ab09 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -1366,8 +1366,7 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb) return 0; skb_trim(skb, skb->len - 4); - memcpy(&rx_hdr, skb_tail_pointer(skb), 4); - le32_to_cpus(&rx_hdr); + rx_hdr = get_unaligned_le32(skb_tail_pointer(skb)); pkt_cnt = (u16)rx_hdr; hdr_off = (u16)(rx_hdr >> 16); @@ -1422,6 +1421,7 @@ ax88179_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) int frame_size = dev->maxpacket; int mss = skb_shinfo(skb)->gso_size; int headroom; + void *ptr; tx_hdr1 = skb->len; tx_hdr2 = mss; @@ -1436,13 +1436,9 @@ ax88179_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) return NULL; } - skb_push(skb, 4); - cpu_to_le32s(&tx_hdr2); - skb_copy_to_linear_data(skb, &tx_hdr2, 4); - - skb_push(skb, 4); - cpu_to_le32s(&tx_hdr1); - skb_copy_to_linear_data(skb, &tx_hdr1, 4); + ptr = skb_push(skb, 8); + put_unaligned_le32(tx_hdr1, ptr); + put_unaligned_le32(tx_hdr2, ptr + 4); return skb; } diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index f033fee225a1..58f5a219fb65 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -1258,8 +1258,7 @@ static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb) return; } - memcpy(&intdata, urb->transfer_buffer, 4); - le32_to_cpus(&intdata); + intdata = get_unaligned_le32(urb->transfer_buffer); if (intdata & INT_ENP_PHY_INT) { netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata); @@ -2730,6 +2729,7 @@ static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev, struct sk_buff *skb, gfp_t flags) { u32 tx_cmd_a, tx_cmd_b; + void *ptr; if (skb_cow_head(skb, TX_OVERHEAD)) { dev_kfree_skb_any(skb); @@ -2758,13 +2758,9 @@ static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev, tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_; } - skb_push(skb, 4); - cpu_to_le32s(&tx_cmd_b); - memcpy(skb->data, &tx_cmd_b, 4); - - skb_push(skb, 4); - cpu_to_le32s(&tx_cmd_a); - memcpy(skb->data, &tx_cmd_a, 4); + ptr = skb_push(skb, 8); + put_unaligned_le32(tx_cmd_a, ptr); + put_unaligned_le32(tx_cmd_b, ptr + 4); return skb; } @@ -3105,16 +3101,13 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb) struct sk_buff *skb2; unsigned char *packet; - memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a)); - le32_to_cpus(&rx_cmd_a); + rx_cmd_a = get_unaligned_le32(skb->data); skb_pull(skb, sizeof(rx_cmd_a)); - memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b)); - le32_to_cpus(&rx_cmd_b); + rx_cmd_b = get_unaligned_le32(skb->data); skb_pull(skb, sizeof(rx_cmd_b)); - memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c)); - le16_to_cpus(&rx_cmd_c); + rx_cmd_c = get_unaligned_le16(skb->data); skb_pull(skb, sizeof(rx_cmd_c)); packet = skb->data; diff --git a/drivers/net/usb/lg-vl600.c b/drivers/net/usb/lg-vl600.c index 6c2b3e368efe..217a2d8fa47b 100644 --- 
a/drivers/net/usb/lg-vl600.c +++ b/drivers/net/usb/lg-vl600.c @@ -87,9 +87,7 @@ static void vl600_unbind(struct usbnet *dev, struct usb_interface *intf) { struct vl600_state *s = dev->driver_priv; - if (s->current_rx_buf) - dev_kfree_skb(s->current_rx_buf); - + dev_kfree_skb(s->current_rx_buf); kfree(s); return usbnet_cdc_unbind(dev, intf); diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 04137ac373b0..08726090570e 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -22,10 +22,11 @@ #include <linux/mdio.h> #include <linux/usb/cdc.h> #include <linux/suspend.h> +#include <linux/atomic.h> #include <linux/acpi.h> /* Information for net-next */ -#define NETNEXT_VERSION "09" +#define NETNEXT_VERSION "10" /* Information for net */ #define NET_VERSION "10" @@ -444,18 +445,18 @@ #define UPS_FLAGS_250M_CKDIV BIT(2) #define UPS_FLAGS_EN_ALDPS BIT(3) #define UPS_FLAGS_CTAP_SHORT_DIS BIT(4) -#define UPS_FLAGS_SPEED_MASK (0xf << 16) #define ups_flags_speed(x) ((x) << 16) #define UPS_FLAGS_EN_EEE BIT(20) #define UPS_FLAGS_EN_500M_EEE BIT(21) #define UPS_FLAGS_EN_EEE_CKDIV BIT(22) +#define UPS_FLAGS_EEE_PLLOFF_100 BIT(23) #define UPS_FLAGS_EEE_PLLOFF_GIGA BIT(24) #define UPS_FLAGS_EEE_CMOD_LV_EN BIT(25) #define UPS_FLAGS_EN_GREEN BIT(26) #define UPS_FLAGS_EN_FLOW_CTR BIT(27) enum spd_duplex { - NWAY_10M_HALF = 1, + NWAY_10M_HALF, NWAY_10M_FULL, NWAY_100M_HALF, NWAY_100M_FULL, @@ -583,6 +584,9 @@ enum rtl_register_content { #define TX_ALIGN 4 #define RX_ALIGN 8 +#define RTL8152_RX_MAX_PENDING 4096 +#define RTL8152_RXFG_HEADSZ 256 + #define INTR_LINK 0x0004 #define RTL8152_REQT_READ 0xc0 @@ -615,7 +619,7 @@ enum rtl8152_flags { RTL8152_LINK_CHG, SELECTIVE_SUSPEND, PHY_RESET, - SCHEDULE_NAPI, + SCHEDULE_TASKLET, GREEN_ETHERNET, DELL_TB_RX_AGG_BUG, }; @@ -694,11 +698,11 @@ struct tx_desc { struct r8152; struct rx_agg { - struct list_head list; + struct list_head list, info_list; struct urb *urb; struct r8152 *context; + struct page *page; void *buffer; - void *head; }; struct tx_agg { @@ -719,7 +723,7 @@ struct r8152 { struct net_device *netdev; struct urb *intr_urb; struct tx_agg tx_info[RTL8152_MAX_TX]; - struct rx_agg rx_info[RTL8152_MAX_RX]; + struct list_head rx_info, rx_used; struct list_head rx_done, tx_free; struct sk_buff_head tx_queue, rx_queue; spinlock_t rx_lock, tx_lock; @@ -729,6 +733,7 @@ struct r8152 { #ifdef CONFIG_PM_SLEEP struct notifier_block pm_notifier; #endif + struct tasklet_struct tx_tl; struct rtl_ops { void (*init)(struct r8152 *); @@ -744,13 +749,39 @@ struct r8152 { void (*autosuspend_en)(struct r8152 *tp, bool enable); } rtl_ops; + struct ups_info { + u32 _10m_ckdiv:1; + u32 _250m_ckdiv:1; + u32 aldps:1; + u32 lite_mode:2; + u32 speed_duplex:4; + u32 eee:1; + u32 eee_lite:1; + u32 eee_ckdiv:1; + u32 eee_plloff_100:1; + u32 eee_plloff_giga:1; + u32 eee_cmod_lv:1; + u32 green:1; + u32 flow_control:1; + u32 ctap_short_off:1; + } ups_info; + + atomic_t rx_count; + + bool eee_en; int intr_interval; u32 saved_wolopts; u32 msg_enable; u32 tx_qlen; u32 coalesce; + u32 advertising; + u32 rx_buf_sz; + u32 rx_copybreak; + u32 rx_pending; + u16 ocp_base; u16 speed; + u16 eee_adv; u8 *intr_buff; u8 version; u8 duplex; @@ -777,6 +808,13 @@ enum tx_csum_stat { TX_CSUM_NONE }; +#define RTL_ADVERTISED_10_HALF BIT(0) +#define RTL_ADVERTISED_10_FULL BIT(1) +#define RTL_ADVERTISED_100_HALF BIT(2) +#define RTL_ADVERTISED_100_FULL BIT(3) +#define RTL_ADVERTISED_1000_HALF BIT(4) +#define RTL_ADVERTISED_1000_FULL BIT(5) + /* Maximum number of multicast addresses to 
filter (vs. Rx-all-multicast). * The RTL chips use a 64 element hash table based on the Ethernet CRC. */ @@ -1394,7 +1432,7 @@ static void write_bulk_callback(struct urb *urb) return; if (!skb_queue_empty(&tp->tx_queue)) - napi_schedule(&tp->napi); + tasklet_schedule(&tp->tx_tl); } static void intr_callback(struct urb *urb) @@ -1470,18 +1508,72 @@ static inline void *tx_agg_align(void *data) return (void *)ALIGN((uintptr_t)data, TX_ALIGN); } +static void free_rx_agg(struct r8152 *tp, struct rx_agg *agg) +{ + list_del(&agg->info_list); + + usb_free_urb(agg->urb); + put_page(agg->page); + kfree(agg); + + atomic_dec(&tp->rx_count); +} + +static struct rx_agg *alloc_rx_agg(struct r8152 *tp, gfp_t mflags) +{ + struct net_device *netdev = tp->netdev; + int node = netdev->dev.parent ? dev_to_node(netdev->dev.parent) : -1; + unsigned int order = get_order(tp->rx_buf_sz); + struct rx_agg *rx_agg; + unsigned long flags; + + rx_agg = kmalloc_node(sizeof(*rx_agg), mflags, node); + if (!rx_agg) + return NULL; + + rx_agg->page = alloc_pages(mflags | __GFP_COMP, order); + if (!rx_agg->page) + goto free_rx; + + rx_agg->buffer = page_address(rx_agg->page); + + rx_agg->urb = usb_alloc_urb(0, mflags); + if (!rx_agg->urb) + goto free_buf; + + rx_agg->context = tp; + + INIT_LIST_HEAD(&rx_agg->list); + INIT_LIST_HEAD(&rx_agg->info_list); + spin_lock_irqsave(&tp->rx_lock, flags); + list_add_tail(&rx_agg->info_list, &tp->rx_info); + spin_unlock_irqrestore(&tp->rx_lock, flags); + + atomic_inc(&tp->rx_count); + + return rx_agg; + +free_buf: + __free_pages(rx_agg->page, order); +free_rx: + kfree(rx_agg); + return NULL; +} + static void free_all_mem(struct r8152 *tp) { + struct rx_agg *agg, *agg_next; + unsigned long flags; int i; - for (i = 0; i < RTL8152_MAX_RX; i++) { - usb_free_urb(tp->rx_info[i].urb); - tp->rx_info[i].urb = NULL; + spin_lock_irqsave(&tp->rx_lock, flags); - kfree(tp->rx_info[i].buffer); - tp->rx_info[i].buffer = NULL; - tp->rx_info[i].head = NULL; - } + list_for_each_entry_safe(agg, agg_next, &tp->rx_info, info_list) + free_rx_agg(tp, agg); + + spin_unlock_irqrestore(&tp->rx_lock, flags); + + WARN_ON(atomic_read(&tp->rx_count)); for (i = 0; i < RTL8152_MAX_TX; i++) { usb_free_urb(tp->tx_info[i].urb); @@ -1505,46 +1597,28 @@ static int alloc_all_mem(struct r8152 *tp) struct usb_interface *intf = tp->intf; struct usb_host_interface *alt = intf->cur_altsetting; struct usb_host_endpoint *ep_intr = alt->endpoint + 2; - struct urb *urb; int node, i; - u8 *buf; node = netdev->dev.parent ? 
dev_to_node(netdev->dev.parent) : -1; spin_lock_init(&tp->rx_lock); spin_lock_init(&tp->tx_lock); + INIT_LIST_HEAD(&tp->rx_info); INIT_LIST_HEAD(&tp->tx_free); INIT_LIST_HEAD(&tp->rx_done); skb_queue_head_init(&tp->tx_queue); skb_queue_head_init(&tp->rx_queue); + atomic_set(&tp->rx_count, 0); for (i = 0; i < RTL8152_MAX_RX; i++) { - buf = kmalloc_node(agg_buf_sz, GFP_KERNEL, node); - if (!buf) - goto err1; - - if (buf != rx_agg_align(buf)) { - kfree(buf); - buf = kmalloc_node(agg_buf_sz + RX_ALIGN, GFP_KERNEL, - node); - if (!buf) - goto err1; - } - - urb = usb_alloc_urb(0, GFP_KERNEL); - if (!urb) { - kfree(buf); + if (!alloc_rx_agg(tp, GFP_KERNEL)) goto err1; - } - - INIT_LIST_HEAD(&tp->rx_info[i].list); - tp->rx_info[i].context = tp; - tp->rx_info[i].urb = urb; - tp->rx_info[i].buffer = buf; - tp->rx_info[i].head = rx_agg_align(buf); } for (i = 0; i < RTL8152_MAX_TX; i++) { + struct urb *urb; + u8 *buf; + buf = kmalloc_node(agg_buf_sz, GFP_KERNEL, node); if (!buf) goto err1; @@ -1910,6 +1984,46 @@ return_result: return checksum; } +static inline bool rx_count_exceed(struct r8152 *tp) +{ + return atomic_read(&tp->rx_count) > RTL8152_MAX_RX; +} + +static inline int agg_offset(struct rx_agg *agg, void *addr) +{ + return (int)(addr - agg->buffer); +} + +static struct rx_agg *rtl_get_free_rx(struct r8152 *tp, gfp_t mflags) +{ + struct rx_agg *agg, *agg_next, *agg_free = NULL; + unsigned long flags; + + spin_lock_irqsave(&tp->rx_lock, flags); + + list_for_each_entry_safe(agg, agg_next, &tp->rx_used, list) { + if (page_count(agg->page) == 1) { + if (!agg_free) { + list_del_init(&agg->list); + agg_free = agg; + continue; + } + if (rx_count_exceed(tp)) { + list_del_init(&agg->list); + free_rx_agg(tp, agg); + } + break; + } + } + + spin_unlock_irqrestore(&tp->rx_lock, flags); + + if (!agg_free && atomic_read(&tp->rx_count) < tp->rx_pending) + agg_free = alloc_rx_agg(tp, mflags); + + return agg_free; +} + static int rx_bottom(struct r8152 *tp, int budget) { unsigned long flags; @@ -1945,7 +2059,7 @@ static int rx_bottom(struct r8152 *tp, int budget) list_for_each_safe(cursor, next, &rx_queue) { struct rx_desc *rx_desc; - struct rx_agg *agg; + struct rx_agg *agg, *agg_free; int len_used = 0; struct urb *urb; u8 *rx_data; @@ -1957,14 +2071,16 @@ static int rx_bottom(struct r8152 *tp, int budget) if (urb->actual_length < ETH_ZLEN) goto submit; - rx_desc = agg->head; - rx_data = agg->head; + agg_free = rtl_get_free_rx(tp, GFP_ATOMIC); + + rx_desc = agg->buffer; + rx_data = agg->buffer; len_used += sizeof(struct rx_desc); while (urb->actual_length > len_used) { struct net_device *netdev = tp->netdev; struct net_device_stats *stats = &netdev->stats; - unsigned int pkt_len; + unsigned int pkt_len, rx_frag_head_sz; struct sk_buff *skb; /* limit the skb numbers for rx_queue */ @@ -1982,22 +2098,37 @@ static int rx_bottom(struct r8152 *tp, int budget) pkt_len -= ETH_FCS_LEN; rx_data += sizeof(struct rx_desc); - skb = napi_alloc_skb(napi, pkt_len); + if (!agg_free || tp->rx_copybreak > pkt_len) + rx_frag_head_sz = pkt_len; + else + rx_frag_head_sz = tp->rx_copybreak; + + skb = napi_alloc_skb(napi, rx_frag_head_sz); if (!skb) { stats->rx_dropped++; goto find_next_rx; } skb->ip_summed = r8152_rx_csum(tp, rx_desc); - memcpy(skb->data, rx_data, pkt_len); - skb_put(skb, pkt_len); + memcpy(skb->data, rx_data, rx_frag_head_sz); + skb_put(skb, rx_frag_head_sz); + pkt_len -= rx_frag_head_sz; + rx_data += rx_frag_head_sz; + if (pkt_len) { + skb_add_rx_frag(skb, 0, agg->page, + agg_offset(agg, rx_data), + pkt_len, + SKB_DATA_ALIGN(pkt_len));
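+ /* The skb frag takes a reference on agg->page; rtl_get_free_rx() only recycles an aggregation buffer once page_count() drops back to 1. */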
+ get_page(agg->page); + } + skb->protocol = eth_type_trans(skb, netdev); rtl_rx_vlan_tag(rx_desc, skb); if (work_done < budget) { - napi_gro_receive(napi, skb); work_done++; stats->rx_packets++; - stats->rx_bytes += pkt_len; + stats->rx_bytes += skb->len; + napi_gro_receive(napi, skb); } else { __skb_queue_tail(&tp->rx_queue, skb); } @@ -2005,10 +2136,24 @@ find_next_rx: rx_data = rx_agg_align(rx_data + pkt_len + ETH_FCS_LEN); rx_desc = (struct rx_desc *)rx_data; - len_used = (int)(rx_data - (u8 *)agg->head); + len_used = agg_offset(agg, rx_data); len_used += sizeof(struct rx_desc); } + WARN_ON(!agg_free && page_count(agg->page) > 1); + + if (agg_free) { + spin_lock_irqsave(&tp->rx_lock, flags); + if (page_count(agg->page) == 1) { + list_add(&agg_free->list, &tp->rx_used); + } else { + list_add_tail(&agg->list, &tp->rx_used); + agg = agg_free; + urb = agg->urb; + } + spin_unlock_irqrestore(&tp->rx_lock, flags); + } + submit: if (!ret) { ret = r8152_submit_rx(tp, agg, GFP_ATOMIC); @@ -2065,8 +2210,12 @@ static void tx_bottom(struct r8152 *tp) } while (res == 0); } -static void bottom_half(struct r8152 *tp) +static void bottom_half(unsigned long data) { + struct r8152 *tp; + + tp = (struct r8152 *)data; +
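+ /* Tx completion is handled in this tasklet rather than in the NAPI poll loop, so the rx and tx paths no longer compete for the same NAPI budget. */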
if (test_bit(RTL8152_UNPLUG, &tp->flags)) return; @@ -2078,7 +2227,7 @@ static void bottom_half(struct r8152 *tp) if (!netif_carrier_ok(tp->netdev)) return; - clear_bit(SCHEDULE_NAPI, &tp->flags); + clear_bit(SCHEDULE_TASKLET, &tp->flags); tx_bottom(tp); } @@ -2089,16 +2238,12 @@ static int r8152_poll(struct napi_struct *napi, int budget) int work_done; work_done = rx_bottom(tp, budget); - bottom_half(tp); if (work_done < budget) { if (!napi_complete_done(napi, work_done)) goto out; if (!list_empty(&tp->rx_done)) napi_schedule(napi); - else if (!skb_queue_empty(&tp->tx_queue) && - !list_empty(&tp->tx_free)) - napi_schedule(napi); } out: @@ -2116,7 +2261,7 @@ int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags) return 0; usb_fill_bulk_urb(agg->urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1), - agg->head, agg_buf_sz, + agg->buffer, tp->rx_buf_sz, (usb_complete_t)read_bulk_callback, agg); ret = usb_submit_urb(agg->urb, mem_flags); @@ -2252,11 +2397,11 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb, if (!list_empty(&tp->tx_free)) { if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { - set_bit(SCHEDULE_NAPI, &tp->flags); + set_bit(SCHEDULE_TASKLET, &tp->flags); schedule_delayed_work(&tp->schedule, 0); } else { usb_mark_last_busy(tp->udev); - napi_schedule(&tp->napi); + tasklet_schedule(&tp->tx_tl); } } else if (skb_queue_len(&tp->tx_queue) > tp->tx_qlen) { netif_stop_queue(netdev); @@ -2333,44 +2478,80 @@ static void rxdy_gated_en(struct r8152 *tp, bool enable) static int rtl_start_rx(struct r8152 *tp) { - int i, ret = 0; + struct rx_agg *agg, *agg_next; + struct list_head tmp_list; + unsigned long flags; + int ret = 0, i = 0; - INIT_LIST_HEAD(&tp->rx_done); - for (i = 0; i < RTL8152_MAX_RX; i++) { - INIT_LIST_HEAD(&tp->rx_info[i].list); - ret = r8152_submit_rx(tp, &tp->rx_info[i], GFP_KERNEL); - if (ret) - break; - } + INIT_LIST_HEAD(&tmp_list); - if (ret && ++i < RTL8152_MAX_RX) { - struct list_head rx_queue; - unsigned long flags; + spin_lock_irqsave(&tp->rx_lock, flags); - INIT_LIST_HEAD(&rx_queue); + INIT_LIST_HEAD(&tp->rx_done); + INIT_LIST_HEAD(&tp->rx_used); - do { - struct rx_agg *agg = &tp->rx_info[i++]; - struct urb *urb = agg->urb; - urb->actual_length = 0; - list_add_tail(&agg->list, &rx_queue); - } while (i < RTL8152_MAX_RX); + list_splice_init(&tp->rx_info, &tmp_list); - spin_lock_irqsave(&tp->rx_lock, flags); - list_splice_tail(&rx_queue, &tp->rx_done); - spin_unlock_irqrestore(&tp->rx_lock, flags); + spin_unlock_irqrestore(&tp->rx_lock, flags); + list_for_each_entry_safe(agg, agg_next, &tmp_list, info_list) { + INIT_LIST_HEAD(&agg->list); + + /* Only RTL8152_MAX_RX rx_agg entries need to be submitted. */ + if (++i > RTL8152_MAX_RX) { + spin_lock_irqsave(&tp->rx_lock, flags); + list_add_tail(&agg->list, &tp->rx_used); + spin_unlock_irqrestore(&tp->rx_lock, flags); + } else if (unlikely(ret < 0)) { + spin_lock_irqsave(&tp->rx_lock, flags); + list_add_tail(&agg->list, &tp->rx_done); + spin_unlock_irqrestore(&tp->rx_lock, flags); + } else { + ret = r8152_submit_rx(tp, agg, GFP_KERNEL); + } } + spin_lock_irqsave(&tp->rx_lock, flags); + WARN_ON(!list_empty(&tp->rx_info)); + list_splice(&tmp_list, &tp->rx_info); + spin_unlock_irqrestore(&tp->rx_lock, flags); + return ret; } static int rtl_stop_rx(struct r8152 *tp) { - int i; + struct rx_agg *agg, *agg_next; + struct list_head tmp_list; + unsigned long flags; + + INIT_LIST_HEAD(&tmp_list); + + /* usb_kill_urb() can't be called in atomic context. + * Therefore, move the rx_info list to a temporary one so that + * list_for_each_entry_safe() can be used without holding the + * spin lock. + */ - for (i = 0; i < RTL8152_MAX_RX; i++) - usb_kill_urb(tp->rx_info[i].urb); + spin_lock_irqsave(&tp->rx_lock, flags); + list_splice_init(&tp->rx_info, &tmp_list); + spin_unlock_irqrestore(&tp->rx_lock, flags); + + list_for_each_entry_safe(agg, agg_next, &tmp_list, info_list) { + /* At least RTL8152_MAX_RX rx_agg entries have a page_count + * of 1, so the others can be freed safely. + */ + if (page_count(agg->page) > 1) + free_rx_agg(tp, agg); + else + usb_kill_urb(agg->urb); + } + + /* Move the temporary list back to rx_info */ + spin_lock_irqsave(&tp->rx_lock, flags); + WARN_ON(!list_empty(&tp->rx_info)); + list_splice(&tmp_list, &tp->rx_info); + spin_unlock_irqrestore(&tp->rx_lock, flags); while (!skb_queue_empty(&tp->rx_queue)) dev_kfree_skb(__skb_dequeue(&tp->rx_queue)); @@ -2450,7 +2631,7 @@ static void r8153_set_rx_early_timeout(struct r8152 *tp) static void r8153_set_rx_early_size(struct r8152 *tp) { - u32 ocp_data = agg_buf_sz - rx_reserved_size(tp->netdev->mtu); + u32 ocp_data = tp->rx_buf_sz - rx_reserved_size(tp->netdev->mtu); switch (tp->version) { case RTL_VER_03: @@ -2701,14 +2882,76 @@ static void r8153_u2p3en(struct r8152 *tp, bool enable) ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data); } -static void r8153b_ups_flags_w1w0(struct r8152 *tp, u32 set, u32 clear) +static void r8153b_ups_flags(struct r8152 *tp) { - u32 ocp_data; + u32 ups_flags = 0; + + if (tp->ups_info.green) + ups_flags |= UPS_FLAGS_EN_GREEN; + + if (tp->ups_info.aldps) + ups_flags |= UPS_FLAGS_EN_ALDPS; + + if (tp->ups_info.eee) + ups_flags |= UPS_FLAGS_EN_EEE; + + if (tp->ups_info.flow_control) + ups_flags |= UPS_FLAGS_EN_FLOW_CTR; - ocp_data = ocp_read_dword(tp, MCU_TYPE_USB, USB_UPS_FLAGS); - ocp_data &= ~clear; - ocp_data |= set; - ocp_write_dword(tp, MCU_TYPE_USB, USB_UPS_FLAGS, ocp_data); + if (tp->ups_info.eee_ckdiv) + ups_flags |= UPS_FLAGS_EN_EEE_CKDIV; + + if (tp->ups_info.eee_cmod_lv) + ups_flags |= UPS_FLAGS_EEE_CMOD_LV_EN; + + if (tp->ups_info._10m_ckdiv) + ups_flags |= UPS_FLAGS_EN_10M_CKDIV; + + if (tp->ups_info.eee_plloff_100) + ups_flags |= UPS_FLAGS_EEE_PLLOFF_100; + + if (tp->ups_info.eee_plloff_giga) + ups_flags |=
UPS_FLAGS_EEE_PLLOFF_GIGA; + + if (tp->ups_info._250m_ckdiv) + ups_flags |= UPS_FLAGS_250M_CKDIV; + + if (tp->ups_info.ctap_short_off) + ups_flags |= UPS_FLAGS_CTAP_SHORT_DIS; + + switch (tp->ups_info.speed_duplex) { + case NWAY_10M_HALF: + ups_flags |= ups_flags_speed(1); + break; + case NWAY_10M_FULL: + ups_flags |= ups_flags_speed(2); + break; + case NWAY_100M_HALF: + ups_flags |= ups_flags_speed(3); + break; + case NWAY_100M_FULL: + ups_flags |= ups_flags_speed(4); + break; + case NWAY_1000M_FULL: + ups_flags |= ups_flags_speed(5); + break; + case FORCE_10M_HALF: + ups_flags |= ups_flags_speed(6); + break; + case FORCE_10M_FULL: + ups_flags |= ups_flags_speed(7); + break; + case FORCE_100M_HALF: + ups_flags |= ups_flags_speed(8); + break; + case FORCE_100M_FULL: + ups_flags |= ups_flags_speed(9); + break; + default: + break; + } + + ocp_write_dword(tp, MCU_TYPE_USB, USB_UPS_FLAGS, ups_flags); } static void r8153b_green_en(struct r8152 *tp, bool enable) @@ -2729,7 +2972,7 @@ static void r8153b_green_en(struct r8152 *tp, bool enable) data |= GREEN_ETH_EN; sram_write(tp, SRAM_GREEN_CFG, data); - r8153b_ups_flags_w1w0(tp, UPS_FLAGS_EN_GREEN, 0); + tp->ups_info.green = enable; } static u16 r8153_phy_status(struct r8152 *tp, u16 desired) @@ -2759,6 +3002,8 @@ static void r8153b_ups_en(struct r8152 *tp, bool enable) u32 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_POWER_CUT); if (enable) { + r8153b_ups_flags(tp); + ocp_data |= UPS_EN | USP_PREWAKE | PHASE2_EN; ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); @@ -3049,10 +3294,60 @@ static void r8152_eee_en(struct r8152 *tp, bool enable) ocp_reg_write(tp, OCP_EEE_CONFIG3, config3); } -static void r8152b_enable_eee(struct r8152 *tp) +static void r8153_eee_en(struct r8152 *tp, bool enable) { - r8152_eee_en(tp, true); - r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, MDIO_EEE_100TX); + u32 ocp_data; + u16 config; + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR); + config = ocp_reg_read(tp, OCP_EEE_CFG); + + if (enable) { + ocp_data |= EEE_RX_EN | EEE_TX_EN; + config |= EEE10_EN; + } else { + ocp_data &= ~(EEE_RX_EN | EEE_TX_EN); + config &= ~EEE10_EN; + } + + ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data); + ocp_reg_write(tp, OCP_EEE_CFG, config); + + tp->ups_info.eee = enable; +} + +static void rtl_eee_enable(struct r8152 *tp, bool enable) +{ + switch (tp->version) { + case RTL_VER_01: + case RTL_VER_02: + case RTL_VER_07: + if (enable) { + r8152_eee_en(tp, true); + r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, + tp->eee_adv); + } else { + r8152_eee_en(tp, false); + r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0); + } + break; + case RTL_VER_03: + case RTL_VER_04: + case RTL_VER_05: + case RTL_VER_06: + case RTL_VER_08: + case RTL_VER_09: + if (enable) { + r8153_eee_en(tp, true); + ocp_reg_write(tp, OCP_EEE_ADV, tp->eee_adv); + } else { + r8153_eee_en(tp, false); + ocp_reg_write(tp, OCP_EEE_ADV, 0); + } + break; + default: + break; + } } static void r8152b_enable_fc(struct r8152 *tp) @@ -3062,6 +3357,8 @@ static void r8152b_enable_fc(struct r8152 *tp) anar = r8152_mdio_read(tp, MII_ADVERTISE); anar |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; r8152_mdio_write(tp, MII_ADVERTISE, anar); + + tp->ups_info.flow_control = true; } static void rtl8152_disable(struct r8152 *tp) @@ -3073,7 +3370,7 @@ static void rtl8152_disable(struct r8152 *tp) static void r8152b_hw_phy_cfg(struct r8152 *tp) { - r8152b_enable_eee(tp); + rtl_eee_enable(tp, tp->eee_en); r8152_aldps_en(tp, true); r8152b_enable_fc(tp); @@ -3255,52 
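The r8153b_ups_flags() rewrite above replaces the deleted read-modify-write helper (r8153b_ups_flags_w1w0) with software-cached state: each setter merely records its option in tp->ups_info, and USB_UPS_FLAGS is composed and written once when UPS is enabled. A reduced, illustrative sketch of the scheme (the struct and helper here are not the driver's real ones; the flag names are from the patch):

	struct ups_info_sketch {
		bool green, aldps, eee, flow_control;
	};

	static u32 compose_ups_flags(const struct ups_info_sketch *i)
	{
		u32 flags = 0;

		if (i->green)
			flags |= UPS_FLAGS_EN_GREEN;
		if (i->aldps)
			flags |= UPS_FLAGS_EN_ALDPS;
		if (i->eee)
			flags |= UPS_FLAGS_EN_EEE;
		if (i->flow_control)
			flags |= UPS_FLAGS_EN_FLOW_CTR;

		/* written once via ocp_write_dword() from r8153b_ups_en() */
		return flags;
	}

Setters such as r8153b_green_en() or r8152b_enable_fc() no longer touch the register at all, which saves USB register round-trips and keeps the flags correct even when UPS is enabled much later.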
+3552,8 @@ static void r8153_aldps_en(struct r8152 *tp, bool enable) break; } } -} -static void r8153b_aldps_en(struct r8152 *tp, bool enable) -{ - r8153_aldps_en(tp, enable); - - if (enable) - r8153b_ups_flags_w1w0(tp, UPS_FLAGS_EN_ALDPS, 0); - else - r8153b_ups_flags_w1w0(tp, 0, UPS_FLAGS_EN_ALDPS); -} - -static void r8153_eee_en(struct r8152 *tp, bool enable) -{ - u32 ocp_data; - u16 config; - - ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR); - config = ocp_reg_read(tp, OCP_EEE_CFG); - - if (enable) { - ocp_data |= EEE_RX_EN | EEE_TX_EN; - config |= EEE10_EN; - } else { - ocp_data &= ~(EEE_RX_EN | EEE_TX_EN); - config &= ~EEE10_EN; - } - - ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data); - ocp_reg_write(tp, OCP_EEE_CFG, config); -} - -static void r8153b_eee_en(struct r8152 *tp, bool enable) -{ - r8153_eee_en(tp, enable); - - if (enable) - r8153b_ups_flags_w1w0(tp, UPS_FLAGS_EN_EEE, 0); - else - r8153b_ups_flags_w1w0(tp, 0, UPS_FLAGS_EN_EEE); -} - -static void r8153b_enable_fc(struct r8152 *tp) -{ - r8152b_enable_fc(tp); - r8153b_ups_flags_w1w0(tp, UPS_FLAGS_EN_FLOW_CTR, 0); + tp->ups_info.aldps = enable; } static void r8153_hw_phy_cfg(struct r8152 *tp) @@ -3312,8 +3565,7 @@ static void r8153_hw_phy_cfg(struct r8152 *tp) r8153_aldps_en(tp, false); /* disable EEE before updating the PHY parameters */ - r8153_eee_en(tp, false); - ocp_reg_write(tp, OCP_EEE_ADV, 0); + rtl_eee_enable(tp, false); if (tp->version == RTL_VER_03) { data = ocp_reg_read(tp, OCP_EEE_CFG); @@ -3344,8 +3596,8 @@ static void r8153_hw_phy_cfg(struct r8152 *tp) sram_write(tp, SRAM_10M_AMP1, 0x00af); sram_write(tp, SRAM_10M_AMP2, 0x0208); - r8153_eee_en(tp, true); - ocp_reg_write(tp, OCP_EEE_ADV, MDIO_EEE_1000T | MDIO_EEE_100TX); + if (tp->eee_en) + rtl_eee_enable(tp, true); r8153_aldps_en(tp, true); r8152b_enable_fc(tp); @@ -3378,15 +3630,14 @@ static u32 r8152_efuse_read(struct r8152 *tp, u8 addr) static void r8153b_hw_phy_cfg(struct r8152 *tp) { - u32 ocp_data, ups_flags = 0; + u32 ocp_data; u16 data; /* disable ALDPS before updating the PHY parameters */ - r8153b_aldps_en(tp, false); + r8153_aldps_en(tp, false); /* disable EEE before updating the PHY parameters */ - r8153b_eee_en(tp, false); - ocp_reg_write(tp, OCP_EEE_ADV, 0); + rtl_eee_enable(tp, false); r8153b_green_en(tp, test_bit(GREEN_ETHERNET, &tp->flags)); @@ -3431,28 +3682,27 @@ static void r8153b_hw_phy_cfg(struct r8152 *tp) data = ocp_reg_read(tp, OCP_POWER_CFG); data |= EEE_CLKDIV_EN; ocp_reg_write(tp, OCP_POWER_CFG, data); + tp->ups_info.eee_ckdiv = true; data = ocp_reg_read(tp, OCP_DOWN_SPEED); data |= EN_EEE_CMODE | EN_EEE_1000 | EN_10M_CLKDIV; ocp_reg_write(tp, OCP_DOWN_SPEED, data); + tp->ups_info.eee_cmod_lv = true; + tp->ups_info._10m_ckdiv = true; + tp->ups_info.eee_plloff_giga = true; ocp_reg_write(tp, OCP_SYSCLK_CFG, 0); ocp_reg_write(tp, OCP_SYSCLK_CFG, clk_div_expo(5)); - - ups_flags |= UPS_FLAGS_EN_10M_CKDIV | UPS_FLAGS_250M_CKDIV | - UPS_FLAGS_EN_EEE_CKDIV | UPS_FLAGS_EEE_CMOD_LV_EN | - UPS_FLAGS_EEE_PLLOFF_GIGA; + tp->ups_info._250m_ckdiv = true; r8153_patch_request(tp, false); } - r8153b_ups_flags_w1w0(tp, ups_flags, 0); - - r8153b_eee_en(tp, true); - ocp_reg_write(tp, OCP_EEE_ADV, MDIO_EEE_1000T | MDIO_EEE_100TX); + if (tp->eee_en) + rtl_eee_enable(tp, true); - r8153b_aldps_en(tp, true); - r8153b_enable_fc(tp); + r8153_aldps_en(tp, true); + r8152b_enable_fc(tp); r8153_u2p3en(tp, true); set_bit(PHY_RESET, &tp->flags); @@ -3603,111 +3853,118 @@ static void rtl8153_disable(struct r8152 *tp) r8153_aldps_en(tp, true); } -static 
void rtl8153b_disable(struct r8152 *tp) +static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u32 speed, u8 duplex, + u32 advertising) { - r8153b_aldps_en(tp, false); - rtl_disable(tp); - rtl_reset_bmu(tp); - r8153b_aldps_en(tp, true); -} - -static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex) -{ - u16 bmcr, anar, gbcr; - enum spd_duplex speed_duplex; + u16 bmcr; int ret = 0; - anar = r8152_mdio_read(tp, MII_ADVERTISE); - anar &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL | - ADVERTISE_100HALF | ADVERTISE_100FULL); - if (tp->mii.supports_gmii) { - gbcr = r8152_mdio_read(tp, MII_CTRL1000); - gbcr &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF); - } else { - gbcr = 0; - } - if (autoneg == AUTONEG_DISABLE) { - if (speed == SPEED_10) { - bmcr = 0; - anar |= ADVERTISE_10HALF | ADVERTISE_10FULL; - speed_duplex = FORCE_10M_HALF; - } else if (speed == SPEED_100) { - bmcr = BMCR_SPEED100; - anar |= ADVERTISE_100HALF | ADVERTISE_100FULL; - speed_duplex = FORCE_100M_HALF; - } else if (speed == SPEED_1000 && tp->mii.supports_gmii) { - bmcr = BMCR_SPEED1000; - gbcr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF; - speed_duplex = NWAY_1000M_FULL; - } else { - ret = -EINVAL; - goto out; - } + if (duplex != DUPLEX_HALF && duplex != DUPLEX_FULL) + return -EINVAL; - if (duplex == DUPLEX_FULL) { - bmcr |= BMCR_FULLDPLX; - if (speed != SPEED_1000) - speed_duplex++; - } - } else { - if (speed == SPEED_10) { + switch (speed) { + case SPEED_10: + bmcr = BMCR_SPEED10; if (duplex == DUPLEX_FULL) { - anar |= ADVERTISE_10HALF | ADVERTISE_10FULL; - speed_duplex = NWAY_10M_FULL; + bmcr |= BMCR_FULLDPLX; + tp->ups_info.speed_duplex = FORCE_10M_FULL; } else { - anar |= ADVERTISE_10HALF; - speed_duplex = NWAY_10M_HALF; + tp->ups_info.speed_duplex = FORCE_10M_HALF; } - } else if (speed == SPEED_100) { + break; + case SPEED_100: + bmcr = BMCR_SPEED100; if (duplex == DUPLEX_FULL) { - anar |= ADVERTISE_10HALF | ADVERTISE_10FULL; - anar |= ADVERTISE_100HALF | ADVERTISE_100FULL; - speed_duplex = NWAY_100M_FULL; + bmcr |= BMCR_FULLDPLX; + tp->ups_info.speed_duplex = FORCE_100M_FULL; } else { - anar |= ADVERTISE_10HALF; - anar |= ADVERTISE_100HALF; - speed_duplex = NWAY_100M_HALF; + tp->ups_info.speed_duplex = FORCE_100M_HALF; } - } else if (speed == SPEED_1000 && tp->mii.supports_gmii) { - if (duplex == DUPLEX_FULL) { - anar |= ADVERTISE_10HALF | ADVERTISE_10FULL; - anar |= ADVERTISE_100HALF | ADVERTISE_100FULL; - gbcr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF; - } else { - anar |= ADVERTISE_10HALF; - anar |= ADVERTISE_100HALF; - gbcr |= ADVERTISE_1000HALF; + break; + case SPEED_1000: + if (tp->mii.supports_gmii) { + bmcr = BMCR_SPEED1000 | BMCR_FULLDPLX; + tp->ups_info.speed_duplex = NWAY_1000M_FULL; + break; } - speed_duplex = NWAY_1000M_FULL; - } else { + /* fall through */ + default: ret = -EINVAL; goto out; } + if (duplex == DUPLEX_FULL) + tp->mii.full_duplex = 1; + else + tp->mii.full_duplex = 0; + + tp->mii.force_media = 1; + } else { + u16 anar, tmp1; + u32 support; + + support = RTL_ADVERTISED_10_HALF | RTL_ADVERTISED_10_FULL | + RTL_ADVERTISED_100_HALF | RTL_ADVERTISED_100_FULL; + + if (tp->mii.supports_gmii) + support |= RTL_ADVERTISED_1000_FULL; + + if (!(advertising & support)) + return -EINVAL; + + anar = r8152_mdio_read(tp, MII_ADVERTISE); + tmp1 = anar & ~(ADVERTISE_10HALF | ADVERTISE_10FULL | + ADVERTISE_100HALF | ADVERTISE_100FULL); + if (advertising & RTL_ADVERTISED_10_HALF) { + tmp1 |= ADVERTISE_10HALF; + tp->ups_info.speed_duplex = NWAY_10M_HALF; + } + if (advertising & 
RTL_ADVERTISED_10_FULL) { + tmp1 |= ADVERTISE_10FULL; + tp->ups_info.speed_duplex = NWAY_10M_FULL; + } + + if (advertising & RTL_ADVERTISED_100_HALF) { + tmp1 |= ADVERTISE_100HALF; + tp->ups_info.speed_duplex = NWAY_100M_HALF; + } + if (advertising & RTL_ADVERTISED_100_FULL) { + tmp1 |= ADVERTISE_100FULL; + tp->ups_info.speed_duplex = NWAY_100M_FULL; + } + + if (anar != tmp1) { + r8152_mdio_write(tp, MII_ADVERTISE, tmp1); + tp->mii.advertising = tmp1; + } + + if (tp->mii.supports_gmii) { + u16 gbcr; + + gbcr = r8152_mdio_read(tp, MII_CTRL1000); + tmp1 = gbcr & ~(ADVERTISE_1000FULL | + ADVERTISE_1000HALF); + + if (advertising & RTL_ADVERTISED_1000_FULL) { + tmp1 |= ADVERTISE_1000FULL; + tp->ups_info.speed_duplex = NWAY_1000M_FULL; + } + + if (gbcr != tmp1) + r8152_mdio_write(tp, MII_CTRL1000, tmp1); + } + bmcr = BMCR_ANENABLE | BMCR_ANRESTART; + + tp->mii.force_media = 0; } if (test_and_clear_bit(PHY_RESET, &tp->flags)) bmcr |= BMCR_RESET; - if (tp->mii.supports_gmii) - r8152_mdio_write(tp, MII_CTRL1000, gbcr); - - r8152_mdio_write(tp, MII_ADVERTISE, anar); r8152_mdio_write(tp, MII_BMCR, bmcr); - switch (tp->version) { - case RTL_VER_08: - case RTL_VER_09: - r8153b_ups_flags_w1w0(tp, ups_flags_speed(speed_duplex), - UPS_FLAGS_SPEED_MASK); - break; - - default: - break; - } - if (bmcr & BMCR_RESET) { int i; @@ -3792,12 +4049,12 @@ static void rtl8153b_up(struct r8152 *tp) r8153b_u1u2en(tp, false); r8153_u2p3en(tp, false); - r8153b_aldps_en(tp, false); + r8153_aldps_en(tp, false); r8153_first_init(tp); ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_B); - r8153b_aldps_en(tp, true); + r8153_aldps_en(tp, true); r8153_u2p3en(tp, true); r8153b_u1u2en(tp, true); } @@ -3812,9 +4069,9 @@ static void rtl8153b_down(struct r8152 *tp) r8153b_u1u2en(tp, false); r8153_u2p3en(tp, false); r8153b_power_cut_en(tp, false); - r8153b_aldps_en(tp, false); + r8153_aldps_en(tp, false); r8153_enter_oob(tp); - r8153b_aldps_en(tp, true); + r8153_aldps_en(tp, true); } static bool rtl8152_in_nway(struct r8152 *tp) @@ -3870,9 +4127,11 @@ static void set_carrier(struct r8152 *tp) } else { if (netif_carrier_ok(netdev)) { netif_carrier_off(netdev); + tasklet_disable(&tp->tx_tl); napi_disable(napi); tp->rtl_ops.disable(tp); napi_enable(napi); + tasklet_enable(&tp->tx_tl); netif_info(tp, link, netdev, "carrier off\n"); } } @@ -3905,10 +4164,10 @@ static void rtl_work_func_t(struct work_struct *work) if (test_and_clear_bit(RTL8152_SET_RX_MODE, &tp->flags)) _rtl8152_set_rx_mode(tp->netdev); - /* don't schedule napi before linking */ + /* don't schedule tasklet before linking */ if (test_and_clear_bit(SCHEDULE_TASKLET, &tp->flags) && netif_carrier_ok(tp->netdev)) - napi_schedule(&tp->napi); + tasklet_schedule(&tp->tx_tl); mutex_unlock(&tp->control); @@ -3930,7 +4189,8 @@ static void rtl_hw_phy_work_func_t(struct work_struct *work) tp->rtl_ops.hw_phy_cfg(tp); - rtl8152_set_speed(tp, tp->autoneg, tp->speed, tp->duplex); + rtl8152_set_speed(tp, tp->autoneg, tp->speed, tp->duplex, + tp->advertising); mutex_unlock(&tp->control); @@ -3994,6 +4254,7 @@ static int rtl8152_open(struct net_device *netdev) goto out_unlock; } napi_enable(&tp->napi); + tasklet_enable(&tp->tx_tl); mutex_unlock(&tp->control); @@ -4021,6 +4282,7 @@ static int rtl8152_close(struct net_device *netdev) #ifdef CONFIG_PM_SLEEP unregister_pm_notifier(&tp->pm_notifier); #endif + tasklet_disable(&tp->tx_tl); napi_disable(&tp->napi); clear_bit(WORK_ENABLE, &tp->flags); usb_kill_urb(tp->intr_urb); @@ -4289,6
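The tasklet_disable()/tasklet_enable() pairs above bracket every region in which the TX bottom half must not run. A condensed sketch of the lifecycle this patch adopts (old-style unsigned long tasklet API, matching bottom_half(); the context struct is illustrative):

	#include <linux/interrupt.h>

	struct my_ctx {
		struct tasklet_struct tl;
	};

	static void my_bottom_half(unsigned long data)
	{
		struct my_ctx *ctx = (struct my_ctx *)data;

		(void)ctx;	/* deferred TX work runs here, in softirq context */
	}

	static void lifecycle(struct my_ctx *ctx)
	{
		/* probe: create the tasklet parked so it cannot fire early */
		tasklet_init(&ctx->tl, my_bottom_half, (unsigned long)ctx);
		tasklet_disable(&ctx->tl);

		tasklet_enable(&ctx->tl);	/* open */
		tasklet_schedule(&ctx->tl);	/* xmit path: defer TX work */
		tasklet_disable(&ctx->tl);	/* close/suspend: block and wait */

		tasklet_kill(&ctx->tl);		/* disconnect: final teardown */
	}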
+4551,7 @@ static int rtl8152_pre_reset(struct usb_interface *intf) return 0; netif_stop_queue(netdev); + tasklet_disable(&tp->tx_tl); napi_disable(&tp->napi); clear_bit(WORK_ENABLE, &tp->flags); usb_kill_urb(tp->intr_urb); @@ -4332,6 +4595,7 @@ static int rtl8152_post_reset(struct usb_interface *intf) } napi_enable(&tp->napi); + tasklet_enable(&tp->tx_tl); netif_wake_queue(netdev); usb_submit_urb(tp->intr_urb, GFP_KERNEL); @@ -4485,10 +4749,12 @@ static int rtl8152_system_suspend(struct r8152 *tp) clear_bit(WORK_ENABLE, &tp->flags); usb_kill_urb(tp->intr_urb); + tasklet_disable(&tp->tx_tl); napi_disable(napi); cancel_delayed_work_sync(&tp->schedule); tp->rtl_ops.down(tp); napi_enable(napi); + tasklet_enable(&tp->tx_tl); } return 0; @@ -4642,20 +4908,46 @@ static int rtl8152_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd) { struct r8152 *tp = netdev_priv(dev); + u32 advertising = 0; int ret; ret = usb_autopm_get_interface(tp->intf); if (ret < 0) goto out; + if (test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, + cmd->link_modes.advertising)) + advertising |= RTL_ADVERTISED_10_HALF; + + if (test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, + cmd->link_modes.advertising)) + advertising |= RTL_ADVERTISED_10_FULL; + + if (test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, + cmd->link_modes.advertising)) + advertising |= RTL_ADVERTISED_100_HALF; + + if (test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + cmd->link_modes.advertising)) + advertising |= RTL_ADVERTISED_100_FULL; + + if (test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + cmd->link_modes.advertising)) + advertising |= RTL_ADVERTISED_1000_HALF; + + if (test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + cmd->link_modes.advertising)) + advertising |= RTL_ADVERTISED_1000_FULL; + mutex_lock(&tp->control); ret = rtl8152_set_speed(tp, cmd->base.autoneg, cmd->base.speed, - cmd->base.duplex); + cmd->base.duplex, advertising); if (!ret) { tp->autoneg = cmd->base.autoneg; tp->speed = cmd->base.speed; tp->duplex = cmd->base.duplex; + tp->advertising = advertising; } mutex_unlock(&tp->control); @@ -4731,7 +5023,7 @@ static void rtl8152_get_strings(struct net_device *dev, u32 stringset, u8 *data) static int r8152_get_eee(struct r8152 *tp, struct ethtool_eee *eee) { - u32 ocp_data, lp, adv, supported = 0; + u32 lp, adv, supported = 0; u16 val; val = r8152_mmd_read(tp, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE); @@ -4743,13 +5035,10 @@ static int r8152_get_eee(struct r8152 *tp, struct ethtool_eee *eee) val = r8152_mmd_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE); lp = mmd_eee_adv_to_ethtool_adv_t(val); - ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR); - ocp_data &= EEE_RX_EN | EEE_TX_EN; - - eee->eee_enabled = !!ocp_data; + eee->eee_enabled = tp->eee_en; eee->eee_active = !!(supported & adv & lp); eee->supported = supported; - eee->advertised = adv; + eee->advertised = tp->eee_adv; eee->lp_advertised = lp; return 0; @@ -4759,19 +5048,17 @@ static int r8152_set_eee(struct r8152 *tp, struct ethtool_eee *eee) { u16 val = ethtool_adv_to_mmd_eee_adv_t(eee->advertised); - r8152_eee_en(tp, eee->eee_enabled); - - if (!eee->eee_enabled) - val = 0; + tp->eee_en = eee->eee_enabled; + tp->eee_adv = val; - r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val); + rtl_eee_enable(tp, tp->eee_en); return 0; } static int r8153_get_eee(struct r8152 *tp, struct ethtool_eee *eee) { - u32 ocp_data, lp, adv, supported = 0; + u32 lp, adv, supported = 0; u16 val; val = ocp_reg_read(tp, OCP_EEE_ABLE); @@ -4783,46 +5070,15 @@ static int r8153_get_eee(struct r8152 
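rtl8152_set_link_ksettings() above translates the ethtool link-mode bitmap into the driver's private advertising word through a chain of test_bit() calls. The same mapping can be expressed table-driven; a sketch (the RTL_ADVERTISED_* values come from this patch, the table itself is illustrative):

	static u32 rtl_adv_from_linkmodes(const unsigned long *modes)
	{
		static const struct {
			int bit;	/* ETHTOOL_LINK_MODE_* */
			u32 adv;	/* RTL_ADVERTISED_* */
		} map[] = {
			{ ETHTOOL_LINK_MODE_10baseT_Half_BIT,   RTL_ADVERTISED_10_HALF },
			{ ETHTOOL_LINK_MODE_10baseT_Full_BIT,   RTL_ADVERTISED_10_FULL },
			{ ETHTOOL_LINK_MODE_100baseT_Half_BIT,  RTL_ADVERTISED_100_HALF },
			{ ETHTOOL_LINK_MODE_100baseT_Full_BIT,  RTL_ADVERTISED_100_FULL },
			{ ETHTOOL_LINK_MODE_1000baseT_Half_BIT, RTL_ADVERTISED_1000_HALF },
			{ ETHTOOL_LINK_MODE_1000baseT_Full_BIT, RTL_ADVERTISED_1000_FULL },
		};
		u32 advertising = 0;
		int i;

		for (i = 0; i < ARRAY_SIZE(map); i++)
			if (test_bit(map[i].bit, modes))
				advertising |= map[i].adv;

		return advertising;
	}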
*tp, struct ethtool_eee *eee) val = ocp_reg_read(tp, OCP_EEE_LPABLE); lp = mmd_eee_adv_to_ethtool_adv_t(val); - ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR); - ocp_data &= EEE_RX_EN | EEE_TX_EN; - - eee->eee_enabled = !!ocp_data; + eee->eee_enabled = tp->eee_en; eee->eee_active = !!(supported & adv & lp); eee->supported = supported; - eee->advertised = adv; + eee->advertised = tp->eee_adv; eee->lp_advertised = lp; return 0; } -static int r8153_set_eee(struct r8152 *tp, struct ethtool_eee *eee) -{ - u16 val = ethtool_adv_to_mmd_eee_adv_t(eee->advertised); - - r8153_eee_en(tp, eee->eee_enabled); - - if (!eee->eee_enabled) - val = 0; - - ocp_reg_write(tp, OCP_EEE_ADV, val); - - return 0; -} - -static int r8153b_set_eee(struct r8152 *tp, struct ethtool_eee *eee) -{ - u16 val = ethtool_adv_to_mmd_eee_adv_t(eee->advertised); - - r8153b_eee_en(tp, eee->eee_enabled); - - if (!eee->eee_enabled) - val = 0; - - ocp_reg_write(tp, OCP_EEE_ADV, val); - - return 0; -} - static int rtl_ethtool_get_eee(struct net_device *net, struct ethtool_eee *edata) { @@ -4956,6 +5212,77 @@ static int rtl8152_set_coalesce(struct net_device *netdev, return ret; } +static int rtl8152_get_tunable(struct net_device *netdev, + const struct ethtool_tunable *tunable, void *d) +{ + struct r8152 *tp = netdev_priv(netdev); + + switch (tunable->id) { + case ETHTOOL_RX_COPYBREAK: + *(u32 *)d = tp->rx_copybreak; + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int rtl8152_set_tunable(struct net_device *netdev, + const struct ethtool_tunable *tunable, + const void *d) +{ + struct r8152 *tp = netdev_priv(netdev); + u32 val; + + switch (tunable->id) { + case ETHTOOL_RX_COPYBREAK: + val = *(u32 *)d; + if (val < ETH_ZLEN) { + netif_err(tp, rx_err, netdev, + "Invalid rx copy break value\n"); + return -EINVAL; + } + + if (tp->rx_copybreak != val) { + napi_disable(&tp->napi); + tp->rx_copybreak = val; + napi_enable(&tp->napi); + } + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static void rtl8152_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct r8152 *tp = netdev_priv(netdev); + + ring->rx_max_pending = RTL8152_RX_MAX_PENDING; + ring->rx_pending = tp->rx_pending; +} + +static int rtl8152_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct r8152 *tp = netdev_priv(netdev); + + if (ring->rx_pending < (RTL8152_MAX_RX * 2)) + return -EINVAL; + + if (tp->rx_pending != ring->rx_pending) { + napi_disable(&tp->napi); + tp->rx_pending = ring->rx_pending; + napi_enable(&tp->napi); + } + + return 0; +} + static const struct ethtool_ops ops = { .get_drvinfo = rtl8152_get_drvinfo, .get_link = ethtool_op_get_link, @@ -4973,6 +5300,10 @@ static const struct ethtool_ops ops = { .set_eee = rtl_ethtool_set_eee, .get_link_ksettings = rtl8152_get_link_ksettings, .set_link_ksettings = rtl8152_set_link_ksettings, + .get_tunable = rtl8152_get_tunable, + .set_tunable = rtl8152_set_tunable, + .get_ringparam = rtl8152_get_ringparam, + .set_ringparam = rtl8152_set_ringparam, }; static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) @@ -5117,6 +5448,9 @@ static int rtl_ops_init(struct r8152 *tp) ops->in_nway = rtl8152_in_nway; ops->hw_phy_cfg = r8152b_hw_phy_cfg; ops->autosuspend_en = rtl_runtime_suspend_enable; + tp->rx_buf_sz = 16 * 1024; + tp->eee_en = true; + tp->eee_adv = MDIO_EEE_100TX; break; case RTL_VER_03: @@ -5130,25 +5464,31 @@ static int rtl_ops_init(struct r8152 *tp) ops->down = rtl8153_down; ops->unload = 
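The new ETHTOOL_RX_COPYBREAK tunable above steers the copy-versus-share decision in the page-based RX path: packets below the threshold are copied into a small freshly allocated skb so the DMA page keeps a page_count of 1 and can be resubmitted at once, while larger packets are attached as page fragments. A sketch of that decision, mirroring the rx_bottom() fragment visible earlier in this patch (the wrapper function itself is illustrative, not the driver's exact code):

	static struct sk_buff *rx_build_skb(struct r8152 *tp, struct rx_agg *agg,
					    struct napi_struct *napi,
					    u8 *rx_data, unsigned int pkt_len)
	{
		struct sk_buff *skb;

		if (pkt_len < tp->rx_copybreak) {
			/* copy: the DMA page stays exclusively owned */
			skb = napi_alloc_skb(napi, pkt_len);
			if (skb)
				skb_put_data(skb, rx_data, pkt_len);
		} else {
			/* share: attach the payload as a frag and take a
			 * reference on the page, as the hunk above does */
			skb = napi_alloc_skb(napi, RTL8152_RXFG_HEADSZ);
			if (skb) {
				skb_add_rx_frag(skb, 0, agg->page,
						agg_offset(agg, rx_data),
						pkt_len,
						SKB_DATA_ALIGN(pkt_len));
				get_page(agg->page);
			}
		}
		return skb;
	}

From userspace the threshold is set with, e.g., ethtool --set-tunable <dev> rx-copybreak <bytes>, and the new ring depth with ethtool -G <dev> rx <n>.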
rtl8153_unload; ops->eee_get = r8153_get_eee; - ops->eee_set = r8153_set_eee; + ops->eee_set = r8152_set_eee; ops->in_nway = rtl8153_in_nway; ops->hw_phy_cfg = r8153_hw_phy_cfg; ops->autosuspend_en = rtl8153_runtime_enable; + tp->rx_buf_sz = 32 * 1024; + tp->eee_en = true; + tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX; break; case RTL_VER_08: case RTL_VER_09: ops->init = r8153b_init; ops->enable = rtl8153_enable; - ops->disable = rtl8153b_disable; + ops->disable = rtl8153_disable; ops->up = rtl8153b_up; ops->down = rtl8153b_down; ops->unload = rtl8153b_unload; ops->eee_get = r8153_get_eee; - ops->eee_set = r8153b_set_eee; + ops->eee_set = r8152_set_eee; ops->in_nway = rtl8153_in_nway; ops->hw_phy_cfg = r8153b_hw_phy_cfg; ops->autosuspend_en = rtl8153b_runtime_enable; + tp->rx_buf_sz = 32 * 1024; + tp->eee_en = true; + tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX; break; default: @@ -5270,6 +5610,8 @@ static int rtl8152_probe(struct usb_interface *intf, mutex_init(&tp->control); INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t); INIT_DELAYED_WORK(&tp->hw_phy_work, rtl_hw_phy_work_func_t); + tasklet_init(&tp->tx_tl, bottom_half, (unsigned long)tp); + tasklet_disable(&tp->tx_tl); netdev->netdev_ops = &rtl8152_netdev_ops; netdev->watchdog_timeo = RTL8152_TX_TIMEOUT; @@ -5320,9 +5662,18 @@ static int rtl8152_probe(struct usb_interface *intf, tp->mii.phy_id = R8152_PHY_ID; tp->autoneg = AUTONEG_ENABLE; - tp->speed = tp->mii.supports_gmii ? SPEED_1000 : SPEED_100; + tp->speed = SPEED_100; + tp->advertising = RTL_ADVERTISED_10_HALF | RTL_ADVERTISED_10_FULL | + RTL_ADVERTISED_100_HALF | RTL_ADVERTISED_100_FULL; + if (tp->mii.supports_gmii) { + tp->speed = SPEED_1000; + tp->advertising |= RTL_ADVERTISED_1000_FULL; + } tp->duplex = DUPLEX_FULL; + tp->rx_copybreak = RTL8152_RXFG_HEADSZ; + tp->rx_pending = 10 * RTL8152_MAX_RX; + intf->needs_remote_wakeup = 1; tp->rtl_ops.init(tp); @@ -5352,6 +5703,7 @@ static int rtl8152_probe(struct usb_interface *intf, return 0; out1: + tasklet_kill(&tp->tx_tl); usb_set_intfdata(intf, NULL); out: free_netdev(netdev); @@ -5367,6 +5719,7 @@ static void rtl8152_disconnect(struct usb_interface *intf) rtl_set_unplug(tp); unregister_netdev(tp->netdev); + tasklet_kill(&tp->tx_tl); cancel_delayed_work_sync(&tp->hw_phy_work); tp->rtl_ops.unload(tp); free_netdev(tp->netdev); diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index 98f33e270af1..13e51ccf0214 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c @@ -586,8 +586,7 @@ static void free_skb_pool(rtl8150_t *dev) int i; for (i = 0; i < RX_SKB_POOL_SIZE; i++) - if (dev->rx_skb_pool[i]) - dev_kfree_skb(dev->rx_skb_pool[i]); + dev_kfree_skb(dev->rx_skb_pool[i]); } static void rx_fixup(unsigned long data) @@ -946,8 +945,7 @@ static void rtl8150_disconnect(struct usb_interface *intf) unlink_all_urbs(dev); free_all_urbs(dev); free_skb_pool(dev); - if (dev->rx_skb) - dev_kfree_skb(dev->rx_skb); + dev_kfree_skb(dev->rx_skb); kfree(dev->intr_buff); free_netdev(dev->netdev); } diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index 1417a22962a1..9556d431885f 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c @@ -661,8 +661,7 @@ static void smsc75xx_status(struct usbnet *dev, struct urb *urb) return; } - memcpy(&intdata, urb->transfer_buffer, 4); - le32_to_cpus(&intdata); + intdata = get_unaligned_le32(urb->transfer_buffer); netif_dbg(dev, link, dev->net, "intdata: 0x%08X\n", intdata); @@ -2181,12 +2180,10 @@ static int smsc75xx_rx_fixup(struct usbnet 
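The smsc75xx hunks above replace the memcpy()-plus-le32_to_cpus() idiom with get_unaligned_le32(), which expresses the unaligned access and the endianness conversion in a single call. The two forms are equivalent; a sketch:

	#include <linux/kernel.h>
	#include <linux/string.h>
	#include <asm/unaligned.h>

	u32 read_le32_old(const void *buf)
	{
		u32 v;

		memcpy(&v, buf, 4);	/* memcpy() copes with unaligned buf */
		le32_to_cpus(&v);	/* in-place LE -> CPU conversion */
		return v;
	}

	u32 read_le32_new(const void *buf)
	{
		return get_unaligned_le32(buf);	/* same result, one call */
	}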
*dev, struct sk_buff *skb) struct sk_buff *ax_skb; unsigned char *packet; - memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a)); - le32_to_cpus(&rx_cmd_a); + rx_cmd_a = get_unaligned_le32(skb->data); skb_pull(skb, 4); - memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b)); - le32_to_cpus(&rx_cmd_b); + rx_cmd_b = get_unaligned_le32(skb->data); skb_pull(skb, 4 + RXW_PADDING); packet = skb->data; @@ -2258,6 +2255,7 @@ static struct sk_buff *smsc75xx_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) { u32 tx_cmd_a, tx_cmd_b; + void *ptr; if (skb_cow_head(skb, SMSC75XX_TX_OVERHEAD)) { dev_kfree_skb_any(skb); @@ -2278,13 +2276,9 @@ static struct sk_buff *smsc75xx_tx_fixup(struct usbnet *dev, tx_cmd_b = 0; } - skb_push(skb, 4); - cpu_to_le32s(&tx_cmd_b); - memcpy(skb->data, &tx_cmd_b, 4); - - skb_push(skb, 4); - cpu_to_le32s(&tx_cmd_a); - memcpy(skb->data, &tx_cmd_a, 4); + ptr = skb_push(skb, 8); + put_unaligned_le32(tx_cmd_a, ptr); + put_unaligned_le32(tx_cmd_b, ptr + 4); return skb; } diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c index 35f39f23d881..c5d4a0060124 100644 --- a/drivers/net/usb/sr9800.c +++ b/drivers/net/usb/sr9800.c @@ -115,6 +115,7 @@ static struct sk_buff *sr_tx_fixup(struct usbnet *dev, struct sk_buff *skb, u32 padbytes = 0xffff0000; u32 packet_len; int padlen; + void *ptr; padlen = ((skb->len + 4) % (dev->maxpacket - 1)) ? 0 : 4; @@ -133,14 +134,12 @@ static struct sk_buff *sr_tx_fixup(struct usbnet *dev, struct sk_buff *skb, return NULL; } - skb_push(skb, 4); + ptr = skb_push(skb, 4); packet_len = (((skb->len - 4) ^ 0x0000ffff) << 16) + (skb->len - 4); - cpu_to_le32s(&packet_len); - skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len)); + put_unaligned_le32(packet_len, ptr); if (padlen) { - cpu_to_le32s(&padbytes); - memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes)); + put_unaligned_le32(padbytes, skb_tail_pointer(skb)); skb_put(skb, sizeof(padbytes)); } diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 72514c46b478..58952a79b05f 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -1324,11 +1324,11 @@ static int build_dma_sg(const struct sk_buff *skb, struct urb *urb) total_len += skb_headlen(skb); for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - struct skb_frag_struct *f = &skb_shinfo(skb)->frags[i]; + skb_frag_t *f = &skb_shinfo(skb)->frags[i]; total_len += skb_frag_size(f); - sg_set_page(&urb->sg[i + s], f->page.p, f->size, - f->page_offset); + sg_set_page(&urb->sg[i + s], skb_frag_page(f), skb_frag_size(f), + skb_frag_off(f)); } urb->transfer_buffer_length = total_len; diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 2a1918f25e47..216acf37ca7c 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -657,13 +657,12 @@ static void vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd, struct vmxnet3_rx_buf_info *rbi) { - struct skb_frag_struct *frag = skb_shinfo(skb)->frags + - skb_shinfo(skb)->nr_frags; + skb_frag_t *frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags; BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS); __skb_frag_set_page(frag, rbi->page); - frag->page_offset = 0; + skb_frag_off_set(frag, 0); skb_frag_size_set(frag, rcd->len); skb->data_len += rcd->len; skb->truesize += PAGE_SIZE; @@ -755,7 +754,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, } for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - const struct skb_frag_struct *frag = 
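The usbnet and vmxnet3 hunks above stop reaching into skb_frag_t internals (f->page.p, f->page_offset, f->size) and use the accessor helpers instead, so drivers keep building when the fragment layout changes. A sketch of a fragment walk using only the accessors; process() is a hypothetical consumer:

	#include <linux/skbuff.h>

	void process(struct page *page, unsigned int off, unsigned int len);

	static void walk_frags(const struct sk_buff *skb)
	{
		int i;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *f = &skb_shinfo(skb)->frags[i];

			process(skb_frag_page(f),	/* struct page * */
				skb_frag_off(f),	/* offset in the page */
				skb_frag_size(f));	/* fragment length */
		}
	}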
&skb_shinfo(skb)->frags[i]; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; u32 buf_size; buf_offset = 0; @@ -956,7 +955,7 @@ static int txd_estimate(const struct sk_buff *skb) int i; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; count += VMXNET3_TXD_NEEDED(skb_frag_size(frag)); } diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig index 09fdd619ac67..dd1a147f2971 100644 --- a/drivers/net/wan/Kconfig +++ b/drivers/net/wan/Kconfig @@ -267,20 +267,6 @@ config FARSYNC To compile this driver as a module, choose M here: the module will be called farsync. -config DSCC4 - tristate "Etinc PCISYNC serial board support" - depends on HDLC && PCI && m - help - Driver for Etinc PCISYNC boards based on the Infineon (ex. Siemens) - DSCC4 chipset. - - This is supposed to work with the four port card. Take a look at - <http://www.cogenit.fr/dscc4/> for further information about the - driver. - - To compile this driver as a module, choose M here: the - module will be called dscc4. - config FSL_UCC_HDLC tristate "Freescale QUICC Engine HDLC support" depends on HDLC diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile index 9532e69fda87..701f5d2fe3b6 100644 --- a/drivers/net/wan/Makefile +++ b/drivers/net/wan/Makefile @@ -18,7 +18,6 @@ obj-$(CONFIG_HOSTESS_SV11) += z85230.o hostess_sv11.o obj-$(CONFIG_SEALEVEL_4021) += z85230.o sealevel.o obj-$(CONFIG_COSA) += cosa.o obj-$(CONFIG_FARSYNC) += farsync.o -obj-$(CONFIG_DSCC4) += dscc4.o obj-$(CONFIG_X25_ASY) += x25_asy.o obj-$(CONFIG_LANMEDIA) += lmc/ diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c deleted file mode 100644 index fa78d2b14136..000000000000 --- a/drivers/net/wan/dscc4.c +++ /dev/null @@ -1,2057 +0,0 @@ -/* - * drivers/net/wan/dscc4/dscc4.c: a DSCC4 HDLC driver for Linux - * - * This software may be used and distributed according to the terms of the - * GNU General Public License. - * - * The author may be reached as romieu@cogenit.fr. - * Specific bug reports/asian food will be welcome. - * - * Special thanks to the nice people at CS-Telecom for the hardware and the - * access to the test/measure tools. - * - * - * Theory of Operation - * - * I. Board Compatibility - * - * This device driver is designed for the Siemens PEB20534 4 ports serial - * controller as found on Etinc PCISYNC cards. The documentation for the - * chipset is available at http://www.infineon.com: - * - Data Sheet "DSCC4, DMA Supported Serial Communication Controller with - * 4 Channels, PEB 20534 Version 2.1, PEF 20534 Version 2.1"; - * - Application Hint "Management of DSCC4 on-chip FIFO resources". - * - Errata sheet DS5 (courtesy of Michael Skerritt). - * Jens David has built an adapter based on the same chipset. Take a look - * at http://www.afthd.tu-darmstadt.de/~dg1kjd/pciscc4 for a specific - * driver. - * Sample code (2 revisions) is available at Infineon. - * - * II. Board-specific settings - * - * Pcisync can transmit some clock signal to the outside world on the - * *first two* ports provided you put a quartz and a line driver on it and - * remove the jumpers. The operation is described on Etinc web site. If you - * go DCE on these ports, don't forget to use an adequate cable. - * - * Sharing of the PCI interrupt line for this board is possible. - * - * III. Driver operation - * - * The rx/tx operations are based on a linked list of descriptors. The driver - * doesn't use HOLD mode any more. 
HOLD mode is definitely buggy and the more - * I tried to fix it, the more it started to look like (convoluted) software - * mutation of LxDA method. Errata sheet DS5 suggests to use LxDA: consider - * this a rfc2119 MUST. - * - * Tx direction - * When the tx ring is full, the xmit routine issues a call to netdev_stop. - * The device is supposed to be enabled again during an ALLS irq (we could - * use HI but as it's easy to lose events, it's fscked). - * - * Rx direction - * The received frames aren't supposed to span over multiple receiving areas. - * I may implement it some day but it isn't the highest ranked item. - * - * IV. Notes - * The current error (XDU, RFO) recovery code is untested. - * So far, RDO takes his RX channel down and the right sequence to enable it - * again is still a mystery. If RDO happens, plan a reboot. More details - * in the code (NB: as this happens, TX still works). - * Don't mess the cables during operation, especially on DTE ports. I don't - * suggest it for DCE either but at least one can get some messages instead - * of a complete instant freeze. - * Tests are done on Rev. 20 of the silicium. The RDO handling changes with - * the documentation/chipset releases. - * - * TODO: - * - test X25. - * - use polling at high irq/s, - * - performance analysis, - * - endianness. - * - * 2001/12/10 Daniela Squassoni <daniela@cyclades.com> - * - Contribution to support the new generic HDLC layer. - * - * 2002/01 Ueimor - * - old style interface removal - * - dscc4_release_ring fix (related to DMA mapping) - * - hard_start_xmit fix (hint: TxSizeMax) - * - misc crapectomy. - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/module.h> -#include <linux/sched.h> -#include <linux/types.h> -#include <linux/errno.h> -#include <linux/list.h> -#include <linux/ioport.h> -#include <linux/pci.h> -#include <linux/kernel.h> -#include <linux/mm.h> -#include <linux/slab.h> - -#include <asm/cache.h> -#include <asm/byteorder.h> -#include <linux/uaccess.h> -#include <asm/io.h> -#include <asm/irq.h> - -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/string.h> - -#include <linux/if_arp.h> -#include <linux/netdevice.h> -#include <linux/skbuff.h> -#include <linux/delay.h> -#include <linux/hdlc.h> -#include <linux/mutex.h> - -/* Version */ -static const char version[] = "$Id: dscc4.c,v 1.173 2003/09/20 23:55:34 romieu Exp $ for Linux\n"; -static int debug; -static int quartz; - -#ifdef CONFIG_DSCC4_PCI_RST -static DEFINE_MUTEX(dscc4_mutex); -static u32 dscc4_pci_config_store[16]; -#endif - -#define DRV_NAME "dscc4" - -#undef DSCC4_POLLING - -/* Module parameters */ - -MODULE_AUTHOR("Maintainer: Francois Romieu <romieu@cogenit.fr>"); -MODULE_DESCRIPTION("Siemens PEB20534 PCI Controller"); -MODULE_LICENSE("GPL"); -module_param(debug, int, 0); -MODULE_PARM_DESC(debug,"Enable/disable extra messages"); -module_param(quartz, int, 0); -MODULE_PARM_DESC(quartz,"If present, on-board quartz frequency (Hz)"); - -/* Structures */ - -struct thingie { - int define; - u32 bits; -}; - -struct TxFD { - __le32 state; - __le32 next; - __le32 data; - __le32 complete; - u32 jiffies; /* Allows sizeof(TxFD) == sizeof(RxFD) + extra hack */ - /* FWIW, datasheet calls that "dummy" and says that card - * never looks at it; neither does the driver */ -}; - -struct RxFD { - __le32 state1; - __le32 next; - __le32 data; - __le32 state2; - __le32 end; -}; - -#define DUMMY_SKB_SIZE 64 -#define TX_LOW 8 -#define TX_RING_SIZE 32 -#define RX_RING_SIZE 32 -#define TX_TOTAL_SIZE 
TX_RING_SIZE*sizeof(struct TxFD) -#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct RxFD) -#define IRQ_RING_SIZE 64 /* Keep it a multiple of 32 */ -#define TX_TIMEOUT (HZ/10) -#define DSCC4_HZ_MAX 33000000 -#define BRR_DIVIDER_MAX 64*0x00004000 /* Cf errata DS5 p.10 */ -#define dev_per_card 4 -#define SCC_REGISTERS_MAX 23 /* Cf errata DS5 p.4 */ - -#define SOURCE_ID(flags) (((flags) >> 28) & 0x03) -#define TO_SIZE(state) (((state) >> 16) & 0x1fff) - -/* - * Given the operating range of Linux HDLC, the 2 defines below could be - * made simpler. However they are a fine reminder for the limitations of - * the driver: it's better to stay < TxSizeMax and < RxSizeMax. - */ -#define TO_STATE_TX(len) cpu_to_le32(((len) & TxSizeMax) << 16) -#define TO_STATE_RX(len) cpu_to_le32((RX_MAX(len) % RxSizeMax) << 16) -#define RX_MAX(len) ((((len) >> 5) + 1) << 5) /* Cf RLCR */ -#define SCC_REG_START(dpriv) (SCC_START+(dpriv->dev_id)*SCC_OFFSET) - -struct dscc4_pci_priv { - __le32 *iqcfg; - int cfg_cur; - spinlock_t lock; - struct pci_dev *pdev; - - struct dscc4_dev_priv *root; - dma_addr_t iqcfg_dma; - u32 xtal_hz; -}; - -struct dscc4_dev_priv { - struct sk_buff *rx_skbuff[RX_RING_SIZE]; - struct sk_buff *tx_skbuff[TX_RING_SIZE]; - - struct RxFD *rx_fd; - struct TxFD *tx_fd; - __le32 *iqrx; - __le32 *iqtx; - - /* FIXME: check all the volatile are required */ - volatile u32 tx_current; - u32 rx_current; - u32 iqtx_current; - u32 iqrx_current; - - volatile u32 tx_dirty; - volatile u32 ltda; - u32 rx_dirty; - u32 lrda; - - dma_addr_t tx_fd_dma; - dma_addr_t rx_fd_dma; - dma_addr_t iqtx_dma; - dma_addr_t iqrx_dma; - - u32 scc_regs[SCC_REGISTERS_MAX]; /* Cf errata DS5 p.4 */ - - struct dscc4_pci_priv *pci_priv; - spinlock_t lock; - - int dev_id; - volatile u32 flags; - u32 timer_help; - - unsigned short encoding; - unsigned short parity; - struct net_device *dev; - sync_serial_settings settings; - void __iomem *base_addr; - u32 __pad __attribute__ ((aligned (4))); -}; - -/* GLOBAL registers definitions */ -#define GCMDR 0x00 -#define GSTAR 0x04 -#define GMODE 0x08 -#define IQLENR0 0x0C -#define IQLENR1 0x10 -#define IQRX0 0x14 -#define IQTX0 0x24 -#define IQCFG 0x3c -#define FIFOCR1 0x44 -#define FIFOCR2 0x48 -#define FIFOCR3 0x4c -#define FIFOCR4 0x34 -#define CH0CFG 0x50 -#define CH0BRDA 0x54 -#define CH0BTDA 0x58 -#define CH0FRDA 0x98 -#define CH0FTDA 0xb0 -#define CH0LRDA 0xc8 -#define CH0LTDA 0xe0 - -/* SCC registers definitions */ -#define SCC_START 0x0100 -#define SCC_OFFSET 0x80 -#define CMDR 0x00 -#define STAR 0x04 -#define CCR0 0x08 -#define CCR1 0x0c -#define CCR2 0x10 -#define BRR 0x2C -#define RLCR 0x40 -#define IMR 0x54 -#define ISR 0x58 - -#define GPDIR 0x0400 -#define GPDATA 0x0404 -#define GPIM 0x0408 - -/* Bit masks */ -#define EncodingMask 0x00700000 -#define CrcMask 0x00000003 - -#define IntRxScc0 0x10000000 -#define IntTxScc0 0x01000000 - -#define TxPollCmd 0x00000400 -#define RxActivate 0x08000000 -#define MTFi 0x04000000 -#define Rdr 0x00400000 -#define Rdt 0x00200000 -#define Idr 0x00100000 -#define Idt 0x00080000 -#define TxSccRes 0x01000000 -#define RxSccRes 0x00010000 -#define TxSizeMax 0x1fff /* Datasheet DS1 - 11.1.1.1 */ -#define RxSizeMax 0x1ffc /* Datasheet DS1 - 11.1.2.1 */ - -#define Ccr0ClockMask 0x0000003f -#define Ccr1LoopMask 0x00000200 -#define IsrMask 0x000fffff -#define BrrExpMask 0x00000f00 -#define BrrMultMask 0x0000003f -#define EncodingMask 0x00700000 -#define Hold cpu_to_le32(0x40000000) -#define SccBusy 0x10000000 -#define PowerUp 0x80000000 -#define Vis 0x00001000 
-#define FrameOk (FrameVfr | FrameCrc) -#define FrameVfr 0x80 -#define FrameRdo 0x40 -#define FrameCrc 0x20 -#define FrameRab 0x10 -#define FrameAborted cpu_to_le32(0x00000200) -#define FrameEnd cpu_to_le32(0x80000000) -#define DataComplete cpu_to_le32(0x40000000) -#define LengthCheck 0x00008000 -#define SccEvt 0x02000000 -#define NoAck 0x00000200 -#define Action 0x00000001 -#define HiDesc cpu_to_le32(0x20000000) - -/* SCC events */ -#define RxEvt 0xf0000000 -#define TxEvt 0x0f000000 -#define Alls 0x00040000 -#define Xdu 0x00010000 -#define Cts 0x00004000 -#define Xmr 0x00002000 -#define Xpr 0x00001000 -#define Rdo 0x00000080 -#define Rfs 0x00000040 -#define Cd 0x00000004 -#define Rfo 0x00000002 -#define Flex 0x00000001 - -/* DMA core events */ -#define Cfg 0x00200000 -#define Hi 0x00040000 -#define Fi 0x00020000 -#define Err 0x00010000 -#define Arf 0x00000002 -#define ArAck 0x00000001 - -/* State flags */ -#define Ready 0x00000000 -#define NeedIDR 0x00000001 -#define NeedIDT 0x00000002 -#define RdoSet 0x00000004 -#define FakeReset 0x00000008 - -/* Don't mask RDO. Ever. */ -#ifdef DSCC4_POLLING -#define EventsMask 0xfffeef7f -#else -#define EventsMask 0xfffa8f7a -#endif - -/* Functions prototypes */ -static void dscc4_rx_irq(struct dscc4_pci_priv *, struct dscc4_dev_priv *); -static void dscc4_tx_irq(struct dscc4_pci_priv *, struct dscc4_dev_priv *); -static int dscc4_found1(struct pci_dev *, void __iomem *ioaddr); -static int dscc4_init_one(struct pci_dev *, const struct pci_device_id *ent); -static int dscc4_open(struct net_device *); -static netdev_tx_t dscc4_start_xmit(struct sk_buff *, - struct net_device *); -static int dscc4_close(struct net_device *); -static int dscc4_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); -static int dscc4_init_ring(struct net_device *); -static void dscc4_release_ring(struct dscc4_dev_priv *); -static void dscc4_tx_timeout(struct net_device *); -static irqreturn_t dscc4_irq(int irq, void *dev_id); -static int dscc4_hdlc_attach(struct net_device *, unsigned short, unsigned short); -static int dscc4_set_iface(struct dscc4_dev_priv *, struct net_device *); -#ifdef DSCC4_POLLING -static int dscc4_tx_poll(struct dscc4_dev_priv *, struct net_device *); -#endif - -static inline struct dscc4_dev_priv *dscc4_priv(struct net_device *dev) -{ - return dev_to_hdlc(dev)->priv; -} - -static inline struct net_device *dscc4_to_dev(struct dscc4_dev_priv *p) -{ - return p->dev; -} - -static void scc_patchl(u32 mask, u32 value, struct dscc4_dev_priv *dpriv, - struct net_device *dev, int offset) -{ - u32 state; - - /* Cf scc_writel for concern regarding thread-safety */ - state = dpriv->scc_regs[offset >> 2]; - state &= ~mask; - state |= value; - dpriv->scc_regs[offset >> 2] = state; - writel(state, dpriv->base_addr + SCC_REG_START(dpriv) + offset); -} - -static void scc_writel(u32 bits, struct dscc4_dev_priv *dpriv, - struct net_device *dev, int offset) -{ - /* - * Thread-UNsafe. - * As of 2002/02/16, there are no thread racing for access. 
- */ - dpriv->scc_regs[offset >> 2] = bits; - writel(bits, dpriv->base_addr + SCC_REG_START(dpriv) + offset); -} - -static inline u32 scc_readl(struct dscc4_dev_priv *dpriv, int offset) -{ - return dpriv->scc_regs[offset >> 2]; -} - -static u32 scc_readl_star(struct dscc4_dev_priv *dpriv, struct net_device *dev) -{ - /* Cf errata DS5 p.4 */ - readl(dpriv->base_addr + SCC_REG_START(dpriv) + STAR); - return readl(dpriv->base_addr + SCC_REG_START(dpriv) + STAR); -} - -static inline void dscc4_do_tx(struct dscc4_dev_priv *dpriv, - struct net_device *dev) -{ - dpriv->ltda = dpriv->tx_fd_dma + - ((dpriv->tx_current-1)%TX_RING_SIZE)*sizeof(struct TxFD); - writel(dpriv->ltda, dpriv->base_addr + CH0LTDA + dpriv->dev_id*4); - /* Flush posted writes *NOW* */ - readl(dpriv->base_addr + CH0LTDA + dpriv->dev_id*4); -} - -static inline void dscc4_rx_update(struct dscc4_dev_priv *dpriv, - struct net_device *dev) -{ - dpriv->lrda = dpriv->rx_fd_dma + - ((dpriv->rx_dirty - 1)%RX_RING_SIZE)*sizeof(struct RxFD); - writel(dpriv->lrda, dpriv->base_addr + CH0LRDA + dpriv->dev_id*4); -} - -static inline unsigned int dscc4_tx_done(struct dscc4_dev_priv *dpriv) -{ - return dpriv->tx_current == dpriv->tx_dirty; -} - -static inline unsigned int dscc4_tx_quiescent(struct dscc4_dev_priv *dpriv, - struct net_device *dev) -{ - return readl(dpriv->base_addr + CH0FTDA + dpriv->dev_id*4) == dpriv->ltda; -} - -static int state_check(u32 state, struct dscc4_dev_priv *dpriv, - struct net_device *dev, const char *msg) -{ - int ret = 0; - - if (debug > 1) { - if (SOURCE_ID(state) != dpriv->dev_id) { - printk(KERN_DEBUG "%s (%s): Source Id=%d, state=%08x\n", - dev->name, msg, SOURCE_ID(state), state); - ret = -1; - } - if (state & 0x0df80c00) { - printk(KERN_DEBUG "%s (%s): state=%08x (UFO alert)\n", - dev->name, msg, state); - ret = -1; - } - } - return ret; -} - -static void dscc4_tx_print(struct net_device *dev, - struct dscc4_dev_priv *dpriv, - char *msg) -{ - printk(KERN_DEBUG "%s: tx_current=%02d tx_dirty=%02d (%s)\n", - dev->name, dpriv->tx_current, dpriv->tx_dirty, msg); -} - -static void dscc4_release_ring(struct dscc4_dev_priv *dpriv) -{ - struct device *d = &dpriv->pci_priv->pdev->dev; - struct TxFD *tx_fd = dpriv->tx_fd; - struct RxFD *rx_fd = dpriv->rx_fd; - struct sk_buff **skbuff; - int i; - - dma_free_coherent(d, TX_TOTAL_SIZE, tx_fd, dpriv->tx_fd_dma); - dma_free_coherent(d, RX_TOTAL_SIZE, rx_fd, dpriv->rx_fd_dma); - - skbuff = dpriv->tx_skbuff; - for (i = 0; i < TX_RING_SIZE; i++) { - if (*skbuff) { - dma_unmap_single(d, le32_to_cpu(tx_fd->data), - (*skbuff)->len, DMA_TO_DEVICE); - dev_kfree_skb(*skbuff); - } - skbuff++; - tx_fd++; - } - - skbuff = dpriv->rx_skbuff; - for (i = 0; i < RX_RING_SIZE; i++) { - if (*skbuff) { - dma_unmap_single(d, le32_to_cpu(rx_fd->data), - RX_MAX(HDLC_MAX_MRU), - DMA_FROM_DEVICE); - dev_kfree_skb(*skbuff); - } - skbuff++; - rx_fd++; - } -} - -static inline int try_get_rx_skb(struct dscc4_dev_priv *dpriv, - struct net_device *dev) -{ - unsigned int dirty = dpriv->rx_dirty%RX_RING_SIZE; - struct device *d = &dpriv->pci_priv->pdev->dev; - struct RxFD *rx_fd = dpriv->rx_fd + dirty; - const int len = RX_MAX(HDLC_MAX_MRU); - struct sk_buff *skb; - dma_addr_t addr; - - skb = dev_alloc_skb(len); - if (!skb) - goto err_out; - - skb->protocol = hdlc_type_trans(skb, dev); - addr = dma_map_single(d, skb->data, len, DMA_FROM_DEVICE); - if (dma_mapping_error(d, addr)) - goto err_free_skb; - - dpriv->rx_skbuff[dirty] = skb; - rx_fd->data = cpu_to_le32(addr); - return 0; - -err_free_skb: - 
dev_kfree_skb_any(skb); -err_out: - rx_fd->data = 0; - return -1; -} - -/* - * IRQ/thread/whatever safe - */ -static int dscc4_wait_ack_cec(struct dscc4_dev_priv *dpriv, - struct net_device *dev, char *msg) -{ - s8 i = 0; - - do { - if (!(scc_readl_star(dpriv, dev) & SccBusy)) { - printk(KERN_DEBUG "%s: %s ack (%d try)\n", dev->name, - msg, i); - goto done; - } - schedule_timeout_uninterruptible(msecs_to_jiffies(100)); - rmb(); - } while (++i > 0); - netdev_err(dev, "%s timeout\n", msg); -done: - return (i >= 0) ? i : -EAGAIN; -} - -static int dscc4_do_action(struct net_device *dev, char *msg) -{ - void __iomem *ioaddr = dscc4_priv(dev)->base_addr; - s16 i = 0; - - writel(Action, ioaddr + GCMDR); - ioaddr += GSTAR; - do { - u32 state = readl(ioaddr); - - if (state & ArAck) { - netdev_dbg(dev, "%s ack\n", msg); - writel(ArAck, ioaddr); - goto done; - } else if (state & Arf) { - netdev_err(dev, "%s failed\n", msg); - writel(Arf, ioaddr); - i = -1; - goto done; - } - rmb(); - } while (++i > 0); - netdev_err(dev, "%s timeout\n", msg); -done: - return i; -} - -static inline int dscc4_xpr_ack(struct dscc4_dev_priv *dpriv) -{ - int cur = dpriv->iqtx_current%IRQ_RING_SIZE; - s8 i = 0; - - do { - if (!(dpriv->flags & (NeedIDR | NeedIDT)) || - (dpriv->iqtx[cur] & cpu_to_le32(Xpr))) - break; - smp_rmb(); - schedule_timeout_uninterruptible(msecs_to_jiffies(100)); - } while (++i > 0); - - return (i >= 0 ) ? i : -EAGAIN; -} - -#if 0 /* dscc4_{rx/tx}_reset are both unreliable - more tweak needed */ -static void dscc4_rx_reset(struct dscc4_dev_priv *dpriv, struct net_device *dev) -{ - unsigned long flags; - - spin_lock_irqsave(&dpriv->pci_priv->lock, flags); - /* Cf errata DS5 p.6 */ - writel(0x00000000, dpriv->base_addr + CH0LRDA + dpriv->dev_id*4); - scc_patchl(PowerUp, 0, dpriv, dev, CCR0); - readl(dpriv->base_addr + CH0LRDA + dpriv->dev_id*4); - writel(MTFi|Rdr, dpriv->base_addr + dpriv->dev_id*0x0c + CH0CFG); - writel(Action, dpriv->base_addr + GCMDR); - spin_unlock_irqrestore(&dpriv->pci_priv->lock, flags); -} - -#endif - -#if 0 -static void dscc4_tx_reset(struct dscc4_dev_priv *dpriv, struct net_device *dev) -{ - u16 i = 0; - - /* Cf errata DS5 p.7 */ - scc_patchl(PowerUp, 0, dpriv, dev, CCR0); - scc_writel(0x00050000, dpriv, dev, CCR2); - /* - * Must be longer than the time required to fill the fifo. - */ - while (!dscc4_tx_quiescent(dpriv, dev) && ++i) { - udelay(1); - wmb(); - } - - writel(MTFi|Rdt, dpriv->base_addr + dpriv->dev_id*0x0c + CH0CFG); - if (dscc4_do_action(dev, "Rdt") < 0) - netdev_err(dev, "Tx reset failed\n"); -} -#endif - -/* TODO: (ab)use this function to refill a completely depleted RX ring. 
*/ -static inline void dscc4_rx_skb(struct dscc4_dev_priv *dpriv, - struct net_device *dev) -{ - struct RxFD *rx_fd = dpriv->rx_fd + dpriv->rx_current%RX_RING_SIZE; - struct device *d = &dpriv->pci_priv->pdev->dev; - struct sk_buff *skb; - int pkt_len; - - skb = dpriv->rx_skbuff[dpriv->rx_current++%RX_RING_SIZE]; - if (!skb) { - printk(KERN_DEBUG "%s: skb=0 (%s)\n", dev->name, __func__); - goto refill; - } - pkt_len = TO_SIZE(le32_to_cpu(rx_fd->state2)); - dma_unmap_single(d, le32_to_cpu(rx_fd->data), - RX_MAX(HDLC_MAX_MRU), DMA_FROM_DEVICE); - if ((skb->data[--pkt_len] & FrameOk) == FrameOk) { - dev->stats.rx_packets++; - dev->stats.rx_bytes += pkt_len; - skb_put(skb, pkt_len); - if (netif_running(dev)) - skb->protocol = hdlc_type_trans(skb, dev); - netif_rx(skb); - } else { - if (skb->data[pkt_len] & FrameRdo) - dev->stats.rx_fifo_errors++; - else if (!(skb->data[pkt_len] & FrameCrc)) - dev->stats.rx_crc_errors++; - else if ((skb->data[pkt_len] & (FrameVfr | FrameRab)) != - (FrameVfr | FrameRab)) - dev->stats.rx_length_errors++; - dev->stats.rx_errors++; - dev_kfree_skb_irq(skb); - } -refill: - while ((dpriv->rx_dirty - dpriv->rx_current) % RX_RING_SIZE) { - if (try_get_rx_skb(dpriv, dev) < 0) - break; - dpriv->rx_dirty++; - } - dscc4_rx_update(dpriv, dev); - rx_fd->state2 = 0x00000000; - rx_fd->end = cpu_to_le32(0xbabeface); -} - -static void dscc4_free1(struct pci_dev *pdev) -{ - struct dscc4_pci_priv *ppriv; - struct dscc4_dev_priv *root; - int i; - - ppriv = pci_get_drvdata(pdev); - root = ppriv->root; - - for (i = 0; i < dev_per_card; i++) - unregister_hdlc_device(dscc4_to_dev(root + i)); - - for (i = 0; i < dev_per_card; i++) - free_netdev(root[i].dev); - kfree(root); - kfree(ppriv); -} - -static int dscc4_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) -{ - struct dscc4_pci_priv *priv; - struct dscc4_dev_priv *dpriv; - void __iomem *ioaddr; - int i, rc; - - printk(KERN_DEBUG "%s", version); - - rc = pci_enable_device(pdev); - if (rc < 0) - goto out; - - rc = pci_request_region(pdev, 0, "registers"); - if (rc < 0) { - pr_err("can't reserve MMIO region (regs)\n"); - goto err_disable_0; - } - rc = pci_request_region(pdev, 1, "LBI interface"); - if (rc < 0) { - pr_err("can't reserve MMIO region (lbi)\n"); - goto err_free_mmio_region_1; - } - - ioaddr = pci_ioremap_bar(pdev, 0); - if (!ioaddr) { - pr_err("cannot remap MMIO region %llx @ %llx\n", - (unsigned long long)pci_resource_len(pdev, 0), - (unsigned long long)pci_resource_start(pdev, 0)); - rc = -EIO; - goto err_free_mmio_regions_2; - } - printk(KERN_DEBUG "Siemens DSCC4, MMIO at %#llx (regs), %#llx (lbi), IRQ %d\n", - (unsigned long long)pci_resource_start(pdev, 0), - (unsigned long long)pci_resource_start(pdev, 1), pdev->irq); - - /* Cf errata DS5 p.2 */ - pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xf8); - pci_set_master(pdev); - - rc = dscc4_found1(pdev, ioaddr); - if (rc < 0) - goto err_iounmap_3; - - priv = pci_get_drvdata(pdev); - - rc = request_irq(pdev->irq, dscc4_irq, IRQF_SHARED, DRV_NAME, priv->root); - if (rc < 0) { - pr_warn("IRQ %d busy\n", pdev->irq); - goto err_release_4; - } - - /* power up/little endian/dma core controlled via lrda/ltda */ - writel(0x00000001, ioaddr + GMODE); - /* Shared interrupt queue */ - { - u32 bits; - - bits = (IRQ_RING_SIZE >> 5) - 1; - bits |= bits << 4; - bits |= bits << 8; - bits |= bits << 16; - writel(bits, ioaddr + IQLENR0); - } - /* Global interrupt queue */ - writel((u32)(((IRQ_RING_SIZE >> 5) - 1) << 20), ioaddr + IQLENR1); - - rc = -ENOMEM; - - priv->iqcfg 
= (__le32 *)dma_alloc_coherent(&pdev->dev, - IRQ_RING_SIZE*sizeof(__le32), &priv->iqcfg_dma, GFP_KERNEL); - if (!priv->iqcfg) - goto err_free_irq_5; - writel(priv->iqcfg_dma, ioaddr + IQCFG); - - /* - * SCC 0-3 private rx/tx irq structures - * IQRX/TXi needs to be set soon. Learned it the hard way... - */ - for (i = 0; i < dev_per_card; i++) { - dpriv = priv->root + i; - dpriv->iqtx = (__le32 *)dma_alloc_coherent(&pdev->dev, - IRQ_RING_SIZE*sizeof(u32), &dpriv->iqtx_dma, - GFP_KERNEL); - if (!dpriv->iqtx) - goto err_free_iqtx_6; - writel(dpriv->iqtx_dma, ioaddr + IQTX0 + i*4); - } - for (i = 0; i < dev_per_card; i++) { - dpriv = priv->root + i; - dpriv->iqrx = (__le32 *)dma_alloc_coherent(&pdev->dev, - IRQ_RING_SIZE*sizeof(u32), &dpriv->iqrx_dma, - GFP_KERNEL); - if (!dpriv->iqrx) - goto err_free_iqrx_7; - writel(dpriv->iqrx_dma, ioaddr + IQRX0 + i*4); - } - - /* Cf application hint. Beware of hard-lock condition on threshold. */ - writel(0x42104000, ioaddr + FIFOCR1); - //writel(0x9ce69800, ioaddr + FIFOCR2); - writel(0xdef6d800, ioaddr + FIFOCR2); - //writel(0x11111111, ioaddr + FIFOCR4); - writel(0x18181818, ioaddr + FIFOCR4); - // FIXME: should depend on the chipset revision - writel(0x0000000e, ioaddr + FIFOCR3); - - writel(0xff200001, ioaddr + GCMDR); - - rc = 0; -out: - return rc; - -err_free_iqrx_7: - while (--i >= 0) { - dpriv = priv->root + i; - dma_free_coherent(&pdev->dev, IRQ_RING_SIZE*sizeof(u32), - dpriv->iqrx, dpriv->iqrx_dma); - } - i = dev_per_card; -err_free_iqtx_6: - while (--i >= 0) { - dpriv = priv->root + i; - dma_free_coherent(&pdev->dev, IRQ_RING_SIZE*sizeof(u32), - dpriv->iqtx, dpriv->iqtx_dma); - } - dma_free_coherent(&pdev->dev, IRQ_RING_SIZE*sizeof(u32), priv->iqcfg, - priv->iqcfg_dma); -err_free_irq_5: - free_irq(pdev->irq, priv->root); -err_release_4: - dscc4_free1(pdev); -err_iounmap_3: - iounmap (ioaddr); -err_free_mmio_regions_2: - pci_release_region(pdev, 1); -err_free_mmio_region_1: - pci_release_region(pdev, 0); -err_disable_0: - pci_disable_device(pdev); - goto out; -}; - -/* - * Let's hope the default values are decent enough to protect my - * feet from the user's gun - Ueimor - */ -static void dscc4_init_registers(struct dscc4_dev_priv *dpriv, - struct net_device *dev) -{ - /* No interrupts, SCC core disabled. Let's relax */ - scc_writel(0x00000000, dpriv, dev, CCR0); - - scc_writel(LengthCheck | (HDLC_MAX_MRU >> 5), dpriv, dev, RLCR); - - /* - * No address recognition/crc-CCITT/cts enabled - * Shared flags transmission disabled - cf errata DS5 p.11 - * Carrier detect disabled - cf errata p.14 - * FIXME: carrier detection/polarity may be handled more gracefully. 
- */ - scc_writel(0x02408000, dpriv, dev, CCR1); - - /* crc not forwarded - Cf errata DS5 p.11 */ - scc_writel(0x00050008 & ~RxActivate, dpriv, dev, CCR2); - // crc forwarded - //scc_writel(0x00250008 & ~RxActivate, dpriv, dev, CCR2); -} - -static inline int dscc4_set_quartz(struct dscc4_dev_priv *dpriv, int hz) -{ - int ret = 0; - - if ((hz < 0) || (hz > DSCC4_HZ_MAX)) - ret = -EOPNOTSUPP; - else - dpriv->pci_priv->xtal_hz = hz; - - return ret; -} - -static const struct net_device_ops dscc4_ops = { - .ndo_open = dscc4_open, - .ndo_stop = dscc4_close, - .ndo_start_xmit = hdlc_start_xmit, - .ndo_do_ioctl = dscc4_ioctl, - .ndo_tx_timeout = dscc4_tx_timeout, -}; - -static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr) -{ - struct dscc4_pci_priv *ppriv; - struct dscc4_dev_priv *root; - int i, ret = -ENOMEM; - - root = kcalloc(dev_per_card, sizeof(*root), GFP_KERNEL); - if (!root) - goto err_out; - - for (i = 0; i < dev_per_card; i++) { - root[i].dev = alloc_hdlcdev(root + i); - if (!root[i].dev) - goto err_free_dev; - } - - ppriv = kzalloc(sizeof(*ppriv), GFP_KERNEL); - if (!ppriv) - goto err_free_dev; - - ppriv->root = root; - spin_lock_init(&ppriv->lock); - - for (i = 0; i < dev_per_card; i++) { - struct dscc4_dev_priv *dpriv = root + i; - struct net_device *d = dscc4_to_dev(dpriv); - hdlc_device *hdlc = dev_to_hdlc(d); - - d->base_addr = (unsigned long)ioaddr; - d->irq = pdev->irq; - d->netdev_ops = &dscc4_ops; - d->watchdog_timeo = TX_TIMEOUT; - SET_NETDEV_DEV(d, &pdev->dev); - - dpriv->dev_id = i; - dpriv->pci_priv = ppriv; - dpriv->base_addr = ioaddr; - spin_lock_init(&dpriv->lock); - - hdlc->xmit = dscc4_start_xmit; - hdlc->attach = dscc4_hdlc_attach; - - dscc4_init_registers(dpriv, d); - dpriv->parity = PARITY_CRC16_PR0_CCITT; - dpriv->encoding = ENCODING_NRZ; - - ret = dscc4_init_ring(d); - if (ret < 0) - goto err_unregister; - - ret = register_hdlc_device(d); - if (ret < 0) { - pr_err("unable to register\n"); - dscc4_release_ring(dpriv); - goto err_unregister; - } - } - - ret = dscc4_set_quartz(root, quartz); - if (ret < 0) - goto err_unregister; - - pci_set_drvdata(pdev, ppriv); - return ret; - -err_unregister: - while (i-- > 0) { - dscc4_release_ring(root + i); - unregister_hdlc_device(dscc4_to_dev(root + i)); - } - kfree(ppriv); - i = dev_per_card; -err_free_dev: - while (i-- > 0) - free_netdev(root[i].dev); - kfree(root); -err_out: - return ret; -}; - -static void dscc4_tx_timeout(struct net_device *dev) -{ - /* FIXME: something is missing there */ -} - -static int dscc4_loopback_check(struct dscc4_dev_priv *dpriv) -{ - sync_serial_settings *settings = &dpriv->settings; - - if (settings->loopback && (settings->clock_type != CLOCK_INT)) { - struct net_device *dev = dscc4_to_dev(dpriv); - - netdev_info(dev, "loopback requires clock\n"); - return -1; - } - return 0; -} - -#ifdef CONFIG_DSCC4_PCI_RST -/* - * Some DSCC4-based cards wires the GPIO port and the PCI #RST pin together - * so as to provide a safe way to reset the asic while not the whole machine - * rebooting. - * - * This code doesn't need to be efficient. Keep It Simple - */ -static void dscc4_pci_reset(struct pci_dev *pdev, void __iomem *ioaddr) -{ - int i; - - mutex_lock(&dscc4_mutex); - for (i = 0; i < 16; i++) - pci_read_config_dword(pdev, i << 2, dscc4_pci_config_store + i); - - /* Maximal LBI clock divider (who cares ?) and whole GPIO range. 
*/ - writel(0x001c0000, ioaddr + GMODE); - /* Configure GPIO port as output */ - writel(0x0000ffff, ioaddr + GPDIR); - /* Disable interrupts */ - writel(0x0000ffff, ioaddr + GPIM); - - writel(0x0000ffff, ioaddr + GPDATA); - writel(0x00000000, ioaddr + GPDATA); - - /* Flush posted writes */ - readl(ioaddr + GSTAR); - - schedule_timeout_uninterruptible(msecs_to_jiffies(100)); - - for (i = 0; i < 16; i++) - pci_write_config_dword(pdev, i << 2, dscc4_pci_config_store[i]); - mutex_unlock(&dscc4_mutex); -} -#else -#define dscc4_pci_reset(pdev,ioaddr) do {} while (0) -#endif /* CONFIG_DSCC4_PCI_RST */ - -static int dscc4_open(struct net_device *dev) -{ - struct dscc4_dev_priv *dpriv = dscc4_priv(dev); - int ret = -EAGAIN; - - if ((dscc4_loopback_check(dpriv) < 0)) - goto err; - - if ((ret = hdlc_open(dev))) - goto err; - - /* - * Due to various bugs, there is no way to reliably reset a - * specific port (manufacturer-dependent special PCI #RST wiring - * aside: it affects all ports). Thus the device goes into the best - * silent mode possible at dscc4_close() time and simply claims to - * be up if it's opened again. It still isn't possible to change - * the HDLC configuration without rebooting but at least the ports - * can be up/down ifconfig'ed without killing the host. - */ - if (dpriv->flags & FakeReset) { - dpriv->flags &= ~FakeReset; - scc_patchl(0, PowerUp, dpriv, dev, CCR0); - scc_patchl(0, 0x00050000, dpriv, dev, CCR2); - scc_writel(EventsMask, dpriv, dev, IMR); - netdev_info(dev, "up again\n"); - goto done; - } - - /* IDT+IDR during XPR */ - dpriv->flags = NeedIDR | NeedIDT; - - scc_patchl(0, PowerUp | Vis, dpriv, dev, CCR0); - - /* - * The following is a bit paranoid... - * - * NB: the datasheet "...CEC will stay active if the SCC is in - * power-down mode or..." and CCR2.RAC = 1 are two different - * situations. - */ - if (scc_readl_star(dpriv, dev) & SccBusy) { - netdev_err(dev, "busy - try later\n"); - ret = -EAGAIN; - goto err_out; - } else - netdev_info(dev, "available - good\n"); - - scc_writel(EventsMask, dpriv, dev, IMR); - - /* Posted write is flushed in the wait_ack loop */ - scc_writel(TxSccRes | RxSccRes, dpriv, dev, CMDR); - - if ((ret = dscc4_wait_ack_cec(dpriv, dev, "Cec")) < 0) - goto err_disable_scc_events; - - /* - * I would expect XPR near CE completion (before ? after ?). - * At worst, this code won't see a late XPR and people - * will have to re-issue an ifconfig (this is harmless). - * WARNING, a really missing XPR usually means a hardware - * reset is needed. Suggestions anyone ?
- */ - if ((ret = dscc4_xpr_ack(dpriv)) < 0) { - pr_err("XPR timeout\n"); - goto err_disable_scc_events; - } - - if (debug > 2) - dscc4_tx_print(dev, dpriv, "Open"); - -done: - netif_start_queue(dev); - - netif_carrier_on(dev); - - return 0; - -err_disable_scc_events: - scc_writel(0xffffffff, dpriv, dev, IMR); - scc_patchl(PowerUp | Vis, 0, dpriv, dev, CCR0); -err_out: - hdlc_close(dev); -err: - return ret; -} - -#ifdef DSCC4_POLLING -static int dscc4_tx_poll(struct dscc4_dev_priv *dpriv, struct net_device *dev) -{ - /* FIXME: it's gonna be easy (TM), for sure */ -} -#endif /* DSCC4_POLLING */ - -static netdev_tx_t dscc4_start_xmit(struct sk_buff *skb, - struct net_device *dev) -{ - struct dscc4_dev_priv *dpriv = dscc4_priv(dev); - struct device *d = &dpriv->pci_priv->pdev->dev; - struct TxFD *tx_fd; - dma_addr_t addr; - int next; - - addr = dma_map_single(d, skb->data, skb->len, DMA_TO_DEVICE); - if (dma_mapping_error(d, addr)) { - dev_kfree_skb_any(skb); - dev->stats.tx_dropped++; - return NETDEV_TX_OK; - } - - next = dpriv->tx_current%TX_RING_SIZE; - dpriv->tx_skbuff[next] = skb; - tx_fd = dpriv->tx_fd + next; - tx_fd->state = FrameEnd | TO_STATE_TX(skb->len); - tx_fd->data = cpu_to_le32(addr); - tx_fd->complete = 0x00000000; - tx_fd->jiffies = jiffies; - mb(); - -#ifdef DSCC4_POLLING - spin_lock(&dpriv->lock); - while (dscc4_tx_poll(dpriv, dev)); - spin_unlock(&dpriv->lock); -#endif - - if (debug > 2) - dscc4_tx_print(dev, dpriv, "Xmit"); - /* To be cleaned(unsigned int)/optimized. Later, ok ? */ - if (!((++dpriv->tx_current - dpriv->tx_dirty)%TX_RING_SIZE)) - netif_stop_queue(dev); - - if (dscc4_tx_quiescent(dpriv, dev)) - dscc4_do_tx(dpriv, dev); - - return NETDEV_TX_OK; -} - -static int dscc4_close(struct net_device *dev) -{ - struct dscc4_dev_priv *dpriv = dscc4_priv(dev); - - netif_stop_queue(dev); - - scc_patchl(PowerUp | Vis, 0, dpriv, dev, CCR0); - scc_patchl(0x00050000, 0, dpriv, dev, CCR2); - scc_writel(0xffffffff, dpriv, dev, IMR); - - dpriv->flags |= FakeReset; - - hdlc_close(dev); - - return 0; -} - -static inline int dscc4_check_clock_ability(int port) -{ - int ret = 0; - -#ifdef CONFIG_DSCC4_PCISYNC - if (port >= 2) - ret = -1; -#endif - return ret; -} - -/* - * DS1 p.137: "There are a total of 13 different clocking modes..." - * ^^ - * Design choices: - * - by default, assume a clock is provided on pin RxClk/TxClk (clock mode 0a). - * Clock mode 3b _should_ work but the testing seems to make this point - * dubious (DIY testing requires setting CCR0 at 0x00000033). - * This is supposed to provide least surprise "DTE like" behavior. - * - if line rate is specified, clocks are assumed to be locally generated. - * A quartz must be available (on pin XTAL1). Modes 6b/7b are used. Choosing - * between them is done automagically according to the required frequency - * scaling. Of course some rounding may take place. - * - no high speed mode (40Mb/s). May be trivial to do but I don't have an - * appropriate external clocking device for testing. - * - no time-slot/clock mode 5: shameless laziness. - * - * The clock signals wiring can be (is ?) manufacturer dependent. Good luck. - * - * BIG FAT WARNING: if the device isn't provided enough clocking signal, it - * won't pass the init sequence. For example, straight back-to-back DTE without - * external clock will fail when dscc4_open() (<- 'ifconfig hdlcx xxx') is - * called.
- * - * Typos lurk in datasheet (missing divider in clock mode 7a figure 51 p.153 - * DS0 for example) - * - * Clock mode related bits of CCR0: - * +------------ TOE: output TxClk (0b/2b/3a/3b/6b/7a/7b only) - * | +---------- SSEL: sub-mode select 0 -> a, 1 -> b - * | | +-------- High Speed: say 0 - * | | | +-+-+-- Clock Mode: 0..7 - * | | | | | | - * -+-+-+-+-+-+-+-+ - * x|x|5|4|3|2|1|0| lower bits - * - * Division factor of BRR: k = (N+1)x2^M (total divider = 16xk in mode 6b) - * +-+-+-+------------------ M (0..15) - * | | | | +-+-+-+-+-+-- N (0..63) - * 0 0 0 0 | | | | 0 0 | | | | | | - * ...-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - * f|e|d|c|b|a|9|8|7|6|5|4|3|2|1|0| lower bits - * - */ -static int dscc4_set_clock(struct net_device *dev, u32 *bps, u32 *state) -{ - struct dscc4_dev_priv *dpriv = dscc4_priv(dev); - int ret = -1; - u32 brr; - - *state &= ~Ccr0ClockMask; - if (*bps) { /* Clock generated - required for DCE */ - u32 n = 0, m = 0, divider; - int xtal; - - xtal = dpriv->pci_priv->xtal_hz; - if (!xtal) - goto done; - if (dscc4_check_clock_ability(dpriv->dev_id) < 0) - goto done; - divider = xtal / *bps; - if (divider > BRR_DIVIDER_MAX) { - divider >>= 4; - *state |= 0x00000036; /* Clock mode 6b (BRG/16) */ - } else - *state |= 0x00000037; /* Clock mode 7b (BRG) */ - if (divider >> 22) { - n = 63; - m = 15; - } else if (divider) { - /* Extraction of the 6 highest weighted bits */ - m = 0; - while (0xffffffc0 & divider) { - m++; - divider >>= 1; - } - n = divider; - } - brr = (m << 8) | n; - divider = n << m; - if (!(*state & 0x00000001)) /* ?b mode mask => clock mode 6b */ - divider <<= 4; - *bps = xtal / divider; - } else { - /* - * External clock - DTE - * "state" already reflects Clock mode 0a (CCR0 = 0xzzzzzz00). - * Nothing more to be done - */ - brr = 0; - } - scc_writel(brr, dpriv, dev, BRR); - ret = 0; -done: - return ret; -} - -static int dscc4_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) -{ - sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync; - struct dscc4_dev_priv *dpriv = dscc4_priv(dev); - const size_t size = sizeof(dpriv->settings); - int ret = 0; - - if (dev->flags & IFF_UP) - return -EBUSY; - - if (cmd != SIOCWANDEV) - return -EOPNOTSUPP; - - switch(ifr->ifr_settings.type) { - case IF_GET_IFACE: - ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL; - if (ifr->ifr_settings.size < size) { - ifr->ifr_settings.size = size; /* data size wanted */ - return -ENOBUFS; - } - if (copy_to_user(line, &dpriv->settings, size)) - return -EFAULT; - break; - - case IF_IFACE_SYNC_SERIAL: - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - - if (dpriv->flags & FakeReset) { - netdev_info(dev, "please reset the device before this command\n"); - return -EPERM; - } - if (copy_from_user(&dpriv->settings, line, size)) - return -EFAULT; - ret = dscc4_set_iface(dpriv, dev); - break; - - default: - ret = hdlc_ioctl(dev, ifr, cmd); - break; - } - - return ret; -} - -static int dscc4_match(const struct thingie *p, int value) -{ - int i; - - for (i = 0; p[i].define != -1; i++) { - if (value == p[i].define) - break; - } - if (p[i].define == -1) - return -1; - else - return i; -} - -static int dscc4_clock_setting(struct dscc4_dev_priv *dpriv, - struct net_device *dev) -{ - sync_serial_settings *settings = &dpriv->settings; - int ret = -EOPNOTSUPP; - u32 bps, state; - - bps = settings->clock_rate; - state = scc_readl(dpriv, CCR0); - if (dscc4_set_clock(dev, &bps, &state) < 0) - goto done; - if (bps) { /* DCE */ - printk(KERN_DEBUG "%s: generated RxClk (DCE)\n", dev->name); - if
(settings->clock_rate != bps) { - printk(KERN_DEBUG "%s: clock adjusted (%08d -> %08d)\n", - dev->name, settings->clock_rate, bps); - settings->clock_rate = bps; - } - } else { /* DTE */ - state |= PowerUp | Vis; - printk(KERN_DEBUG "%s: external RxClk (DTE)\n", dev->name); - } - scc_writel(state, dpriv, dev, CCR0); - ret = 0; -done: - return ret; -} - -static int dscc4_encoding_setting(struct dscc4_dev_priv *dpriv, - struct net_device *dev) -{ - static const struct thingie encoding[] = { - { ENCODING_NRZ, 0x00000000 }, - { ENCODING_NRZI, 0x00200000 }, - { ENCODING_FM_MARK, 0x00400000 }, - { ENCODING_FM_SPACE, 0x00500000 }, - { ENCODING_MANCHESTER, 0x00600000 }, - { -1, 0} - }; - int i, ret = 0; - - i = dscc4_match(encoding, dpriv->encoding); - if (i >= 0) - scc_patchl(EncodingMask, encoding[i].bits, dpriv, dev, CCR0); - else - ret = -EOPNOTSUPP; - return ret; -} - -static int dscc4_loopback_setting(struct dscc4_dev_priv *dpriv, - struct net_device *dev) -{ - sync_serial_settings *settings = &dpriv->settings; - u32 state; - - state = scc_readl(dpriv, CCR1); - if (settings->loopback) { - printk(KERN_DEBUG "%s: loopback\n", dev->name); - state |= 0x00000100; - } else { - printk(KERN_DEBUG "%s: normal\n", dev->name); - state &= ~0x00000100; - } - scc_writel(state, dpriv, dev, CCR1); - return 0; -} - -static int dscc4_crc_setting(struct dscc4_dev_priv *dpriv, - struct net_device *dev) -{ - static const struct thingie crc[] = { - { PARITY_CRC16_PR0_CCITT, 0x00000010 }, - { PARITY_CRC16_PR1_CCITT, 0x00000000 }, - { PARITY_CRC32_PR0_CCITT, 0x00000011 }, - { PARITY_CRC32_PR1_CCITT, 0x00000001 } - }; - int i, ret = 0; - - i = dscc4_match(crc, dpriv->parity); - if (i >= 0) - scc_patchl(CrcMask, crc[i].bits, dpriv, dev, CCR1); - else - ret = -EOPNOTSUPP; - return ret; -} - -static int dscc4_set_iface(struct dscc4_dev_priv *dpriv, struct net_device *dev) -{ - struct { - int (*action)(struct dscc4_dev_priv *, struct net_device *); - } *p, do_setting[] = { - { dscc4_encoding_setting }, - { dscc4_clock_setting }, - { dscc4_loopback_setting }, - { dscc4_crc_setting }, - { NULL } - }; - int ret = 0; - - for (p = do_setting; p->action; p++) { - if ((ret = p->action(dpriv, dev)) < 0) - break; - } - return ret; -} - -static irqreturn_t dscc4_irq(int irq, void *token) -{ - struct dscc4_dev_priv *root = token; - struct dscc4_pci_priv *priv; - struct net_device *dev; - void __iomem *ioaddr; - u32 state; - unsigned long flags; - int i, handled = 1; - - priv = root->pci_priv; - dev = dscc4_to_dev(root); - - spin_lock_irqsave(&priv->lock, flags); - - ioaddr = root->base_addr; - - state = readl(ioaddr + GSTAR); - if (!state) { - handled = 0; - goto out; - } - if (debug > 3) - printk(KERN_DEBUG "%s: GSTAR = 0x%08x\n", DRV_NAME, state); - writel(state, ioaddr + GSTAR); - - if (state & Arf) { - netdev_err(dev, "failure (Arf). 
Harass the maintainer\n"); - goto out; - } - state &= ~ArAck; - if (state & Cfg) { - if (debug > 0) - printk(KERN_DEBUG "%s: CfgIV\n", DRV_NAME); - if (priv->iqcfg[priv->cfg_cur++%IRQ_RING_SIZE] & cpu_to_le32(Arf)) - netdev_err(dev, "CFG failed\n"); - if (!(state &= ~Cfg)) - goto out; - } - if (state & RxEvt) { - i = dev_per_card - 1; - do { - dscc4_rx_irq(priv, root + i); - } while (--i >= 0); - state &= ~RxEvt; - } - if (state & TxEvt) { - i = dev_per_card - 1; - do { - dscc4_tx_irq(priv, root + i); - } while (--i >= 0); - state &= ~TxEvt; - } -out: - spin_unlock_irqrestore(&priv->lock, flags); - return IRQ_RETVAL(handled); -} - -static void dscc4_tx_irq(struct dscc4_pci_priv *ppriv, - struct dscc4_dev_priv *dpriv) -{ - struct net_device *dev = dscc4_to_dev(dpriv); - u32 state; - int cur, loop = 0; - -try: - cur = dpriv->iqtx_current%IRQ_RING_SIZE; - state = le32_to_cpu(dpriv->iqtx[cur]); - if (!state) { - if (debug > 4) - printk(KERN_DEBUG "%s: Tx ISR = 0x%08x\n", dev->name, - state); - if ((debug > 1) && (loop > 1)) - printk(KERN_DEBUG "%s: Tx irq loop=%d\n", dev->name, loop); - if (loop && netif_queue_stopped(dev)) - if ((dpriv->tx_current - dpriv->tx_dirty)%TX_RING_SIZE) - netif_wake_queue(dev); - - if (netif_running(dev) && dscc4_tx_quiescent(dpriv, dev) && - !dscc4_tx_done(dpriv)) - dscc4_do_tx(dpriv, dev); - return; - } - loop++; - dpriv->iqtx[cur] = 0; - dpriv->iqtx_current++; - - if (state_check(state, dpriv, dev, "Tx") < 0) - return; - - if (state & SccEvt) { - if (state & Alls) { - struct sk_buff *skb; - struct TxFD *tx_fd; - - if (debug > 2) - dscc4_tx_print(dev, dpriv, "Alls"); - /* - * DataComplete can't be trusted for Tx completion. - * Cf errata DS5 p.8 - */ - cur = dpriv->tx_dirty%TX_RING_SIZE; - tx_fd = dpriv->tx_fd + cur; - skb = dpriv->tx_skbuff[cur]; - if (skb) { - dma_unmap_single(&ppriv->pdev->dev, - le32_to_cpu(tx_fd->data), - skb->len, DMA_TO_DEVICE); - if (tx_fd->state & FrameEnd) { - dev->stats.tx_packets++; - dev->stats.tx_bytes += skb->len; - } - dev_consume_skb_irq(skb); - dpriv->tx_skbuff[cur] = NULL; - ++dpriv->tx_dirty; - } else { - if (debug > 1) - netdev_err(dev, "Tx: NULL skb %d\n", - cur); - } - /* - * If the driver ends sending crap on the wire, it - * will be way easier to diagnose than the (not so) - * random freeze induced by null sized tx frames. - */ - tx_fd->data = tx_fd->next; - tx_fd->state = FrameEnd | TO_STATE_TX(2*DUMMY_SKB_SIZE); - tx_fd->complete = 0x00000000; - tx_fd->jiffies = 0; - - if (!(state &= ~Alls)) - goto try; - } - /* - * Transmit Data Underrun - */ - if (state & Xdu) { - netdev_err(dev, "Tx Data Underrun. Ask maintainer\n"); - dpriv->flags = NeedIDT; - /* Tx reset */ - writel(MTFi | Rdt, - dpriv->base_addr + 0x0c*dpriv->dev_id + CH0CFG); - writel(Action, dpriv->base_addr + GCMDR); - return; - } - if (state & Cts) { - netdev_info(dev, "CTS transition\n"); - if (!(state &= ~Cts)) /* DEBUG */ - goto try; - } - if (state & Xmr) { - /* Frame needs to be sent again - FIXME */ - netdev_err(dev, "Tx ReTx. Ask maintainer\n"); - if (!(state &= ~Xmr)) /* DEBUG */ - goto try; - } - if (state & Xpr) { - void __iomem *scc_addr; - unsigned long ring; - unsigned int i; - - /* - * - the busy condition happens (sometimes); - * - it doesn't seem to make the handler unreliable. 
- */ - for (i = 1; i; i <<= 1) { - if (!(scc_readl_star(dpriv, dev) & SccBusy)) - break; - } - if (!i) - netdev_info(dev, "busy in irq\n"); - - scc_addr = dpriv->base_addr + 0x0c*dpriv->dev_id; - /* Keep this order: IDT before IDR */ - if (dpriv->flags & NeedIDT) { - if (debug > 2) - dscc4_tx_print(dev, dpriv, "Xpr"); - ring = dpriv->tx_fd_dma + - (dpriv->tx_dirty%TX_RING_SIZE)* - sizeof(struct TxFD); - writel(ring, scc_addr + CH0BTDA); - dscc4_do_tx(dpriv, dev); - writel(MTFi | Idt, scc_addr + CH0CFG); - if (dscc4_do_action(dev, "IDT") < 0) - goto err_xpr; - dpriv->flags &= ~NeedIDT; - } - if (dpriv->flags & NeedIDR) { - ring = dpriv->rx_fd_dma + - (dpriv->rx_current%RX_RING_SIZE)* - sizeof(struct RxFD); - writel(ring, scc_addr + CH0BRDA); - dscc4_rx_update(dpriv, dev); - writel(MTFi | Idr, scc_addr + CH0CFG); - if (dscc4_do_action(dev, "IDR") < 0) - goto err_xpr; - dpriv->flags &= ~NeedIDR; - smp_wmb(); - /* Activate receiver and misc */ - scc_writel(0x08050008, dpriv, dev, CCR2); - } - err_xpr: - if (!(state &= ~Xpr)) - goto try; - } - if (state & Cd) { - if (debug > 0) - netdev_info(dev, "CD transition\n"); - if (!(state &= ~Cd)) /* DEBUG */ - goto try; - } - } else { /* ! SccEvt */ - if (state & Hi) { -#ifdef DSCC4_POLLING - while (!dscc4_tx_poll(dpriv, dev)); -#endif - netdev_info(dev, "Tx Hi\n"); - state &= ~Hi; - } - if (state & Err) { - netdev_info(dev, "Tx ERR\n"); - dev->stats.tx_errors++; - state &= ~Err; - } - } - goto try; -} - -static void dscc4_rx_irq(struct dscc4_pci_priv *priv, - struct dscc4_dev_priv *dpriv) -{ - struct net_device *dev = dscc4_to_dev(dpriv); - u32 state; - int cur; - -try: - cur = dpriv->iqrx_current%IRQ_RING_SIZE; - state = le32_to_cpu(dpriv->iqrx[cur]); - if (!state) - return; - dpriv->iqrx[cur] = 0; - dpriv->iqrx_current++; - - if (state_check(state, dpriv, dev, "Rx") < 0) - return; - - if (!(state & SccEvt)){ - struct RxFD *rx_fd; - - if (debug > 4) - printk(KERN_DEBUG "%s: Rx ISR = 0x%08x\n", dev->name, - state); - state &= 0x00ffffff; - if (state & Err) { /* Hold or reset */ - printk(KERN_DEBUG "%s: Rx ERR\n", dev->name); - cur = dpriv->rx_current%RX_RING_SIZE; - rx_fd = dpriv->rx_fd + cur; - /* - * Presume we're not facing a DMAC receiver reset. - * As we use the rx size-filtering feature of the - * DSCC4, the beginning of a new frame is waiting in - * the rx fifo. I bet a Receive Data Overflow will - * happen most of the time but let's try and avoid it. - * Btw (as for RDO) if one experiences ERR whereas - * the system looks rather idle, there may be a - * problem with latency. In this case, increasing - * RX_RING_SIZE may help.
- */ - //while (dpriv->rx_needs_refill) { - while (!(rx_fd->state1 & Hold)) { - rx_fd++; - cur++; - if (!(cur = cur%RX_RING_SIZE)) - rx_fd = dpriv->rx_fd; - } - //dpriv->rx_needs_refill--; - try_get_rx_skb(dpriv, dev); - if (!rx_fd->data) - goto try; - rx_fd->state1 &= ~Hold; - rx_fd->state2 = 0x00000000; - rx_fd->end = cpu_to_le32(0xbabeface); - //} - goto try; - } - if (state & Fi) { - dscc4_rx_skb(dpriv, dev); - goto try; - } - if (state & Hi ) { /* HI bit */ - netdev_info(dev, "Rx Hi\n"); - state &= ~Hi; - goto try; - } - } else { /* SccEvt */ - if (debug > 1) { - //FIXME: verify that every event is handled here - static struct { - u32 mask; - const char *irq_name; - } evts[] = { - { 0x00008000, "TIN"}, - { 0x00000020, "RSC"}, - { 0x00000010, "PCE"}, - { 0x00000008, "PLLA"}, - { 0, NULL} - }, *evt; - - for (evt = evts; evt->irq_name; evt++) { - if (state & evt->mask) { - printk(KERN_DEBUG "%s: %s\n", - dev->name, evt->irq_name); - if (!(state &= ~evt->mask)) - goto try; - } - } - } else { - if (!(state &= ~0x0000c03c)) - goto try; - } - if (state & Cts) { - netdev_info(dev, "CTS transition\n"); - if (!(state &= ~Cts)) /* DEBUG */ - goto try; - } - /* - * Receive Data Overflow (FIXME: fscked) - */ - if (state & Rdo) { - struct RxFD *rx_fd; - void __iomem *scc_addr; - int cur; - - //if (debug) - // dscc4_rx_dump(dpriv); - scc_addr = dpriv->base_addr + 0x0c*dpriv->dev_id; - - scc_patchl(RxActivate, 0, dpriv, dev, CCR2); - /* - * This has no effect. Why ? - * ORed with TxSccRes, one sees the CFG ack (for - * the TX part only). - */ - scc_writel(RxSccRes, dpriv, dev, CMDR); - dpriv->flags |= RdoSet; - - /* - * Let's try and save something in the received data. - * rx_current must be incremented at least once to - * avoid HOLD in the BRDA-to-be-pointed desc. - */ - do { - cur = dpriv->rx_current++%RX_RING_SIZE; - rx_fd = dpriv->rx_fd + cur; - if (!(rx_fd->state2 & DataComplete)) - break; - if (rx_fd->state2 & FrameAborted) { - dev->stats.rx_over_errors++; - rx_fd->state1 |= Hold; - rx_fd->state2 = 0x00000000; - rx_fd->end = cpu_to_le32(0xbabeface); - } else - dscc4_rx_skb(dpriv, dev); - } while (1); - - if (debug > 0) { - if (dpriv->flags & RdoSet) - printk(KERN_DEBUG - "%s: no RDO in Rx data\n", DRV_NAME); - } -#ifdef DSCC4_RDO_EXPERIMENTAL_RECOVERY - /* - * FIXME: must the reset be this violent ? - */ -#warning "FIXME: CH0BRDA" - writel(dpriv->rx_fd_dma + - (dpriv->rx_current%RX_RING_SIZE)* - sizeof(struct RxFD), scc_addr + CH0BRDA); - writel(MTFi|Rdr|Idr, scc_addr + CH0CFG); - if (dscc4_do_action(dev, "RDR") < 0) { - netdev_err(dev, "RDO recovery failed(RDR)\n"); - goto rdo_end; - } - writel(MTFi|Idr, scc_addr + CH0CFG); - if (dscc4_do_action(dev, "IDR") < 0) { - netdev_err(dev, "RDO recovery failed(IDR)\n"); - goto rdo_end; - } - rdo_end: -#endif - scc_patchl(0, RxActivate, dpriv, dev, CCR2); - goto try; - } - if (state & Cd) { - netdev_info(dev, "CD transition\n"); - if (!(state &= ~Cd)) /* DEBUG */ - goto try; - } - if (state & Flex) { - printk(KERN_DEBUG "%s: Flex. Ttttt...\n", DRV_NAME); - if (!(state &= ~Flex)) - goto try; - } - } -} - -/* - * I had expected the following to work for the first descriptor - * (tx_fd->state = 0xc0000000) - * - Hold=1 (don't try and branch to the next descriptor); - * - No=0 (I want an empty data section, i.e. size=0); - * - Fe=1 (required by No=0 or we got an Err irq and must reset). - * It failed and locked solid. Thus the introduction of a dummy skb. - * Problem is acknowledged in errata sheet DS5.
Joy :o/ - */ -static struct sk_buff *dscc4_init_dummy_skb(struct dscc4_dev_priv *dpriv) -{ - struct sk_buff *skb; - - skb = dev_alloc_skb(DUMMY_SKB_SIZE); - if (skb) { - struct device *d = &dpriv->pci_priv->pdev->dev; - int last = dpriv->tx_dirty%TX_RING_SIZE; - struct TxFD *tx_fd = dpriv->tx_fd + last; - dma_addr_t addr; - - skb->len = DUMMY_SKB_SIZE; - skb_copy_to_linear_data(skb, version, - strlen(version) % DUMMY_SKB_SIZE); - addr = dma_map_single(d, skb->data, DUMMY_SKB_SIZE, - DMA_TO_DEVICE); - if (dma_mapping_error(d, addr)) { - dev_kfree_skb_any(skb); - return NULL; - } - tx_fd->state = FrameEnd | TO_STATE_TX(DUMMY_SKB_SIZE); - tx_fd->data = cpu_to_le32(addr); - dpriv->tx_skbuff[last] = skb; - } - return skb; -} - -static int dscc4_init_ring(struct net_device *dev) -{ - struct dscc4_dev_priv *dpriv = dscc4_priv(dev); - struct device *d = &dpriv->pci_priv->pdev->dev; - struct TxFD *tx_fd; - struct RxFD *rx_fd; - void *ring; - int i; - - ring = dma_alloc_coherent(d, RX_TOTAL_SIZE, &dpriv->rx_fd_dma, - GFP_KERNEL); - if (!ring) - goto err_out; - dpriv->rx_fd = rx_fd = (struct RxFD *) ring; - - ring = dma_alloc_coherent(d, TX_TOTAL_SIZE, &dpriv->tx_fd_dma, - GFP_KERNEL); - if (!ring) - goto err_free_dma_rx; - dpriv->tx_fd = tx_fd = (struct TxFD *) ring; - - memset(dpriv->tx_skbuff, 0, sizeof(struct sk_buff *)*TX_RING_SIZE); - dpriv->tx_dirty = 0xffffffff; - i = dpriv->tx_current = 0; - do { - tx_fd->state = FrameEnd | TO_STATE_TX(2*DUMMY_SKB_SIZE); - tx_fd->complete = 0x00000000; - /* FIXME: NULL should be ok - to be tried */ - tx_fd->data = cpu_to_le32(dpriv->tx_fd_dma); - (tx_fd++)->next = cpu_to_le32(dpriv->tx_fd_dma + - (++i%TX_RING_SIZE)*sizeof(*tx_fd)); - } while (i < TX_RING_SIZE); - - if (!dscc4_init_dummy_skb(dpriv)) - goto err_free_dma_tx; - - memset(dpriv->rx_skbuff, 0, sizeof(struct sk_buff *)*RX_RING_SIZE); - i = dpriv->rx_dirty = dpriv->rx_current = 0; - do { - /* size set by the host. 
Multiple of 4 bytes please */ - rx_fd->state1 = HiDesc; - rx_fd->state2 = 0x00000000; - rx_fd->end = cpu_to_le32(0xbabeface); - rx_fd->state1 |= TO_STATE_RX(HDLC_MAX_MRU); - // FIXME: return value is checked, but the error handling looks suspect - if (try_get_rx_skb(dpriv, dev) >= 0) - dpriv->rx_dirty++; - (rx_fd++)->next = cpu_to_le32(dpriv->rx_fd_dma + - (++i%RX_RING_SIZE)*sizeof(*rx_fd)); - } while (i < RX_RING_SIZE); - - return 0; - -err_free_dma_tx: - dma_free_coherent(d, TX_TOTAL_SIZE, ring, dpriv->tx_fd_dma); -err_free_dma_rx: - dma_free_coherent(d, RX_TOTAL_SIZE, rx_fd, dpriv->rx_fd_dma); -err_out: - return -ENOMEM; -} - -static void dscc4_remove_one(struct pci_dev *pdev) -{ - struct dscc4_pci_priv *ppriv; - struct dscc4_dev_priv *root; - void __iomem *ioaddr; - int i; - - ppriv = pci_get_drvdata(pdev); - root = ppriv->root; - - ioaddr = root->base_addr; - - dscc4_pci_reset(pdev, ioaddr); - - free_irq(pdev->irq, root); - dma_free_coherent(&pdev->dev, IRQ_RING_SIZE*sizeof(u32), ppriv->iqcfg, - ppriv->iqcfg_dma); - for (i = 0; i < dev_per_card; i++) { - struct dscc4_dev_priv *dpriv = root + i; - - dscc4_release_ring(dpriv); - dma_free_coherent(&pdev->dev, IRQ_RING_SIZE*sizeof(u32), - dpriv->iqrx, dpriv->iqrx_dma); - dma_free_coherent(&pdev->dev, IRQ_RING_SIZE*sizeof(u32), - dpriv->iqtx, dpriv->iqtx_dma); - } - - dscc4_free1(pdev); - - iounmap(ioaddr); - - pci_release_region(pdev, 1); - pci_release_region(pdev, 0); - - pci_disable_device(pdev); -} - -static int dscc4_hdlc_attach(struct net_device *dev, unsigned short encoding, - unsigned short parity) -{ - struct dscc4_dev_priv *dpriv = dscc4_priv(dev); - - if (encoding != ENCODING_NRZ && - encoding != ENCODING_NRZI && - encoding != ENCODING_FM_MARK && - encoding != ENCODING_FM_SPACE && - encoding != ENCODING_MANCHESTER) - return -EINVAL; - - if (parity != PARITY_NONE && - parity != PARITY_CRC16_PR0_CCITT && - parity != PARITY_CRC16_PR1_CCITT && - parity != PARITY_CRC32_PR0_CCITT && - parity != PARITY_CRC32_PR1_CCITT) - return -EINVAL; - - dpriv->encoding = encoding; - dpriv->parity = parity; - return 0; -} - -#ifndef MODULE -static int __init dscc4_setup(char *str) -{ - int *args[] = { &debug, &quartz, NULL }, **p = args; - - while (*p && (get_option(&str, *p) == 2)) - p++; - return 1; -} - -__setup("dscc4.setup=", dscc4_setup); -#endif - -static const struct pci_device_id dscc4_pci_tbl[] = { - { PCI_VENDOR_ID_SIEMENS, PCI_DEVICE_ID_SIEMENS_DSCC4, - PCI_ANY_ID, PCI_ANY_ID, }, - { 0,} -}; -MODULE_DEVICE_TABLE(pci, dscc4_pci_tbl); - -static struct pci_driver dscc4_driver = { - .name = DRV_NAME, - .id_table = dscc4_pci_tbl, - .probe = dscc4_init_one, - .remove = dscc4_remove_one, -}; - -module_pci_driver(dscc4_driver); diff --git a/drivers/net/wimax/i2400m/debugfs.c b/drivers/net/wimax/i2400m/debugfs.c index 6544ac9df047..73f5892ce6c1 100644 --- a/drivers/net/wimax/i2400m/debugfs.c +++ b/drivers/net/wimax/i2400m/debugfs.c @@ -30,15 +30,6 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_netdev_queue_stopped, debugfs_netdev_queue_stopped_get, NULL, "%llu\n"); - -static -struct dentry *debugfs_create_netdev_queue_stopped( - const char *name, struct dentry *parent, struct i2400m *i2400m) -{ - return debugfs_create_file(name, 0400, parent, i2400m, - &fops_netdev_queue_stopped); -} - /* * We don't allow partial reads of this file, as then the reader would * get weirdly confused data as it is updated.
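The conversion in this file (and the matching ones in driver.c and usb.c below) follows the debugfs convention that creation calls never need their return values checked: debugfs_create_dir() returns an ERR_PTR on failure, the other debugfs_create_*() helpers quietly accept such a parent and do nothing, and debugfs_remove_recursive() tears down whatever was actually created. A minimal sketch of the resulting idiom — the i2400m entry names in the hunks are real, but my_dev and everything else in this snippet is invented for illustration:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

struct my_dev {				/* illustrative only */
	u32 state;
	struct dentry *dir;
};

static int my_stats_show(struct seq_file *s, void *unused)
{
	struct my_dev *md = s->private;

	seq_printf(s, "state: %u\n", md->state);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(my_stats);

static void my_debugfs_add(struct my_dev *md, struct dentry *parent)
{
	/* No IS_ERR()/PTR_ERR() dance: a failed debugfs_create_dir()
	 * returns an ERR_PTR which the calls below silently tolerate,
	 * so the driver behaves the same with or without debugfs.
	 */
	md->dir = debugfs_create_dir("my_dev", parent);
	debugfs_create_u32("state", 0600, md->dir, &md->state);
	debugfs_create_file("stats", 0400, md->dir, md, &my_stats_fops);
}

static void my_debugfs_rm(struct my_dev *md)
{
	debugfs_remove_recursive(md->dir);	/* removes every child too */
}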
@@ -167,15 +158,6 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_i2400m_suspend, NULL, debugfs_i2400m_suspend_set, "%llu\n"); -static -struct dentry *debugfs_create_i2400m_suspend( - const char *name, struct dentry *parent, struct i2400m *i2400m) -{ - return debugfs_create_file(name, 0200, parent, i2400m, - &fops_i2400m_suspend); -} - - /* * Reset the device * @@ -205,73 +187,25 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_i2400m_reset, NULL, debugfs_i2400m_reset_set, "%llu\n"); -static -struct dentry *debugfs_create_i2400m_reset( - const char *name, struct dentry *parent, struct i2400m *i2400m) +void i2400m_debugfs_add(struct i2400m *i2400m) { - return debugfs_create_file(name, 0200, parent, i2400m, - &fops_i2400m_reset); -} - - -#define __debugfs_register(prefix, name, parent) \ -do { \ - result = d_level_register_debugfs(prefix, name, parent); \ - if (result < 0) \ - goto error; \ -} while (0) - - -int i2400m_debugfs_add(struct i2400m *i2400m) -{ - int result; - struct device *dev = i2400m_dev(i2400m); struct dentry *dentry = i2400m->wimax_dev.debugfs_dentry; - struct dentry *fd; dentry = debugfs_create_dir("i2400m", dentry); - result = PTR_ERR(dentry); - if (IS_ERR(dentry)) { - if (result == -ENODEV) - result = 0; /* No debugfs support */ - goto error; - } i2400m->debugfs_dentry = dentry; - __debugfs_register("dl_", control, dentry); - __debugfs_register("dl_", driver, dentry); - __debugfs_register("dl_", debugfs, dentry); - __debugfs_register("dl_", fw, dentry); - __debugfs_register("dl_", netdev, dentry); - __debugfs_register("dl_", rfkill, dentry); - __debugfs_register("dl_", rx, dentry); - __debugfs_register("dl_", tx, dentry); - - fd = debugfs_create_size_t("tx_in", 0400, dentry, - &i2400m->tx_in); - result = PTR_ERR(fd); - if (IS_ERR(fd) && result != -ENODEV) { - dev_err(dev, "Can't create debugfs entry " - "tx_in: %d\n", result); - goto error; - } - fd = debugfs_create_size_t("tx_out", 0400, dentry, - &i2400m->tx_out); - result = PTR_ERR(fd); - if (IS_ERR(fd) && result != -ENODEV) { - dev_err(dev, "Can't create debugfs entry " - "tx_out: %d\n", result); - goto error; - } + d_level_register_debugfs("dl_", control, dentry); + d_level_register_debugfs("dl_", driver, dentry); + d_level_register_debugfs("dl_", debugfs, dentry); + d_level_register_debugfs("dl_", fw, dentry); + d_level_register_debugfs("dl_", netdev, dentry); + d_level_register_debugfs("dl_", rfkill, dentry); + d_level_register_debugfs("dl_", rx, dentry); + d_level_register_debugfs("dl_", tx, dentry); - fd = debugfs_create_u32("state", 0600, dentry, - &i2400m->state); - result = PTR_ERR(fd); - if (IS_ERR(fd) && result != -ENODEV) { - dev_err(dev, "Can't create debugfs entry " - "state: %d\n", result); - goto error; - } + debugfs_create_size_t("tx_in", 0400, dentry, &i2400m->tx_in); + debugfs_create_size_t("tx_out", 0400, dentry, &i2400m->tx_out); + debugfs_create_u32("state", 0600, dentry, &i2400m->state); /* * Trace received messages from user space @@ -295,60 +229,22 @@ int i2400m_debugfs_add(struct i2400m *i2400m) * It is not really very atomic, but it is also not too * critical. 
*/ - fd = debugfs_create_u8("trace_msg_from_user", 0600, dentry, - &i2400m->trace_msg_from_user); - result = PTR_ERR(fd); - if (IS_ERR(fd) && result != -ENODEV) { - dev_err(dev, "Can't create debugfs entry " - "trace_msg_from_user: %d\n", result); - goto error; - } + debugfs_create_u8("trace_msg_from_user", 0600, dentry, + &i2400m->trace_msg_from_user); - fd = debugfs_create_netdev_queue_stopped("netdev_queue_stopped", - dentry, i2400m); - result = PTR_ERR(fd); - if (IS_ERR(fd) && result != -ENODEV) { - dev_err(dev, "Can't create debugfs entry " - "netdev_queue_stopped: %d\n", result); - goto error; - } + debugfs_create_file("netdev_queue_stopped", 0400, dentry, i2400m, + &fops_netdev_queue_stopped); - fd = debugfs_create_file("rx_stats", 0600, dentry, i2400m, - &i2400m_rx_stats_fops); - result = PTR_ERR(fd); - if (IS_ERR(fd) && result != -ENODEV) { - dev_err(dev, "Can't create debugfs entry " - "rx_stats: %d\n", result); - goto error; - } + debugfs_create_file("rx_stats", 0600, dentry, i2400m, + &i2400m_rx_stats_fops); - fd = debugfs_create_file("tx_stats", 0600, dentry, i2400m, - &i2400m_tx_stats_fops); - result = PTR_ERR(fd); - if (IS_ERR(fd) && result != -ENODEV) { - dev_err(dev, "Can't create debugfs entry " - "tx_stats: %d\n", result); - goto error; - } + debugfs_create_file("tx_stats", 0600, dentry, i2400m, + &i2400m_tx_stats_fops); - fd = debugfs_create_i2400m_suspend("suspend", dentry, i2400m); - result = PTR_ERR(fd); - if (IS_ERR(fd) && result != -ENODEV) { - dev_err(dev, "Can't create debugfs entry suspend: %d\n", - result); - goto error; - } + debugfs_create_file("suspend", 0200, dentry, i2400m, + &fops_i2400m_suspend); - fd = debugfs_create_i2400m_reset("reset", dentry, i2400m); - result = PTR_ERR(fd); - if (IS_ERR(fd) && result != -ENODEV) { - dev_err(dev, "Can't create debugfs entry reset: %d\n", result); - goto error; - } - - result = 0; -error: - return result; + debugfs_create_file("reset", 0200, dentry, i2400m, &fops_i2400m_reset); } void i2400m_debugfs_rm(struct i2400m *i2400m) diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c index 0a29222a1bf9..f66c0f8f6f4a 100644 --- a/drivers/net/wimax/i2400m/driver.c +++ b/drivers/net/wimax/i2400m/driver.c @@ -905,11 +905,7 @@ int i2400m_setup(struct i2400m *i2400m, enum i2400m_bri bm_flags) goto error_sysfs_setup; } - result = i2400m_debugfs_add(i2400m); - if (result < 0) { - dev_err(dev, "cannot setup i2400m's debugfs: %d\n", result); - goto error_debugfs_setup; - } + i2400m_debugfs_add(i2400m); result = i2400m_dev_start(i2400m, bm_flags); if (result < 0) @@ -919,7 +915,6 @@ int i2400m_setup(struct i2400m *i2400m, enum i2400m_bri bm_flags) error_dev_start: i2400m_debugfs_rm(i2400m); -error_debugfs_setup: sysfs_remove_group(&i2400m->wimax_dev.net_dev->dev.kobj, &i2400m_dev_attr_group); error_sysfs_setup: diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c index 489cba9b284d..6c9a41bff2e0 100644 --- a/drivers/net/wimax/i2400m/fw.c +++ b/drivers/net/wimax/i2400m/fw.c @@ -397,14 +397,9 @@ int i2400m_is_boot_barker(struct i2400m *i2400m, /* Short circuit if we have already discovered the barker * associated with the device. 
*/ - if (i2400m->barker - && !memcmp(buf, i2400m->barker, sizeof(i2400m->barker->data))) { - unsigned index = (i2400m->barker - i2400m_barker_db) - / sizeof(*i2400m->barker); - d_printf(2, dev, "boot barker cache-confirmed #%u/%08x\n", - index, le32_to_cpu(i2400m->barker->data[0])); + if (i2400m->barker && + !memcmp(buf, i2400m->barker, sizeof(i2400m->barker->data))) return 0; - } for (i = 0; i < i2400m_barker_db_used; i++) { barker = &i2400m_barker_db[i]; diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h index 5a34e72bab9a..a3733a6d14f5 100644 --- a/drivers/net/wimax/i2400m/i2400m.h +++ b/drivers/net/wimax/i2400m/i2400m.h @@ -812,13 +812,10 @@ enum i2400m_pt; int i2400m_tx(struct i2400m *, const void *, size_t, enum i2400m_pt); #ifdef CONFIG_DEBUG_FS -int i2400m_debugfs_add(struct i2400m *); +void i2400m_debugfs_add(struct i2400m *); void i2400m_debugfs_rm(struct i2400m *); #else -static inline int i2400m_debugfs_add(struct i2400m *i2400m) -{ - return 0; -} +static inline void i2400m_debugfs_add(struct i2400m *i2400m) {} static inline void i2400m_debugfs_rm(struct i2400m *i2400m) {} #endif diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c index d28b96d06919..c9fb619a9e01 100644 --- a/drivers/net/wimax/i2400m/rx.c +++ b/drivers/net/wimax/i2400m/rx.c @@ -1253,7 +1253,6 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb) skb_len = skb->len; d_fnstart(4, dev, "(i2400m %p skb %p [size %u])\n", i2400m, skb, skb_len); - result = -EIO; msg_hdr = (void *) skb->data; result = i2400m_rx_msg_hdr_check(i2400m, msg_hdr, skb_len); if (result < 0) diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c index 2075e7b1fff6..6953f904232f 100644 --- a/drivers/net/wimax/i2400m/usb.c +++ b/drivers/net/wimax/i2400m/usb.c @@ -366,61 +366,25 @@ struct d_level D_LEVEL[] = { }; size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL); - -#define __debugfs_register(prefix, name, parent) \ -do { \ - result = d_level_register_debugfs(prefix, name, parent); \ - if (result < 0) \ - goto error; \ -} while (0) - - static -int i2400mu_debugfs_add(struct i2400mu *i2400mu) +void i2400mu_debugfs_add(struct i2400mu *i2400mu) { - int result; - struct device *dev = &i2400mu->usb_iface->dev; struct dentry *dentry = i2400mu->i2400m.wimax_dev.debugfs_dentry; - struct dentry *fd; dentry = debugfs_create_dir("i2400m-usb", dentry); - result = PTR_ERR(dentry); - if (IS_ERR(dentry)) { - if (result == -ENODEV) - result = 0; /* No debugfs support */ - goto error; - } i2400mu->debugfs_dentry = dentry; - __debugfs_register("dl_", usb, dentry); - __debugfs_register("dl_", fw, dentry); - __debugfs_register("dl_", notif, dentry); - __debugfs_register("dl_", rx, dentry); - __debugfs_register("dl_", tx, dentry); - /* Don't touch these if you don't know what you are doing */ - fd = debugfs_create_u8("rx_size_auto_shrink", 0600, dentry, - &i2400mu->rx_size_auto_shrink); - result = PTR_ERR(fd); - if (IS_ERR(fd) && result != -ENODEV) { - dev_err(dev, "Can't create debugfs entry " - "rx_size_auto_shrink: %d\n", result); - goto error; - } + d_level_register_debugfs("dl_", usb, dentry); + d_level_register_debugfs("dl_", fw, dentry); + d_level_register_debugfs("dl_", notif, dentry); + d_level_register_debugfs("dl_", rx, dentry); + d_level_register_debugfs("dl_", tx, dentry); - fd = debugfs_create_size_t("rx_size", 0600, dentry, - &i2400mu->rx_size); - result = PTR_ERR(fd); - if (IS_ERR(fd) && result != -ENODEV) { - dev_err(dev, "Can't create debugfs entry " - "rx_size: %d\n", 
result); - goto error; - } - - return 0; + /* Don't touch these if you don't know what you are doing */ + debugfs_create_u8("rx_size_auto_shrink", 0600, dentry, + &i2400mu->rx_size_auto_shrink); -error: - debugfs_remove_recursive(i2400mu->debugfs_dentry); - return result; + debugfs_create_size_t("rx_size", 0600, dentry, &i2400mu->rx_size); } @@ -534,15 +498,9 @@ int i2400mu_probe(struct usb_interface *iface, dev_err(dev, "cannot setup device: %d\n", result); goto error_setup; } - result = i2400mu_debugfs_add(i2400mu); - if (result < 0) { - dev_err(dev, "Can't register i2400mu's debugfs: %d\n", result); - goto error_debugfs_add; - } + i2400mu_debugfs_add(i2400mu); return 0; -error_debugfs_add: - i2400m_release(i2400m); error_setup: usb_set_intfdata(iface, NULL); usb_put_dev(i2400mu->usb_dev); diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 83a7fb68fd24..53f1095de8ff 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -2151,6 +2151,10 @@ static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt, struct ath10k_peer *peer; struct htt_rx_indication_mpdu_range *mpdu_ranges; struct fw_rx_desc_hl *fw_desc; + enum htt_txrx_sec_cast_type sec_index; + enum htt_security_types sec_type; + union htt_rx_pn_t new_pn = {0}; + struct htt_hl_rx_desc *rx_desc; struct ieee80211_hdr *hdr; struct ieee80211_rx_status *rx_status; u16 peer_id; @@ -2158,9 +2162,11 @@ static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt, int num_mpdu_ranges; size_t tot_hdr_len; struct ieee80211_channel *ch; - bool pn_invalid; + bool pn_invalid, qos, first_msdu; + u32 tid, rx_desc_info; peer_id = __le16_to_cpu(rx->hdr.peer_id); + tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID); spin_lock_bh(&ar->data_lock); peer = ath10k_peer_find_by_id(ar, peer_id); @@ -2168,6 +2174,9 @@ static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt, if (!peer && peer_id != HTT_INVALID_PEERID) ath10k_warn(ar, "Got RX ind from invalid peer: %u\n", peer_id); + if (!peer) + return true; + num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1), HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); mpdu_ranges = htt_rx_ind_get_mpdu_ranges_hl(rx); @@ -2192,10 +2201,24 @@ static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt, goto err; } - if (check_pn_type == HTT_RX_PN_CHECK) { + rx_desc = (struct htt_hl_rx_desc *)&rx->mpdu_ranges[num_mpdu_ranges]; + rx_desc_info = __le32_to_cpu(rx_desc->info); + + if (MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST)) + sec_index = HTT_TXRX_SEC_MCAST; + else + sec_index = HTT_TXRX_SEC_UCAST; + + sec_type = peer->rx_pn[sec_index].sec_type; + first_msdu = rx->fw_desc.flags & FW_RX_DESC_FLAGS_FIRST_MSDU; + + ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len); + + if (check_pn_type == HTT_RX_PN_CHECK && tid >= IEEE80211_NUM_TIDS) { spin_lock_bh(&ar->data_lock); pn_invalid = ath10k_htt_rx_pn_check_replay_hl(ar, peer, rx); spin_unlock_bh(&ar->data_lock); + if (pn_invalid) goto err; } @@ -2211,6 +2234,7 @@ static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt, skb_pull(skb, tot_hdr_len); hdr = (struct ieee80211_hdr *)skb->data; + qos = ieee80211_is_data_qos(hdr->frame_control); rx_status = IEEE80211_SKB_RXCB(skb); rx_status->chains |= BIT(0); if (rx->ppdu.combined_rssi == 0) { @@ -2254,6 +2278,55 @@ static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt, rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED; + + if (tid < 
IEEE80211_NUM_TIDS && + first_msdu && + check_pn_type == HTT_RX_PN_CHECK && + (sec_type == HTT_SECURITY_AES_CCMP || + sec_type == HTT_SECURITY_TKIP || + sec_type == HTT_SECURITY_TKIP_NOMIC)) { + u8 offset, *ivp, i; + s8 keyidx = 0; + __le64 pn48 = cpu_to_le64(new_pn.pn48); + + hdr = (struct ieee80211_hdr *)skb->data; + offset = ieee80211_hdrlen(hdr->frame_control); + hdr->frame_control |= __cpu_to_le16(IEEE80211_FCTL_PROTECTED); + rx_status->flag &= ~RX_FLAG_IV_STRIPPED; + + memmove(skb->data - IEEE80211_CCMP_HDR_LEN, + skb->data, offset); + skb_push(skb, IEEE80211_CCMP_HDR_LEN); + ivp = skb->data + offset; + memset(skb->data + offset, 0, IEEE80211_CCMP_HDR_LEN); + /* Ext IV */ + ivp[IEEE80211_WEP_IV_LEN - 1] |= ATH10K_IEEE80211_EXTIV; + + for (i = 0; i < ARRAY_SIZE(peer->keys); i++) { + if (peer->keys[i] && + peer->keys[i]->flags & IEEE80211_KEY_FLAG_PAIRWISE) + keyidx = peer->keys[i]->keyidx; + } + + /* Key ID */ + ivp[IEEE80211_WEP_IV_LEN - 1] |= keyidx << 6; + + if (sec_type == HTT_SECURITY_AES_CCMP) { + rx_status->flag |= RX_FLAG_MIC_STRIPPED; + /* pn 0, pn 1 */ + memcpy(skb->data + offset, &pn48, 2); + /* pn 2, pn 3, pn 4, pn 5 */ + memcpy(skb->data + offset + 4, ((u8 *)&pn48) + 2, 4); + } else { + rx_status->flag |= RX_FLAG_ICV_STRIPPED; + /* TSC 0 */ + memcpy(skb->data + offset + 2, &pn48, 1); + /* TSC 1 */ + memcpy(skb->data + offset, ((u8 *)&pn48) + 1, 1); + /* TSC 2, TSC 3, TSC 4, TSC 5 */ + memcpy(skb->data + offset + 4, ((u8 *)&pn48) + 2, 4); + } + } } if (tkip_mic_type == HTT_RX_TKIP_MIC) @@ -2263,6 +2336,20 @@ static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt, if (mpdu_ranges->mpdu_range_status == HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR) rx_status->flag |= RX_FLAG_MMIC_ERROR; + if (!qos && tid < IEEE80211_NUM_TIDS) { + u8 offset; + __le16 qos_ctrl = 0; + + hdr = (struct ieee80211_hdr *)skb->data; + offset = ieee80211_hdrlen(hdr->frame_control); + + hdr->frame_control |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA); + memmove(skb->data - IEEE80211_QOS_CTL_LEN, skb->data, offset); + skb_push(skb, IEEE80211_QOS_CTL_LEN); + qos_ctrl = cpu_to_le16(tid); + memcpy(skb->data + offset, &qos_ctrl, IEEE80211_QOS_CTL_LEN); + } + ieee80211_rx_ni(ar->hw, skb); /* We have delivered the skb to the upper layers (mac80211) so we diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index 2ef717f18795..a182c0944cc7 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -1237,6 +1237,7 @@ static int ath10k_htt_tx_hl(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txm struct ath10k *ar = htt->ar; int res, data_len; struct htt_cmd_hdr *cmd_hdr; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; struct htt_data_tx_desc *tx_desc; struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu); struct sk_buff *tmp_skb; @@ -1247,6 +1248,13 @@ static int ath10k_htt_tx_hl(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txm u16 flags1 = 0; u16 msdu_id = 0; + if ((ieee80211_is_action(hdr->frame_control) || + ieee80211_is_deauth(hdr->frame_control) || + ieee80211_is_disassoc(hdr->frame_control)) && + ieee80211_has_protected(hdr->frame_control)) { + skb_put(msdu, IEEE80211_CCMP_MIC_LEN); + } + data_len = msdu->len; switch (txmode) { diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 0606416dc971..a6d21856b7e7 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -5503,10 +5503,6 @@ static void
ath10k_remove_interface(struct ieee80211_hw *hw, mutex_lock(&ar->conf_mutex); - spin_lock_bh(&ar->data_lock); - ath10k_mac_vif_beacon_cleanup(arvif); - spin_unlock_bh(&ar->data_lock); - ret = ath10k_spectral_vif_stop(arvif); if (ret) ath10k_warn(ar, "failed to stop spectral for vdev %i: %d\n", @@ -5575,6 +5571,11 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw, peer->vif = NULL; } } + + /* Clean this up late, less opportunity for firmware to access + * DMA memory we have deleted. + */ + ath10k_mac_vif_beacon_cleanup(arvif); spin_unlock_bh(&ar->data_lock); ath10k_peer_cleanup(ar, arvif->vdev_id); @@ -6970,7 +6971,8 @@ exit: return ret; } -static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw) +static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) { struct ath10k *ar = hw->priv; diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c index 8ed4fbd8d6c3..9870d2d095c8 100644 --- a/drivers/net/wireless/ath/ath10k/sdio.c +++ b/drivers/net/wireless/ath/ath10k/sdio.c @@ -381,16 +381,11 @@ static int ath10k_sdio_mbox_rx_process_packet(struct ath10k *ar, struct ath10k_htc_hdr *htc_hdr = (struct ath10k_htc_hdr *)skb->data; bool trailer_present = htc_hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT; enum ath10k_htc_ep_id eid; - u16 payload_len; u8 *trailer; int ret; - payload_len = le16_to_cpu(htc_hdr->len); - skb->len = payload_len + sizeof(struct ath10k_htc_hdr); - if (trailer_present) { - trailer = skb->data + sizeof(*htc_hdr) + - payload_len - htc_hdr->trailer_len; + trailer = skb->data + skb->len - htc_hdr->trailer_len; eid = pipe_id_to_eid(htc_hdr->eid); @@ -632,13 +627,31 @@ static int ath10k_sdio_mbox_rx_packet(struct ath10k *ar, { struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); struct sk_buff *skb = pkt->skb; + struct ath10k_htc_hdr *htc_hdr; int ret; ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr, skb->data, pkt->alloc_len); + if (ret) + goto out; + + /* Update actual length. The original length may be incorrect, + * as the FW will bundle multiple packets as long as their sizes + * fit within the same aligned length (pkt->alloc_len). 
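+ * E.g. (illustrative numbers only): with pkt->alloc_len = 256 and a first HTC header reporting len = 80, act_len below becomes 80 + sizeof(*htc_hdr), and the remainder of the 256-byte read may hold further bundled packets.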
+ */ + htc_hdr = (struct ath10k_htc_hdr *)skb->data; + pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr); + if (pkt->act_len > pkt->alloc_len) { + ath10k_warn(ar, "rx packet too large (%zu > %zu)\n", + pkt->act_len, pkt->alloc_len); + ret = -EMSGSIZE; + goto out; + } + + skb_put(skb, pkt->act_len); + +out: pkt->status = ret; - if (!ret) - skb_put(skb, pkt->act_len); return ret; } diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index 2985bb17decd..4d5d10c01064 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -841,7 +841,7 @@ static int ath10k_wmi_tlv_op_pull_ch_info_ev(struct ath10k *ar, struct wmi_ch_info_ev_arg *arg) { const void **tb; - const struct wmi_chan_info_event *ev; + const struct wmi_tlv_chan_info_event *ev; int ret; tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h index d691f06e58f2..649b229a41e9 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h @@ -1615,6 +1615,22 @@ struct chan_info_params { #define WMI_TLV_FLAG_MGMT_BUNDLE_TX_COMPL BIT(9) +struct wmi_tlv_chan_info_event { + __le32 err_code; + __le32 freq; + __le32 cmd_flags; + __le32 noise_floor; + __le32 rx_clear_count; + __le32 cycle_count; + __le32 chan_tx_pwr_range; + __le32 chan_tx_pwr_tp; + __le32 rx_frame_count; + __le32 my_bss_rx_cycle_count; + __le32 rx_11b_mode_data_duration; + __le32 tx_frame_cnt; + __le32 mac_clk_mhz; +} __packed; + struct wmi_tlv_mgmt_tx_compl_ev { __le32 desc_id; __le32 status; diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 838768c98adc..e80dbe7e8f4c 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -6533,14 +6533,6 @@ struct wmi_chan_info_event { __le32 noise_floor; __le32 rx_clear_count; __le32 cycle_count; - __le32 chan_tx_pwr_range; - __le32 chan_tx_pwr_tp; - __le32 rx_frame_count; - __le32 my_bss_rx_cycle_count; - __le32 rx_11b_mode_data_duration; - __le32 tx_frame_cnt; - __le32 mac_clk_mhz; - } __packed; struct wmi_10_4_chan_info_event { diff --git a/drivers/net/wireless/ath/ath6kl/htc_mbox.c b/drivers/net/wireless/ath/ath6kl/htc_mbox.c index 65c31da43c47..998947ef63b6 100644 --- a/drivers/net/wireless/ath/ath6kl/htc_mbox.c +++ b/drivers/net/wireless/ath/ath6kl/htc_mbox.c @@ -2855,8 +2855,8 @@ static void *ath6kl_htc_mbox_create(struct ath6kl *ar) target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL); if (!target->dev) { ath6kl_err("unable to allocate memory\n"); - status = -ENOMEM; - goto err_htc_cleanup; + kfree(target); + return NULL; } spin_lock_init(&target->htc_lock); diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c index 4defb7a0330f..53b66e9434c9 100644 --- a/drivers/net/wireless/ath/ath6kl/usb.c +++ b/drivers/net/wireless/ath/ath6kl/usb.c @@ -132,6 +132,10 @@ ath6kl_usb_alloc_urb_from_pipe(struct ath6kl_usb_pipe *pipe) struct ath6kl_urb_context *urb_context = NULL; unsigned long flags; + /* bail if this pipe is not initialized */ + if (!pipe->ar_usb) + return NULL; + spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags); if (!list_empty(&pipe->urb_list_head)) { urb_context = @@ -150,6 +154,10 @@ static void ath6kl_usb_free_urb_to_pipe(struct ath6kl_usb_pipe *pipe, { unsigned long flags; + /* bail if this pipe is not initialized */ + if (!pipe->ar_usb) + 
return; + spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags); pipe->urb_cnt++; diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig index 5601cfd6a293..2d1247f61297 100644 --- a/drivers/net/wireless/ath/ath9k/Kconfig +++ b/drivers/net/wireless/ath/ath9k/Kconfig @@ -157,6 +157,22 @@ config ATH9K_PCOEM depends on ATH9K default y +config ATH9K_PCI_NO_EEPROM + tristate "Atheros ath9k pci loader for EEPROM-less chips" + depends on ATH9K_PCI + default n + help + This separate driver provides a loader in order to support the + AR500X to AR92XX-generation of ath9k PCI(e) WiFi chips, which have + their initialization data (which contains the real PCI Device ID + that ath9k will need) stored together with the calibration data out + of reach for the ath9k chip. + + These devices are usually various network appliances, routers or + access points and such. + + If unsure, say N. + config ATH9K_HTC tristate "Atheros HTC based wireless cards support" depends on USB && MAC80211 diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile index 15af0a836925..eff94bcd1f0a 100644 --- a/drivers/net/wireless/ath/ath9k/Makefile +++ b/drivers/net/wireless/ath/ath9k/Makefile @@ -77,3 +77,5 @@ ath9k_htc-y += htc_hst.o \ ath9k_htc-$(CONFIG_ATH9K_HTC_DEBUGFS) += htc_drv_debug.o obj-$(CONFIG_ATH9K_HTC) += ath9k_htc.o + +obj-$(CONFIG_ATH9K_PCI_NO_EEPROM) += ath9k_pci_owl_loader.o diff --git a/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c b/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c new file mode 100644 index 000000000000..159490f5a111 --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c @@ -0,0 +1,215 @@ +// SPDX-License-Identifier: ISC +/* Initialize Owl Emulation Devices + * + * Copyright (C) 2016 Christian Lamparter <chunkeey@gmail.com> + * Copyright (C) 2016 Martin Blumenstingl <martin.blumenstingl@googlemail.com> + * + * Some devices (like the Cisco Meraki Z1 Cloud Managed Teleworker Gateway) + * need to be able to initialize the PCIe wifi device. Normally, this is done + * during the early stages as a pci quirk. + * However, this isn't possible for devices which have the init code for the + * Atheros chip stored in a UBI volume on NAND. Hence, this module can be used + * to initialize the chip when user space is ready to extract the init code.
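+ * + * A sketch of the flow implemented below: owl_probe() requests the init/calibration blob asynchronously via request_firmware_nowait(); owl_fw_cb() then validates it, plays the register/value pairs into the chip (ath9k_pci_fixup()) and removes and rescans the PCI device so that it comes back with its real device ID for ath9k to claim.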
+ */ +#include <linux/module.h> +#include <linux/version.h> +#include <linux/completion.h> +#include <linux/etherdevice.h> +#include <linux/firmware.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/platform_device.h> +#include <linux/ath9k_platform.h> + +struct owl_ctx { + struct completion eeprom_load; +}; + +#define EEPROM_FILENAME_LEN 100 + +#define AR5416_EEPROM_MAGIC 0xa55a + +static int ath9k_pci_fixup(struct pci_dev *pdev, const u16 *cal_data, + size_t cal_len) +{ + void __iomem *mem; + const void *cal_end = (void *)cal_data + cal_len; + const struct { + u16 reg; + u16 low_val; + u16 high_val; + } __packed * data; + u16 cmd; + u32 bar0; + bool swap_needed = false; + + if (*cal_data != AR5416_EEPROM_MAGIC) { + if (*cal_data != swab16(AR5416_EEPROM_MAGIC)) { + dev_err(&pdev->dev, "invalid calibration data\n"); + return -EINVAL; + } + + dev_dbg(&pdev->dev, "calibration data needs swapping\n"); + swap_needed = true; + } + + dev_info(&pdev->dev, "fixup device configuration\n"); + + mem = pcim_iomap(pdev, 0, 0); + if (!mem) { + dev_err(&pdev->dev, "ioremap error\n"); + return -EINVAL; + } + + pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &bar0); + pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, + pci_resource_start(pdev, 0)); + pci_read_config_word(pdev, PCI_COMMAND, &cmd); + cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY; + pci_write_config_word(pdev, PCI_COMMAND, cmd); + + /* set pointer to first reg address */ + for (data = (const void *)(cal_data + 3); + (const void *)data <= cal_end && data->reg != (u16)~0; + data++) { + u32 val; + u16 reg; + + reg = data->reg; + val = data->low_val; + val |= ((u32)data->high_val) << 16; + + if (swap_needed) { + reg = swab16(reg); + val = swahb32(val); + } + + __raw_writel(val, mem + reg); + usleep_range(100, 120); + } + + pci_read_config_word(pdev, PCI_COMMAND, &cmd); + cmd &= ~(PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY); + pci_write_config_word(pdev, PCI_COMMAND, cmd); + + pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, bar0); + pcim_iounmap(pdev, mem); + + pci_disable_device(pdev); + + return 0; +} + +static void owl_fw_cb(const struct firmware *fw, void *context) +{ + struct pci_dev *pdev = (struct pci_dev *)context; + struct owl_ctx *ctx = (struct owl_ctx *)pci_get_drvdata(pdev); + struct pci_bus *bus; + + complete(&ctx->eeprom_load); + + if (!fw) { + dev_err(&pdev->dev, "no eeprom data received.\n"); + goto release; + } + + /* also note that we are doing *u16 operations on the file */ + if (fw->size > 4096 || fw->size < 0x200 || (fw->size & 1) == 1) { + dev_err(&pdev->dev, "eeprom file has an invalid size.\n"); + goto release; + } + + if (ath9k_pci_fixup(pdev, (const u16 *)fw->data, fw->size)) + goto release; + + pci_lock_rescan_remove(); + bus = pdev->bus; + pci_stop_and_remove_bus_device(pdev); + /* the device should come back with the proper + * ProductId. But we have to initiate a rescan. 
+ */ + pci_rescan_bus(bus); + pci_unlock_rescan_remove(); + +release: + release_firmware(fw); +} + +static const char *owl_get_eeprom_name(struct pci_dev *pdev) +{ + struct device *dev = &pdev->dev; + char *eeprom_name; + + dev_dbg(dev, "using auto-generated eeprom filename\n"); + + eeprom_name = devm_kzalloc(dev, EEPROM_FILENAME_LEN, GFP_KERNEL); + if (!eeprom_name) + return NULL; + + /* this should match the pattern used in ath9k/init.c */ + scnprintf(eeprom_name, EEPROM_FILENAME_LEN, "ath9k-eeprom-pci-%s.bin", + dev_name(dev)); + + return eeprom_name; +} + +static int owl_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct owl_ctx *ctx; + const char *eeprom_name; + int err = 0; + + if (pcim_enable_device(pdev)) + return -EIO; + + pcim_pin_device(pdev); + + eeprom_name = owl_get_eeprom_name(pdev); + if (!eeprom_name) { + dev_err(&pdev->dev, "no eeprom filename found.\n"); + return -ENODEV; + } + + ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + init_completion(&ctx->eeprom_load); + + pci_set_drvdata(pdev, ctx); + err = request_firmware_nowait(THIS_MODULE, true, eeprom_name, + &pdev->dev, GFP_KERNEL, pdev, owl_fw_cb); + if (err) + dev_err(&pdev->dev, "failed to request caldata (%d).\n", err); + + return err; +} + +static void owl_remove(struct pci_dev *pdev) +{ + struct owl_ctx *ctx = pci_get_drvdata(pdev); + + if (ctx) { + wait_for_completion(&ctx->eeprom_load); + pci_set_drvdata(pdev, NULL); + } +} + +static const struct pci_device_id owl_pci_table[] = { + { PCI_VDEVICE(ATHEROS, 0xff1c) }, /* PCIe */ + { PCI_VDEVICE(ATHEROS, 0xff1d) }, /* PCI */ + { }, +}; +MODULE_DEVICE_TABLE(pci, owl_pci_table); + +static struct pci_driver owl_driver = { + .name = KBUILD_MODNAME, + .id_table = owl_pci_table, + .probe = owl_probe, + .remove = owl_remove, +}; +module_pci_driver(owl_driver); +MODULE_AUTHOR("Christian Lamparter <chunkeey@gmail.com>"); +MODULE_DESCRIPTION("External EEPROM data loader for Atheros AR500X to AR92XX"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/ath/ath9k/dynack.c b/drivers/net/wireless/ath/ath9k/dynack.c index f112fa5b2eac..fbeb4a739d32 100644 --- a/drivers/net/wireless/ath/ath9k/dynack.c +++ b/drivers/net/wireless/ath/ath9k/dynack.c @@ -20,12 +20,31 @@ #define COMPUTE_TO (5 * HZ) #define LATEACK_DELAY (10 * HZ) -#define LATEACK_TO 256 -#define MAX_DELAY 300 #define EWMA_LEVEL 96 #define EWMA_DIV 128 /** + * ath_dynack_get_max_to - get max timeout according to channel width + * @ah: ath hw + * + */ +static u32 ath_dynack_get_max_to(struct ath_hw *ah) +{ + const struct ath9k_channel *chan = ah->curchan; + + if (!chan) + return 300; + + if (IS_CHAN_HT40(chan)) + return 300; + if (IS_CHAN_HALF_RATE(chan)) + return 750; + if (IS_CHAN_QUARTER_RATE(chan)) + return 1500; + return 600; +} + +/** * ath_dynack_ewma - EWMA (Exponentially Weighted Moving Average) calculation * */ @@ -79,6 +98,24 @@ static inline bool ath_dynack_bssidmask(struct ath_hw *ah, const u8 *mac) } /** + * ath_dynack_set_timeout - configure timeouts/slottime registers + * @ah: ath hw + * @to: timeout value + * + */ +static void ath_dynack_set_timeout(struct ath_hw *ah, int to) +{ + struct ath_common *common = ath9k_hw_common(ah); + int slottime = (to - 3) / 2; + + ath_dbg(common, DYNACK, "ACK timeout %u slottime %u\n", + to, slottime); + ath9k_hw_setslottime(ah, slottime); + ath9k_hw_set_ack_timeout(ah, to); + ath9k_hw_set_cts_timeout(ah, to); +} + +/** * ath_dynack_compute_ackto - compute ACK timeout as the maximum STA
timeout * @ah: ath hw * @@ -86,7 +123,6 @@ static inline bool ath_dynack_bssidmask(struct ath_hw *ah, const u8 *mac) */ static void ath_dynack_compute_ackto(struct ath_hw *ah) { - struct ath_common *common = ath9k_hw_common(ah); struct ath_dynack *da = &ah->dynack; struct ath_node *an; int to = 0; @@ -96,15 +132,8 @@ static void ath_dynack_compute_ackto(struct ath_hw *ah) to = an->ackto; if (to && da->ackto != to) { - u32 slottime; - - slottime = (to - 3) / 2; + ath_dynack_set_timeout(ah, to); da->ackto = to; - ath_dbg(common, DYNACK, "ACK timeout %u slottime %u\n", - da->ackto, slottime); - ath9k_hw_setslottime(ah, slottime); - ath9k_hw_set_ack_timeout(ah, da->ackto); - ath9k_hw_set_cts_timeout(ah, da->ackto); } } @@ -116,15 +145,16 @@ static void ath_dynack_compute_ackto(struct ath_hw *ah) */ static void ath_dynack_compute_to(struct ath_hw *ah) { - u32 ackto, ack_ts; - u8 *dst, *src; + struct ath_dynack *da = &ah->dynack; + u32 ackto, ack_ts, max_to; struct ieee80211_sta *sta; - struct ath_node *an; struct ts_info *st_ts; - struct ath_dynack *da = &ah->dynack; + struct ath_node *an; + u8 *dst, *src; rcu_read_lock(); + max_to = ath_dynack_get_max_to(ah); while (da->st_rbf.h_rb != da->st_rbf.t_rb && da->ack_rbf.h_rb != da->ack_rbf.t_rb) { ack_ts = da->ack_rbf.tstamp[da->ack_rbf.h_rb]; @@ -140,7 +170,7 @@ static void ath_dynack_compute_to(struct ath_hw *ah) if (ack_ts > st_ts->tstamp + st_ts->dur) { ackto = ack_ts - st_ts->tstamp - st_ts->dur; - if (ackto < MAX_DELAY) { + if (ackto < max_to) { sta = ieee80211_find_sta_by_ifaddr(ah->hw, dst, src); if (sta) { @@ -197,11 +227,10 @@ void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb, if (ieee80211_is_assoc_req(hdr->frame_control) || ieee80211_is_assoc_resp(hdr->frame_control) || ieee80211_is_auth(hdr->frame_control)) { - ath_dbg(common, DYNACK, "late ack\n"); + u32 max_to = ath_dynack_get_max_to(ah); - ath9k_hw_setslottime(ah, (LATEACK_TO - 3) / 2); - ath9k_hw_set_ack_timeout(ah, LATEACK_TO); - ath9k_hw_set_cts_timeout(ah, LATEACK_TO); + ath_dbg(common, DYNACK, "late ack\n"); + ath_dynack_set_timeout(ah, max_to); if (sta) { struct ath_node *an; @@ -292,15 +321,13 @@ EXPORT_SYMBOL(ath_dynack_sample_ack_ts); */ void ath_dynack_node_init(struct ath_hw *ah, struct ath_node *an) { - /* ackto = slottime + sifs + air delay */ - u32 ackto = 9 + 16 + 64; struct ath_dynack *da = &ah->dynack; - an->ackto = ackto; + an->ackto = da->ackto; - spin_lock(&da->qlock); + spin_lock_bh(&da->qlock); list_add_tail(&an->list, &da->nodes); - spin_unlock(&da->qlock); + spin_unlock_bh(&da->qlock); } EXPORT_SYMBOL(ath_dynack_node_init); @@ -314,9 +341,9 @@ void ath_dynack_node_deinit(struct ath_hw *ah, struct ath_node *an) { struct ath_dynack *da = &ah->dynack; - spin_lock(&da->qlock); + spin_lock_bh(&da->qlock); list_del(&an->list); - spin_unlock(&da->qlock); + spin_unlock_bh(&da->qlock); } EXPORT_SYMBOL(ath_dynack_node_deinit); @@ -327,22 +354,26 @@ EXPORT_SYMBOL(ath_dynack_node_deinit); */ void ath_dynack_reset(struct ath_hw *ah) { - /* ackto = slottime + sifs + air delay */ - u32 ackto = 9 + 16 + 64; struct ath_dynack *da = &ah->dynack; + struct ath_node *an; + + spin_lock_bh(&da->qlock); - da->lto = jiffies; - da->ackto = ackto; + da->lto = jiffies + COMPUTE_TO; da->st_rbf.t_rb = 0; da->st_rbf.h_rb = 0; da->ack_rbf.t_rb = 0; da->ack_rbf.h_rb = 0; + da->ackto = ath_dynack_get_max_to(ah); + list_for_each_entry(an, &da->nodes, list) + an->ackto = da->ackto; + /* init acktimeout */ - ath9k_hw_setslottime(ah, (ackto - 3) / 2); - 
ath9k_hw_set_ack_timeout(ah, ackto); - ath9k_hw_set_cts_timeout(ah, ackto); + ath_dynack_set_timeout(ah, da->ackto); + + spin_unlock_bh(&da->qlock); } EXPORT_SYMBOL(ath_dynack_reset); @@ -359,6 +390,8 @@ void ath_dynack_init(struct ath_hw *ah) spin_lock_init(&da->qlock); INIT_LIST_HEAD(&da->nodes); + /* ackto = slottime + sifs + air delay */ + da->ackto = 9 + 16 + 64; ah->hw->wiphy->features |= NL80211_FEATURE_ACKTO_ESTIMATION; } diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index 214c68269a69..d961095ab01f 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -463,7 +463,7 @@ static void ath9k_enable_rmw_buffer(void *hw_priv) atomic_inc(&priv->wmi->m_rmw_cnt); } -static u32 ath9k_reg_rmw_single(void *hw_priv, +static void ath9k_reg_rmw_single(void *hw_priv, u32 reg_offset, u32 set, u32 clr) { struct ath_hw *ah = hw_priv; @@ -471,7 +471,6 @@ static u32 ath9k_reg_rmw_single(void *hw_priv, struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv; struct register_rmw buf, buf_ret; int ret; - u32 val = 0; buf.reg = cpu_to_be32(reg_offset); buf.set = cpu_to_be32(set); @@ -485,7 +484,6 @@ static u32 ath9k_reg_rmw_single(void *hw_priv, ath_dbg(common, WMI, "REGISTER RMW FAILED:(0x%04x, %d)\n", reg_offset, ret); } - return val; } static u32 ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr) diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c index 1bf63a4efb4c..d091c8ebdcf0 100644 --- a/drivers/net/wireless/ath/ath9k/htc_hst.c +++ b/drivers/net/wireless/ath/ath9k/htc_hst.c @@ -170,6 +170,7 @@ static int htc_config_pipe_credits(struct htc_target *target) time_left = wait_for_completion_timeout(&target->cmd_wait, HZ); if (!time_left) { dev_err(target->dev, "HTC credit config timeout\n"); + kfree_skb(skb); return -ETIMEDOUT; } @@ -205,6 +206,7 @@ static int htc_setup_complete(struct htc_target *target) time_left = wait_for_completion_timeout(&target->cmd_wait, HZ); if (!time_left) { dev_err(target->dev, "HTC start timeout\n"); + kfree_skb(skb); return -ETIMEDOUT; } @@ -277,6 +279,7 @@ int htc_connect_service(struct htc_target *target, if (!time_left) { dev_err(target->dev, "Service connection timeout for: %d\n", service_connreq->service_id); + kfree_skb(skb); return -ETIMEDOUT; } diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index f23cb2f3d296..34121fbf32e3 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -2392,7 +2392,8 @@ out: return ret; } -static int ath9k_cancel_remain_on_channel(struct ieee80211_hw *hw) +static int ath9k_cancel_remain_on_channel(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) { struct ath_softc *sc = hw->priv; struct ath_common *common = ath9k_hw_common(sc->sc_ah); diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c index d1f6710ca63b..cdc146091194 100644 --- a/drivers/net/wireless/ath/ath9k/wmi.c +++ b/drivers/net/wireless/ath/ath9k/wmi.c @@ -336,6 +336,7 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, ath_dbg(common, WMI, "Timeout waiting for WMI command: %s\n", wmi_cmd_to_name(cmd_id)); mutex_unlock(&wmi->op_mutex); + kfree_skb(skb); return -ETIMEDOUT; } diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c index 99f1897a775d..486957a04bd1 100644 --- a/drivers/net/wireless/ath/carl9170/usb.c +++ 
b/drivers/net/wireless/ath/carl9170/usb.c @@ -1107,12 +1107,10 @@ static int carl9170_usb_probe(struct usb_interface *intf, static void carl9170_usb_disconnect(struct usb_interface *intf) { struct ar9170 *ar = usb_get_intfdata(intf); - struct usb_device *udev; if (WARN_ON(!ar)) return; - udev = ar->udev; wait_for_completion(&ar->fw_load_wait); if (IS_INITIALIZED(ar)) { diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index 1d2d698fb779..523550f94a3f 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -641,52 +641,58 @@ int wcn36xx_smd_start_hw_scan(struct wcn36xx *wcn, struct ieee80211_vif *vif, struct cfg80211_scan_request *req) { struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); - struct wcn36xx_hal_start_scan_offload_req_msg msg_body; + struct wcn36xx_hal_start_scan_offload_req_msg *msg_body; int ret, i; if (req->ie_len > WCN36XX_MAX_SCAN_IE_LEN) return -EINVAL; mutex_lock(&wcn->hal_mutex); - INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_SCAN_OFFLOAD_REQ); + msg_body = kzalloc(sizeof(*msg_body), GFP_KERNEL); + if (!msg_body) { + ret = -ENOMEM; + goto out; + } - msg_body.scan_type = WCN36XX_HAL_SCAN_TYPE_ACTIVE; - msg_body.min_ch_time = 30; - msg_body.max_ch_time = 100; - msg_body.scan_hidden = 1; - memcpy(msg_body.mac, vif->addr, ETH_ALEN); - msg_body.bss_type = vif_priv->bss_type; - msg_body.p2p_search = vif->p2p; + INIT_HAL_MSG((*msg_body), WCN36XX_HAL_START_SCAN_OFFLOAD_REQ); - msg_body.num_ssid = min_t(u8, req->n_ssids, ARRAY_SIZE(msg_body.ssids)); - for (i = 0; i < msg_body.num_ssid; i++) { - msg_body.ssids[i].length = min_t(u8, req->ssids[i].ssid_len, - sizeof(msg_body.ssids[i].ssid)); - memcpy(msg_body.ssids[i].ssid, req->ssids[i].ssid, - msg_body.ssids[i].length); + msg_body->scan_type = WCN36XX_HAL_SCAN_TYPE_ACTIVE; + msg_body->min_ch_time = 30; + msg_body->max_ch_time = 100; + msg_body->scan_hidden = 1; + memcpy(msg_body->mac, vif->addr, ETH_ALEN); + msg_body->bss_type = vif_priv->bss_type; + msg_body->p2p_search = vif->p2p; + + msg_body->num_ssid = min_t(u8, req->n_ssids, ARRAY_SIZE(msg_body->ssids)); + for (i = 0; i < msg_body->num_ssid; i++) { + msg_body->ssids[i].length = min_t(u8, req->ssids[i].ssid_len, + sizeof(msg_body->ssids[i].ssid)); + memcpy(msg_body->ssids[i].ssid, req->ssids[i].ssid, + msg_body->ssids[i].length); } - msg_body.num_channel = min_t(u8, req->n_channels, - sizeof(msg_body.channels)); - for (i = 0; i < msg_body.num_channel; i++) - msg_body.channels[i] = req->channels[i]->hw_value; + msg_body->num_channel = min_t(u8, req->n_channels, + sizeof(msg_body->channels)); + for (i = 0; i < msg_body->num_channel; i++) + msg_body->channels[i] = req->channels[i]->hw_value; - msg_body.header.len -= WCN36XX_MAX_SCAN_IE_LEN; + msg_body->header.len -= WCN36XX_MAX_SCAN_IE_LEN; if (req->ie_len > 0) { - msg_body.ie_len = req->ie_len; - msg_body.header.len += req->ie_len; - memcpy(msg_body.ie, req->ie, req->ie_len); + msg_body->ie_len = req->ie_len; + msg_body->header.len += req->ie_len; + memcpy(msg_body->ie, req->ie, req->ie_len); } - PREPARE_HAL_BUF(wcn->hal_buf, msg_body); + PREPARE_HAL_BUF(wcn->hal_buf, (*msg_body)); wcn36xx_dbg(WCN36XX_DBG_HAL, "hal start hw-scan (channels: %u; ssids: %u; p2p: %s)\n", - msg_body.num_channel, msg_body.num_ssid, - msg_body.p2p_search ? "yes" : "no"); + msg_body->num_channel, msg_body->num_ssid, + msg_body->p2p_search ? 
"yes" : "no"); - ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len); + ret = wcn36xx_smd_send_and_wait(wcn, msg_body->header.len); if (ret) { wcn36xx_err("Sending hal_start_scan_offload failed\n"); goto out; @@ -698,6 +704,7 @@ int wcn36xx_smd_start_hw_scan(struct wcn36xx *wcn, struct ieee80211_vif *vif, goto out; } out: + kfree(msg_body); mutex_unlock(&wcn->hal_mutex); return ret; } @@ -1257,96 +1264,104 @@ out: static int wcn36xx_smd_config_bss_v1(struct wcn36xx *wcn, const struct wcn36xx_hal_config_bss_req_msg *orig) { - struct wcn36xx_hal_config_bss_req_msg_v1 msg_body; - struct wcn36xx_hal_config_bss_params_v1 *bss = &msg_body.bss_params; - struct wcn36xx_hal_config_sta_params_v1 *sta = &bss->sta; + struct wcn36xx_hal_config_bss_req_msg_v1 *msg_body; + struct wcn36xx_hal_config_bss_params_v1 *bss; + struct wcn36xx_hal_config_sta_params_v1 *sta; + int ret; + + msg_body = kzalloc(sizeof(*msg_body), GFP_KERNEL); + if (!msg_body) + return -ENOMEM; + + INIT_HAL_MSG((*msg_body), WCN36XX_HAL_CONFIG_BSS_REQ); - INIT_HAL_MSG(msg_body, WCN36XX_HAL_CONFIG_BSS_REQ); + bss = &msg_body->bss_params; + sta = &bss->sta; /* convert orig to v1 */ - memcpy(&msg_body.bss_params.bssid, + memcpy(&msg_body->bss_params.bssid, &orig->bss_params.bssid, ETH_ALEN); - memcpy(&msg_body.bss_params.self_mac_addr, + memcpy(&msg_body->bss_params.self_mac_addr, &orig->bss_params.self_mac_addr, ETH_ALEN); - msg_body.bss_params.bss_type = orig->bss_params.bss_type; - msg_body.bss_params.oper_mode = orig->bss_params.oper_mode; - msg_body.bss_params.nw_type = orig->bss_params.nw_type; + msg_body->bss_params.bss_type = orig->bss_params.bss_type; + msg_body->bss_params.oper_mode = orig->bss_params.oper_mode; + msg_body->bss_params.nw_type = orig->bss_params.nw_type; - msg_body.bss_params.short_slot_time_supported = + msg_body->bss_params.short_slot_time_supported = orig->bss_params.short_slot_time_supported; - msg_body.bss_params.lla_coexist = orig->bss_params.lla_coexist; - msg_body.bss_params.llb_coexist = orig->bss_params.llb_coexist; - msg_body.bss_params.llg_coexist = orig->bss_params.llg_coexist; - msg_body.bss_params.ht20_coexist = orig->bss_params.ht20_coexist; - msg_body.bss_params.lln_non_gf_coexist = + msg_body->bss_params.lla_coexist = orig->bss_params.lla_coexist; + msg_body->bss_params.llb_coexist = orig->bss_params.llb_coexist; + msg_body->bss_params.llg_coexist = orig->bss_params.llg_coexist; + msg_body->bss_params.ht20_coexist = orig->bss_params.ht20_coexist; + msg_body->bss_params.lln_non_gf_coexist = orig->bss_params.lln_non_gf_coexist; - msg_body.bss_params.lsig_tx_op_protection_full_support = + msg_body->bss_params.lsig_tx_op_protection_full_support = orig->bss_params.lsig_tx_op_protection_full_support; - msg_body.bss_params.rifs_mode = orig->bss_params.rifs_mode; - msg_body.bss_params.beacon_interval = orig->bss_params.beacon_interval; - msg_body.bss_params.dtim_period = orig->bss_params.dtim_period; - msg_body.bss_params.tx_channel_width_set = + msg_body->bss_params.rifs_mode = orig->bss_params.rifs_mode; + msg_body->bss_params.beacon_interval = orig->bss_params.beacon_interval; + msg_body->bss_params.dtim_period = orig->bss_params.dtim_period; + msg_body->bss_params.tx_channel_width_set = orig->bss_params.tx_channel_width_set; - msg_body.bss_params.oper_channel = orig->bss_params.oper_channel; - msg_body.bss_params.ext_channel = orig->bss_params.ext_channel; + msg_body->bss_params.oper_channel = orig->bss_params.oper_channel; + msg_body->bss_params.ext_channel = orig->bss_params.ext_channel; - 
msg_body.bss_params.reserved = orig->bss_params.reserved; + msg_body->bss_params.reserved = orig->bss_params.reserved; - memcpy(&msg_body.bss_params.ssid, + memcpy(&msg_body->bss_params.ssid, &orig->bss_params.ssid, sizeof(orig->bss_params.ssid)); - msg_body.bss_params.action = orig->bss_params.action; - msg_body.bss_params.rateset = orig->bss_params.rateset; - msg_body.bss_params.ht = orig->bss_params.ht; - msg_body.bss_params.obss_prot_enabled = + msg_body->bss_params.action = orig->bss_params.action; + msg_body->bss_params.rateset = orig->bss_params.rateset; + msg_body->bss_params.ht = orig->bss_params.ht; + msg_body->bss_params.obss_prot_enabled = orig->bss_params.obss_prot_enabled; - msg_body.bss_params.rmf = orig->bss_params.rmf; - msg_body.bss_params.ht_oper_mode = orig->bss_params.ht_oper_mode; - msg_body.bss_params.dual_cts_protection = + msg_body->bss_params.rmf = orig->bss_params.rmf; + msg_body->bss_params.ht_oper_mode = orig->bss_params.ht_oper_mode; + msg_body->bss_params.dual_cts_protection = orig->bss_params.dual_cts_protection; - msg_body.bss_params.max_probe_resp_retry_limit = + msg_body->bss_params.max_probe_resp_retry_limit = orig->bss_params.max_probe_resp_retry_limit; - msg_body.bss_params.hidden_ssid = orig->bss_params.hidden_ssid; - msg_body.bss_params.proxy_probe_resp = + msg_body->bss_params.hidden_ssid = orig->bss_params.hidden_ssid; + msg_body->bss_params.proxy_probe_resp = orig->bss_params.proxy_probe_resp; - msg_body.bss_params.edca_params_valid = + msg_body->bss_params.edca_params_valid = orig->bss_params.edca_params_valid; - memcpy(&msg_body.bss_params.acbe, + memcpy(&msg_body->bss_params.acbe, &orig->bss_params.acbe, sizeof(orig->bss_params.acbe)); - memcpy(&msg_body.bss_params.acbk, + memcpy(&msg_body->bss_params.acbk, &orig->bss_params.acbk, sizeof(orig->bss_params.acbk)); - memcpy(&msg_body.bss_params.acvi, + memcpy(&msg_body->bss_params.acvi, &orig->bss_params.acvi, sizeof(orig->bss_params.acvi)); - memcpy(&msg_body.bss_params.acvo, + memcpy(&msg_body->bss_params.acvo, &orig->bss_params.acvo, sizeof(orig->bss_params.acvo)); - msg_body.bss_params.ext_set_sta_key_param_valid = + msg_body->bss_params.ext_set_sta_key_param_valid = orig->bss_params.ext_set_sta_key_param_valid; - memcpy(&msg_body.bss_params.ext_set_sta_key_param, + memcpy(&msg_body->bss_params.ext_set_sta_key_param, &orig->bss_params.ext_set_sta_key_param, sizeof(orig->bss_params.acvo)); - msg_body.bss_params.wcn36xx_hal_persona = + msg_body->bss_params.wcn36xx_hal_persona = orig->bss_params.wcn36xx_hal_persona; - msg_body.bss_params.spectrum_mgt_enable = + msg_body->bss_params.spectrum_mgt_enable = orig->bss_params.spectrum_mgt_enable; - msg_body.bss_params.tx_mgmt_power = orig->bss_params.tx_mgmt_power; - msg_body.bss_params.max_tx_power = orig->bss_params.max_tx_power; + msg_body->bss_params.tx_mgmt_power = orig->bss_params.tx_mgmt_power; + msg_body->bss_params.max_tx_power = orig->bss_params.max_tx_power; wcn36xx_smd_convert_sta_to_v1(wcn, &orig->bss_params.sta, - &msg_body.bss_params.sta); + &msg_body->bss_params.sta); - PREPARE_HAL_BUF(wcn->hal_buf, msg_body); + PREPARE_HAL_BUF(wcn->hal_buf, (*msg_body)); wcn36xx_dbg(WCN36XX_DBG_HAL, "hal config bss v1 bssid %pM self_mac_addr %pM bss_type %d oper_mode %d nw_type %d\n", @@ -1358,7 +1373,10 @@ static int wcn36xx_smd_config_bss_v1(struct wcn36xx *wcn, sta->bssid, sta->action, sta->sta_index, sta->bssid_index, sta->aid, sta->type, sta->mac); - return wcn36xx_smd_send_and_wait(wcn, msg_body.header.len); + ret = wcn36xx_smd_send_and_wait(wcn, 
msg_body->header.len); + kfree(msg_body); + + return ret; } @@ -1410,16 +1428,21 @@ int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif, struct ieee80211_sta *sta, const u8 *bssid, bool update) { - struct wcn36xx_hal_config_bss_req_msg msg; + struct wcn36xx_hal_config_bss_req_msg *msg; struct wcn36xx_hal_config_bss_params *bss; struct wcn36xx_hal_config_sta_params *sta_params; struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); int ret; mutex_lock(&wcn->hal_mutex); - INIT_HAL_MSG(msg, WCN36XX_HAL_CONFIG_BSS_REQ); + msg = kzalloc(sizeof(*msg), GFP_KERNEL); + if (!msg) { + ret = -ENOMEM; + goto out; + } + INIT_HAL_MSG((*msg), WCN36XX_HAL_CONFIG_BSS_REQ); - bss = &msg.bss_params; + bss = &msg->bss_params; sta_params = &bss->sta; WARN_ON(is_zero_ether_addr(bssid)); @@ -1514,11 +1537,11 @@ int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif, sta_params->mac); if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) { - ret = wcn36xx_smd_config_bss_v1(wcn, &msg); + ret = wcn36xx_smd_config_bss_v1(wcn, msg); } else { - PREPARE_HAL_BUF(wcn->hal_buf, msg); + PREPARE_HAL_BUF(wcn->hal_buf, (*msg)); - ret = wcn36xx_smd_send_and_wait(wcn, msg.header.len); + ret = wcn36xx_smd_send_and_wait(wcn, msg->header.len); } if (ret) { wcn36xx_err("Sending hal_config_bss failed\n"); @@ -1534,6 +1557,7 @@ int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif, goto out; } out: + kfree(msg); mutex_unlock(&wcn->hal_mutex); return ret; } diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 2fb4258941a5..c70854ea5634 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -25,6 +25,22 @@ #define WIL_MAX_ROC_DURATION_MS 5000 +#define WIL_EDMG_CHANNEL_9_SUBCHANNELS (BIT(0) | BIT(1)) +#define WIL_EDMG_CHANNEL_10_SUBCHANNELS (BIT(1) | BIT(2)) +#define WIL_EDMG_CHANNEL_11_SUBCHANNELS (BIT(2) | BIT(3)) + +/* WIL_EDMG_BW_CONFIGURATION define the allowed channel bandwidth + * configurations as defined by IEEE 802.11 section 9.4.2.251, Table 13. + * The value 5 allowing CB1 and CB2 of adjacent channels. + */ +#define WIL_EDMG_BW_CONFIGURATION 5 + +/* WIL_EDMG_CHANNELS is a bitmap that indicates the 2.16 GHz channel(s) that + * are allowed to be used for EDMG transmissions in the BSS as defined by + * IEEE 802.11 section 9.4.2.251. 
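For orientation on the EDMG defines above: channels 9, 10 and 11 are the 4.32 GHz (CB2) bondings of adjacent 2.16 GHz primary channels, so each is described by a two-bit mask over primary channels 1-4 (bits 0-3), and bw_config 5 permits both CB1 and CB2 per the cited table. A tiny standalone sketch of the bitmap-to-channel mapping, with the masks copied from the defines and a made-up helper name:

#include <stdint.h>

#define BIT(n) (1u << (n))

#define EDMG_CH9_SUBCH	(BIT(0) | BIT(1))	/* primaries 1+2 */
#define EDMG_CH10_SUBCH	(BIT(1) | BIT(2))	/* primaries 2+3 */
#define EDMG_CH11_SUBCH	(BIT(2) | BIT(3))	/* primaries 3+4 */

/* Map a 2.16 GHz sub-channel bitmap to its bonded EDMG channel
 * number; returns 0 if the bitmap is not one of the CB2 pairs. */
static int edmg_bitmap_to_channel(uint8_t channels)
{
	switch (channels) {
	case EDMG_CH9_SUBCH:	return 9;
	case EDMG_CH10_SUBCH:	return 10;
	case EDMG_CH11_SUBCH:	return 11;
	default:		return 0;
	}
}

This is the same dispatch wil_get_wmi_edmg_channel() performs further below, before the channel number is translated to a WMI enum value.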
+ */ +#define WIL_EDMG_CHANNELS (BIT(0) | BIT(1) | BIT(2) | BIT(3)) + bool disable_ap_sme; module_param(disable_ap_sme, bool, 0444); MODULE_PARM_DESC(disable_ap_sme, " let user space handle AP mode SME"); @@ -51,6 +67,39 @@ static struct ieee80211_channel wil_60ghz_channels[] = { CHAN60G(4, 0), }; +/* Rx channel bonding mode */ +enum wil_rx_cb_mode { + WIL_RX_CB_MODE_DMG, + WIL_RX_CB_MODE_EDMG, + WIL_RX_CB_MODE_WIDE, +}; + +static int wil_rx_cb_mode_to_n_bonded(u8 cb_mode) +{ + switch (cb_mode) { + case WIL_RX_CB_MODE_DMG: + case WIL_RX_CB_MODE_EDMG: + return 1; + case WIL_RX_CB_MODE_WIDE: + return 2; + default: + return 1; + } +} + +static int wil_tx_cb_mode_to_n_bonded(u8 cb_mode) +{ + switch (cb_mode) { + case WMI_TX_MODE_DMG: + case WMI_TX_MODE_EDMG_CB1: + return 1; + case WMI_TX_MODE_EDMG_CB2: + return 2; + default: + return 1; + } +} + static void wil_memdup_ie(u8 **pdst, size_t *pdst_len, const u8 *src, size_t src_len) { @@ -82,6 +131,13 @@ void update_supported_bands(struct wil6210_priv *wil) wiphy->bands[NL80211_BAND_60GHZ]->n_channels = wil_num_supported_channels(wil); + + if (test_bit(WMI_FW_CAPABILITY_CHANNEL_BONDING, wil->fw_capabilities)) { + wiphy->bands[NL80211_BAND_60GHZ]->edmg_cap.channels = + WIL_EDMG_CHANNELS; + wiphy->bands[NL80211_BAND_60GHZ]->edmg_cap.bw_config = + WIL_EDMG_BW_CONFIGURATION; + } } /* Vendor id to be used in vendor specific command and events @@ -275,6 +331,8 @@ static const char * const key_usage_str[] = { [WMI_KEY_USE_PAIRWISE] = "PTK", [WMI_KEY_USE_RX_GROUP] = "RX_GTK", [WMI_KEY_USE_TX_GROUP] = "TX_GTK", + [WMI_KEY_USE_STORE_PTK] = "STORE_PTK", + [WMI_KEY_USE_APPLY_PTK] = "APPLY_PTK", }; int wil_iftype_nl2wmi(enum nl80211_iftype type) @@ -300,6 +358,86 @@ int wil_iftype_nl2wmi(enum nl80211_iftype type) return -EOPNOTSUPP; } +int wil_spec2wmi_ch(u8 spec_ch, u8 *wmi_ch) +{ + switch (spec_ch) { + case 1: + *wmi_ch = WMI_CHANNEL_1; + break; + case 2: + *wmi_ch = WMI_CHANNEL_2; + break; + case 3: + *wmi_ch = WMI_CHANNEL_3; + break; + case 4: + *wmi_ch = WMI_CHANNEL_4; + break; + case 5: + *wmi_ch = WMI_CHANNEL_5; + break; + case 6: + *wmi_ch = WMI_CHANNEL_6; + break; + case 9: + *wmi_ch = WMI_CHANNEL_9; + break; + case 10: + *wmi_ch = WMI_CHANNEL_10; + break; + case 11: + *wmi_ch = WMI_CHANNEL_11; + break; + case 12: + *wmi_ch = WMI_CHANNEL_12; + break; + default: + return -EINVAL; + } + + return 0; +} + +int wil_wmi2spec_ch(u8 wmi_ch, u8 *spec_ch) +{ + switch (wmi_ch) { + case WMI_CHANNEL_1: + *spec_ch = 1; + break; + case WMI_CHANNEL_2: + *spec_ch = 2; + break; + case WMI_CHANNEL_3: + *spec_ch = 3; + break; + case WMI_CHANNEL_4: + *spec_ch = 4; + break; + case WMI_CHANNEL_5: + *spec_ch = 5; + break; + case WMI_CHANNEL_6: + *spec_ch = 6; + break; + case WMI_CHANNEL_9: + *spec_ch = 9; + break; + case WMI_CHANNEL_10: + *spec_ch = 10; + break; + case WMI_CHANNEL_11: + *spec_ch = 11; + break; + case WMI_CHANNEL_12: + *spec_ch = 12; + break; + default: + return -EINVAL; + } + + return 0; +} + int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid, struct station_info *sinfo) { @@ -314,6 +452,7 @@ int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid, } __packed reply; struct wil_net_stats *stats = &wil->sta[cid].stats; int rc; + u8 txflag = RATE_INFO_FLAGS_DMG; memset(&reply, 0, sizeof(reply)); @@ -327,7 +466,8 @@ int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid, " MCS %d TSF 0x%016llx\n" " BF status 0x%08x RSSI %d SQI %d%%\n" " Tx Tpt %d goodput %d Rx goodput %d\n" - " Sectors(rx:tx) my %d:%d peer %d:%d\n""}\n", + " Sectors(rx:tx) my %d:%d peer 
%d:%d\n" + " Tx mode %d}\n", cid, vif->mid, le16_to_cpu(reply.evt.bf_mcs), le64_to_cpu(reply.evt.tsf), reply.evt.status, reply.evt.rssi, @@ -338,7 +478,8 @@ int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid, le16_to_cpu(reply.evt.my_rx_sector), le16_to_cpu(reply.evt.my_tx_sector), le16_to_cpu(reply.evt.other_rx_sector), - le16_to_cpu(reply.evt.other_tx_sector)); + le16_to_cpu(reply.evt.other_tx_sector), + reply.evt.tx_mode); sinfo->generation = wil->sinfo_gen; @@ -351,9 +492,16 @@ int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid, BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC) | BIT_ULL(NL80211_STA_INFO_TX_FAILED); - sinfo->txrate.flags = RATE_INFO_FLAGS_60G; + if (wil->use_enhanced_dma_hw && reply.evt.tx_mode != WMI_TX_MODE_DMG) + txflag = RATE_INFO_FLAGS_EDMG; + + sinfo->txrate.flags = txflag; sinfo->txrate.mcs = le16_to_cpu(reply.evt.bf_mcs); sinfo->rxrate.mcs = stats->last_mcs_rx; + sinfo->txrate.n_bonded_ch = + wil_tx_cb_mode_to_n_bonded(reply.evt.tx_mode); + sinfo->rxrate.n_bonded_ch = + wil_rx_cb_mode_to_n_bonded(stats->last_cb_mode_rx); sinfo->rx_bytes = stats->rx_bytes; sinfo->rx_packets = stats->rx_packets; sinfo->rx_dropped_misc = stats->rx_dropped; @@ -396,7 +544,7 @@ static int wil_cfg80211_get_station(struct wiphy *wiphy, /* * Find @idx-th active STA for specific MID for station dump. */ -static int wil_find_cid_by_idx(struct wil6210_priv *wil, u8 mid, int idx) +int wil_find_cid_by_idx(struct wil6210_priv *wil, u8 mid, int idx) { int i; @@ -1022,6 +1170,33 @@ static int wil_ft_connect(struct wiphy *wiphy, return rc; } +static int wil_get_wmi_edmg_channel(struct wil6210_priv *wil, u8 edmg_bw_config, + u8 edmg_channels, u8 *wmi_ch) +{ + if (!edmg_bw_config) { + *wmi_ch = 0; + return 0; + } else if (edmg_bw_config == WIL_EDMG_BW_CONFIGURATION) { + /* convert from edmg channel bitmap into edmg channel number */ + switch (edmg_channels) { + case WIL_EDMG_CHANNEL_9_SUBCHANNELS: + return wil_spec2wmi_ch(9, wmi_ch); + case WIL_EDMG_CHANNEL_10_SUBCHANNELS: + return wil_spec2wmi_ch(10, wmi_ch); + case WIL_EDMG_CHANNEL_11_SUBCHANNELS: + return wil_spec2wmi_ch(11, wmi_ch); + default: + wil_err(wil, "Unsupported edmg channel bitmap 0x%x\n", + edmg_channels); + return -EINVAL; + } + } else { + wil_err(wil, "Unsupported EDMG BW configuration %d\n", + edmg_bw_config); + return -EINVAL; + } +} + static int wil_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev, struct cfg80211_connect_params *sme) @@ -1167,6 +1342,11 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, memcpy(conn.ssid, ssid_eid+2, conn.ssid_len); conn.channel = ch - 1; + rc = wil_get_wmi_edmg_channel(wil, sme->edmg.bw_config, + sme->edmg.channels, &conn.edmg_channel); + if (rc < 0) + return rc; + ether_addr_copy(conn.bssid, bss->bssid); ether_addr_copy(conn.dst_mac, bss->bssid); @@ -1376,6 +1556,7 @@ void wil_set_crypto_rx(u8 key_index, enum wmi_key_usage key_usage, return; switch (key_usage) { + case WMI_KEY_USE_STORE_PTK: case WMI_KEY_USE_PAIRWISE: for (tid = 0; tid < WIL_STA_TID_NUM; tid++) { cc = &cs->tid_crypto_rx[tid].key_id[key_index]; @@ -1473,6 +1654,16 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy, return -EINVAL; } + spin_lock_bh(&wil->eap_lock); + if (pairwise && wdev->iftype == NL80211_IFTYPE_STATION && + (vif->ptk_rekey_state == WIL_REKEY_M3_RECEIVED || + vif->ptk_rekey_state == WIL_REKEY_WAIT_M4_SENT)) { + key_usage = WMI_KEY_USE_STORE_PTK; + vif->ptk_rekey_state = WIL_REKEY_WAIT_M4_SENT; + wil_dbg_misc(wil, "Store EAPOL key\n"); + } + spin_unlock_bh(&wil->eap_lock); + rc = 
wmi_add_cipher_key(vif, key_index, mac_addr, params->key_len, params->key, key_usage); if (!rc && !IS_ERR(cs)) { @@ -1728,7 +1919,7 @@ out: static int _wil_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, const u8 *ssid, size_t ssid_len, u32 privacy, - int bi, u8 chan, + int bi, u8 chan, u8 wmi_edmg_channel, struct cfg80211_beacon_data *bcon, u8 hidden_ssid, u32 pbss) { @@ -1791,6 +1982,7 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy, vif->privacy = privacy; vif->channel = chan; + vif->wmi_edmg_channel = wmi_edmg_channel; vif->hidden_ssid = hidden_ssid; vif->pbss = pbss; vif->bi = bi; @@ -1801,7 +1993,8 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy, if (!wil_has_other_active_ifaces(wil, ndev, false, true)) wil6210_bus_request(wil, WIL_MAX_BUS_REQUEST_KBPS); - rc = wmi_pcp_start(vif, bi, wmi_nettype, chan, hidden_ssid, is_go); + rc = wmi_pcp_start(vif, bi, wmi_nettype, chan, wmi_edmg_channel, + hidden_ssid, is_go); if (rc) goto err_pcp_start; @@ -1853,7 +2046,8 @@ void wil_cfg80211_ap_recovery(struct wil6210_priv *wil) rc = _wil_cfg80211_start_ap(wiphy, ndev, vif->ssid, vif->ssid_len, vif->privacy, vif->bi, - vif->channel, &bcon, + vif->channel, + vif->wmi_edmg_channel, &bcon, vif->hidden_ssid, vif->pbss); if (rc) { wil_err(wil, "vif %d recovery failed (%d)\n", i, rc); @@ -1903,7 +2097,8 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy, rc = _wil_cfg80211_start_ap(wiphy, ndev, vif->ssid, vif->ssid_len, privacy, wdev->beacon_interval, - vif->channel, bcon, + vif->channel, + vif->wmi_edmg_channel, bcon, vif->hidden_ssid, vif->pbss); } else { @@ -1922,10 +2117,17 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy, struct ieee80211_channel *channel = info->chandef.chan; struct cfg80211_beacon_data *bcon = &info->beacon; struct cfg80211_crypto_settings *crypto = &info->crypto; + u8 wmi_edmg_channel; u8 hidden_ssid; wil_dbg_misc(wil, "start_ap\n"); + rc = wil_get_wmi_edmg_channel(wil, info->chandef.edmg.bw_config, + info->chandef.edmg.channels, + &wmi_edmg_channel); + if (rc < 0) + return rc; + if (!channel) { wil_err(wil, "AP: No channel???\n"); return -EINVAL; @@ -1965,7 +2167,8 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy, rc = _wil_cfg80211_start_ap(wiphy, ndev, info->ssid, info->ssid_len, info->privacy, info->beacon_interval, channel->hw_value, - bcon, hidden_ssid, info->pbss); + wmi_edmg_channel, bcon, hidden_ssid, + info->pbss); return rc; } diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 74834131cf7c..304b4d4e506a 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -393,7 +393,8 @@ static int wil_debugfs_iomem_x32_set(void *data, u64 val) if (ret < 0) return ret; - writel(val, (void __iomem *)d->offset); + writel_relaxed(val, (void __iomem *)d->offset); + wmb(); /* make sure write propagated to HW */ wil_pm_runtime_put(wil); @@ -959,6 +960,18 @@ static const struct file_operations fops_pmcdata = { .llseek = wil_pmc_llseek, }; +static int wil_pmcring_seq_open(struct inode *inode, struct file *file) +{ + return single_open(file, wil_pmcring_read, inode->i_private); +} + +static const struct file_operations fops_pmcring = { + .open = wil_pmcring_seq_open, + .release = single_release, + .read = seq_read, + .llseek = seq_lseek, +}; + /*---tx_mgmt---*/ /* Write mgmt frame to this file to send it */ static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf, @@ -1052,8 +1065,7 @@ static void 
wil_seq_print_skb(struct seq_file *s, struct sk_buff *skb) if (nr_frags) { seq_printf(s, " nr_frags = %d\n", nr_frags); for (i = 0; i < nr_frags; i++) { - const struct skb_frag_struct *frag = - &skb_shinfo(skb)->frags[i]; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; len = skb_frag_size(frag); p = skb_frag_address_safe(frag); @@ -2372,6 +2384,7 @@ static const struct { {"back", 0644, &fops_back}, {"pmccfg", 0644, &fops_pmccfg}, {"pmcdata", 0444, &fops_pmcdata}, + {"pmcring", 0444, &fops_pmcring}, {"temp", 0444, &temp_fops}, {"freq", 0444, &freq_fops}, {"link", 0444, &link_fops}, diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 173561fe593d..9b72202eeadc 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -373,6 +373,7 @@ static void _wil6210_disconnect_complete(struct wil6210_vif *vif, } clear_bit(wil_vif_fwconnecting, vif->status); clear_bit(wil_vif_ft_roam, vif->status); + vif->ptk_rekey_state = WIL_REKEY_IDLE; break; case NL80211_IFTYPE_AP: @@ -724,6 +725,8 @@ int wil_priv_init(struct wil6210_priv *wil) INIT_LIST_HEAD(&wil->pending_wmi_ev); spin_lock_init(&wil->wmi_ev_lock); spin_lock_init(&wil->net_queue_lock); + spin_lock_init(&wil->eap_lock); + init_waitqueue_head(&wil->wq); init_rwsem(&wil->mem_lock); @@ -1654,6 +1657,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) cancel_work_sync(&vif->disconnect_worker); wil6210_disconnect(vif, NULL, WLAN_REASON_DEAUTH_LEAVING); + vif->ptk_rekey_state = WIL_REKEY_IDLE; } } wil_bcast_fini_all(wil); diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c index 59f041d708fe..a87bb84a8286 100644 --- a/drivers/net/wireless/ath/wil6210/netdev.c +++ b/drivers/net/wireless/ath/wil6210/netdev.c @@ -218,6 +218,7 @@ static void wil_vif_deinit(struct wil6210_vif *vif) cancel_work_sync(&vif->p2p.delayed_listen_work); wil_probe_client_flush(vif); cancel_work_sync(&vif->probe_client_worker); + cancel_work_sync(&vif->enable_tx_key_worker); } void wil_vif_free(struct wil6210_vif *vif) @@ -283,7 +284,9 @@ static void wil_vif_init(struct wil6210_vif *vif) INIT_WORK(&vif->probe_client_worker, wil_probe_client_worker); INIT_WORK(&vif->disconnect_worker, wil_disconnect_worker); + INIT_WORK(&vif->p2p.discovery_expired_work, wil_p2p_listen_expired); INIT_WORK(&vif->p2p.delayed_listen_work, wil_p2p_delayed_listen_work); + INIT_WORK(&vif->enable_tx_key_worker, wil_enable_tx_key_worker); INIT_LIST_HEAD(&vif->probe_client_pending); @@ -540,6 +543,7 @@ void wil_vif_remove(struct wil6210_priv *wil, u8 mid) cancel_work_sync(&vif->disconnect_worker); wil_probe_client_flush(vif); cancel_work_sync(&vif->probe_client_worker); + cancel_work_sync(&vif->enable_tx_key_worker); /* for VIFs, ndev will be freed by destructor after RTNL is unlocked. * the main interface will be freed in wil_if_free, we need to keep it * a bit longer so logging macros will work. 
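The new "pmcring" debugfs entry registered above is a stock seq_file node: single_open() binds a show callback that dumps the whole PMC descriptor ring in one pass (the callback itself, wil_pmcring_read(), is added in pmc.c further below). A generic skeleton of that pattern, with hypothetical demo_* names:

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>

struct demo_priv {
	void *ring_va;		/* buffer to expose */
	size_t ring_size;
};

static int demo_ring_show(struct seq_file *s, void *data)
{
	struct demo_priv *priv = s->private;	/* from inode->i_private */

	seq_write(s, priv->ring_va, priv->ring_size);	/* raw bytes */
	return 0;
}

static int demo_ring_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_ring_show, inode->i_private);
}

static const struct file_operations demo_ring_fops = {
	.open		= demo_ring_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* registration:
 *	debugfs_create_file("ring", 0444, parent_dentry, priv,
 *			    &demo_ring_fops);
 */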
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c index 9f5a914abc18..18dd8b246022 100644 --- a/drivers/net/wireless/ath/wil6210/pcie_bus.c +++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c @@ -435,7 +435,7 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) mutex_unlock(&wil->mutex); if (rc) { wil_err(wil, "failed to load WMI only FW\n"); - goto if_remove; + /* ignore the error to allow debugging */ } } @@ -455,8 +455,6 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; -if_remove: - wil_if_remove(wil); bus_disable: wil_if_pcie_disable(wil); err_iounmap: diff --git a/drivers/net/wireless/ath/wil6210/pmc.c b/drivers/net/wireless/ath/wil6210/pmc.c index c49f7988369e..4b7ac14fc2a7 100644 --- a/drivers/net/wireless/ath/wil6210/pmc.c +++ b/drivers/net/wireless/ath/wil6210/pmc.c @@ -18,6 +18,7 @@ #include <linux/types.h> #include <linux/errno.h> #include <linux/fs.h> +#include <linux/seq_file.h> #include "wmi.h" #include "wil6210.h" #include "txrx.h" @@ -431,3 +432,28 @@ out: return newpos; } + +int wil_pmcring_read(struct seq_file *s, void *data) +{ + struct wil6210_priv *wil = s->private; + struct pmc_ctx *pmc = &wil->pmc; + size_t pmc_ring_size = + sizeof(struct vring_rx_desc) * pmc->num_descriptors; + + mutex_lock(&pmc->lock); + + if (!wil_is_pmc_allocated(pmc)) { + wil_err(wil, "error, pmc is not allocated!\n"); + pmc->last_cmd_status = -EPERM; + mutex_unlock(&pmc->lock); + return -EPERM; + } + + wil_dbg_misc(wil, "pmcring_read: size %zu\n", pmc_ring_size); + + seq_write(s, pmc->pring_va, pmc_ring_size); + + mutex_unlock(&pmc->lock); + + return 0; +} diff --git a/drivers/net/wireless/ath/wil6210/pmc.h b/drivers/net/wireless/ath/wil6210/pmc.h index bebc8d52e1e6..92b8c4d84a6a 100644 --- a/drivers/net/wireless/ath/wil6210/pmc.h +++ b/drivers/net/wireless/ath/wil6210/pmc.h @@ -25,3 +25,4 @@ void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd); int wil_pmc_last_cmd_status(struct wil6210_priv *wil); ssize_t wil_pmc_read(struct file *, char __user *, size_t, loff_t *); loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence); +int wil_pmcring_read(struct seq_file *s, void *data); diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c index 784239bcb3a6..13246d216803 100644 --- a/drivers/net/wireless/ath/wil6210/rx_reorder.c +++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c @@ -260,7 +260,6 @@ struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil, r->reorder_buf = kcalloc(size, sizeof(struct sk_buff *), GFP_KERNEL); if (!r->reorder_buf) { - kfree(r->reorder_buf); kfree(r); return NULL; } diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index eae00aafaa88..cb13652491ad 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -725,24 +725,198 @@ static void wil_get_netif_rx_params(struct sk_buff *skb, int *cid, } /* + * Check if skb is ptk eapol key message + * + * returns a pointer to the start of the eapol key structure, NULL + * if frame is not PTK eapol key + */ +static struct wil_eapol_key *wil_is_ptk_eapol_key(struct wil6210_priv *wil, + struct sk_buff *skb) +{ + u8 *buf; + const struct wil_1x_hdr *hdr; + struct wil_eapol_key *key; + u16 key_info; + int len = skb->len; + + if (!skb_mac_header_was_set(skb)) { + wil_err(wil, "mac header was not set\n"); + return NULL; + } + + len -= 
skb_mac_offset(skb); + + if (len < sizeof(struct ethhdr) + sizeof(struct wil_1x_hdr) + + sizeof(struct wil_eapol_key)) + return NULL; + + buf = skb_mac_header(skb) + sizeof(struct ethhdr); + + hdr = (const struct wil_1x_hdr *)buf; + if (hdr->type != WIL_1X_TYPE_EAPOL_KEY) + return NULL; + + key = (struct wil_eapol_key *)(buf + sizeof(struct wil_1x_hdr)); + if (key->type != WIL_EAPOL_KEY_TYPE_WPA && + key->type != WIL_EAPOL_KEY_TYPE_RSN) + return NULL; + + key_info = be16_to_cpu(key->key_info); + if (!(key_info & WIL_KEY_INFO_KEY_TYPE)) /* check if pairwise */ + return NULL; + + return key; +} + +static bool wil_skb_is_eap_3(struct wil6210_priv *wil, struct sk_buff *skb) +{ + struct wil_eapol_key *key; + u16 key_info; + + key = wil_is_ptk_eapol_key(wil, skb); + if (!key) + return false; + + key_info = be16_to_cpu(key->key_info); + if (key_info & (WIL_KEY_INFO_MIC | + WIL_KEY_INFO_ENCR_KEY_DATA)) { + /* 3/4 of 4-Way Handshake */ + wil_dbg_misc(wil, "EAPOL key message 3\n"); + return true; + } + /* 1/4 of 4-Way Handshake */ + wil_dbg_misc(wil, "EAPOL key message 1\n"); + + return false; +} + +static bool wil_skb_is_eap_4(struct wil6210_priv *wil, struct sk_buff *skb) +{ + struct wil_eapol_key *key; + u32 *nonce, i; + + key = wil_is_ptk_eapol_key(wil, skb); + if (!key) + return false; + + nonce = (u32 *)key->key_nonce; + for (i = 0; i < WIL_EAP_NONCE_LEN / sizeof(u32); i++, nonce++) { + if (*nonce != 0) { + /* message 2/4 */ + wil_dbg_misc(wil, "EAPOL key message 2\n"); + return false; + } + } + wil_dbg_misc(wil, "EAPOL key message 4\n"); + + return true; +} + +void wil_enable_tx_key_worker(struct work_struct *work) +{ + struct wil6210_vif *vif = container_of(work, + struct wil6210_vif, enable_tx_key_worker); + struct wil6210_priv *wil = vif_to_wil(vif); + int rc, cid; + + rtnl_lock(); + if (vif->ptk_rekey_state != WIL_REKEY_WAIT_M4_SENT) { + wil_dbg_misc(wil, "Invalid rekey state = %d\n", + vif->ptk_rekey_state); + rtnl_unlock(); + return; + } + + cid = wil_find_cid_by_idx(wil, vif->mid, 0); + if (!wil_cid_valid(wil, cid)) { + wil_err(wil, "Invalid cid = %d\n", cid); + rtnl_unlock(); + return; + } + + wil_dbg_misc(wil, "Apply PTK key after eapol was sent out\n"); + rc = wmi_add_cipher_key(vif, 0, wil->sta[cid].addr, 0, NULL, + WMI_KEY_USE_APPLY_PTK); + + vif->ptk_rekey_state = WIL_REKEY_IDLE; + rtnl_unlock(); + + if (rc) + wil_err(wil, "Apply PTK key failed %d\n", rc); +} + +void wil_tx_complete_handle_eapol(struct wil6210_vif *vif, struct sk_buff *skb) +{ + struct wil6210_priv *wil = vif_to_wil(vif); + struct wireless_dev *wdev = vif_to_wdev(vif); + bool q = false; + + if (wdev->iftype != NL80211_IFTYPE_STATION || + !test_bit(WMI_FW_CAPABILITY_SPLIT_REKEY, wil->fw_capabilities)) + return; + + /* check if skb is an EAP message 4/4 */ + if (!wil_skb_is_eap_4(wil, skb)) + return; + + spin_lock_bh(&wil->eap_lock); + switch (vif->ptk_rekey_state) { + case WIL_REKEY_IDLE: + /* ignore idle state, can happen due to M4 retransmission */ + break; + case WIL_REKEY_M3_RECEIVED: + vif->ptk_rekey_state = WIL_REKEY_IDLE; + break; + case WIL_REKEY_WAIT_M4_SENT: + q = true; + break; + default: + wil_err(wil, "Unknown rekey state = %d", + vif->ptk_rekey_state); + } + spin_unlock_bh(&wil->eap_lock); + + if (q) { + q = queue_work(wil->wmi_wq, &vif->enable_tx_key_worker); + wil_dbg_misc(wil, "queue_work of enable_tx_key_worker -> %d\n", + q); + } +} + +static void wil_rx_handle_eapol(struct wil6210_vif *vif, struct sk_buff *skb) +{ + struct wil6210_priv *wil = vif_to_wil(vif); + struct wireless_dev *wdev = 
vif_to_wdev(vif); + + if (wdev->iftype != NL80211_IFTYPE_STATION || + !test_bit(WMI_FW_CAPABILITY_SPLIT_REKEY, wil->fw_capabilities)) + return; + + /* check if skb is a EAP message 3/4 */ + if (!wil_skb_is_eap_3(wil, skb)) + return; + + if (vif->ptk_rekey_state == WIL_REKEY_IDLE) + vif->ptk_rekey_state = WIL_REKEY_M3_RECEIVED; +} + +/* * Pass Rx packet to the netif. Update statistics. * Called in softirq context (NAPI poll). */ -void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) +void wil_netif_rx(struct sk_buff *skb, struct net_device *ndev, int cid, + struct wil_net_stats *stats, bool gro) { gro_result_t rc = GRO_NORMAL; struct wil6210_vif *vif = ndev_to_vif(ndev); struct wil6210_priv *wil = ndev_to_wil(ndev); struct wireless_dev *wdev = vif_to_wdev(vif); unsigned int len = skb->len; - int cid; - int security; u8 *sa, *da = wil_skb_get_da(skb); /* here looking for DA, not A1, thus Rxdesc's 'mcast' indication * is not suitable, need to look at data */ int mcast = is_multicast_ether_addr(da); - struct wil_net_stats *stats; struct sk_buff *xmit_skb = NULL; static const char * const gro_res_str[] = { [GRO_MERGED] = "GRO_MERGED", @@ -753,25 +927,6 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) [GRO_CONSUMED] = "GRO_CONSUMED", }; - wil->txrx_ops.get_netif_rx_params(skb, &cid, &security); - - stats = &wil->sta[cid].stats; - - skb_orphan(skb); - - if (security && (wil->txrx_ops.rx_crypto_check(wil, skb) != 0)) { - rc = GRO_DROP; - dev_kfree_skb(skb); - stats->rx_replay++; - goto stats; - } - - /* check errors reported by HW and update statistics */ - if (unlikely(wil->txrx_ops.rx_error_check(wil, skb, stats))) { - dev_kfree_skb(skb); - return; - } - if (wdev->iftype == NL80211_IFTYPE_STATION) { sa = wil_skb_get_sa(skb); if (mcast && ether_addr_equal(sa, ndev->dev_addr)) { @@ -817,7 +972,14 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) if (skb) { /* deliver to local stack */ skb->protocol = eth_type_trans(skb, ndev); skb->dev = ndev; - rc = napi_gro_receive(&wil->napi_rx, skb); + + if (skb->protocol == cpu_to_be16(ETH_P_PAE)) + wil_rx_handle_eapol(vif, skb); + + if (gro) + rc = napi_gro_receive(&wil->napi_rx, skb); + else + netif_rx_ni(skb); wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n", len, gro_res_str[rc]); } @@ -837,6 +999,36 @@ stats: } } +void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) +{ + int cid, security; + struct wil6210_priv *wil = ndev_to_wil(ndev); + struct wil_net_stats *stats; + + wil->txrx_ops.get_netif_rx_params(skb, &cid, &security); + + stats = &wil->sta[cid].stats; + + skb_orphan(skb); + + if (security && (wil->txrx_ops.rx_crypto_check(wil, skb) != 0)) { + dev_kfree_skb(skb); + ndev->stats.rx_dropped++; + stats->rx_replay++; + stats->rx_dropped++; + wil_dbg_txrx(wil, "Rx drop %d bytes\n", skb->len); + return; + } + + /* check errors reported by HW and update statistics */ + if (unlikely(wil->txrx_ops.rx_error_check(wil, skb, stats))) { + dev_kfree_skb(skb); + return; + } + + wil_netif_rx(skb, ndev, cid, stats, true); +} + /** * Proceed all completed skb's from Rx VRING * @@ -1657,7 +1849,7 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif, len); } else { frag = &skb_shinfo(skb)->frags[f]; - len = frag->size; + len = skb_frag_size(frag); wil_dbg_txrx(wil, "TSO: frag[%d]: len %u\n", f, len); } @@ -1678,8 +1870,8 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif, if (!headlen) { pa = skb_frag_dma_map(dev, frag, - frag->size - 
len, lenmss, - DMA_TO_DEVICE); + skb_frag_size(frag) - len, + lenmss, DMA_TO_DEVICE); vring->ctx[i].mapped_as = wil_mapped_as_page; } else { pa = dma_map_single(dev, @@ -1900,8 +2092,7 @@ static int __wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif, /* middle segments */ for (; f < nr_frags; f++) { - const struct skb_frag_struct *frag = - &skb_shinfo(skb)->frags[f]; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; int len = skb_frag_size(frag); *_d = *d; @@ -2321,6 +2512,10 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid) if (stats) stats->tx_errors++; } + + if (skb->protocol == cpu_to_be16(ETH_P_PAE)) + wil_tx_complete_handle_eapol(vif, skb); + wil_consume_skb(skb, d->dma.error == 0); } memset(ctx, 0, sizeof(*ctx)); diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h index c0da1340c2d2..5120475b0cd7 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.h +++ b/drivers/net/wireless/ath/wil6210/txrx.h @@ -423,6 +423,46 @@ struct vring_rx_mac { #define RX_DMA_STATUS_PHY_INFO BIT(6) #define RX_DMA_STATUS_FFM BIT(7) /* EtherType Flex Filter Match */ +/* IEEE 802.11, 8.5.2 EAPOL-Key frames */ +#define WIL_KEY_INFO_KEY_TYPE BIT(3) /* val of 1 = Pairwise, 0 = Group key */ + +#define WIL_KEY_INFO_MIC BIT(8) +#define WIL_KEY_INFO_ENCR_KEY_DATA BIT(12) /* for rsn only */ + +#define WIL_EAP_NONCE_LEN 32 +#define WIL_EAP_KEY_RSC_LEN 8 +#define WIL_EAP_REPLAY_COUNTER_LEN 8 +#define WIL_EAP_KEY_IV_LEN 16 +#define WIL_EAP_KEY_ID_LEN 8 + +enum { + WIL_1X_TYPE_EAP_PACKET = 0, + WIL_1X_TYPE_EAPOL_START = 1, + WIL_1X_TYPE_EAPOL_LOGOFF = 2, + WIL_1X_TYPE_EAPOL_KEY = 3, +}; + +#define WIL_EAPOL_KEY_TYPE_RSN 2 +#define WIL_EAPOL_KEY_TYPE_WPA 254 + +struct wil_1x_hdr { + u8 version; + u8 type; + __be16 length; + /* followed by data */ +} __packed; + +struct wil_eapol_key { + u8 type; + __be16 key_info; + __be16 key_length; + u8 replay_counter[WIL_EAP_REPLAY_COUNTER_LEN]; + u8 key_nonce[WIL_EAP_NONCE_LEN]; + u8 key_iv[WIL_EAP_KEY_IV_LEN]; + u8 key_rsc[WIL_EAP_KEY_RSC_LEN]; + u8 key_id[WIL_EAP_KEY_ID_LEN]; +} __packed; + struct vring_rx_dma { u32 d0; struct wil_ring_dma_addr addr; @@ -646,6 +686,8 @@ static inline void wil_skb_set_cid(struct sk_buff *skb, u8 cid) } void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev); +void wil_netif_rx(struct sk_buff *skb, struct net_device *ndev, int cid, + struct wil_net_stats *stats, bool gro); void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb); void wil_rx_bar(struct wil6210_priv *wil, struct wil6210_vif *vif, u8 cid, u8 tid, u16 seq); diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c index dc040cd4ab06..04d576deae72 100644 --- a/drivers/net/wireless/ath/wil6210/txrx_edma.c +++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c @@ -221,10 +221,17 @@ static int wil_ring_alloc_skb_edma(struct wil6210_priv *wil, } static inline -void wil_get_next_rx_status_msg(struct wil_status_ring *sring, void *msg) +void wil_get_next_rx_status_msg(struct wil_status_ring *sring, u8 *dr_bit, + void *msg) { - memcpy(msg, (void *)(sring->va + (sring->elem_size * sring->swhead)), - sring->elem_size); + struct wil_rx_status_compressed *_msg; + + _msg = (struct wil_rx_status_compressed *) + (sring->va + (sring->elem_size * sring->swhead)); + *dr_bit = WIL_GET_BITS(_msg->d0, 31, 31); + /* make sure dr_bit is read before the rest of status msg */ + rmb(); + memcpy(msg, (void *)_msg, sring->elem_size); } static inline void 
wil_sring_advance_swhead(struct wil_status_ring *sring) @@ -587,8 +594,7 @@ static bool wil_is_rx_idle_edma(struct wil6210_priv *wil) if (!sring->va) continue; - wil_get_next_rx_status_msg(sring, msg); - dr_bit = wil_rx_status_get_desc_rdy_bit(msg); + wil_get_next_rx_status_msg(sring, &dr_bit, msg); /* Check if there are unhandled RX status messages */ if (dr_bit == sring->desc_rdy_pol) @@ -878,8 +884,7 @@ static struct sk_buff *wil_sring_reap_rx_edma(struct wil6210_priv *wil, BUILD_BUG_ON(sizeof(struct wil_rx_status_extended) > sizeof(skb->cb)); again: - wil_get_next_rx_status_msg(sring, msg); - dr_bit = wil_rx_status_get_desc_rdy_bit(msg); + wil_get_next_rx_status_msg(sring, &dr_bit, msg); /* Completed handling all the ready status messages */ if (dr_bit != sring->desc_rdy_pol) @@ -959,8 +964,8 @@ again: } stats = &wil->sta[cid].stats; - if (unlikely(skb->len < ETH_HLEN)) { - wil_dbg_txrx(wil, "Short frame, len = %d\n", skb->len); + if (unlikely(dmalen < ETH_HLEN)) { + wil_dbg_txrx(wil, "Short frame, len = %d\n", dmalen); stats->rx_short_frame++; rxdata->skipping = true; goto skipping; @@ -1023,6 +1028,8 @@ skipping: stats->last_mcs_rx = wil_rx_status_get_mcs(msg); if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs)) stats->rx_per_mcs[stats->last_mcs_rx]++; + + stats->last_cb_mode_rx = wil_rx_status_get_cb_mode(msg); } if (!wil->use_rx_hw_reordering && !wil->use_compressed_rx_status && @@ -1133,12 +1140,15 @@ static int wil_tx_desc_map_edma(union wil_tx_desc *desc, } static inline void -wil_get_next_tx_status_msg(struct wil_status_ring *sring, +wil_get_next_tx_status_msg(struct wil_status_ring *sring, u8 *dr_bit, struct wil_ring_tx_status *msg) { struct wil_ring_tx_status *_msg = (struct wil_ring_tx_status *) (sring->va + (sring->elem_size * sring->swhead)); + *dr_bit = _msg->desc_ready >> TX_STATUS_DESC_READY_POS; + /* make sure dr_bit is read before the rest of status msg */ + rmb(); *msg = *_msg; } @@ -1167,8 +1177,7 @@ int wil_tx_sring_handler(struct wil6210_priv *wil, int used_before_complete; int used_new; - wil_get_next_tx_status_msg(sring, &msg); - dr_bit = msg.desc_ready >> TX_STATUS_DESC_READY_POS; + wil_get_next_tx_status_msg(sring, &dr_bit, &msg); /* Process completion messages while DR bit has the expected polarity */ while (dr_bit == sring->desc_rdy_pol) { @@ -1255,6 +1264,10 @@ int wil_tx_sring_handler(struct wil6210_priv *wil, if (stats) stats->tx_errors++; } + + if (skb->protocol == cpu_to_be16(ETH_P_PAE)) + wil_tx_complete_handle_eapol(vif, skb); + wil_consume_skb(skb, msg.status == 0); } memset(ctx, 0, sizeof(*ctx)); @@ -1287,8 +1300,7 @@ again: wil_sring_advance_swhead(sring); - wil_get_next_tx_status_msg(sring, &msg); - dr_bit = msg.desc_ready >> TX_STATUS_DESC_READY_POS; + wil_get_next_tx_status_msg(sring, &dr_bit, &msg); } /* shall we wake net queues? 
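The Rx/Tx status-ring changes above share one idea: read the descriptor-ready (DR) bit first, and only after an rmb() read or copy the rest of the status message, so the CPU cannot consume a half-written entry the device is still DMA-ing in. A schematic of the pattern with a hypothetical entry layout (DR polarity bit in d0 bit 31, as in the compressed Rx status):

#include <linux/types.h>
#include <linux/string.h>
#include <asm/barrier.h>

struct demo_status_entry {
	u32 d0;			/* bit 31: DR polarity, written last by HW */
	u32 payload[3];
};

static bool demo_read_entry(const struct demo_status_entry *slot,
			    u8 expect_pol, struct demo_status_entry *out)
{
	u8 dr_bit = slot->d0 >> 31;

	if (dr_bit != expect_pol)
		return false;	/* device has not filled this slot yet */

	/* Order the DR load before the body loads below; without the
	 * barrier they could be satisfied early and expose a partially
	 * written entry. */
	rmb();

	memcpy(out, slot, sizeof(*out));
	return true;
}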
*/ @@ -1471,7 +1483,7 @@ static int __wil_tx_ring_tso_edma(struct wil6210_priv *wil, /* Rest of the descriptors are from the SKB fragments */ for (f = 0; f < nr_frags; f++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; - int len = frag->size; + int len = skb_frag_size(frag); wil_dbg_txrx(wil, "TSO: frag[%d]: len %u, descs_used %d\n", f, len, descs_used); diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.h b/drivers/net/wireless/ath/wil6210/txrx_edma.h index e9e6ea9b16b9..136c51c338cf 100644 --- a/drivers/net/wireless/ath/wil6210/txrx_edma.h +++ b/drivers/net/wireless/ath/wil6210/txrx_edma.h @@ -366,6 +366,12 @@ static inline u8 wil_rx_status_get_mcs(void *msg) 16, 21); } +static inline u8 wil_rx_status_get_cb_mode(void *msg) +{ + return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d1, + 22, 23); +} + static inline u16 wil_rx_status_get_flow_id(void *msg) { return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0, @@ -415,12 +421,6 @@ static inline u8 wil_rx_status_get_tid(void *msg) return val & WIL_RX_EDMA_DLPF_LU_MISS_CID_TID_MASK; } -static inline int wil_rx_status_get_desc_rdy_bit(void *msg) -{ - return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0, - 31, 31); -} - static inline int wil_rx_status_get_eop(void *msg) /* EoP = End of Packet */ { return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0, diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 6f456b311a39..0783c7963621 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -590,6 +590,7 @@ struct wil_net_stats { unsigned long rx_amsdu_error; /* eDMA specific */ unsigned long rx_csum_err; u16 last_mcs_rx; + u8 last_cb_mode_rx; u64 rx_per_mcs[WIL_MCS_MAX + 1]; u32 ft_roams; /* relevant in STA mode */ }; @@ -730,6 +731,12 @@ enum wil_sta_status { wil_sta_connected = 2, }; +enum wil_rekey_state { + WIL_REKEY_IDLE = 0, + WIL_REKEY_M3_RECEIVED = 1, + WIL_REKEY_WAIT_M4_SENT = 2, +}; + /** * struct wil_sta_info - data for peer * @@ -850,6 +857,7 @@ struct wil6210_vif { DECLARE_BITMAP(status, wil_vif_status_last); u32 privacy; /* secure connection? 
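The three-state enum above is the whole PTK-rekey race guard wired up across cfg80211.c, txrx.c and main.c in this series: receiving EAPOL message 3/4 arms it, an add_key() while armed stores the new PTK instead of applying it, and the Tx completion of message 4/4 applies the stored key, so message 4 itself still goes out under the old key. A compressed, driver-agnostic sketch of the same machine, names hypothetical and locking omitted:

enum rekey_state { REKEY_IDLE, REKEY_M3_RECEIVED, REKEY_WAIT_M4_SENT };

struct rekey_guard {
	enum rekey_state st;
};

/* Rx path: saw EAPOL message 3/4 of the 4-way handshake. */
static void on_rx_m3(struct rekey_guard *g)
{
	if (g->st == REKEY_IDLE)
		g->st = REKEY_M3_RECEIVED;
}

/* add_key(): returns 1 if the PTK must be stored, not applied. */
static int on_add_ptk(struct rekey_guard *g)
{
	if (g->st == REKEY_M3_RECEIVED || g->st == REKEY_WAIT_M4_SENT) {
		g->st = REKEY_WAIT_M4_SENT;
		return 1;	/* message 4 must still use the old PTK */
	}
	return 0;
}

/* Tx complete of EAPOL message 4/4: returns 1 if the stored PTK
 * should be applied now (the driver queues a worker for this). */
static int on_m4_sent(struct rekey_guard *g)
{
	int apply = (g->st == REKEY_WAIT_M4_SENT);

	g->st = REKEY_IDLE;	/* M4 retransmissions land in IDLE */
	return apply;
}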
*/ u16 channel; /* relevant in AP mode */ + u8 wmi_edmg_channel; /* relevant in AP mode */ u8 hidden_ssid; /* relevant in AP mode */ u32 ap_isolate; /* no intra-BSS communication */ bool pbss; @@ -877,6 +885,10 @@ struct wil6210_vif { int net_queue_stopped; /* netif_tx_stop_all_queues invoked */ bool fw_stats_ready; /* per-cid statistics are ready inside sta_info */ u64 fw_stats_tsf; /* measurement timestamp */ + + /* PTK rekey race prevention, this is relevant to station mode only */ + enum wil_rekey_state ptk_rekey_state; + struct work_struct enable_tx_key_worker; }; /** @@ -977,6 +989,7 @@ struct wil6210_priv { */ spinlock_t wmi_ev_lock; spinlock_t net_queue_lock; /* guarding stop/wake netif queue */ + spinlock_t eap_lock; /* guarding access to eap rekey fields */ struct napi_struct napi_rx; struct napi_struct napi_tx; struct net_device napi_ndev; /* dummy net_device serving all VIFs */ @@ -1144,7 +1157,7 @@ static inline void wil_c(struct wil6210_priv *wil, u32 reg, u32 val) /** * wil_cid_valid - check cid is valid */ -static inline bool wil_cid_valid(struct wil6210_priv *wil, u8 cid) +static inline bool wil_cid_valid(struct wil6210_priv *wil, int cid) { return (cid >= 0 && cid < wil->max_assoc_sta); } @@ -1224,6 +1237,7 @@ int __wil_down(struct wil6210_priv *wil); void wil_refresh_fw_capabilities(struct wil6210_priv *wil); void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r); int wil_find_cid(struct wil6210_priv *wil, u8 mid, const u8 *mac); +int wil_find_cid_by_idx(struct wil6210_priv *wil, u8 mid, int idx); void wil_set_ethtoolops(struct net_device *ndev); struct fw_map *wil_find_fw_mapping(const char *section); @@ -1335,7 +1349,7 @@ void wil_p2p_wdev_free(struct wil6210_priv *wil); int wmi_set_mac_address(struct wil6210_priv *wil, void *addr); int wmi_pcp_start(struct wil6210_vif *vif, int bi, u8 wmi_nettype, u8 chan, - u8 hidden_ssid, u8 is_go); + u8 edmg_chan, u8 hidden_ssid, u8 is_go); int wmi_pcp_stop(struct wil6210_vif *vif); int wmi_led_cfg(struct wil6210_priv *wil, bool enable); int wmi_abort_scan(struct wil6210_vif *vif); @@ -1349,6 +1363,7 @@ void wil6210_disconnect_complete(struct wil6210_vif *vif, const u8 *bssid, void wil_probe_client_flush(struct wil6210_vif *vif); void wil_probe_client_worker(struct work_struct *work); void wil_disconnect_worker(struct work_struct *work); +void wil_enable_tx_key_worker(struct work_struct *work); void wil_init_txrx_ops(struct wil6210_priv *wil); @@ -1365,6 +1380,8 @@ void wil_update_net_queues_bh(struct wil6210_priv *wil, struct wil6210_vif *vif, struct wil_ring *ring, bool check_stop); netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev); int wil_tx_complete(struct wil6210_vif *vif, int ringid); +void wil_tx_complete_handle_eapol(struct wil6210_vif *vif, + struct sk_buff *skb); void wil6210_unmask_irq_tx(struct wil6210_priv *wil); void wil6210_unmask_irq_tx_edma(struct wil6210_priv *wil); @@ -1412,6 +1429,10 @@ int wmi_mgmt_tx_ext(struct wil6210_vif *vif, const u8 *buf, size_t len, u8 channel, u16 duration_ms); int wmi_rbufcap_cfg(struct wil6210_priv *wil, bool enable, u16 threshold); +int wil_wmi2spec_ch(u8 wmi_ch, u8 *spec_ch); +int wil_spec2wmi_ch(u8 spec_ch, u8 *wmi_ch); +void wil_update_supported_bands(struct wil6210_priv *wil); + int reverse_memcmp(const void *cs, const void *ct, size_t count); /* WMI for enhanced DMA */ diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 475b1a233cc9..153b84447e40 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ 
b/drivers/net/wireless/ath/wil6210/wmi.c @@ -878,6 +878,12 @@ static void wmi_evt_rx_mgmt(struct wil6210_vif *vif, int id, void *d, int len) if (ieee80211_is_beacon(fc) || ieee80211_is_probe_resp(fc)) { struct cfg80211_bss *bss; + struct cfg80211_inform_bss bss_data = { + .chan = channel, + .scan_width = NL80211_BSS_CHAN_WIDTH_20, + .signal = signal, + .boottime_ns = ktime_to_ns(ktime_get_boottime()), + }; u64 tsf = le64_to_cpu(rx_mgmt_frame->u.beacon.timestamp); u16 cap = le16_to_cpu(rx_mgmt_frame->u.beacon.capab_info); u16 bi = le16_to_cpu(rx_mgmt_frame->u.beacon.beacon_int); @@ -892,8 +898,9 @@ static void wmi_evt_rx_mgmt(struct wil6210_vif *vif, int id, void *d, int len) wil_dbg_wmi(wil, "Capability info : 0x%04x\n", cap); - bss = cfg80211_inform_bss_frame(wiphy, channel, rx_mgmt_frame, - d_len, signal, GFP_KERNEL); + bss = cfg80211_inform_bss_frame_data(wiphy, &bss_data, + rx_mgmt_frame, + d_len, GFP_KERNEL); if (bss) { wil_dbg_wmi(wil, "Added BSS %pM\n", rx_mgmt_frame->bssid); @@ -1332,6 +1339,12 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) cid = evt->cid; tid = evt->tid; } + + if (!wil_cid_valid(wil, cid)) { + wil_err(wil, "DELBA: Invalid CID %d\n", cid); + return; + } + wil_dbg_wmi(wil, "DELBA MID %d CID %d TID %d from %s reason %d\n", vif->mid, cid, tid, evt->from_initiator ? "originator" : "recipient", @@ -1385,6 +1398,10 @@ wmi_evt_sched_scan_result(struct wil6210_vif *vif, int id, void *d, int len) __le16 fc; u32 d_len; struct cfg80211_bss *bss; + struct cfg80211_inform_bss bss_data = { + .scan_width = NL80211_BSS_CHAN_WIDTH_20, + .boottime_ns = ktime_to_ns(ktime_get_boottime()), + }; if (flen < 0) { wil_err(wil, "sched scan result event too short, len %d\n", @@ -1427,8 +1444,10 @@ wmi_evt_sched_scan_result(struct wil6210_vif *vif, int id, void *d, int len) return; } - bss = cfg80211_inform_bss_frame(wiphy, channel, rx_mgmt_frame, - d_len, signal, GFP_KERNEL); + bss_data.signal = signal; + bss_data.chan = channel; + bss = cfg80211_inform_bss_frame_data(wiphy, &bss_data, rx_mgmt_frame, + d_len, GFP_KERNEL); if (bss) { wil_dbg_wmi(wil, "Added BSS %pM\n", rx_mgmt_frame->bssid); cfg80211_put_bss(wiphy, bss); @@ -2163,8 +2182,8 @@ int wmi_rbufcap_cfg(struct wil6210_priv *wil, bool enable, u16 threshold) return rc; } -int wmi_pcp_start(struct wil6210_vif *vif, - int bi, u8 wmi_nettype, u8 chan, u8 hidden_ssid, u8 is_go) +int wmi_pcp_start(struct wil6210_vif *vif, int bi, u8 wmi_nettype, + u8 chan, u8 wmi_edmg_chan, u8 hidden_ssid, u8 is_go) { struct wil6210_priv *wil = vif_to_wil(vif); int rc; @@ -2174,6 +2193,7 @@ int wmi_pcp_start(struct wil6210_vif *vif, .network_type = wmi_nettype, .disable_sec_offload = 1, .channel = chan - 1, + .edmg_channel = wmi_edmg_chan, .pcp_max_assoc_sta = wil->max_assoc_sta, .hidden_ssid = hidden_ssid, .is_go = is_go, @@ -2437,10 +2457,17 @@ int wmi_add_cipher_key(struct wil6210_vif *vif, u8 key_index, .key_len = key_len, }; - if (!key || (key_len > sizeof(cmd.key))) + if (key_len > sizeof(cmd.key)) return -EINVAL; - memcpy(cmd.key, key, key_len); + /* key len = 0 is allowed only for usage of WMI_KEY_USE_APPLY */ + if ((key_len == 0 || !key) && + key_usage != WMI_KEY_USE_APPLY_PTK) + return -EINVAL; + + if (key) + memcpy(cmd.key, key, key_len); + if (mac_addr) memcpy(cmd.mac, mac_addr, WMI_MAC_LEN); diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h index 3e37229b36b5..a2f7034489ae 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.h +++ b/drivers/net/wireless/ath/wil6210/wmi.h @@ -97,6 +97,7 @@ 
enum wmi_fw_capability { WMI_FW_CAPABILITY_SET_SILENT_RSSI_TABLE = 13, WMI_FW_CAPABILITY_LO_POWER_CALIB_FROM_OTP = 14, WMI_FW_CAPABILITY_PNO = 15, + WMI_FW_CAPABILITY_CHANNEL_BONDING = 17, WMI_FW_CAPABILITY_REF_CLOCK_CONTROL = 18, WMI_FW_CAPABILITY_AP_SME_OFFLOAD_NONE = 19, WMI_FW_CAPABILITY_MULTI_VIFS = 20, @@ -108,6 +109,7 @@ enum wmi_fw_capability { WMI_FW_CAPABILITY_CHANNEL_4 = 26, WMI_FW_CAPABILITY_IPA = 27, WMI_FW_CAPABILITY_TEMPERATURE_ALL_RF = 30, + WMI_FW_CAPABILITY_SPLIT_REKEY = 31, WMI_FW_CAPABILITY_MAX, }; @@ -361,6 +363,19 @@ enum wmi_connect_ctrl_flag_bits { #define WMI_MAX_SSID_LEN (32) +enum wmi_channel { + WMI_CHANNEL_1 = 0x00, + WMI_CHANNEL_2 = 0x01, + WMI_CHANNEL_3 = 0x02, + WMI_CHANNEL_4 = 0x03, + WMI_CHANNEL_5 = 0x04, + WMI_CHANNEL_6 = 0x05, + WMI_CHANNEL_9 = 0x06, + WMI_CHANNEL_10 = 0x07, + WMI_CHANNEL_11 = 0x08, + WMI_CHANNEL_12 = 0x09, +}; + /* WMI_CONNECT_CMDID */ struct wmi_connect_cmd { u8 network_type; @@ -372,8 +387,12 @@ struct wmi_connect_cmd { u8 group_crypto_len; u8 ssid_len; u8 ssid[WMI_MAX_SSID_LEN]; + /* enum wmi_channel WMI_CHANNEL_1..WMI_CHANNEL_6; for EDMG this is + * the primary channel number + */ u8 channel; - u8 reserved0; + /* enum wmi_channel WMI_CHANNEL_9..WMI_CHANNEL_12 */ + u8 edmg_channel; u8 bssid[WMI_MAC_LEN]; __le32 ctrl_flags; u8 dst_mac[WMI_MAC_LEN]; @@ -403,6 +422,8 @@ enum wmi_key_usage { WMI_KEY_USE_PAIRWISE = 0x00, WMI_KEY_USE_RX_GROUP = 0x01, WMI_KEY_USE_TX_GROUP = 0x02, + WMI_KEY_USE_STORE_PTK = 0x03, + WMI_KEY_USE_APPLY_PTK = 0x04, }; struct wmi_add_cipher_key_cmd { @@ -2312,8 +2333,12 @@ struct wmi_notify_req_done_event { /* WMI_CONNECT_EVENTID */ struct wmi_connect_event { + /* enum wmi_channel WMI_CHANNEL_1..WMI_CHANNEL_6; for EDMG this is + * the primary channel number + */ u8 channel; - u8 reserved0; + /* enum wmi_channel WMI_CHANNEL_9..WMI_CHANNEL_12 */ + u8 edmg_channel; u8 bssid[WMI_MAC_LEN]; __le16 listen_interval; __le16 beacon_interval; diff --git a/drivers/net/wireless/broadcom/b43legacy/phy.c b/drivers/net/wireless/broadcom/b43legacy/phy.c index add7a0ff75b8..a659259bc51a 100644 --- a/drivers/net/wireless/broadcom/b43legacy/phy.c +++ b/drivers/net/wireless/broadcom/b43legacy/phy.c @@ -69,17 +69,6 @@ static const s8 b43legacy_tssi2dbm_g_table[] = { static void b43legacy_phy_initg(struct b43legacy_wldev *dev); - -static inline -void b43legacy_voluntary_preempt(void) -{ - B43legacy_BUG_ON(!(!in_atomic() && !in_irq() && - !in_interrupt() && !irqs_disabled())); -#ifndef CONFIG_PREEMPT - cond_resched(); -#endif /* CONFIG_PREEMPT */ -} - /* Lock the PHY registers against concurrent access from the microcode. * This lock is nonrecursive. 
*/ void b43legacy_phy_lock(struct b43legacy_wldev *dev) @@ -1124,7 +1113,7 @@ static u16 b43legacy_phy_lo_b_r15_loop(struct b43legacy_wldev *dev) ret += b43legacy_phy_read(dev, 0x002C); } local_irq_restore(flags); - b43legacy_voluntary_preempt(); + cond_resched(); return ret; } @@ -1253,7 +1242,7 @@ u16 b43legacy_phy_lo_g_deviation_subval(struct b43legacy_wldev *dev, } ret = b43legacy_phy_read(dev, 0x002D); local_irq_restore(flags); - b43legacy_voluntary_preempt(); + cond_resched(); return ret; } @@ -1591,7 +1580,7 @@ void b43legacy_phy_lo_g_measure(struct b43legacy_wldev *dev) b43legacy_radio_write16(dev, 0x43, i); b43legacy_radio_write16(dev, 0x52, phy->txctl2); udelay(10); - b43legacy_voluntary_preempt(); + cond_resched(); b43legacy_phy_set_baseband_attenuation(dev, j * 2); @@ -1642,7 +1631,7 @@ void b43legacy_phy_lo_g_measure(struct b43legacy_wldev *dev) phy->txctl2 | (3/*txctl1*/ << 4)); udelay(10); - b43legacy_voluntary_preempt(); + cond_resched(); b43legacy_phy_set_baseband_attenuation(dev, j * 2); @@ -1665,7 +1654,7 @@ void b43legacy_phy_lo_g_measure(struct b43legacy_wldev *dev) b43legacy_phy_write(dev, 0x0812, (r27 << 8) | 0xA2); udelay(2); b43legacy_phy_write(dev, 0x0812, (r27 << 8) | 0xA3); - b43legacy_voluntary_preempt(); + cond_resched(); } else b43legacy_phy_write(dev, 0x0015, r27 | 0xEFA0); b43legacy_phy_lo_adjust(dev, is_initializing); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c index 322e913ca7aa..2c95a08a5871 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c @@ -479,18 +479,11 @@ fail: return -ENOMEM; } -void brcmf_proto_bcdc_detach_pre_delif(struct brcmf_pub *drvr) -{ - struct brcmf_bcdc *bcdc = drvr->proto->pd; - - brcmf_fws_detach_pre_delif(bcdc->fws); -} - -void brcmf_proto_bcdc_detach_post_delif(struct brcmf_pub *drvr) +void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr) { struct brcmf_bcdc *bcdc = drvr->proto->pd; drvr->proto->pd = NULL; - brcmf_fws_detach_post_delif(bcdc->fws); + brcmf_fws_detach(bcdc->fws); kfree(bcdc); } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h index 102e6938905c..b051d2860cd1 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h @@ -7,16 +7,14 @@ #ifdef CONFIG_BRCMFMAC_PROTO_BCDC int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr); -void brcmf_proto_bcdc_detach_pre_delif(struct brcmf_pub *drvr); -void brcmf_proto_bcdc_detach_post_delif(struct brcmf_pub *drvr); +void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr); void brcmf_proto_bcdc_txflowblock(struct device *dev, bool state); void brcmf_proto_bcdc_txcomplete(struct device *dev, struct sk_buff *txp, bool success); struct brcmf_fws_info *drvr_to_fws(struct brcmf_pub *drvr); #else static inline int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr) { return 0; } -static void brcmf_proto_bcdc_detach_pre_delif(struct brcmf_pub *drvr) {}; -static inline void brcmf_proto_bcdc_detach_post_delif(struct brcmf_pub *drvr) {} +static inline void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr) {} #endif #endif /* BRCMFMAC_BCDC_H */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h index 0988a166a785..623c0168da79 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h +++ 
b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h @@ -253,10 +253,12 @@ void brcmf_rx_frame(struct device *dev, struct sk_buff *rxp, bool handle_event); /* Receive async event packet from firmware. Callee disposes of rxp. */ void brcmf_rx_event(struct device *dev, struct sk_buff *rxp); +int brcmf_alloc(struct device *dev, struct brcmf_mp_device *settings); /* Indication from bus module regarding presence/insertion of dongle. */ -int brcmf_attach(struct device *dev, struct brcmf_mp_device *settings); +int brcmf_attach(struct device *dev); /* Indication from bus module regarding removal/absence of dongle */ void brcmf_detach(struct device *dev); +void brcmf_free(struct device *dev); /* Indication from bus module that dongle should be reset */ void brcmf_dev_reset(struct device *dev); /* Request from bus module to initiate a coredump */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index b6d0df354b36..e3ebb7abbdae 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -189,9 +189,9 @@ static const struct ieee80211_regdomain brcmf_regdom = { */ REG_RULE(2484-10, 2484+10, 20, 6, 20, 0), /* IEEE 802.11a, channel 36..64 */ - REG_RULE(5150-10, 5350+10, 80, 6, 20, 0), + REG_RULE(5150-10, 5350+10, 160, 6, 20, 0), /* IEEE 802.11a, channel 100..165 */ - REG_RULE(5470-10, 5850+10, 80, 6, 20, 0), } + REG_RULE(5470-10, 5850+10, 160, 6, 20, 0), } }; /* Note: brcmf_cipher_suites is an array of int defining which cipher suites @@ -276,8 +276,26 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf, else ch_inf.sb = BRCMU_CHAN_SB_UU; break; - case NL80211_CHAN_WIDTH_80P80: case NL80211_CHAN_WIDTH_160: + ch_inf.bw = BRCMU_CHAN_BW_160; + if (primary_offset == -70) + ch_inf.sb = BRCMU_CHAN_SB_LLL; + else if (primary_offset == -50) + ch_inf.sb = BRCMU_CHAN_SB_LLU; + else if (primary_offset == -30) + ch_inf.sb = BRCMU_CHAN_SB_LUL; + else if (primary_offset == -10) + ch_inf.sb = BRCMU_CHAN_SB_LUU; + else if (primary_offset == 10) + ch_inf.sb = BRCMU_CHAN_SB_ULL; + else if (primary_offset == 30) + ch_inf.sb = BRCMU_CHAN_SB_ULU; + else if (primary_offset == 50) + ch_inf.sb = BRCMU_CHAN_SB_UUL; + else + ch_inf.sb = BRCMU_CHAN_SB_UUU; + break; + case NL80211_CHAN_WIDTH_80P80: case NL80211_CHAN_WIDTH_5: case NL80211_CHAN_WIDTH_10: default: @@ -296,6 +314,7 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf, } d11inf->encchspec(&ch_inf); + brcmf_dbg(TRACE, "chanspec: 0x%x\n", ch_inf.chspec); return ch_inf.chspec; } @@ -1267,17 +1286,21 @@ static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason) { struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(vif->wdev.wiphy); struct brcmf_pub *drvr = cfg->pub; + bool bus_up = drvr->bus_if->state == BRCMF_BUS_UP; s32 err = 0; brcmf_dbg(TRACE, "Enter\n"); if (test_and_clear_bit(BRCMF_VIF_STATUS_CONNECTED, &vif->sme_state)) { - brcmf_dbg(INFO, "Call WLC_DISASSOC to stop excess roaming\n"); - err = brcmf_fil_cmd_data_set(vif->ifp, - BRCMF_C_DISASSOC, NULL, 0); - if (err) { - bphy_err(drvr, "WLC_DISASSOC failed (%d)\n", err); + if (bus_up) { + brcmf_dbg(INFO, "Call WLC_DISASSOC to stop excess roaming\n"); + err = brcmf_fil_cmd_data_set(vif->ifp, + BRCMF_C_DISASSOC, NULL, 0); + if (err) + bphy_err(drvr, "WLC_DISASSOC failed (%d)\n", + err); } + if ((vif->wdev.iftype == NL80211_IFTYPE_STATION) || (vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT)) 
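In the new 160 MHz arm of chandef_to_chanspec() above, primary_offset is the primary channel's offset in MHz from the centre of the 160 MHz segment, so the eight branches step through -70, -50, ..., +70 in 20 MHz increments. Assuming only those eight values occur (the patch's final else additionally folds anything unexpected into BRCMU_CHAN_SB_UUU), the same mapping reads as a table lookup:

    static const u8 sb_map[8] = {
            BRCMU_CHAN_SB_LLL, BRCMU_CHAN_SB_LLU,
            BRCMU_CHAN_SB_LUL, BRCMU_CHAN_SB_LUU,
            BRCMU_CHAN_SB_ULL, BRCMU_CHAN_SB_ULU,
            BRCMU_CHAN_SB_UUL, BRCMU_CHAN_SB_UUU,
    };

    /* -70 -> 0, -50 -> 1, ..., +70 -> 7 */
    ch_inf.sb = sb_map[(primary_offset + 70) / 20];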
cfg80211_disconnected(vif->wdev.netdev, reason, NULL, 0, @@ -1287,7 +1310,8 @@ static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason) clear_bit(BRCMF_SCAN_STATUS_SUPPRESS, &cfg->scan_status); brcmf_btcoex_set_mode(vif, BRCMF_BTCOEX_ENABLED, 0); if (vif->profile.use_fwsup != BRCMF_PROFILE_FWSUP_NONE) { - brcmf_set_pmk(vif->ifp, NULL, 0); + if (bus_up) + brcmf_set_pmk(vif->ifp, NULL, 0); vif->profile.use_fwsup = BRCMF_PROFILE_FWSUP_NONE; } brcmf_dbg(TRACE, "Exit\n"); @@ -2958,8 +2982,6 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg, struct brcmf_pub *drvr = cfg->pub; struct brcmf_bss_info_le *bi; const struct brcmf_tlv *tim; - u16 beacon_interval; - u8 dtim_period; size_t ie_len; u8 *ie; s32 err = 0; @@ -2983,12 +3005,9 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg, ie = ((u8 *)bi) + le16_to_cpu(bi->ie_offset); ie_len = le32_to_cpu(bi->ie_length); - beacon_interval = le16_to_cpu(bi->beacon_period); tim = brcmf_parse_tlvs(ie, ie_len, WLAN_EID_TIM); - if (tim) - dtim_period = tim->data[1]; - else { + if (!tim) { /* * active scan was done so we could not get dtim * information out of probe response. @@ -3000,7 +3019,6 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg, bphy_err(drvr, "wl dtim_assoc failed (%d)\n", err); goto update_bss_info_out; } - dtim_period = (u8)var; } update_bss_info_out: @@ -4204,10 +4222,8 @@ brcmf_parse_vndr_ies(const u8 *vndr_ie_buf, u32 vndr_ie_len, vndr_ies->count++; - brcmf_dbg(TRACE, "** OUI %02x %02x %02x, type 0x%02x\n", - parsed_info->vndrie.oui[0], - parsed_info->vndrie.oui[1], - parsed_info->vndrie.oui[2], + brcmf_dbg(TRACE, "** OUI %3ph, type 0x%02x\n", + parsed_info->vndrie.oui, parsed_info->vndrie.oui_type); if (vndr_ies->count >= VNDR_IE_PARSE_LIMIT) @@ -4226,9 +4242,7 @@ next: static u32 brcmf_vndr_ie(u8 *iebuf, s32 pktflag, u8 *ie_ptr, u32 ie_len, s8 *add_del_cmd) { - - strncpy(iebuf, add_del_cmd, VNDR_IE_CMD_LEN - 1); - iebuf[VNDR_IE_CMD_LEN - 1] = '\0'; + strscpy(iebuf, add_del_cmd, VNDR_IE_CMD_LEN); put_unaligned_le32(1, &iebuf[VNDR_IE_COUNT_OFFSET]); @@ -4333,12 +4347,10 @@ s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag, for (i = 0; i < old_vndr_ies.count; i++) { vndrie_info = &old_vndr_ies.ie_info[i]; - brcmf_dbg(TRACE, "DEL ID : %d, Len: %d , OUI:%02x:%02x:%02x\n", + brcmf_dbg(TRACE, "DEL ID : %d, Len: %d , OUI:%3ph\n", vndrie_info->vndrie.id, vndrie_info->vndrie.len, - vndrie_info->vndrie.oui[0], - vndrie_info->vndrie.oui[1], - vndrie_info->vndrie.oui[2]); + vndrie_info->vndrie.oui); del_add_ie_buf_len = brcmf_vndr_ie(curr_ie_buf, pktflag, vndrie_info->ie_ptr, @@ -4370,12 +4382,10 @@ s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag, remained_buf_len -= (vndrie_info->ie_len + VNDR_IE_VSIE_OFFSET); - brcmf_dbg(TRACE, "ADDED ID : %d, Len: %d, OUI:%02x:%02x:%02x\n", + brcmf_dbg(TRACE, "ADDED ID : %d, Len: %d, OUI:%3ph\n", vndrie_info->vndrie.id, vndrie_info->vndrie.len, - vndrie_info->vndrie.oui[0], - vndrie_info->vndrie.oui[1], - vndrie_info->vndrie.oui[2]); + vndrie_info->vndrie.oui); del_add_ie_buf_len = brcmf_vndr_ie(curr_ie_buf, pktflag, vndrie_info->ie_ptr, @@ -4985,18 +4995,16 @@ static int brcmf_cfg80211_get_channel(struct wiphy *wiphy, struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct net_device *ndev = wdev->netdev; struct brcmf_pub *drvr = cfg->pub; - struct brcmf_if *ifp; struct brcmu_chan ch; enum nl80211_band band = 0; enum nl80211_chan_width width = 0; u32 chanspec; int freq, err; - if (!ndev) + if (!ndev 
|| drvr->bus_if->state != BRCMF_BUS_UP) return -ENODEV; - ifp = netdev_priv(ndev); - err = brcmf_fil_iovar_int_get(ifp, "chanspec", &chanspec); + err = brcmf_fil_iovar_int_get(netdev_priv(ndev), "chanspec", &chanspec); if (err) { bphy_err(drvr, "chanspec failed (%d)\n", err); return err; @@ -6714,6 +6722,11 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp) } } + if (wiphy->bands[NL80211_BAND_5GHZ] && + brcmf_feat_is_enabled(ifp, BRCMF_FEAT_DOT11H)) + wiphy_ext_feature_set(wiphy, + NL80211_EXT_FEATURE_DFS_OFFLOAD); + wiphy_read_of_freq_limits(wiphy); return 0; @@ -7189,7 +7202,6 @@ void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg) brcmf_pno_detach(cfg); brcmf_btcoex_detach(cfg); wiphy_unregister(cfg->wiphy); - kfree(cfg->ops); wl_deinit_priv(cfg); brcmf_free_wiphy(cfg->wiphy); kfree(cfg); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h index b7b50b07f776..14d5bbad1db1 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h @@ -292,7 +292,6 @@ struct brcmf_cfg80211_wowl { */ struct brcmf_cfg80211_info { struct wiphy *wiphy; - struct cfg80211_ops *ops; struct brcmf_cfg80211_conf *conf; struct brcmf_p2p_info p2p; struct brcmf_btcoex_info *btcoex; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c index 1ec48c4f4d4a..dd586a96b57a 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c @@ -696,8 +696,10 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci) return 0; } -static int brcmf_chip_get_raminfo(struct brcmf_chip_priv *ci) +int brcmf_chip_get_raminfo(struct brcmf_chip *pub) { + struct brcmf_chip_priv *ci = container_of(pub, struct brcmf_chip_priv, + pub); struct brcmf_core_priv *mem_core; struct brcmf_core *mem; @@ -979,7 +981,7 @@ static int brcmf_chip_recognition(struct brcmf_chip_priv *ci) brcmf_chip_set_passive(&ci->pub); } - return brcmf_chip_get_raminfo(ci); + return brcmf_chip_get_raminfo(&ci->pub); } static void brcmf_chip_disable_arm(struct brcmf_chip_priv *chip, u16 id) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h index 206d7695d57a..7b00f6a59e89 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h @@ -69,6 +69,7 @@ struct brcmf_buscore_ops { void (*activate)(void *ctx, struct brcmf_chip *chip, u32 rstvec); }; +int brcmf_chip_get_raminfo(struct brcmf_chip *pub); struct brcmf_chip *brcmf_chip_attach(void *ctx, const struct brcmf_buscore_ops *ops); void brcmf_chip_detach(struct brcmf_chip *chip); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c index aa89d620ee5d..dec25e415619 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c @@ -258,7 +258,6 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp) /* query for 'ver' to get version info from firmware */ memset(buf, 0, sizeof(buf)); - strlcpy(buf, "ver", sizeof(buf)); err = brcmf_fil_iovar_data_get(ifp, "ver", buf, sizeof(buf)); if (err < 0) { bphy_err(drvr, "Retrieving version information failed, %d\n", diff --git 
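The brcmf_dbg() conversions in cfg80211.c above lean on the kernel's %ph printf extension: %3ph prints the first three bytes of a buffer as space-separated hex, replacing three separate %02x arguments and their byte-by-byte indexing. For instance:

    u8 oui[3] = { 0x00, 0x50, 0xf2 };       /* illustrative OUI */

    pr_debug("OUI %3ph\n", oui);            /* emits: OUI 00 50 f2 */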
a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index bf18491a33a5..406b367c284c 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -579,7 +579,8 @@ static int brcmf_netdev_stop(struct net_device *ndev) brcmf_cfg80211_down(ndev); - brcmf_fil_iovar_data_set(ifp, "arp_hostip_clear", NULL, 0); + if (ifp->drvr->bus_if->state == BRCMF_BUS_UP) + brcmf_fil_iovar_data_set(ifp, "arp_hostip_clear", NULL, 0); brcmf_net_setcarrier(ifp, false); @@ -1085,6 +1086,29 @@ static void brcmf_core_bus_reset(struct work_struct *work) brcmf_bus_reset(drvr->bus_if); } +static ssize_t bus_reset_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct brcmf_pub *drvr = file->private_data; + u8 value; + + if (kstrtou8_from_user(user_buf, count, 0, &value)) + return -EINVAL; + + if (value != 1) + return -EINVAL; + + schedule_work(&drvr->bus_reset); + + return count; +} + +static const struct file_operations bus_reset_fops = { + .open = simple_open, + .llseek = no_llseek, + .write = bus_reset_write, +}; + static int brcmf_bus_started(struct brcmf_pub *drvr, struct cfg80211_ops *ops) { int ret = -1; @@ -1160,6 +1184,8 @@ static int brcmf_bus_started(struct brcmf_pub *drvr, struct cfg80211_ops *ops) /* populate debugfs */ brcmf_debugfs_add_entry(drvr, "revinfo", brcmf_revinfo_read); + debugfs_create_file("reset", 0600, brcmf_debugfs_get_devdir(drvr), drvr, + &bus_reset_fops); brcmf_feat_debugfs_create(drvr); brcmf_proto_debugfs_create(drvr); brcmf_bus_debugfs_create(bus_if); @@ -1183,13 +1209,11 @@ fail: return ret; } -int brcmf_attach(struct device *dev, struct brcmf_mp_device *settings) +int brcmf_alloc(struct device *dev, struct brcmf_mp_device *settings) { struct wiphy *wiphy; struct cfg80211_ops *ops; struct brcmf_pub *drvr = NULL; - int ret = 0; - int i; brcmf_dbg(TRACE, "Enter\n"); @@ -1198,12 +1222,30 @@ int brcmf_attach(struct device *dev, struct brcmf_mp_device *settings) return -ENOMEM; wiphy = wiphy_new(ops, sizeof(*drvr)); - if (!wiphy) + if (!wiphy) { + kfree(ops); return -ENOMEM; + } set_wiphy_dev(wiphy, dev); drvr = wiphy_priv(wiphy); drvr->wiphy = wiphy; + drvr->ops = ops; + drvr->bus_if = dev_get_drvdata(dev); + drvr->bus_if->drvr = drvr; + drvr->settings = settings; + + return 0; +} + +int brcmf_attach(struct device *dev) +{ + struct brcmf_bus *bus_if = dev_get_drvdata(dev); + struct brcmf_pub *drvr = bus_if->drvr; + int ret = 0; + int i; + + brcmf_dbg(TRACE, "Enter\n"); for (i = 0; i < ARRAY_SIZE(drvr->if2bss); i++) drvr->if2bss[i] = BRCMF_BSSIDX_INVALID; @@ -1212,9 +1254,6 @@ int brcmf_attach(struct device *dev, struct brcmf_mp_device *settings) /* Link to bus module */ drvr->hdrlen = 0; - drvr->bus_if = dev_get_drvdata(dev); - drvr->bus_if->drvr = drvr; - drvr->settings = settings; /* Attach and link in the protocol */ ret = brcmf_proto_attach(drvr); @@ -1230,18 +1269,16 @@ int brcmf_attach(struct device *dev, struct brcmf_mp_device *settings) /* attach firmware event handler */ brcmf_fweh_attach(drvr); - ret = brcmf_bus_started(drvr, ops); + ret = brcmf_bus_started(drvr, drvr->ops); if (ret != 0) { bphy_err(drvr, "dongle is not responding: err=%d\n", ret); goto fail; } - drvr->config->ops = ops; return 0; fail: brcmf_detach(dev); - kfree(ops); return ret; } @@ -1307,27 +1344,37 @@ void brcmf_detach(struct device *dev) unregister_inet6addr_notifier(&drvr->inet6addr_notifier); #endif - /* stop firmware event 
handling */ - brcmf_fweh_detach(drvr); - if (drvr->config) - brcmf_p2p_detach(&drvr->config->p2p); - brcmf_bus_change_state(bus_if, BRCMF_BUS_DOWN); + brcmf_bus_stop(drvr->bus_if); - brcmf_proto_detach_pre_delif(drvr); + brcmf_fweh_detach(drvr); + brcmf_proto_detach(drvr); /* make sure primary interface removed last */ - for (i = BRCMF_MAX_IFS-1; i > -1; i--) - brcmf_remove_interface(drvr->iflist[i], false); + for (i = BRCMF_MAX_IFS - 1; i > -1; i--) { + if (drvr->iflist[i]) + brcmf_del_if(drvr, drvr->iflist[i]->bsscfgidx, false); + } - brcmf_cfg80211_detach(drvr->config); - drvr->config = NULL; + if (drvr->config) { + brcmf_p2p_detach(&drvr->config->p2p); + brcmf_cfg80211_detach(drvr->config); + drvr->config = NULL; + } +} - brcmf_bus_stop(drvr->bus_if); +void brcmf_free(struct device *dev) +{ + struct brcmf_bus *bus_if = dev_get_drvdata(dev); + struct brcmf_pub *drvr = bus_if->drvr; - brcmf_proto_detach_post_delif(drvr); + if (!drvr) + return; bus_if->drvr = NULL; + + kfree(drvr->ops); + wiphy_free(drvr->wiphy); } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h index 86517a3d74b1..6699637d3bf8 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h @@ -97,6 +97,7 @@ struct brcmf_pub { struct brcmf_bus *bus_if; struct brcmf_proto *proto; struct wiphy *wiphy; + struct cfg80211_ops *ops; struct brcmf_cfg80211_info *config; /* Internal brcmf items */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h index ea6e8e839cae..9b221b509ade 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h @@ -121,6 +121,10 @@ int brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn, int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data, size_t len); #else +static inline struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr) +{ + return ERR_PTR(-ENOENT); +} static inline int brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn, int (*read_fn)(struct seq_file *seq, void *data)) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c index 73aff4e4039d..2c3526aeca6f 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c @@ -39,6 +39,7 @@ static const struct brcmf_feat_fwcap brcmf_fwcap_map[] = { { BRCMF_FEAT_P2P, "p2p" }, { BRCMF_FEAT_MONITOR, "monitor" }, { BRCMF_FEAT_MONITOR_FMT_RADIOTAP, "rtap" }, + { BRCMF_FEAT_DOT11H, "802.11h" } }; #ifdef DEBUG diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h index f127eb2030a6..736a8179f62f 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h @@ -25,6 +25,7 @@ * MONITOR: firmware can pass monitor packets to host. 
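As a usage note on the core.c change above: the new debugfs file exposes the existing bus_reset worker to userspace, so with the usual brcmfmac debugfs layout (the exact path is an assumption, not spelled out in the patch) writing echo 1 > /sys/kernel/debug/brcmfmac/<device>/reset schedules a bus reset, while any other value is rejected with -EINVAL.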
* MONITOR_FMT_RADIOTAP: firmware provides monitor packets with radiotap header * MONITOR_FMT_HW_RX_HDR: firmware provides monitor packets with hw/ucode header + * DOT11H: firmware supports 802.11h */ #define BRCMF_FEAT_LIST \ BRCMF_FEAT_DEF(MBSS) \ @@ -43,7 +44,8 @@ BRCMF_FEAT_DEF(FWSUP) \ BRCMF_FEAT_DEF(MONITOR) \ BRCMF_FEAT_DEF(MONITOR_FMT_RADIOTAP) \ - BRCMF_FEAT_DEF(MONITOR_FMT_HW_RX_HDR) + BRCMF_FEAT_DEF(MONITOR_FMT_HW_RX_HDR) \ + BRCMF_FEAT_DEF(DOT11H) /* * Quirks: diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c index adedd4fac10b..79c8a858b6d6 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c @@ -303,16 +303,7 @@ void brcmf_fweh_attach(struct brcmf_pub *drvr) void brcmf_fweh_detach(struct brcmf_pub *drvr) { struct brcmf_fweh_info *fweh = &drvr->fweh; - struct brcmf_if *ifp = brcmf_get_ifp(drvr, 0); - s8 eventmask[BRCMF_EVENTING_MASK_LEN]; - if (ifp) { - /* clear all events */ - memset(eventmask, 0, BRCMF_EVENTING_MASK_LEN); - (void)brcmf_fil_iovar_data_set(ifp, "event_msgs", - eventmask, - BRCMF_EVENTING_MASK_LEN); - } /* cancel the worker */ cancel_work_sync(&fweh->event_work); WARN_ON(!list_empty(&fweh->event_q)); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c index b8452cb46297..2bd892df83cc 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c @@ -2432,25 +2432,17 @@ struct brcmf_fws_info *brcmf_fws_attach(struct brcmf_pub *drvr) return fws; fail: - brcmf_fws_detach_pre_delif(fws); - brcmf_fws_detach_post_delif(fws); + brcmf_fws_detach(fws); return ERR_PTR(rc); } -void brcmf_fws_detach_pre_delif(struct brcmf_fws_info *fws) +void brcmf_fws_detach(struct brcmf_fws_info *fws) { if (!fws) return; - if (fws->fws_wq) { - destroy_workqueue(fws->fws_wq); - fws->fws_wq = NULL; - } -} -void brcmf_fws_detach_post_delif(struct brcmf_fws_info *fws) -{ - if (!fws) - return; + if (fws->fws_wq) + destroy_workqueue(fws->fws_wq); /* cleanup */ brcmf_fws_lock(fws); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h index 10184eeaad94..b486d578ec96 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h @@ -7,8 +7,7 @@ #define FWSIGNAL_H_ struct brcmf_fws_info *brcmf_fws_attach(struct brcmf_pub *drvr); -void brcmf_fws_detach_pre_delif(struct brcmf_fws_info *fws); -void brcmf_fws_detach_post_delif(struct brcmf_fws_info *fws); +void brcmf_fws_detach(struct brcmf_fws_info *fws); void brcmf_fws_debugfs_create(struct brcmf_pub *drvr); bool brcmf_fws_queue_skbs(struct brcmf_fws_info *fws); bool brcmf_fws_fc_active(struct brcmf_fws_info *fws); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c index 241747bd5cb2..e3dd8623be4e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c @@ -1398,6 +1398,13 @@ void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid) u8 ifidx; int err; + /* no need to submit if firmware can not be reached */ + if (drvr->bus_if->state != BRCMF_BUS_UP) { + brcmf_dbg(MSGBUF, "bus down, flowring will be removed\n"); + 
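The BRCMF_FEAT_DEF(DOT11H) addition above plugs into the driver's X-macro scheme: the single BRCMF_FEAT_LIST is expanded twice, once into the feature enum and once into the matching name strings, so a new feature is declared in exactly one place. A stripped-down sketch of the pattern (names are illustrative, not the driver's):

    #define FEAT_LIST \
            FEAT_DEF(MBSS) \
            FEAT_DEF(DOT11H)

    #define FEAT_DEF(x) FEAT_ ## x,
    enum feat_id { FEAT_LIST FEAT_LAST };
    #undef FEAT_DEF

    #define FEAT_DEF(x) #x,
    static const char * const feat_names[] = { FEAT_LIST };
    #undef FEAT_DEF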
brcmf_msgbuf_remove_flowring(msgbuf, flowid); + return; + } + commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT]; brcmf_commonring_lock(commonring); ret_ptr = brcmf_commonring_reserve_for_write(commonring); @@ -1461,7 +1468,6 @@ static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data) seq_printf(seq, "\nh2d_flowrings: depth %u\n", BRCMF_H2D_TXFLOWRING_MAX_ITEM); seq_puts(seq, "Active flowrings:\n"); - hash = msgbuf->flow->hash; for (i = 0; i < msgbuf->flow->nrofrings; i++) { if (!msgbuf->flow->rings[i]) continue; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 4ea5401c4d6b..6c463475e90b 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -794,7 +794,8 @@ static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo, if (ch == '\n') { console->log_str[console->log_idx] = 0; if (error) - brcmf_err(bus, "CONSOLE: %s", console->log_str); + __brcmf_err(bus, __func__, "CONSOLE: %s", + console->log_str); else pr_debug("CONSOLE: %s", console->log_str); console->log_idx = 0; @@ -1769,6 +1770,12 @@ static void brcmf_pcie_setup(struct device *dev, int ret, nvram_len = fwreq->items[BRCMF_PCIE_FW_NVRAM].nv_data.len; kfree(fwreq); + ret = brcmf_chip_get_raminfo(devinfo->ci); + if (ret) { + brcmf_err(bus, "Failed to get RAM info\n"); + goto fail; + } + /* Some of the firmwares have the size of the memory of the device * defined inside the firmware. This is because part of the memory in * the device is shared and the division is determined by FW. Parse @@ -1817,11 +1824,15 @@ brcmf_pcie_intr_enable(devinfo); brcmf_pcie_hostready(devinfo); - if (brcmf_attach(&devinfo->pdev->dev, devinfo->settings) == 0) - return; + + ret = brcmf_attach(&devinfo->pdev->dev); + if (ret) + goto fail; brcmf_pcie_bus_console_read(devinfo, false); + return; + fail: device_release_driver(dev); } @@ -1916,6 +1927,10 @@ brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) bus->wowl_supported = pci_pme_capable(pdev, PCI_D3hot); dev_set_drvdata(&pdev->dev, bus); + ret = brcmf_alloc(&devinfo->pdev->dev, devinfo->settings); + if (ret) + goto fail_bus; + fwreq = brcmf_pcie_prepare_fw_request(devinfo); if (!fwreq) { ret = -ENOMEM; @@ -1964,6 +1979,7 @@ brcmf_pcie_remove(struct pci_dev *pdev) brcmf_pcie_intr_disable(devinfo); brcmf_detach(&pdev->dev); + brcmf_free(&pdev->dev); kfree(bus->bus_priv.pcie); kfree(bus->msgbuf->flowrings); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c index e3d1b075044b..2e911d4874af 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c @@ -56,22 +56,16 @@ fail: return -ENOMEM; } -void brcmf_proto_detach_post_delif(struct brcmf_pub *drvr) +void brcmf_proto_detach(struct brcmf_pub *drvr) { brcmf_dbg(TRACE, "Enter\n"); if (drvr->proto) { if (drvr->bus_if->proto_type == BRCMF_PROTO_BCDC) - brcmf_proto_bcdc_detach_post_delif(drvr); + brcmf_proto_bcdc_detach(drvr); else if (drvr->bus_if->proto_type == BRCMF_PROTO_MSGBUF) brcmf_proto_msgbuf_detach(drvr); kfree(drvr->proto); drvr->proto = NULL; } } - -void brcmf_proto_detach_pre_delif(struct brcmf_pub *drvr) -{ - if (drvr->proto && drvr->bus_if->proto_type == BRCMF_PROTO_BCDC) - brcmf_proto_bcdc_detach_pre_delif(drvr); -} diff
--git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h index 8d55fad531d0..bd08d3aaa8f4 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h @@ -43,8 +43,7 @@ struct brcmf_proto { int brcmf_proto_attach(struct brcmf_pub *drvr); -void brcmf_proto_detach_pre_delif(struct brcmf_pub *drvr); -void brcmf_proto_detach_post_delif(struct brcmf_pub *drvr); +void brcmf_proto_detach(struct brcmf_pub *drvr); static inline int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, struct sk_buff *skb, diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 629140b6d7e2..264ad63232f8 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -4247,17 +4247,26 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err, sdiod->bus_if->chip = bus->ci->chip; sdiod->bus_if->chiprev = bus->ci->chiprev; + err = brcmf_alloc(sdiod->dev, sdiod->settings); + if (err) { + brcmf_err("brcmf_alloc failed\n"); + goto claim; + } + /* Attach to the common layer, reserve hdr space */ - err = brcmf_attach(sdiod->dev, sdiod->settings); + err = brcmf_attach(sdiod->dev); if (err != 0) { brcmf_err("brcmf_attach failed\n"); - sdio_claim_host(sdiod->func1); - goto checkdied; + goto free; } /* ready */ return; +free: + brcmf_free(sdiod->dev); +claim: + sdio_claim_host(sdiod->func1); checkdied: brcmf_sdio_checkdied(bus); release: diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c index d33628b79a3a..06f3c01f10b3 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c @@ -1178,8 +1178,12 @@ static void brcmf_usb_probe_phase2(struct device *dev, int ret, if (ret) goto error; + ret = brcmf_alloc(devinfo->dev, devinfo->settings); + if (ret) + goto error; + /* Attach to the common driver interface */ - ret = brcmf_attach(devinfo->dev, devinfo->settings); + ret = brcmf_attach(devinfo->dev); if (ret) goto error; @@ -1251,7 +1255,10 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo) } if (!brcmf_usb_dlneeded(devinfo)) { - ret = brcmf_attach(devinfo->dev, devinfo->settings); + ret = brcmf_alloc(devinfo->dev, devinfo->settings); + if (ret) + goto fail; + ret = brcmf_attach(devinfo->dev); if (ret) goto fail; /* we are done */ @@ -1279,6 +1286,7 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo) fail: /* Release resources in reverse order */ + brcmf_free(devinfo->dev); kfree(bus); brcmf_usb_detach(devinfo); return ret; @@ -1292,6 +1300,7 @@ brcmf_usb_disconnect_cb(struct brcmf_usbdev_info *devinfo) brcmf_dbg(USB, "Enter, bus_pub %p\n", devinfo); brcmf_detach(devinfo->dev); + brcmf_free(devinfo->dev); kfree(devinfo->bus_pub.bus); brcmf_usb_detach(devinfo); } @@ -1435,10 +1444,12 @@ static int brcmf_usb_suspend(struct usb_interface *intf, pm_message_t state) brcmf_dbg(USB, "Enter\n"); devinfo->bus_pub.state = BRCMFMAC_USB_STATE_SLEEP; - if (devinfo->wowl_enabled) + if (devinfo->wowl_enabled) { brcmf_cancel_all_urbs(devinfo); - else + } else { brcmf_detach(&usb->dev); + brcmf_free(&usb->dev); + } return 0; } @@ -1451,8 +1462,19 @@ static int brcmf_usb_resume(struct usb_interface *intf) struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev); 
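The sdio.c and usb.c hunks above adopt the same split lifecycle the series gives pcie.c: brcmf_alloc() creates the wiphy and driver state, brcmf_attach() brings the dongle interface up, and every failure or teardown path pairs them with brcmf_detach()/brcmf_free(). Schematically (labels and the unwind step are illustrative, not from any one bus driver):

    err = brcmf_alloc(dev, settings);
    if (err)
            goto fail_bus;

    err = brcmf_attach(dev);
    if (err)
            goto fail_free;
    return 0;

    fail_free:
            brcmf_free(dev);
    fail_bus:
            /* bus-specific unwind */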
brcmf_dbg(USB, "Enter\n"); - if (!devinfo->wowl_enabled) - return brcmf_attach(devinfo->dev, devinfo->settings); + if (!devinfo->wowl_enabled) { + int err; + + err = brcmf_alloc(&usb->dev, devinfo->settings); + if (err) + return err; + + err = brcmf_attach(devinfo->dev); + if (err) { + brcmf_free(devinfo->dev); + return err; + } + } devinfo->bus_pub.state = BRCMFMAC_USB_STATE_UP; brcmf_usb_rx_fill_all(devinfo); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c index 7d4e8f589fdc..080e829da9b3 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c @@ -5248,15 +5248,7 @@ int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config) /* Default to 54g Auto */ /* Advertise and use shortslot (-1/0/1 Auto/Off/On) */ s8 shortslot = BRCMS_SHORTSLOT_AUTO; - bool shortslot_restrict = false; /* Restrict association to stations - * that support shortslot - */ bool ofdm_basic = false; /* Make 6, 12, and 24 basic rates */ - /* Advertise and use short preambles (-1/0/1 Auto/Off/On) */ - int preamble = BRCMS_PLCP_LONG; - bool preamble_restrict = false; /* Restrict association to stations - * that support short preambles - */ struct brcms_band *band; /* if N-support is enabled, allow Gmode set as long as requested @@ -5297,16 +5289,11 @@ int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config) case GMODE_ONLY: ofdm_basic = true; - preamble = BRCMS_PLCP_SHORT; - preamble_restrict = true; break; case GMODE_PERFORMANCE: shortslot = BRCMS_SHORTSLOT_ON; - shortslot_restrict = true; ofdm_basic = true; - preamble = BRCMS_PLCP_SHORT; - preamble_restrict = true; break; default: diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c index 07f61d6155ea..a3f094568cfb 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c @@ -17748,7 +17748,7 @@ static void wlc_phy_txpwrctrl_pwr_setup_nphy(struct brcms_phy *pi) num = 8 * (16 * b0[tbl_id - 26] + b1[tbl_id - 26] * idx); den = 32768 + a1[tbl_id - 26] * idx; - pwr_est = max(((4 * num + den / 2) / den), -8); + pwr_est = max(DIV_ROUND_CLOSEST(4 * num, den), -8); if (NREV_LT(pi->pubpi.phy_rev, 3)) { if (idx <= (uint) (31 - idle_tssi[tbl_id - 26] + 1)) @@ -20035,7 +20035,7 @@ static void wlc_phy_radio_init_2056(struct brcms_phy *pi) break; default: - break; + return; } } @@ -26990,8 +26990,8 @@ wlc_phy_rxcal_gainctrl_nphy_rev5(struct brcms_phy *pi, u8 rx_core, NPHY_RXCAL_TONEAMP, 0, cal_type, false); wlc_phy_rx_iq_est_nphy(pi, est, num_samps, 32, 0); - i_pwr = (est[rx_core].i_pwr + num_samps / 2) / num_samps; - q_pwr = (est[rx_core].q_pwr + num_samps / 2) / num_samps; + i_pwr = DIV_ROUND_CLOSEST(est[rx_core].i_pwr, num_samps); + q_pwr = DIV_ROUND_CLOSEST(est[rx_core].q_pwr, num_samps); curr_pwr = i_pwr + q_pwr; switch (gainctrl_dirn) { @@ -27673,10 +27673,10 @@ wlc_phy_cal_rxiq_nphy_rev2(struct brcms_phy *pi, wlc_phy_rx_iq_est_nphy(pi, est, num_samps, 32, 0); - i_pwr = (est[rx_core].i_pwr + - num_samps / 2) / num_samps; - q_pwr = (est[rx_core].q_pwr + - num_samps / 2) / num_samps; + i_pwr = DIV_ROUND_CLOSEST(est[rx_core].i_pwr, + num_samps); + q_pwr = DIV_ROUND_CLOSEST(est[rx_core].q_pwr, + num_samps); tot_pwr[gain_pass] = i_pwr + q_pwr; } else { diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c index 
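The phy_n.c hunks above replace the open-coded round-to-nearest idiom with the DIV_ROUND_CLOSEST() helper from linux/kernel.h; for a non-negative dividend and positive divisor the two forms are identical, e.g. when averaging accumulated I/Q power over num_samps samples:

    i_pwr = DIV_ROUND_CLOSEST(est[rx_core].i_pwr, num_samps);
    /* same value, for these operand signs, as the old open-coded form: */
    i_pwr = (est[rx_core].i_pwr + num_samps / 2) / num_samps;

(The helper also rounds negative dividends to nearest, where the old idiom was biased upward by its unconditional + num_samps / 2.)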
9342ffbe1e81..f43c06569ea1 100644 --- a/drivers/net/wireless/cisco/airo.c +++ b/drivers/net/wireless/cisco/airo.c @@ -5441,11 +5441,18 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) { Cmd cmd; Resp rsp; - if (ai->flags & FLAG_RADIO_MASK) return -ENETDOWN; + if (ai->flags & FLAG_RADIO_MASK) { + kfree(data->rbuffer); + kfree(file->private_data); + return -ENETDOWN; + } memset(&cmd, 0, sizeof(cmd)); cmd.cmd=CMD_LISTBSS; - if (down_interruptible(&ai->sem)) + if (down_interruptible(&ai->sem)) { + kfree(data->rbuffer); + kfree(file->private_data); return -ERESTARTSYS; + } issuecommand(ai, &cmd, &rsp); up(&ai->sem); data->readlen = 0; diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c index 75c0c29d81f0..8dfbaff2d1fe 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c @@ -4413,7 +4413,7 @@ static void ipw2100_kill_works(struct ipw2100_priv *priv) static int ipw2100_tx_allocate(struct ipw2100_priv *priv) { - int i, j, err = -EINVAL; + int i, j, err; void *v; dma_addr_t p; diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c index fa55d2ccbfab..ed0f06532d5e 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c @@ -2721,7 +2721,7 @@ static void ipw_eeprom_init_sram(struct ipw_priv *priv) /* Do not load eeprom data on fatal error or suspend */ ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0); } else { - IPW_DEBUG_INFO("Enabling FW initializationg of SRAM\n"); + IPW_DEBUG_INFO("Enabling FW initialization of SRAM\n"); /* Load eeprom data on fatal error or suspend */ ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1); diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c index 4a88e35d58d7..73f7bbf742bc 100644 --- a/drivers/net/wireless/intel/iwlegacy/common.c +++ b/drivers/net/wireless/intel/iwlegacy/common.c @@ -4942,8 +4942,7 @@ EXPORT_SYMBOL(il_add_beacon_time); static int il_pci_suspend(struct device *device) { - struct pci_dev *pdev = to_pci_dev(device); - struct il_priv *il = pci_get_drvdata(pdev); + struct il_priv *il = dev_get_drvdata(device); /* * This function is called when system goes into suspend state diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig index 235349a33a3c..7dbc0d38bb3b 100644 --- a/drivers/net/wireless/intel/iwlwifi/Kconfig +++ b/drivers/net/wireless/intel/iwlwifi/Kconfig @@ -92,20 +92,6 @@ config IWLWIFI_BCAST_FILTERING If unsure, don't enable this option, as some programs might expect incoming broadcasts for their normal operations. -config IWLWIFI_PCIE_RTPM - bool "Enable runtime power management mode for PCIe devices" - depends on IWLMVM && PM && EXPERT - help - Say Y here to enable runtime power management for PCIe - devices. If enabled, the device will go into low power mode - when idle for a short period of time, allowing for improved - power saving during runtime. Note that this feature requires - a tight integration with the platform. It is not recommended - to enable this feature without proper validation with the - specific target platform. - - If unsure, say N. 
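The airo.c hunks above plug a leak: proc_BSSList_open() has already allocated file->private_data and data->rbuffer by the time these checks run, so each early return must release both. The patch frees them inline in each branch; the equivalent single-unwind shape, with the surrounding code elided, would be:

    if (ai->flags & FLAG_RADIO_MASK) {
            ret = -ENETDOWN;
            goto out_free;
    }
    if (down_interruptible(&ai->sem)) {
            ret = -ERESTARTSYS;
            goto out_free;
    }
    /* ... issue CMD_LISTBSS as before ... */
    return 0;

    out_free:
            kfree(data->rbuffer);
            kfree(file->private_data);
            return ret;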
- menu "Debugging Options" config IWLWIFI_DEBUG diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c index a1aa2956b382..b92255b91b72 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c @@ -2,7 +2,7 @@ /****************************************************************************** * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * Contact Information: * Intel Linux Wireless <linuxwifi@intel.com> @@ -69,16 +69,16 @@ static const struct iwl_eeprom_params iwl1000_eeprom_params = { .fw_name_pre = IWL1000_FW_PRE, \ .ucode_api_max = IWL1000_UCODE_API_MAX, \ .ucode_api_min = IWL1000_UCODE_API_MIN, \ - .device_family = IWL_DEVICE_FAMILY_1000, \ + .trans.device_family = IWL_DEVICE_FAMILY_1000, \ .max_inst_size = IWLAGN_RTC_INST_SIZE, \ .max_data_size = IWLAGN_RTC_DATA_SIZE, \ .nvm_ver = EEPROM_1000_EEPROM_VERSION, \ .nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION, \ - .base_params = &iwl1000_base_params, \ + .trans.base_params = &iwl1000_base_params, \ .eeprom_params = &iwl1000_eeprom_params, \ .led_mode = IWL_LED_BLINK, \ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ - .csr = &iwl_csr_v1 + .trans.csr = &iwl_csr_v1 const struct iwl_cfg iwl1000_bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 1000 BGN", @@ -95,17 +95,17 @@ const struct iwl_cfg iwl1000_bg_cfg = { .fw_name_pre = IWL100_FW_PRE, \ .ucode_api_max = IWL100_UCODE_API_MAX, \ .ucode_api_min = IWL100_UCODE_API_MIN, \ - .device_family = IWL_DEVICE_FAMILY_100, \ + .trans.device_family = IWL_DEVICE_FAMILY_100, \ .max_inst_size = IWLAGN_RTC_INST_SIZE, \ .max_data_size = IWLAGN_RTC_DATA_SIZE, \ .nvm_ver = EEPROM_1000_EEPROM_VERSION, \ .nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION, \ - .base_params = &iwl1000_base_params, \ + .trans.base_params = &iwl1000_base_params, \ .eeprom_params = &iwl1000_eeprom_params, \ .led_mode = IWL_LED_RF_STATE, \ .rx_with_siso_diversity = true, \ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ - .csr = &iwl_csr_v1 + .trans.csr = &iwl_csr_v1 const struct iwl_cfg iwl100_bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 100 BGN", diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c index 4a988b676913..2b1ae0cecc83 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c @@ -2,7 +2,7 @@ /****************************************************************************** * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. 
- * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * Contact Information: * Intel Linux Wireless <linuxwifi@intel.com> @@ -95,16 +95,16 @@ static const struct iwl_eeprom_params iwl20x0_eeprom_params = { .fw_name_pre = IWL2000_FW_PRE, \ .ucode_api_max = IWL2000_UCODE_API_MAX, \ .ucode_api_min = IWL2000_UCODE_API_MIN, \ - .device_family = IWL_DEVICE_FAMILY_2000, \ + .trans.device_family = IWL_DEVICE_FAMILY_2000, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ .max_data_size = IWL60_RTC_DATA_SIZE, \ .nvm_ver = EEPROM_2000_EEPROM_VERSION, \ .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ - .base_params = &iwl2000_base_params, \ + .trans.base_params = &iwl2000_base_params, \ .eeprom_params = &iwl20x0_eeprom_params, \ .led_mode = IWL_LED_RF_STATE, \ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ - .csr = &iwl_csr_v1 + .trans.csr = &iwl_csr_v1 const struct iwl_cfg iwl2000_2bgn_cfg = { @@ -123,16 +123,16 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = { .fw_name_pre = IWL2030_FW_PRE, \ .ucode_api_max = IWL2030_UCODE_API_MAX, \ .ucode_api_min = IWL2030_UCODE_API_MIN, \ - .device_family = IWL_DEVICE_FAMILY_2030, \ + .trans.device_family = IWL_DEVICE_FAMILY_2030, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ .max_data_size = IWL60_RTC_DATA_SIZE, \ .nvm_ver = EEPROM_2000_EEPROM_VERSION, \ .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ - .base_params = &iwl2030_base_params, \ + .trans.base_params = &iwl2030_base_params, \ .eeprom_params = &iwl20x0_eeprom_params, \ .led_mode = IWL_LED_RF_STATE, \ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ - .csr = &iwl_csr_v1 + .trans.csr = &iwl_csr_v1 const struct iwl_cfg iwl2030_2bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN", @@ -144,17 +144,17 @@ const struct iwl_cfg iwl2030_2bgn_cfg = { .fw_name_pre = IWL105_FW_PRE, \ .ucode_api_max = IWL105_UCODE_API_MAX, \ .ucode_api_min = IWL105_UCODE_API_MIN, \ - .device_family = IWL_DEVICE_FAMILY_105, \ + .trans.device_family = IWL_DEVICE_FAMILY_105, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ .max_data_size = IWL60_RTC_DATA_SIZE, \ .nvm_ver = EEPROM_2000_EEPROM_VERSION, \ .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ - .base_params = &iwl2000_base_params, \ + .trans.base_params = &iwl2000_base_params, \ .eeprom_params = &iwl20x0_eeprom_params, \ .led_mode = IWL_LED_RF_STATE, \ .rx_with_siso_diversity = true, \ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ - .csr = &iwl_csr_v1 + .trans.csr = &iwl_csr_v1 const struct iwl_cfg iwl105_bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 105 BGN", @@ -172,17 +172,17 @@ const struct iwl_cfg iwl105_bgn_d_cfg = { .fw_name_pre = IWL135_FW_PRE, \ .ucode_api_max = IWL135_UCODE_API_MAX, \ .ucode_api_min = IWL135_UCODE_API_MIN, \ - .device_family = IWL_DEVICE_FAMILY_135, \ + .trans.device_family = IWL_DEVICE_FAMILY_135, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ .max_data_size = IWL60_RTC_DATA_SIZE, \ .nvm_ver = EEPROM_2000_EEPROM_VERSION, \ .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ - .base_params = &iwl2030_base_params, \ + .trans.base_params = &iwl2030_base_params, \ .eeprom_params = &iwl20x0_eeprom_params, \ .led_mode = IWL_LED_RF_STATE, \ .rx_with_siso_diversity = true, \ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ - .csr = &iwl_csr_v1 + .trans.csr = &iwl_csr_v1 const struct iwl_cfg iwl135_bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 135 BGN", diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index 55b713255b8e..5e355c4957df 
100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -56,7 +56,7 @@ #include "iwl-config.h" /* Highest firmware API version supported */ -#define IWL_22000_UCODE_API_MAX 48 +#define IWL_22000_UCODE_API_MAX 50 /* Lowest firmware API version supported */ #define IWL_22000_UCODE_API_MIN 39 @@ -76,7 +76,6 @@ #define IWL_22000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-" #define IWL_22000_HR_CDB_FW_PRE "iwlwifi-QuIcp-z0-hrcdb-a0-" #define IWL_22000_HR_A_F0_FW_PRE "iwlwifi-QuQnj-f0-hr-a0-" -#define IWL_22000_HR_B_F0_FW_PRE "iwlwifi-Qu-b0-hr-b0-" #define IWL_22000_QU_B_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0-" #define IWL_22000_HR_B_FW_PRE "iwlwifi-QuQnj-b0-hr-b0-" #define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-" @@ -99,8 +98,6 @@ IWL_22000_JF_FW_PRE __stringify(api) ".ucode" #define IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(api) \ IWL_22000_HR_A_F0_FW_PRE __stringify(api) ".ucode" -#define IWL_22000_HR_B_F0_QNJ_MODULE_FIRMWARE(api) \ - IWL_22000_HR_B_F0_FW_PRE __stringify(api) ".ucode" #define IWL_22000_QU_B_HR_B_MODULE_FIRMWARE(api) \ IWL_22000_QU_B_HR_B_FW_PRE __stringify(api) ".ucode" #define IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(api) \ @@ -172,15 +169,15 @@ static const struct iwl_ht_params iwl_22000_ht_params = { .smem_len = IWL_22000_SMEM_LEN, \ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \ .apmg_not_supported = true, \ - .mq_rx_supported = true, \ + .trans.mq_rx_supported = true, \ .vht_mu_mimo_supported = true, \ .mac_addr_from_csr = true, \ .ht_params = &iwl_22000_ht_params, \ .nvm_ver = IWL_22000_NVM_VERSION, \ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ - .use_tfh = true, \ - .rf_id = true, \ - .gen2 = true, \ + .trans.use_tfh = true, \ + .trans.rf_id = true, \ + .trans.gen2 = true, \ .nvm_type = IWL_NVM_EXT, \ .dbgc_supported = true, \ .min_umac_error_event_table = 0x400000, \ @@ -191,28 +188,25 @@ static const struct iwl_ht_params iwl_22000_ht_params = { .fw_mon_smem_cycle_cnt_ptr_addr = 0xa0c174, \ .fw_mon_smem_cycle_cnt_ptr_msk = 0xfffff -#define IWL_DEVICE_AX200_COMMON \ - IWL_DEVICE_22000_COMMON, \ - .umac_prph_offset = 0x300000 - #define IWL_DEVICE_22500 \ IWL_DEVICE_22000_COMMON, \ - .device_family = IWL_DEVICE_FAMILY_22000, \ - .base_params = &iwl_22000_base_params, \ - .csr = &iwl_csr_v1, \ + .trans.device_family = IWL_DEVICE_FAMILY_22000, \ + .trans.base_params = &iwl_22000_base_params, \ + .trans.csr = &iwl_csr_v1, \ .gp2_reg_addr = 0xa02c68 #define IWL_DEVICE_22560 \ IWL_DEVICE_22000_COMMON, \ - .device_family = IWL_DEVICE_FAMILY_22560, \ - .base_params = &iwl_22560_base_params, \ - .csr = &iwl_csr_v2 + .trans.device_family = IWL_DEVICE_FAMILY_22560, \ + .trans.base_params = &iwl_22560_base_params, \ + .trans.csr = &iwl_csr_v2 #define IWL_DEVICE_AX210 \ - IWL_DEVICE_AX200_COMMON, \ - .device_family = IWL_DEVICE_FAMILY_AX210, \ - .base_params = &iwl_22560_base_params, \ - .csr = &iwl_csr_v1, \ + IWL_DEVICE_22000_COMMON, \ + .trans.umac_prph_offset = 0x300000, \ + .trans.device_family = IWL_DEVICE_FAMILY_AX210, \ + .trans.base_params = &iwl_22560_base_params, \ + .trans.csr = &iwl_csr_v1, \ .min_txq_size = 128, \ .gp2_reg_addr = 0xd02c68, \ .min_256_ba_txq_size = 512 @@ -246,6 +240,7 @@ const struct iwl_cfg iwl_ax101_cfg_qu_hr = { * HT size; mac80211 would otherwise pick the HE max (256) by default. 
*/ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, + .tx_with_siso_diversity = true, }; const struct iwl_cfg iwl_ax201_cfg_qu_hr = { @@ -342,7 +337,7 @@ const struct iwl_cfg iwl_ax200_cfg_cc = { * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, - .bisr_workaround = 1, + .trans.bisr_workaround = 1, }; const struct iwl_cfg killer1650x_2ax_cfg = { @@ -355,7 +350,7 @@ const struct iwl_cfg killer1650x_2ax_cfg = { * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, - .bisr_workaround = 1, + .trans.bisr_workaround = 1, }; const struct iwl_cfg killer1650w_2ax_cfg = { @@ -368,7 +363,7 @@ const struct iwl_cfg killer1650w_2ax_cfg = { * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, - .bisr_workaround = 1, + .trans.bisr_workaround = 1, }; /* @@ -663,7 +658,6 @@ const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0 = { MODULE_FIRMWARE(IWL_22000_HR_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_JF_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_22000_HR_B_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QU_C_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c index ce25c690d69c..aab4495c6085 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c @@ -67,16 +67,16 @@ static const struct iwl_eeprom_params iwl5000_eeprom_params = { .fw_name_pre = IWL5000_FW_PRE, \ .ucode_api_max = IWL5000_UCODE_API_MAX, \ .ucode_api_min = IWL5000_UCODE_API_MIN, \ - .device_family = IWL_DEVICE_FAMILY_5000, \ + .trans.device_family = IWL_DEVICE_FAMILY_5000, \ .max_inst_size = IWLAGN_RTC_INST_SIZE, \ .max_data_size = IWLAGN_RTC_DATA_SIZE, \ .nvm_ver = EEPROM_5000_EEPROM_VERSION, \ .nvm_calib_ver = EEPROM_5000_TX_POWER_VERSION, \ - .base_params = &iwl5000_base_params, \ + .trans.base_params = &iwl5000_base_params, \ .eeprom_params = &iwl5000_eeprom_params, \ .led_mode = IWL_LED_BLINK, \ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ - .csr = &iwl_csr_v1 + .trans.csr = &iwl_csr_v1 const struct iwl_cfg iwl5300_agn_cfg = { .name = "Intel(R) Ultimate N WiFi Link 5300 AGN", @@ -115,34 +115,34 @@ const struct iwl_cfg iwl5350_agn_cfg = { .fw_name_pre = IWL5000_FW_PRE, .ucode_api_max = IWL5000_UCODE_API_MAX, .ucode_api_min = IWL5000_UCODE_API_MIN, - .device_family = IWL_DEVICE_FAMILY_5000, + .trans.device_family = IWL_DEVICE_FAMILY_5000, .max_inst_size = IWLAGN_RTC_INST_SIZE, .max_data_size = IWLAGN_RTC_DATA_SIZE, .nvm_ver = EEPROM_5050_EEPROM_VERSION, .nvm_calib_ver = EEPROM_5050_TX_POWER_VERSION, - .base_params = &iwl5000_base_params, + .trans.base_params = &iwl5000_base_params, .eeprom_params = &iwl5000_eeprom_params, .ht_params = &iwl5000_ht_params, .led_mode = IWL_LED_BLINK, .internal_wimax_coex = true, - .csr = &iwl_csr_v1, + .trans.csr = &iwl_csr_v1, }; #define IWL_DEVICE_5150 \ .fw_name_pre = IWL5150_FW_PRE, \ .ucode_api_max = IWL5150_UCODE_API_MAX, \ .ucode_api_min = IWL5150_UCODE_API_MIN, \ - .device_family = IWL_DEVICE_FAMILY_5150, \ + .trans.device_family = 
IWL_DEVICE_FAMILY_5150, \ .max_inst_size = IWLAGN_RTC_INST_SIZE, \ .max_data_size = IWLAGN_RTC_DATA_SIZE, \ .nvm_ver = EEPROM_5050_EEPROM_VERSION, \ .nvm_calib_ver = EEPROM_5050_TX_POWER_VERSION, \ - .base_params = &iwl5000_base_params, \ + .trans.base_params = &iwl5000_base_params, \ .eeprom_params = &iwl5000_eeprom_params, \ .led_mode = IWL_LED_BLINK, \ .internal_wimax_coex = true, \ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ - .csr = &iwl_csr_v1 + .trans.csr = &iwl_csr_v1 const struct iwl_cfg iwl5150_agn_cfg = { .name = "Intel(R) WiMAX/WiFi Link 5150 AGN", diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c index 67d61a1588a9..39ea81903dbe 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c @@ -2,7 +2,7 @@ /****************************************************************************** * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * Contact Information: * Intel Linux Wireless <linuxwifi@intel.com> @@ -116,16 +116,16 @@ static const struct iwl_eeprom_params iwl6000_eeprom_params = { .fw_name_pre = IWL6005_FW_PRE, \ .ucode_api_max = IWL6000G2_UCODE_API_MAX, \ .ucode_api_min = IWL6000G2_UCODE_API_MIN, \ - .device_family = IWL_DEVICE_FAMILY_6005, \ + .trans.device_family = IWL_DEVICE_FAMILY_6005, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ .max_data_size = IWL60_RTC_DATA_SIZE, \ .nvm_ver = EEPROM_6005_EEPROM_VERSION, \ .nvm_calib_ver = EEPROM_6005_TX_POWER_VERSION, \ - .base_params = &iwl6000_g2_base_params, \ + .trans.base_params = &iwl6000_g2_base_params, \ .eeprom_params = &iwl6000_eeprom_params, \ .led_mode = IWL_LED_RF_STATE, \ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ - .csr = &iwl_csr_v1 + .trans.csr = &iwl_csr_v1 const struct iwl_cfg iwl6005_2agn_cfg = { .name = "Intel(R) Centrino(R) Advanced-N 6205 AGN", @@ -171,16 +171,16 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = { .fw_name_pre = IWL6030_FW_PRE, \ .ucode_api_max = IWL6000G2_UCODE_API_MAX, \ .ucode_api_min = IWL6000G2_UCODE_API_MIN, \ - .device_family = IWL_DEVICE_FAMILY_6030, \ + .trans.device_family = IWL_DEVICE_FAMILY_6030, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ .max_data_size = IWL60_RTC_DATA_SIZE, \ .nvm_ver = EEPROM_6030_EEPROM_VERSION, \ .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ - .base_params = &iwl6000_g2_base_params, \ + .trans.base_params = &iwl6000_g2_base_params, \ .eeprom_params = &iwl6000_eeprom_params, \ .led_mode = IWL_LED_RF_STATE, \ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ - .csr = &iwl_csr_v1 + .trans.csr = &iwl_csr_v1 const struct iwl_cfg iwl6030_2agn_cfg = { .name = "Intel(R) Centrino(R) Advanced-N 6230 AGN", @@ -208,16 +208,16 @@ const struct iwl_cfg iwl6030_2bg_cfg = { .fw_name_pre = IWL6030_FW_PRE, \ .ucode_api_max = IWL6035_UCODE_API_MAX, \ .ucode_api_min = IWL6035_UCODE_API_MIN, \ - .device_family = IWL_DEVICE_FAMILY_6030, \ + .trans.device_family = IWL_DEVICE_FAMILY_6030, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ .max_data_size = IWL60_RTC_DATA_SIZE, \ .nvm_ver = EEPROM_6030_EEPROM_VERSION, \ .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ - .base_params = &iwl6000_g2_base_params, \ + .trans.base_params = &iwl6000_g2_base_params, \ .eeprom_params = &iwl6000_eeprom_params, \ .led_mode = IWL_LED_RF_STATE, \ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ - .csr = &iwl_csr_v1 + .trans.csr = &iwl_csr_v1 const struct iwl_cfg 
iwl6035_2agn_cfg = { .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN", @@ -262,18 +262,18 @@ const struct iwl_cfg iwl130_bg_cfg = { .fw_name_pre = IWL6000_FW_PRE, \ .ucode_api_max = IWL6000_UCODE_API_MAX, \ .ucode_api_min = IWL6000_UCODE_API_MIN, \ - .device_family = IWL_DEVICE_FAMILY_6000i, \ + .trans.device_family = IWL_DEVICE_FAMILY_6000i, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ .max_data_size = IWL60_RTC_DATA_SIZE, \ .valid_tx_ant = ANT_BC, /* .cfg overwrite */ \ .valid_rx_ant = ANT_BC, /* .cfg overwrite */ \ .nvm_ver = EEPROM_6000_EEPROM_VERSION, \ .nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION, \ - .base_params = &iwl6000_base_params, \ + .trans.base_params = &iwl6000_base_params, \ .eeprom_params = &iwl6000_eeprom_params, \ .led_mode = IWL_LED_BLINK, \ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ - .csr = &iwl_csr_v1 + .trans.csr = &iwl_csr_v1 const struct iwl_cfg iwl6000i_2agn_cfg = { .name = "Intel(R) Centrino(R) Advanced-N 6200 AGN", @@ -295,19 +295,19 @@ const struct iwl_cfg iwl6000i_2bg_cfg = { .fw_name_pre = IWL6050_FW_PRE, \ .ucode_api_max = IWL6050_UCODE_API_MAX, \ .ucode_api_min = IWL6050_UCODE_API_MIN, \ - .device_family = IWL_DEVICE_FAMILY_6050, \ + .trans.device_family = IWL_DEVICE_FAMILY_6050, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ .max_data_size = IWL60_RTC_DATA_SIZE, \ .valid_tx_ant = ANT_AB, /* .cfg overwrite */ \ .valid_rx_ant = ANT_AB, /* .cfg overwrite */ \ .nvm_ver = EEPROM_6050_EEPROM_VERSION, \ .nvm_calib_ver = EEPROM_6050_TX_POWER_VERSION, \ - .base_params = &iwl6050_base_params, \ + .trans.base_params = &iwl6050_base_params, \ .eeprom_params = &iwl6000_eeprom_params, \ .led_mode = IWL_LED_BLINK, \ .internal_wimax_coex = true, \ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ - .csr = &iwl_csr_v1 + .trans.csr = &iwl_csr_v1 const struct iwl_cfg iwl6050_2agn_cfg = { .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN", @@ -324,17 +324,17 @@ const struct iwl_cfg iwl6050_2abg_cfg = { .fw_name_pre = IWL6050_FW_PRE, \ .ucode_api_max = IWL6050_UCODE_API_MAX, \ .ucode_api_min = IWL6050_UCODE_API_MIN, \ - .device_family = IWL_DEVICE_FAMILY_6150, \ + .trans.device_family = IWL_DEVICE_FAMILY_6150, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ .max_data_size = IWL60_RTC_DATA_SIZE, \ .nvm_ver = EEPROM_6150_EEPROM_VERSION, \ .nvm_calib_ver = EEPROM_6150_TX_POWER_VERSION, \ - .base_params = &iwl6050_base_params, \ + .trans.base_params = &iwl6050_base_params, \ .eeprom_params = &iwl6000_eeprom_params, \ .led_mode = IWL_LED_BLINK, \ .internal_wimax_coex = true, \ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ - .csr = &iwl_csr_v1 + .trans.csr = &iwl_csr_v1 const struct iwl_cfg iwl6150_bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN", @@ -352,16 +352,16 @@ const struct iwl_cfg iwl6000_3agn_cfg = { .fw_name_pre = IWL6000_FW_PRE, .ucode_api_max = IWL6000_UCODE_API_MAX, .ucode_api_min = IWL6000_UCODE_API_MIN, - .device_family = IWL_DEVICE_FAMILY_6000, + .trans.device_family = IWL_DEVICE_FAMILY_6000, .max_inst_size = IWL60_RTC_INST_SIZE, .max_data_size = IWL60_RTC_DATA_SIZE, .nvm_ver = EEPROM_6000_EEPROM_VERSION, .nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION, - .base_params = &iwl6000_base_params, + .trans.base_params = &iwl6000_base_params, .eeprom_params = &iwl6000_eeprom_params, .ht_params = &iwl6000_ht_params, .led_mode = IWL_LED_BLINK, - .csr = &iwl_csr_v1, + .trans.csr = &iwl_csr_v1, }; MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c 
b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c index 289e3c398a12..deb520aeb3f8 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c @@ -8,7 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -148,14 +148,14 @@ static const struct iwl_ht_params iwl7000_ht_params = { }; #define IWL_DEVICE_7000_COMMON \ - .device_family = IWL_DEVICE_FAMILY_7000, \ - .base_params = &iwl7000_base_params, \ + .trans.device_family = IWL_DEVICE_FAMILY_7000, \ + .trans.base_params = &iwl7000_base_params, \ .led_mode = IWL_LED_RF_STATE, \ .nvm_hw_section_num = 0, \ .non_shared_ant = ANT_A, \ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ .dccm_offset = IWL7000_DCCM_OFFSET, \ - .csr = &iwl_csr_v1 + .trans.csr = &iwl_csr_v1 #define IWL_DEVICE_7000 \ IWL_DEVICE_7000_COMMON, \ diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c index d7d17c1cceea..b3cc477140c0 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c @@ -8,7 +8,7 @@ * Copyright(c) 2014 Intel Corporation. All rights reserved. * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,7 +30,7 @@ * * Copyright(c) 2014 Intel Corporation. All rights reserved. * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. 
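Throughout these cfg files the patch rewrites plain members such as .device_family and .base_params to .trans.device_family and .trans.base_params, i.e. the transport-specific parameters now live in a sub-struct of struct iwl_cfg. The shared IWL_DEVICE_* macros keep working because C99 designated initializers may name nested members directly. A minimal sketch of the pattern, using hypothetical demo_* types rather than the real iwl_cfg layout:

	struct demo_trans_params {
		int device_family;		/* e.g. IWL_DEVICE_FAMILY_5000 */
		const void *base_params;
		const void *csr;
	};

	struct demo_cfg {
		const char *name;
		struct demo_trans_params trans;	/* transport-owned part */
	};

	/* shared initializer fragment, like the IWL_DEVICE_* macros above */
	#define DEMO_DEVICE_COMMON			\
		.trans.device_family = 5000,		\
		.trans.base_params = NULL,		\
		.trans.csr = NULL

	static const struct demo_cfg demo_cfg = {
		.name = "demo device",
		DEMO_DEVICE_COMMON,
	};

Keeping the macros intact while only adding the .trans. prefix is what lets a tree-wide mechanical conversion like this one stay small per device entry.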
* * Redistribution and use in source and binary forms, with or without @@ -134,8 +134,8 @@ static const struct iwl_tt_params iwl8000_tt_params = { }; #define IWL_DEVICE_8000_COMMON \ - .device_family = IWL_DEVICE_FAMILY_8000, \ - .base_params = &iwl8000_base_params, \ + .trans.device_family = IWL_DEVICE_FAMILY_8000, \ + .trans.base_params = &iwl8000_base_params, \ .led_mode = IWL_LED_RF_STATE, \ .nvm_hw_section_num = 10, \ .features = NETIF_F_RXCSUM, \ @@ -152,7 +152,7 @@ static const struct iwl_tt_params iwl8000_tt_params = { .nvm_type = IWL_NVM_EXT, \ .dbgc_supported = true, \ .min_umac_error_event_table = 0x800000, \ - .csr = &iwl_csr_v1 + .trans.csr = &iwl_csr_v1 #define IWL_DEVICE_8000 \ IWL_DEVICE_8000_COMMON, \ diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c index 41bdd0eaf62c..e8372b67df03 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c @@ -122,8 +122,8 @@ static const struct iwl_tt_params iwl9000_tt_params = { #define IWL_DEVICE_9000 \ .ucode_api_max = IWL9000_UCODE_API_MAX, \ .ucode_api_min = IWL9000_UCODE_API_MIN, \ - .device_family = IWL_DEVICE_FAMILY_9000, \ - .base_params = &iwl9000_base_params, \ + .trans.device_family = IWL_DEVICE_FAMILY_9000, \ + .trans.base_params = &iwl9000_base_params, \ .led_mode = IWL_LED_RF_STATE, \ .nvm_hw_section_num = 10, \ .non_shared_ant = ANT_B, \ @@ -136,14 +136,14 @@ static const struct iwl_tt_params iwl9000_tt_params = { .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \ .thermal_params = &iwl9000_tt_params, \ .apmg_not_supported = true, \ - .mq_rx_supported = true, \ + .trans.mq_rx_supported = true, \ .vht_mu_mimo_supported = true, \ .mac_addr_from_csr = true, \ - .rf_id = true, \ + .trans.rf_id = true, \ .nvm_type = IWL_NVM_EXT, \ .dbgc_supported = true, \ .min_umac_error_event_table = 0x800000, \ - .csr = &iwl_csr_v1, \ + .trans.csr = &iwl_csr_v1, \ .d3_debug_data_base_addr = 0x401000, \ .d3_debug_data_length = 92 * 1024, \ .ht_params = &iwl9000_ht_params, \ diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c index b39f8b1475e1..dc3f197f94d9 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c @@ -2,6 +2,7 @@ /****************************************************************************** * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * Copyright (C) 2019 Intel Corporation * * Contact Information: * Intel Linux Wireless <linuxwifi@intel.com> @@ -483,7 +484,7 @@ static void iwl6000_set_ct_threshold(struct iwl_priv *priv) /* NIC configuration for 6000 series */ static void iwl6000_nic_config(struct iwl_priv *priv) { - switch (priv->cfg->device_family) { + switch (priv->trans->trans_cfg->device_family) { case IWL_DEVICE_FAMILY_6005: case IWL_DEVICE_FAMILY_6030: case IWL_DEVICE_FAMILY_6000: diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/led.c b/drivers/net/wireless/intel/iwlwifi/dvm/led.c index 38fd41fba661..dd387aba3317 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/led.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/led.c @@ -2,6 +2,7 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. 
+ * Copyright (C) 2019 Intel Corporation * * Contact Information: * Intel Linux Wireless <linuxwifi@intel.com> @@ -120,9 +121,9 @@ static int iwl_led_cmd(struct iwl_priv *priv, } led_cmd.on = iwl_blink_compensation(priv, on, - priv->cfg->base_params->led_compensation); + priv->trans->trans_cfg->base_params->led_compensation); led_cmd.off = iwl_blink_compensation(priv, off, - priv->cfg->base_params->led_compensation); + priv->trans->trans_cfg->base_params->led_compensation); ret = iwl_send_led_cmd(priv, &led_cmd); if (!ret) { diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c index 6c170636110a..6512d25e3563 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c @@ -2,7 +2,7 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * Copyright (C) 2018 Intel Corporation + * Copyright (C) 2018 - 2019 Intel Corporation * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -1099,7 +1099,7 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, goto done; } - scd_queues = BIT(priv->cfg->base_params->num_of_queues) - 1; + scd_queues = BIT(priv->trans->trans_cfg->base_params->num_of_queues) - 1; scd_queues &= ~(BIT(IWL_IPAN_CMD_QUEUE_NUM) | BIT(IWL_DEFAULT_CMD_QUEUE_NUM)); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c index ae5e4570f1c1..4f2789bb3b5b 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c @@ -3,7 +3,7 @@ * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 Intel Deutschland GmbH - * Copyright (C) 2018 Intel Corporation + * Copyright (C) 2018 - 2019 Intel Corporation * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -1267,7 +1267,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, priv->cfg = cfg; priv->fw = fw; - switch (priv->cfg->device_family) { + switch (priv->trans->trans_cfg->device_family) { case IWL_DEVICE_FAMILY_1000: case IWL_DEVICE_FAMILY_100: priv->lib = &iwl_dvm_1000_cfg; @@ -1342,7 +1342,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, driver_data[2]); WARN_ON(sizeof(priv->transport_queue_stop) * BITS_PER_BYTE < - priv->cfg->base_params->num_of_queues); + priv->trans->trans_cfg->base_params->num_of_queues); ucode_flags = fw->ucode_capa.flags; @@ -1405,9 +1405,9 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, /* Reset chip to save power until we load uCode during "up". 
*/ iwl_trans_stop_device(priv->trans); - priv->nvm_data = iwl_parse_eeprom_data(priv->trans->dev, priv->cfg, - priv->eeprom_blob, - priv->eeprom_blob_size); + priv->nvm_data = iwl_parse_eeprom_data(priv->trans, priv->cfg, + priv->eeprom_blob, + priv->eeprom_blob_size); if (!priv->nvm_data) goto out_free_eeprom_blob; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/power.c b/drivers/net/wireless/intel/iwlwifi/dvm/power.c index dcb948068c1d..93ef023905c9 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/power.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/power.c @@ -2,6 +2,7 @@ /****************************************************************************** * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * Copyright (C) 2019 Intel Corporation * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -199,7 +200,7 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv, else cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK; - if (priv->cfg->base_params->shadow_reg_enable) + if (priv->trans->trans_cfg->base_params->shadow_reg_enable) cmd->flags |= IWL_POWER_SHADOW_REG_ENA; else cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c index b1e5d64ca60d..74229fcb63a9 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c @@ -3256,28 +3256,16 @@ static void rs_add_debugfs(void *priv, void *priv_sta, struct dentry *dir) { struct iwl_lq_sta *lq_sta = priv_sta; - lq_sta->rs_sta_dbgfs_scale_table_file = - debugfs_create_file("rate_scale_table", 0600, dir, - lq_sta, &rs_sta_dbgfs_scale_table_ops); - lq_sta->rs_sta_dbgfs_stats_table_file = - debugfs_create_file("rate_stats_table", 0400, dir, - lq_sta, &rs_sta_dbgfs_stats_table_ops); - lq_sta->rs_sta_dbgfs_rate_scale_data_file = - debugfs_create_file("rate_scale_data", 0400, dir, - lq_sta, &rs_sta_dbgfs_rate_scale_data_ops); - lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file = - debugfs_create_u8("tx_agg_tid_enable", 0600, dir, - &lq_sta->tx_agg_tid_en); -} + debugfs_create_file("rate_scale_table", 0600, dir, lq_sta, + &rs_sta_dbgfs_scale_table_ops); + debugfs_create_file("rate_stats_table", 0400, dir, lq_sta, + &rs_sta_dbgfs_stats_table_ops); + debugfs_create_file("rate_scale_data", 0400, dir, lq_sta, + &rs_sta_dbgfs_rate_scale_data_ops); + debugfs_create_u8("tx_agg_tid_enable", 0600, dir, + &lq_sta->tx_agg_tid_en); -static void rs_remove_debugfs(void *priv, void *priv_sta) -{ - struct iwl_lq_sta *lq_sta = priv_sta; - debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file); - debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file); - debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file); - debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file); } #endif @@ -3303,7 +3291,6 @@ static const struct rate_control_ops rs_ops = { .free_sta = rs_free_sta, #ifdef CONFIG_MAC80211_DEBUGFS .add_sta_debugfs = rs_add_debugfs, - .remove_sta_debugfs = rs_remove_debugfs, #endif }; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.h b/drivers/net/wireless/intel/iwlwifi/dvm/rs.h index b7a1854cd202..68a840d739e8 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.h @@ -356,10 +356,6 @@ struct iwl_lq_sta { struct iwl_traffic_load load[IWL_MAX_TID_COUNT]; u8 tx_agg_tid_en; #ifdef CONFIG_MAC80211_DEBUGFS - struct dentry *rs_sta_dbgfs_scale_table_file; - struct dentry 
*rs_sta_dbgfs_stats_table_file; - struct dentry *rs_sta_dbgfs_rate_scale_data_file; - struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file; u32 dbg_fixed_rate; #endif struct iwl_priv *drv; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c index 247f41705912..3029e3f6de63 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c @@ -2,6 +2,7 @@ /****************************************************************************** * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * Copyright (C) 2019 Intel Corporation * * Contact Information: * Intel Linux Wireless <linuxwifi@intel.com> @@ -467,7 +468,7 @@ static int iwlagn_alloc_agg_txq(struct iwl_priv *priv, int mq) int q; for (q = IWLAGN_FIRST_AMPDU_QUEUE; - q < priv->cfg->base_params->num_of_queues; q++) { + q < priv->trans->trans_cfg->base_params->num_of_queues; q++) { if (!test_and_set_bit(q, priv->agg_q_alloc)) { priv->queue_to_mac80211[q] = mq; return q; @@ -1281,7 +1282,7 @@ void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, * (in Tx queue's circular buffer) of first TFD/frame in window */ u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn); - if (scd_flow >= priv->cfg->base_params->num_of_queues) { + if (scd_flow >= priv->trans->trans_cfg->base_params->num_of_queues) { IWL_ERR(priv, "BUG_ON scd_flow is bigger than number of queues\n"); return; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h index 991a23450999..6cb2d1f5efea 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h @@ -68,6 +68,7 @@ #define ACPI_WRDD_METHOD "WRDD" #define ACPI_SPLC_METHOD "SPLC" #define ACPI_ECKV_METHOD "ECKV" +#define ACPI_PPAG_METHOD "PPAG" #define ACPI_WIFI_DOMAIN (0x07) @@ -92,6 +93,17 @@ #define ACPI_WGDS_NUM_BANDS 2 #define ACPI_WGDS_TABLE_SIZE 3 +#define ACPI_PPAG_NUM_CHAINS 2 +#define ACPI_PPAG_NUM_SUB_BANDS 5 +#define ACPI_PPAG_WIFI_DATA_SIZE ((ACPI_PPAG_NUM_CHAINS * \ + ACPI_PPAG_NUM_SUB_BANDS) + 3) + +/* PPAG gain value bounds in 1/8 dBm */ +#define ACPI_PPAG_MIN_LB -16 +#define ACPI_PPAG_MAX_LB 24 +#define ACPI_PPAG_MIN_HB -16 +#define ACPI_PPAG_MAX_HB 40 + #ifdef CONFIG_ACPI void *iwl_acpi_get_object(struct device *dev, acpi_string method); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h index 4d2274bcc0b5..22dff2c92d4f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h @@ -475,6 +475,13 @@ enum iwl_legacy_cmds { REPLY_RX_MPDU_CMD = 0xc1, /** + * @BAR_FRAME_RELEASE: Frame release from BAR notification, used for + * multi-TID BAR (previously, the BAR frame itself was reported + * instead). Uses &struct iwl_bar_frame_release. + */ + BAR_FRAME_RELEASE = 0xc2, + + /** * @FRAME_RELEASE: * Frame release (reorder helper) notification, uses * &struct iwl_frame_release diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h index 31231b223aae..4c3219e7beb6 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h @@ -396,6 +396,7 @@ enum iwl_wowlan_flags { * @is_11n_connection: indicates HT connection * @offloading_tid: TID reserved for firmware use * @flags: extra flags, see &enum iwl_wowlan_flags + * @sta_id: station ID for wowlan. 
* @reserved: reserved */ struct iwl_wowlan_config_cmd { @@ -406,8 +407,9 @@ struct iwl_wowlan_config_cmd { u8 is_11n_connection; u8 offloading_tid; u8 flags; - u8 reserved[2]; -} __packed; /* WOWLAN_CONFIG_API_S_VER_4 */ + u8 sta_id; + u8 reserved; +} __packed; /* WOWLAN_CONFIG_API_S_VER_5 */ /* * WOWLAN_TSC_RSC_PARAMS diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h index aaf3974a9a20..ba586f148c14 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h @@ -140,17 +140,6 @@ struct iwl_fw_ini_hcmd_tlv { struct iwl_fw_ini_hcmd hcmd; } __packed; /* FW_DEBUG_TLV_HCMD_API_S_VER_1 */ -/** - * struct iwl_fw_ini_debug_flow_tlv - (IWL_UCODE_TLV_TYPE_DEBUG_FLOW) - * - * @header: header - * @debug_flow_cfg: &enum iwl_fw_ini_debug_flow - */ -struct iwl_fw_ini_debug_flow_tlv { - struct iwl_fw_ini_header header; - __le32 debug_flow_cfg; -} __packed; /* FW_DEBUG_TLV_FLOW_TLV_S_VER_1 */ - #define IWL_FW_INI_MAX_REGION_ID 64 #define IWL_FW_INI_MAX_NAME 32 @@ -409,27 +398,6 @@ enum iwl_fw_ini_trigger_id { }; /* FW_DEBUG_TLV_TRIGGER_ID_E_VER_1 */ /** - * enum iwl_fw_ini_apply_point - * - * @IWL_FW_INI_APPLY_INVALID: invalid - * @IWL_FW_INI_APPLY_EARLY: pre loading FW - * @IWL_FW_INI_APPLY_AFTER_ALIVE: first cmd from host after alive - * @IWL_FW_INI_APPLY_POST_INIT: last cmd in initialization sequence - * @IWL_FW_INI_APPLY_MISSED_BEACONS: missed beacons notification - * @IWL_FW_INI_APPLY_SCAN_COMPLETE: scan completed - * @IWL_FW_INI_APPLY_NUM: number of apply points -*/ -enum iwl_fw_ini_apply_point { - IWL_FW_INI_APPLY_INVALID, - IWL_FW_INI_APPLY_EARLY, - IWL_FW_INI_APPLY_AFTER_ALIVE, - IWL_FW_INI_APPLY_POST_INIT, - IWL_FW_INI_APPLY_MISSED_BEACONS, - IWL_FW_INI_APPLY_SCAN_COMPLETE, - IWL_FW_INI_APPLY_NUM, -}; /* FW_DEBUG_TLV_APPLY_POINT_E_VER_1 */ - -/** * enum iwl_fw_ini_allocation_id * * @IWL_FW_INI_ALLOCATION_INVALID: invalid @@ -439,6 +407,7 @@ enum iwl_fw_ini_apply_point { * @IWL_FW_INI_ALLOCATION_ID_SDFX: for SDFX module * @IWL_FW_INI_ALLOCATION_ID_FW_DUMP: used for crash and runtime dumps * @IWL_FW_INI_ALLOCATION_ID_USER_DEFINED: for future user scenarios + * @IWL_FW_INI_ALLOCATION_NUM: number of allocation ids */ enum iwl_fw_ini_allocation_id { IWL_FW_INI_ALLOCATION_INVALID, @@ -448,6 +417,7 @@ enum iwl_fw_ini_allocation_id { IWL_FW_INI_ALLOCATION_ID_SDFX, IWL_FW_INI_ALLOCATION_ID_FW_DUMP, IWL_FW_INI_ALLOCATION_ID_USER_DEFINED, + IWL_FW_INI_ALLOCATION_NUM, }; /* FW_DEBUG_TLV_ALLOCATION_ID_E_VER_1 */ /** @@ -519,4 +489,72 @@ enum iwl_fw_ini_region_type { IWL_FW_INI_REGION_NUM }; /* FW_DEBUG_TLV_REGION_TYPE_E_VER_1 */ +/** + * enum iwl_fw_ini_time_point + * + * Hard coded time points in which the driver can send hcmd or perform dump + * collection + * + * @IWL_FW_INI_TIME_POINT_EARLY: pre loading the FW + * @IWL_FW_INI_TIME_POINT_AFTER_ALIVE: first cmd from host after alive notif + * @IWL_FW_INI_TIME_POINT_POST_INIT: last cmd in series of init sequence + * @IWL_FW_INI_TIME_POINT_FW_ASSERT: FW assert + * @IWL_FW_INI_TIME_POINT_FW_HW_ERROR: FW HW error + * @IWL_FW_INI_TIME_POINT_FW_TFD_Q_HANG: TFD queue hang + * @IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFOCATION: DHC cmd response and notif + * @IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF: FW response or notification. + * data field holds id and group + * @IWL_FW_INI_TIME_POINT_USER_TRIGGER: user trigger time point + * @IWL_FW_INI_TIME_POINT_PERIODIC: periodic timepoint that fires in constant + * intervals. 
data field holds the interval time in msec + * @IWL_FW_INI_TIME_POINT_WDG_TIMEOUT: watchdog timeout + * @IWL_FW_INI_TIME_POINT_HOST_ASSERT: Unused + * @IWL_FW_INI_TIME_POINT_HOST_ALIVE_TIMEOUT: alive timeout + * @IWL_FW_INI_TIME_POINT_HOST_DEVICE_ENABLE: device enable + * @IWL_FW_INI_TIME_POINT_HOST_DEVICE_DISABLE: device disable + * @IWL_FW_INI_TIME_POINT_HOST_D3_START: D3 start + * @IWL_FW_INI_TIME_POINT_HOST_D3_END: D3 end + * @IWL_FW_INI_TIME_POINT_MISSED_BEACONS: missed beacons + * @IWL_FW_INI_TIME_POINT_ASSOC_FAILED: association failure + * @IWL_FW_INI_TIME_POINT_TX_FAILED: Tx frame failed + * @IWL_FW_INI_TIME_POINT_TX_WFD_ACTION_FRAME_FAILED: wifi direct action + * frame failed + * @IWL_FW_INI_TIME_POINT_TX_LATENCY_THRESHOLD: Tx latency threshold + * @IWL_FW_INI_TIME_POINT_HANG_OCCURRED: hang occurred + * @IWL_FW_INI_TIME_POINT_EAPOL_FAILED: EAPOL failed + * @IWL_FW_INI_TIME_POINT_FAKE_TX: fake Tx + * @IWL_FW_INI_TIME_POINT_DEASSOC: disassociation + * @IWL_FW_INI_TIME_POINT_NUM: number of time points + */ +enum iwl_fw_ini_time_point { + IWL_FW_INI_TIME_POINT_INVALID, + IWL_FW_INI_TIME_POINT_EARLY, + IWL_FW_INI_TIME_POINT_AFTER_ALIVE, + IWL_FW_INI_TIME_POINT_POST_INIT, + IWL_FW_INI_TIME_POINT_FW_ASSERT, + IWL_FW_INI_TIME_POINT_FW_HW_ERROR, + IWL_FW_INI_TIME_POINT_FW_TFD_Q_HANG, + IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFOCATION, + IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF, + IWL_FW_INI_TIME_POINT_USER_TRIGGER, + IWL_FW_INI_TIME_POINT_PERIODIC, + IWL_FW_INI_TIME_POINT_WDG_TIMEOUT, + IWL_FW_INI_TIME_POINT_HOST_ASSERT, + IWL_FW_INI_TIME_POINT_HOST_ALIVE_TIMEOUT, + IWL_FW_INI_TIME_POINT_HOST_DEVICE_ENABLE, + IWL_FW_INI_TIME_POINT_HOST_DEVICE_DISABLE, + IWL_FW_INI_TIME_POINT_HOST_D3_START, + IWL_FW_INI_TIME_POINT_HOST_D3_END, + IWL_FW_INI_TIME_POINT_MISSED_BEACONS, + IWL_FW_INI_TIME_POINT_ASSOC_FAILED, + IWL_FW_INI_TIME_POINT_TX_FAILED, + IWL_FW_INI_TIME_POINT_TX_WFD_ACTION_FRAME_FAILED, + IWL_FW_INI_TIME_POINT_TX_LATENCY_THRESHOLD, + IWL_FW_INI_TIME_POINT_HANG_OCCURRED, + IWL_FW_INI_TIME_POINT_EAPOL_FAILED, + IWL_FW_INI_TIME_POINT_FAKE_TX, + IWL_FW_INI_TIME_POINT_DEASSOC, + IWL_FW_INI_TIME_POINT_NUM, +}; /* FW_TLV_DEBUG_TIME_POINT_API_E */ + #endif diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h index 988584973aba..98e957ecbeed 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h @@ -8,7 +8,7 @@ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 Intel Corporation + * Copyright (C) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 Intel Corporation + * Copyright (C) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -81,6 +81,19 @@ enum iwl_debug_cmds { */ UMAC_RD_WR = 0x1, /** + * @DBGC_SUSPEND_RESUME: + * DBGC suspend/resume command.
Uses a single dword as data: + * 0 - resume DBGC recording + * 1 - suspend DBGC recording + */ + DBGC_SUSPEND_RESUME = 0x7, + /** + * @BUFFER_ALLOCATION: + * passes DRAM buffers to a DBGC + * &struct iwl_buf_alloc_cmd + */ + BUFFER_ALLOCATION = 0x8, + /** * @MFU_ASSERT_DUMP_NTF: * &struct iwl_mfu_assert_dump_notif */ @@ -102,6 +115,16 @@ enum { FW_ERR_FATAL = 0xFF }; +/** enum iwl_dbg_suspend_resume_cmds - dbgc suspend resume operations + * dbgc suspend resume command operations + * @DBGC_RESUME_CMD: resume dbgc recording + * @DBGC_SUSPEND_CMD: stop dbgc recording + */ +enum iwl_dbg_suspend_resume_cmds { + DBGC_RESUME_CMD, + DBGC_SUSPEND_CMD, +}; + /** * struct iwl_error_resp - FW error indication * ( REPLY_ERROR = 0x2 ) @@ -335,49 +358,39 @@ struct iwl_dbg_mem_access_rsp { __le32 data[]; } __packed; /* DEBUG_(U|L)MAC_RD_WR_RSP_API_S_VER_1 */ -#define LDBG_CFG_COMMAND_SIZE 80 -#define BUFFER_ALLOCATION 0x27 -#define START_DEBUG_RECORDING 0x29 -#define STOP_DEBUG_RECORDING 0x2A +/** + * struct iwl_dbg_suspend_resume_cmd - dbgc suspend resume command + * @operation: suspend or resume operation, uses + * &enum iwl_dbg_suspend_resume_cmds + */ +struct iwl_dbg_suspend_resume_cmd { + __le32 operation; +} __packed; -/* maximum fragments to be allocated per target of allocationId */ -#define IWL_BUFFER_LOCATION_MAX_FRAGS 2 +#define BUF_ALLOC_MAX_NUM_FRAGS 16 /** - * struct iwl_fragment_data single fragment structure - * @address: 64bit start address - * @size: size in bytes + * struct iwl_buf_alloc_frag - a DBGC fragment + * @addr: base address of the fragment + * @size: size of the fragment */ -struct iwl_fragment_data { - __le64 address; +struct iwl_buf_alloc_frag { + __le64 addr; __le32 size; } __packed; /* FRAGMENT_STRUCTURE_API_S_VER_1 */ /** - * struct iwl_buffer_allocation_cmd - buffer allocation command structure - * @allocation_id: id of the allocation - * @buffer_location: location of the buffer + * struct iwl_buf_alloc_cmd - buffer allocation command + * @alloc_id: &enum iwl_fw_ini_allocation_id + * @buf_location: &enum iwl_fw_ini_buffer_location * @num_frags: number of fragments - * @fragments: memory fragments + * @frags: fragments array */ -struct iwl_buffer_allocation_cmd { - __le32 allocation_id; - __le32 buffer_location; +struct iwl_buf_alloc_cmd { + __le32 alloc_id; + __le32 buf_location; __le32 num_frags; - struct iwl_fragment_data fragments[IWL_BUFFER_LOCATION_MAX_FRAGS]; -} __packed; /* BUFFER_ALLOCATION_CMD_API_S_VER_1 */ - -/** - * struct iwl_ldbg_config_cmd - LDBG config command - * @type: configuration type - * @pad: reserved space for type-dependent data - */ -struct iwl_ldbg_config_cmd { - __le32 type; - union { - u8 pad[LDBG_CFG_COMMAND_SIZE - sizeof(__le32)]; - struct iwl_buffer_allocation_cmd buffer_allocation; - }; /* LDBG_CFG_BODY_API_U_VER_2 (partially) */ -} __packed; /* LDBG_CFG_CMD_API_S_VER_2 */ + struct iwl_buf_alloc_frag frags[BUF_ALLOC_MAX_NUM_FRAGS]; +} __packed; /* BUFFER_ALLOCATION_CMD_API_S_VER_2 */ #endif /* __iwl_fw_api_debug_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h index ec864c7b497f..7a0fe5adefa5 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h @@ -82,7 +82,7 @@ enum iwl_location_subcmd_ids { TOF_RANGE_ABORT_CMD = 0x2, /** * @TOF_RANGE_REQ_EXT_CMD: TOF extended ranging config, - * uses &struct iwl_tof_range_request_ext_cmd + * uses &struct iwl_tof_range_req_ext_cmd */ TOF_RANGE_REQ_EXT_CMD 
= 0x3, /** @@ -292,7 +292,7 @@ struct iwl_tof_responder_dyn_config_cmd { } __packed; /* TOF_RESPONDER_DYN_CONFIG_CMD_API_S_VER_2 */ /** - * struct iwl_tof_range_request_ext_cmd - extended range req for WLS + * struct iwl_tof_range_req_ext_cmd - extended range req for WLS * @tsf_timer_offset_msec: the recommended time offset (mSec) from the AP's TSF * @reserved: reserved * @min_delta_ftm: Minimal time between two consecutive measurements, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h index 85c5e367cbf1..73fb0030c496 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h @@ -500,6 +500,9 @@ struct iwl_he_pkt_ext { * enabled AGG, i.e. both BACK and non-BACK frames in a single AGG * @STA_CTXT_HE_MU_EDCA_CW: indicates that there is an element of MU EDCA * parameter set, i.e. the backoff counters for trig-based ACs + * @STA_CTXT_HE_RU_2MHZ_BLOCK: indicates that 26-tone RU OFDMA transmissions are + * not allowed (as there are OBSS that might classify such transmissions as + * radar pulses). */ enum iwl_he_sta_ctxt_flags { STA_CTXT_HE_REF_BSSID_VALID = BIT(4), @@ -511,6 +514,7 @@ enum iwl_he_sta_ctxt_flags { STA_CTXT_HE_CONST_TRIG_RND_ALLOC = BIT(10), STA_CTXT_HE_ACK_ENABLED = BIT(11), STA_CTXT_HE_MU_EDCA_CW = BIT(12), + STA_CTXT_HE_RU_2MHZ_BLOCK = BIT(14), }; /** diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h index 9cc59e00bd95..8991ddffbf5e 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,6 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -90,6 +92,11 @@ enum iwl_phy_ops_subcmd_ids { GEO_TX_POWER_LIMIT = 0x05, /** + * @PER_PLATFORM_ANT_GAIN_CMD: &struct iwl_ppag_table_cmd + */ + PER_PLATFORM_ANT_GAIN_CMD = 0x07, + + /** * @CT_KILL_NOTIFICATION: &struct ct_kill_notif */ CT_KILL_NOTIFICATION = 0xFE, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h index f195db398bed..6e1b9b21904e 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h @@ -450,6 +450,18 @@ struct iwl_geo_tx_power_profiles_resp { } __packed; /* GEO_TX_POWER_LIMIT_RESP */ /** + * struct iwl_ppag_table_cmd - struct for PER_PLATFORM_ANT_GAIN_CMD cmd. + * @enabled: 1 if PPAG is enabled, 0 otherwise + * @gain: table of antenna gain values per chain and sub-band + * @reserved: reserved + */ +struct iwl_ppag_table_cmd { + __le32 enabled; + s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS]; + s8 reserved[2]; +} __packed; /* PER_PLATFORM_ANT_GAIN_CMD */ + +/** * struct iwl_beacon_filter_cmd * REPLY_BEACON_FILTERING_CMD = 0xd2 (command) * @bf_energy_delta: Used for RSSI filtering, if in 'normal' state.
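The PPAG additions fit together: acpi.h defines the table geometry and the 1/8 dBm gain bounds, phy.h adds the PER_PLATFORM_ANT_GAIN_CMD subcommand id, and iwl_ppag_table_cmd above is the payload carrying the per-chain, per-sub-band gains. A hedged sketch of filling that payload from an ACPI-provided table, assuming sub-band 0 is the single 2.4 GHz (low band) entry, that the ACPI_PPAG_NUM_* dimensions match IWL_NUM_CHAIN_LIMITS/IWL_NUM_SUB_BANDS, and that clamping is acceptable (the real driver may instead reject out-of-range tables); demo_fill_ppag() is a hypothetical helper:

	#include <linux/kernel.h>	/* clamp_t() */
	#include <linux/types.h>

	static void demo_fill_ppag(struct iwl_ppag_table_cmd *cmd,
				   const s8 acpi_gain[ACPI_PPAG_NUM_CHAINS]
						     [ACPI_PPAG_NUM_SUB_BANDS])
	{
		int chain, sb;

		cmd->enabled = cpu_to_le32(1);

		for (chain = 0; chain < ACPI_PPAG_NUM_CHAINS; chain++) {
			for (sb = 0; sb < ACPI_PPAG_NUM_SUB_BANDS; sb++) {
				s8 gain = acpi_gain[chain][sb];

				if (sb == 0)	/* low band bounds, 1/8 dBm */
					gain = clamp_t(s8, gain, ACPI_PPAG_MIN_LB,
						       ACPI_PPAG_MAX_LB);
				else		/* high band bounds, 1/8 dBm */
					gain = clamp_t(s8, gain, ACPI_PPAG_MIN_HB,
						       ACPI_PPAG_MAX_HB);

				cmd->gain[chain][sb] = gain;
			}
		}
	}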
Send beacon diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h index 9eddc4dc2ae6..4347be6491e9 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -29,7 +29,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -166,8 +166,16 @@ enum iwl_tlc_mng_ht_rates { IWL_TLC_MNG_HT_RATE_MAX = IWL_TLC_MNG_HT_RATE_MCS11, }; -/* Maximum supported tx antennas number */ -#define MAX_NSS 2 +enum IWL_TLC_MNG_NSS { + IWL_TLC_NSS_1, + IWL_TLC_NSS_2, + IWL_TLC_NSS_MAX +}; + +enum IWL_TLC_HT_BW_RATES { + IWL_TLC_HT_BW_NONE_160, + IWL_TLC_HT_BW_160, +}; /** * struct tlc_config_cmd - TLC configuration @@ -195,7 +203,7 @@ struct iwl_tlc_config_cmd { u8 amsdu; __le16 flags; __le16 non_ht_rates; - __le16 ht_rates[MAX_NSS][2]; + __le16 ht_rates[IWL_TLC_NSS_MAX][2]; __le16 max_mpdu_len; u8 sgi_ch_width_supp; u8 reserved2[1]; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h index 9b0bb89599fc..a93449db7bb2 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h @@ -746,6 +746,38 @@ struct iwl_frame_release { __le16 nssn; }; +/** + * enum iwl_bar_frame_release_sta_tid - STA/TID information for BAR release + * @IWL_BAR_FRAME_RELEASE_TID_MASK: TID mask + * @IWL_BAR_FRAME_RELEASE_STA_MASK: STA mask + */ +enum iwl_bar_frame_release_sta_tid { + IWL_BAR_FRAME_RELEASE_TID_MASK = 0x0000000f, + IWL_BAR_FRAME_RELEASE_STA_MASK = 0x000001f0, +}; + +/** + * enum iwl_bar_frame_release_ba_info - BA information for BAR release + * @IWL_BAR_FRAME_RELEASE_NSSN_MASK: NSSN mask + * @IWL_BAR_FRAME_RELEASE_SN_MASK: SN mask (ignored by driver) + * @IWL_BAR_FRAME_RELEASE_BAID_MASK: BAID mask + */ +enum iwl_bar_frame_release_ba_info { + IWL_BAR_FRAME_RELEASE_NSSN_MASK = 0x00000fff, + IWL_BAR_FRAME_RELEASE_SN_MASK = 0x00fff000, + IWL_BAR_FRAME_RELEASE_BAID_MASK = 0x3f000000, +}; + +/** + * struct iwl_bar_frame_release - frame release from BAR info + * @sta_tid: STA & TID information, see &enum iwl_bar_frame_release_sta_tid. + * @ba_info: BA information, see &enum iwl_bar_frame_release_ba_info. 
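The two mask enums above pack the BAR release metadata into the pair of __le32 words of iwl_bar_frame_release, whose definition closes just below; contiguous masks like these are meant for the bitfield helpers. A minimal sketch of a consumer, with demo_handle_bar_release() as a hypothetical name:

	#include <linux/bitfield.h>	/* le32_get_bits() */
	#include <linux/printk.h>

	static void demo_handle_bar_release(const struct iwl_bar_frame_release *rel)
	{
		u8 tid = le32_get_bits(rel->sta_tid, IWL_BAR_FRAME_RELEASE_TID_MASK);
		u8 sta_id = le32_get_bits(rel->sta_tid, IWL_BAR_FRAME_RELEASE_STA_MASK);
		u16 nssn = le32_get_bits(rel->ba_info, IWL_BAR_FRAME_RELEASE_NSSN_MASK);
		u8 baid = le32_get_bits(rel->ba_info, IWL_BAR_FRAME_RELEASE_BAID_MASK);

		/* release the reorder buffer of BA session baid for (sta_id, tid)
		 * up to nssn; the SN field is documented as ignored by the driver
		 */
		pr_debug("BAR release: sta %u tid %u baid %u nssn %u\n",
			 sta_id, tid, baid, nssn);
	}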
+ */ +struct iwl_bar_frame_release { + __le32 sta_tid; + __le32 ba_info; +} __packed; /* RX_BAR_TO_FRAME_RELEASE_API_S_VER_1 */ + enum iwl_rss_hash_func_en { IWL_RSS_HASH_TYPE_IPV4_TCP, IWL_RSS_HASH_TYPE_IPV4_UDP, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h index c4960f045415..39c64850cb6f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h @@ -93,6 +93,8 @@ struct iwl_ssid_ie { #define IWL_SCAN_SHORT_BLACKLIST_LEN 16 #define IWL_SCAN_MAX_PROFILES 11 #define SCAN_OFFLOAD_PROBE_REQ_SIZE 512 +#define SCAN_NUM_BAND_PROBE_DATA_V_1 2 +#define SCAN_NUM_BAND_PROBE_DATA_V_2 3 /* Default watchdog (in MS) for scheduled scan iteration */ #define IWL_SCHED_SCAN_WATCHDOG cpu_to_le16(15000) @@ -251,9 +253,22 @@ struct iwl_scan_probe_segment { * @common_data: last (and common) part of the probe * @buf: raw data block */ +struct iwl_scan_probe_req_v1 { + struct iwl_scan_probe_segment mac_header; + struct iwl_scan_probe_segment band_data[SCAN_NUM_BAND_PROBE_DATA_V_1]; + struct iwl_scan_probe_segment common_data; + u8 buf[SCAN_OFFLOAD_PROBE_REQ_SIZE]; +} __packed; + +/* iwl_scan_probe_req - PROBE_REQUEST_FRAME_API_S_VER_v2 + * @mac_header: first (and common) part of the probe + * @band_data: band specific data + * @common_data: last (and common) part of the probe + * @buf: raw data block + */ struct iwl_scan_probe_req { struct iwl_scan_probe_segment mac_header; - struct iwl_scan_probe_segment band_data[2]; + struct iwl_scan_probe_segment band_data[SCAN_NUM_BAND_PROBE_DATA_V_2]; struct iwl_scan_probe_segment common_data; u8 buf[SCAN_OFFLOAD_PROBE_REQ_SIZE]; } __packed; @@ -608,15 +623,29 @@ enum iwl_umac_scan_general_flags2 { * struct iwl_scan_channel_cfg_umac * @flags: bitmap - 0-19: directed scan to i'th ssid. * @channel_num: channel number 1-13 etc. + * @band: band of channel: 0 for 2GHz, 1 for 5GHz * @iter_count: repetition count for the channel. * @iter_interval: interval between two scan iterations on one channel. */ -struct iwl_scan_channel_cfg_umac { +struct iwl_scan_channel_cfg_umac { __le32 flags; - u8 channel_num; - u8 iter_count; - __le16 iter_interval; -} __packed; /* SCAN_CHANNEL_CFG_S_VER2 */ + /* Both versions are of the same size, so use a union without adjusting + * the command size later + */ + union { + struct { + u8 channel_num; + u8 iter_count; + __le16 iter_interval; + } v1; /* SCAN_CHANNEL_CFG_S_VER1 */ + struct { + u8 channel_num; + u8 band; + u8 iter_count; + u8 iter_interval; + } v2; /* SCAN_CHANNEL_CFG_S_VER2 */ + }; +} __packed; /** * struct iwl_scan_umac_schedule @@ -630,6 +659,16 @@ struct iwl_scan_umac_schedule { u8 reserved; } __packed; /* SCAN_SCHED_PARAM_API_S_VER_1 */ +struct iwl_scan_req_umac_tail_v1 { + /* SCAN_PERIODIC_PARAMS_API_S_VER_1 */ + struct iwl_scan_umac_schedule schedule[IWL_MAX_SCHED_SCAN_PLANS]; + __le16 delay; + __le16 reserved; + /* SCAN_PROBE_PARAMS_API_S_VER_1 */ + struct iwl_scan_probe_req_v1 preq; + struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX]; +} __packed; + /** * struct iwl_scan_req_umac_tail - the rest of the UMAC scan request command * parameters following channels configuration array. 
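The union added to iwl_scan_channel_cfg_umac works only because both channel-config layouts occupy the same four bytes after @flags, which is why, as the in-line comment says, the command size needs no adjustment. A hedged sketch of selecting the v2 layout, with demo_cfg_channel_v2() as a hypothetical helper:

	#include <linux/build_bug.h>	/* BUILD_BUG_ON() */

	static void demo_cfg_channel_v2(struct iwl_scan_channel_cfg_umac *cfg,
					u8 channel, u8 band)
	{
		/* both union arms must stay the same size for the size math to hold */
		BUILD_BUG_ON(sizeof(cfg->v1) != sizeof(cfg->v2));

		cfg->flags = cpu_to_le32(0);	/* no directed-SSID bits in this demo */
		cfg->v2.channel_num = channel;
		cfg->v2.band = band;		/* 0 = 2.4 GHz, 1 = 5 GHz */
		cfg->v2.iter_count = 1;
		cfg->v2.iter_interval = 0;
	}

Tagging each union arm with its SCAN_CHANNEL_CFG_S_VER* comment, as the diff does, keeps the firmware API version visible at the point of use.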
@@ -639,12 +678,12 @@ struct iwl_scan_umac_schedule { * @preq: probe request with IEs blocks * @direct_scan: list of SSIDs for directed active scan */ -struct iwl_scan_req_umac_tail { +struct iwl_scan_req_umac_tail_v2 { /* SCAN_PERIODIC_PARAMS_API_S_VER_1 */ struct iwl_scan_umac_schedule schedule[IWL_MAX_SCHED_SCAN_PLANS]; __le16 delay; __le16 reserved; - /* SCAN_PROBE_PARAMS_API_S_VER_1 */ + /* SCAN_PROBE_PARAMS_API_S_VER_2 */ struct iwl_scan_probe_req preq; struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX]; } __packed; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index 4d81776f576d..5c8602de9168 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -243,7 +243,7 @@ static void iwl_fw_dump_rxf(struct iwl_fw_runtime *fwrt, /* Pull RXF2 */ iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size, RXF_DIFF_FROM_PREV + - fwrt->trans->cfg->umac_prph_offset, 1); + fwrt->trans->trans_cfg->umac_prph_offset, 1); /* Pull LMAC2 RXF1 */ if (fwrt->smem_cfg.num_lmacs > 1) iwl_fwrt_dump_rxf(fwrt, dump_data, @@ -468,6 +468,9 @@ static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = { { .start = 0x00a05400, .end = 0x00a056e8 }, { .start = 0x00a08000, .end = 0x00a098bc }, { .start = 0x00a02400, .end = 0x00a02758 }, + { .start = 0x00a04764, .end = 0x00a0476c }, + { .start = 0x00a04770, .end = 0x00a04774 }, + { .start = 0x00a04620, .end = 0x00a04624 }, }; static const struct iwl_prph_range iwl_prph_dump_addr_22000[] = { @@ -681,17 +684,18 @@ static void iwl_fw_prph_handler(struct iwl_fw_runtime *fwrt, void *ptr, { u32 range_len; - if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { range_len = ARRAY_SIZE(iwl_prph_dump_addr_ax210); handler(fwrt, iwl_prph_dump_addr_ax210, range_len, ptr); - } else if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) { + } else if (fwrt->trans->trans_cfg->device_family >= + IWL_DEVICE_FAMILY_22000) { range_len = ARRAY_SIZE(iwl_prph_dump_addr_22000); handler(fwrt, iwl_prph_dump_addr_22000, range_len, ptr); } else { range_len = ARRAY_SIZE(iwl_prph_dump_addr_comm); handler(fwrt, iwl_prph_dump_addr_comm, range_len, ptr); - if (fwrt->trans->cfg->mq_rx_supported) { + if (fwrt->trans->trans_cfg->mq_rx_supported) { range_len = ARRAY_SIZE(iwl_prph_dump_addr_9000); handler(fwrt, iwl_prph_dump_addr_9000, range_len, ptr); } @@ -853,7 +857,8 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt, iwl_fw_prph_handler(fwrt, &prph_len, iwl_fw_get_prph_len); - if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 && + if (fwrt->trans->trans_cfg->device_family == + IWL_DEVICE_FAMILY_7000 && iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RADIO_REG)) radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ; } @@ -1103,25 +1108,9 @@ static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt, return sizeof(*range) + le32_to_cpu(range->range_data_size); } -static int -iwl_dump_ini_paging_gen2_iter(struct iwl_fw_runtime *fwrt, - struct iwl_fw_ini_region_cfg *reg, - void *range_ptr, int idx) -{ - struct iwl_fw_ini_error_dump_range *range = range_ptr; - u32 page_size = fwrt->trans->init_dram.paging[idx].size; - - range->page_num = cpu_to_le32(idx); - range->range_data_size = cpu_to_le32(page_size); - memcpy(range->data, fwrt->trans->init_dram.paging[idx].block, - page_size); - - return sizeof(*range) + le32_to_cpu(range->range_data_size); -} - -static int iwl_dump_ini_paging_iter(struct 
iwl_fw_runtime *fwrt, - struct iwl_fw_ini_region_cfg *reg, - void *range_ptr, int idx) +static int _iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_region_cfg *reg, + void *range_ptr, int idx) { /* increase idx by 1 since the pages are from 1 to * fwrt->num_of_paging_blk + 1 @@ -1142,6 +1131,27 @@ static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt, return sizeof(*range) + le32_to_cpu(range->range_data_size); } +static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_region_cfg *reg, + void *range_ptr, int idx) +{ + struct iwl_fw_ini_error_dump_range *range; + u32 page_size; + + if (!fwrt->trans->trans_cfg->gen2) + return _iwl_dump_ini_paging_iter(fwrt, reg, range_ptr, idx); + + range = range_ptr; + page_size = fwrt->trans->init_dram.paging[idx].size; + + range->page_num = cpu_to_le32(idx); + range->range_data_size = cpu_to_le32(page_size); + memcpy(range->data, fwrt->trans->init_dram.paging[idx].block, + page_size); + + return sizeof(*range) + le32_to_cpu(range->range_data_size); +} + static int iwl_dump_ini_mon_dram_iter(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_region_cfg *reg, void *range_ptr, @@ -1163,35 +1173,23 @@ iwl_dump_ini_mon_dram_iter(struct iwl_fw_runtime *fwrt, return sizeof(*range) + le32_to_cpu(range->range_data_size); } -struct iwl_ini_txf_iter_data { - int fifo; - int lmac; - u32 fifo_size; - bool internal_txf; - bool init; -}; - static bool iwl_ini_txf_iter(struct iwl_fw_runtime *fwrt, - struct iwl_fw_ini_region_cfg *reg) + struct iwl_fw_ini_region_cfg *reg, int idx) { - struct iwl_ini_txf_iter_data *iter = fwrt->dump.fifo_iter; + struct iwl_txf_iter_data *iter = &fwrt->dump.txf_iter_data; struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg; int txf_num = cfg->num_txfifo_entries; int int_txf_num = ARRAY_SIZE(cfg->internal_txfifo_size); u32 lmac_bitmap = le32_to_cpu(reg->fifos.fid1); - if (!iter) - return false; - - if (iter->init) { + if (!idx) { if (le32_to_cpu(reg->offset) && WARN_ONCE(cfg->num_lmacs == 1, "Invalid lmac offset: 0x%x\n", le32_to_cpu(reg->offset))) return false; - iter->init = false; - iter->internal_txf = false; + iter->internal_txf = 0; iter->fifo_size = 0; iter->fifo = -1; if (le32_to_cpu(reg->offset)) @@ -1208,7 +1206,7 @@ static bool iwl_ini_txf_iter(struct iwl_fw_runtime *fwrt, return true; } - iter->internal_txf = true; + iter->internal_txf = 1; if (!fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) @@ -1229,7 +1227,7 @@ static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt, void *range_ptr, int idx) { struct iwl_fw_ini_error_dump_range *range = range_ptr; - struct iwl_ini_txf_iter_data *iter; + struct iwl_txf_iter_data *iter = &fwrt->dump.txf_iter_data; struct iwl_fw_ini_error_dump_register *reg_dump = (void *)range->data; u32 offs = le32_to_cpu(reg->offset), addr; u32 registers_size = @@ -1238,14 +1236,12 @@ static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt, unsigned long flags; int i; - if (!iwl_ini_txf_iter(fwrt, reg)) + if (!iwl_ini_txf_iter(fwrt, reg, idx)) return -EIO; if (!iwl_trans_grab_nic_access(fwrt->trans, &flags)) return -EBUSY; - iter = fwrt->dump.fifo_iter; - range->fifo_hdr.fifo_num = cpu_to_le32(iter->fifo); range->fifo_hdr.num_of_registers = reg->fifos.num_of_registers; range->range_data_size = cpu_to_le32(iter->fifo_size + registers_size); @@ -1448,7 +1444,7 @@ static void struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data; u32 write_ptr_addr, write_ptr_msk, cycle_cnt_addr, cycle_cnt_msk; - switch 
(fwrt->trans->cfg->device_family) { + switch (fwrt->trans->trans_cfg->device_family) { case IWL_DEVICE_FAMILY_9000: case IWL_DEVICE_FAMILY_22000: write_ptr_addr = MON_BUFF_WRPTR_VER2; @@ -1458,7 +1454,7 @@ static void break; default: IWL_ERR(fwrt, "Unsupported device family %d\n", - fwrt->trans->cfg->device_family); + fwrt->trans->trans_cfg->device_family); return NULL; } @@ -1475,10 +1471,10 @@ static void struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data; const struct iwl_cfg *cfg = fwrt->trans->cfg; - if (fwrt->trans->cfg->device_family != IWL_DEVICE_FAMILY_9000 && - fwrt->trans->cfg->device_family != IWL_DEVICE_FAMILY_22000) { + if (fwrt->trans->trans_cfg->device_family != IWL_DEVICE_FAMILY_9000 && + fwrt->trans->trans_cfg->device_family != IWL_DEVICE_FAMILY_22000) { IWL_ERR(fwrt, "Unsupported device family %d\n", - fwrt->trans->cfg->device_family); + fwrt->trans->trans_cfg->device_family); return NULL; } @@ -1496,15 +1492,12 @@ static u32 iwl_dump_ini_mem_ranges(struct iwl_fw_runtime *fwrt, return le32_to_cpu(reg->internal.num_of_ranges); } -static u32 iwl_dump_ini_paging_gen2_ranges(struct iwl_fw_runtime *fwrt, - struct iwl_fw_ini_region_cfg *reg) -{ - return fwrt->trans->init_dram.paging_cnt; -} - static u32 iwl_dump_ini_paging_ranges(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_region_cfg *reg) { + if (fwrt->trans->trans_cfg->gen2) + return fwrt->trans->init_dram.paging_cnt; + return fwrt->num_of_paging_blk; } @@ -1517,16 +1510,11 @@ static u32 iwl_dump_ini_mon_dram_ranges(struct iwl_fw_runtime *fwrt, static u32 iwl_dump_ini_txf_ranges(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_region_cfg *reg) { - struct iwl_ini_txf_iter_data iter = { .init = true }; - void *fifo_iter = fwrt->dump.fifo_iter; u32 num_of_fifos = 0; - fwrt->dump.fifo_iter = &iter; - while (iwl_ini_txf_iter(fwrt, reg)) + while (iwl_ini_txf_iter(fwrt, reg, num_of_fifos)) num_of_fifos++; - fwrt->dump.fifo_iter = fifo_iter; - return num_of_fifos; } @@ -1548,20 +1536,6 @@ static u32 iwl_dump_ini_mem_get_size(struct iwl_fw_runtime *fwrt, le32_to_cpu(reg->internal.range_data_size)); } -static u32 iwl_dump_ini_paging_gen2_get_size(struct iwl_fw_runtime *fwrt, - struct iwl_fw_ini_region_cfg *reg) -{ - int i; - u32 range_header_len = sizeof(struct iwl_fw_ini_error_dump_range); - u32 size = sizeof(struct iwl_fw_ini_error_dump); - - for (i = 0; i < iwl_dump_ini_paging_gen2_ranges(fwrt, reg); i++) - size += range_header_len + - fwrt->trans->init_dram.paging[i].size; - - return size; -} - static u32 iwl_dump_ini_paging_get_size(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_region_cfg *reg) { @@ -1569,8 +1543,15 @@ static u32 iwl_dump_ini_paging_get_size(struct iwl_fw_runtime *fwrt, u32 range_header_len = sizeof(struct iwl_fw_ini_error_dump_range); u32 size = sizeof(struct iwl_fw_ini_error_dump); - for (i = 1; i <= iwl_dump_ini_paging_ranges(fwrt, reg); i++) - size += range_header_len + fwrt->fw_paging_db[i].fw_paging_size; + if (fwrt->trans->trans_cfg->gen2) { + for (i = 0; i < iwl_dump_ini_paging_ranges(fwrt, reg); i++) + size += range_header_len + + fwrt->trans->init_dram.paging[i].size; + } else { + for (i = 1; i <= iwl_dump_ini_paging_ranges(fwrt, reg); i++) + size += range_header_len + + fwrt->fw_paging_db[i].fw_paging_size; + } return size; } @@ -1599,25 +1580,21 @@ static u32 iwl_dump_ini_mon_smem_get_size(struct iwl_fw_runtime *fwrt, static u32 iwl_dump_ini_txf_get_size(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_region_cfg *reg) { - struct iwl_ini_txf_iter_data iter = { .init = true }; - void *fifo_iter = 
fwrt->dump.fifo_iter; + struct iwl_txf_iter_data *iter = &fwrt->dump.txf_iter_data; u32 size = 0; u32 fifo_hdr = sizeof(struct iwl_fw_ini_error_dump_range) + le32_to_cpu(reg->fifos.num_of_registers) * sizeof(struct iwl_fw_ini_error_dump_register); - fwrt->dump.fifo_iter = &iter; - while (iwl_ini_txf_iter(fwrt, reg)) { + while (iwl_ini_txf_iter(fwrt, reg, size)) { size += fifo_hdr; if (!reg->fifos.header_only) - size += iter.fifo_size; + size += iter->fifo_size; } if (size) size += sizeof(struct iwl_fw_ini_error_dump); - fwrt->dump.fifo_iter = fifo_iter; - return size; } @@ -1661,38 +1638,50 @@ struct iwl_dump_ini_mem_ops { }; /** - * iwl_dump_ini_mem - copy a memory region into the dump - * @fwrt: fw runtime struct. - * @data: dump memory data. - * @reg: region to copy to the dump. - * @ops: memory dump operations. + * iwl_dump_ini_mem + * + * Creates a dump tlv and copies a memory region into it. + * Returns the size of the current dump tlv or 0 if failed + * + * @fwrt: fw runtime struct + * @list: list to add the dump tlv to + * @reg: memory region + * @ops: memory dump operations */ -static void -iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, - struct iwl_fw_error_dump_data **data, - struct iwl_fw_ini_region_cfg *reg, - struct iwl_dump_ini_mem_ops *ops) +static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list, + struct iwl_fw_ini_region_cfg *reg, + const struct iwl_dump_ini_mem_ops *ops) { - struct iwl_fw_ini_error_dump_header *header = (void *)(*data)->data; + struct iwl_fw_ini_dump_entry *entry; + struct iwl_fw_error_dump_data *tlv; + struct iwl_fw_ini_error_dump_header *header; u32 num_of_ranges, i, type = le32_to_cpu(reg->region_type), size; void *range; - if (WARN_ON(!ops || !ops->get_num_of_ranges || !ops->get_size || - !ops->fill_mem_hdr || !ops->fill_range)) - return; + if (!ops->get_num_of_ranges || !ops->get_size || !ops->fill_mem_hdr || + !ops->fill_range) + return 0; size = ops->get_size(fwrt, reg); if (!size) - return; + return 0; + + entry = kmalloc(sizeof(*entry) + sizeof(*tlv) + size, GFP_KERNEL); + if (!entry) + return 0; + + entry->size = sizeof(*tlv) + size; - IWL_DEBUG_FW(fwrt, "WRT: collecting region: id=%d, type=%d\n", + tlv = (void *)entry->data; + tlv->type = cpu_to_le32(type); + tlv->len = cpu_to_le32(size); + + IWL_DEBUG_FW(fwrt, "WRT: Collecting region: id=%d, type=%d\n", le32_to_cpu(reg->region_id), type); num_of_ranges = ops->get_num_of_ranges(fwrt, reg); - (*data)->type = cpu_to_le32(type); - (*data)->len = cpu_to_le32(size); - + header = (void *)tlv->data; header->region_id = reg->region_id; header->num_of_ranges = cpu_to_le32(num_of_ranges); header->name_len = cpu_to_le32(min_t(int, IWL_FW_INI_MAX_NAME, @@ -1702,10 +1691,9 @@ iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, range = ops->fill_mem_hdr(fwrt, reg, header); if (!range) { IWL_ERR(fwrt, - "WRT: failed to fill region header: id=%d, type=%d\n", + "WRT: Failed to fill region header: id=%d, type=%d\n", le32_to_cpu(reg->region_id), type); - memset(*data, 0, size); - return; + goto out_err; } for (i = 0; i < num_of_ranges; i++) { @@ -1713,30 +1701,49 @@ iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, if (range_size < 0) { IWL_ERR(fwrt, - "WRT: failed to dump region: id=%d, type=%d\n", + "WRT: Failed to dump region: id=%d, type=%d\n", le32_to_cpu(reg->region_id), type); - memset(*data, 0, size); - return; + goto out_err; } range = range + range_size; } - *data = iwl_fw_error_next_data(*data); + + list_add_tail(&entry->list, list); + + return entry->size; + +out_err: + kfree(entry); + +
return 0; } -static void iwl_dump_ini_info(struct iwl_fw_runtime *fwrt, - struct iwl_fw_ini_trigger *trigger, - struct iwl_fw_error_dump_data **data) +static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_trigger *trigger, + struct list_head *list) { - struct iwl_fw_ini_dump_info *dump = (void *)(*data)->data; + struct iwl_fw_ini_dump_entry *entry; + struct iwl_fw_error_dump_data *tlv; + struct iwl_fw_ini_dump_info *dump; u32 reg_ids_size = le32_to_cpu(trigger->num_regions) * sizeof(__le32); + u32 size = sizeof(*tlv) + sizeof(*dump) + reg_ids_size; - (*data)->type = cpu_to_le32(IWL_INI_DUMP_INFO_TYPE); - (*data)->len = cpu_to_le32(sizeof(*dump) + reg_ids_size); + entry = kmalloc(sizeof(*entry) + size, GFP_KERNEL); + if (!entry) + return 0; + + entry->size = size; + + tlv = (void *)entry->data; + tlv->type = cpu_to_le32(IWL_INI_DUMP_INFO_TYPE); + tlv->len = cpu_to_le32(sizeof(*dump) + reg_ids_size); + + dump = (void *)tlv->data; dump->version = cpu_to_le32(IWL_INI_DUMP_VER); dump->trigger_id = trigger->trigger_id; dump->is_external_cfg = - cpu_to_le32(fwrt->trans->dbg.external_ini_loaded); + cpu_to_le32(fwrt->trans->dbg.external_ini_cfg); dump->ver_type = cpu_to_le32(fwrt->dump.fw_ver.type); dump->ver_subtype = cpu_to_le32(fwrt->dump.fw_ver.subtype); @@ -1770,30 +1777,98 @@ static void iwl_dump_ini_info(struct iwl_fw_runtime *fwrt, dump->external_dbg_cfg_name_len = cpu_to_le32(sizeof(dump->external_dbg_cfg_name)); - /* dump info size is allocated in iwl_fw_ini_get_trigger_len. - * The driver allocates (sizeof(*dump) + reg_ids_size) so it is safe to - * use reg_ids_size - */ memcpy(dump->external_dbg_cfg_name, fwrt->dump.external_dbg_cfg_name, sizeof(dump->external_dbg_cfg_name)); dump->regions_num = trigger->num_regions; memcpy(dump->region_ids, trigger->data, reg_ids_size); - *data = iwl_fw_error_next_data(*data); + /* add dump info TLV to the beginning of the list since it needs to be + * the first TLV in the dump + */ + list_add(&entry->list, list); + + return entry->size; } -static int iwl_fw_ini_get_trigger_len(struct iwl_fw_runtime *fwrt, - struct iwl_fw_ini_trigger *trigger) -{ - int i, ret_size = 0, hdr_len = sizeof(struct iwl_fw_error_dump_data); - u32 size; +static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = { + [IWL_FW_INI_REGION_INVALID] = {}, + [IWL_FW_INI_REGION_DEVICE_MEMORY] = { + .get_num_of_ranges = iwl_dump_ini_mem_ranges, + .get_size = iwl_dump_ini_mem_get_size, + .fill_mem_hdr = iwl_dump_ini_mem_fill_header, + .fill_range = iwl_dump_ini_dev_mem_iter, + }, + [IWL_FW_INI_REGION_PERIPHERY_MAC] = { + .get_num_of_ranges = iwl_dump_ini_mem_ranges, + .get_size = iwl_dump_ini_mem_get_size, + .fill_mem_hdr = iwl_dump_ini_mem_fill_header, + .fill_range = iwl_dump_ini_prph_iter, + }, + [IWL_FW_INI_REGION_PERIPHERY_PHY] = {}, + [IWL_FW_INI_REGION_PERIPHERY_AUX] = {}, + [IWL_FW_INI_REGION_DRAM_BUFFER] = { + .get_num_of_ranges = iwl_dump_ini_mon_dram_ranges, + .get_size = iwl_dump_ini_mon_dram_get_size, + .fill_mem_hdr = iwl_dump_ini_mon_dram_fill_header, + .fill_range = iwl_dump_ini_mon_dram_iter, + }, + [IWL_FW_INI_REGION_DRAM_IMR] = {}, + [IWL_FW_INI_REGION_INTERNAL_BUFFER] = { + .get_num_of_ranges = iwl_dump_ini_mem_ranges, + .get_size = iwl_dump_ini_mon_smem_get_size, + .fill_mem_hdr = iwl_dump_ini_mon_smem_fill_header, + .fill_range = iwl_dump_ini_dev_mem_iter, + }, + [IWL_FW_INI_REGION_TXF] = { + .get_num_of_ranges = iwl_dump_ini_txf_ranges, + .get_size = iwl_dump_ini_txf_get_size, + .fill_mem_hdr = iwl_dump_ini_mem_fill_header, + 
.fill_range = iwl_dump_ini_txf_iter, + }, + [IWL_FW_INI_REGION_RXF] = { + .get_num_of_ranges = iwl_dump_ini_rxf_ranges, + .get_size = iwl_dump_ini_rxf_get_size, + .fill_mem_hdr = iwl_dump_ini_mem_fill_header, + .fill_range = iwl_dump_ini_rxf_iter, + }, + [IWL_FW_INI_REGION_PAGING] = { + .fill_mem_hdr = iwl_dump_ini_mem_fill_header, + .get_num_of_ranges = iwl_dump_ini_paging_ranges, + .get_size = iwl_dump_ini_paging_get_size, + .fill_range = iwl_dump_ini_paging_iter, + }, + [IWL_FW_INI_REGION_CSR] = { + .get_num_of_ranges = iwl_dump_ini_mem_ranges, + .get_size = iwl_dump_ini_mem_get_size, + .fill_mem_hdr = iwl_dump_ini_mem_fill_header, + .fill_range = iwl_dump_ini_csr_iter, + }, + [IWL_FW_INI_REGION_NOTIFICATION] = {}, + [IWL_FW_INI_REGION_DHC] = {}, + [IWL_FW_INI_REGION_LMAC_ERROR_TABLE] = { + .get_num_of_ranges = iwl_dump_ini_mem_ranges, + .get_size = iwl_dump_ini_mem_get_size, + .fill_mem_hdr = iwl_dump_ini_mem_fill_header, + .fill_range = iwl_dump_ini_dev_mem_iter, + }, + [IWL_FW_INI_REGION_UMAC_ERROR_TABLE] = { + .get_num_of_ranges = iwl_dump_ini_mem_ranges, + .get_size = iwl_dump_ini_mem_get_size, + .fill_mem_hdr = iwl_dump_ini_mem_fill_header, + .fill_range = iwl_dump_ini_dev_mem_iter, + }, +}; - if (!trigger || !trigger->num_regions) - return 0; +static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_trigger *trigger, + struct list_head *list) +{ + int i; + u32 size = 0; for (i = 0; i < le32_to_cpu(trigger->num_regions); i++) { - u32 reg_id = le32_to_cpu(trigger->data[i]); + u32 reg_id = le32_to_cpu(trigger->data[i]), reg_type; struct iwl_fw_ini_region_cfg *reg; if (WARN_ON(reg_id >= ARRAY_SIZE(fwrt->dump.active_regs))) @@ -1802,7 +1877,7 @@ static int iwl_fw_ini_get_trigger_len(struct iwl_fw_runtime *fwrt, reg = fwrt->dump.active_regs[reg_id]; if (!reg) { IWL_WARN(fwrt, - "WRT: unassigned region id %d, skipping\n", + "WRT: Unassigned region id %d, skipping\n", reg_id); continue; } @@ -1811,205 +1886,55 @@ static int iwl_fw_ini_get_trigger_len(struct iwl_fw_runtime *fwrt, if (le32_to_cpu(reg->domain) != IWL_FW_INI_DBG_DOMAIN_ALWAYS_ON) continue; - switch (le32_to_cpu(reg->region_type)) { - case IWL_FW_INI_REGION_DEVICE_MEMORY: - case IWL_FW_INI_REGION_PERIPHERY_MAC: - case IWL_FW_INI_REGION_PERIPHERY_PHY: - case IWL_FW_INI_REGION_PERIPHERY_AUX: - case IWL_FW_INI_REGION_CSR: - case IWL_FW_INI_REGION_LMAC_ERROR_TABLE: - case IWL_FW_INI_REGION_UMAC_ERROR_TABLE: - size = iwl_dump_ini_mem_get_size(fwrt, reg); - if (size) - ret_size += hdr_len + size; - break; - case IWL_FW_INI_REGION_TXF: - size = iwl_dump_ini_txf_get_size(fwrt, reg); - if (size) - ret_size += hdr_len + size; - break; - case IWL_FW_INI_REGION_RXF: - size = iwl_dump_ini_rxf_get_size(fwrt, reg); - if (size) - ret_size += hdr_len + size; - break; - case IWL_FW_INI_REGION_PAGING: - if (iwl_fw_dbg_is_paging_enabled(fwrt)) - size = iwl_dump_ini_paging_get_size(fwrt, reg); - else - size = iwl_dump_ini_paging_gen2_get_size(fwrt, - reg); - if (size) - ret_size += hdr_len + size; - break; - case IWL_FW_INI_REGION_DRAM_BUFFER: - if (!fwrt->trans->dbg.num_blocks) - break; - size = iwl_dump_ini_mon_dram_get_size(fwrt, reg); - if (size) - ret_size += hdr_len + size; - break; - case IWL_FW_INI_REGION_INTERNAL_BUFFER: - size = iwl_dump_ini_mon_smem_get_size(fwrt, reg); - if (size) - ret_size += hdr_len + size; - break; - case IWL_FW_INI_REGION_DRAM_IMR: - /* Undefined yet */ - default: - break; - } - } - - /* add dump info size */ - if (ret_size) - ret_size += hdr_len + sizeof(struct iwl_fw_ini_dump_info) + - 
(le32_to_cpu(trigger->num_regions) * sizeof(__le32)); - - return ret_size; -} - -static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt, - struct iwl_fw_ini_trigger *trigger, - struct iwl_fw_error_dump_data **data) -{ - int i, num = le32_to_cpu(trigger->num_regions); - - iwl_dump_ini_info(fwrt, trigger, data); - - for (i = 0; i < num; i++) { - u32 reg_id = le32_to_cpu(trigger->data[i]); - struct iwl_fw_ini_region_cfg *reg; - struct iwl_dump_ini_mem_ops ops; - - if (reg_id >= ARRAY_SIZE(fwrt->dump.active_regs)) + reg_type = le32_to_cpu(reg->region_type); + if (reg_type >= ARRAY_SIZE(iwl_dump_ini_region_ops)) continue; - reg = fwrt->dump.active_regs[reg_id]; - /* Don't warn, get_trigger_len already warned */ - if (!reg) - continue; - - /* currently the driver supports always on domain only */ - if (le32_to_cpu(reg->domain) != IWL_FW_INI_DBG_DOMAIN_ALWAYS_ON) - continue; + size += iwl_dump_ini_mem(fwrt, list, reg, + &iwl_dump_ini_region_ops[reg_type]); + } - switch (le32_to_cpu(reg->region_type)) { - case IWL_FW_INI_REGION_DEVICE_MEMORY: - case IWL_FW_INI_REGION_LMAC_ERROR_TABLE: - case IWL_FW_INI_REGION_UMAC_ERROR_TABLE: - ops.get_num_of_ranges = iwl_dump_ini_mem_ranges; - ops.get_size = iwl_dump_ini_mem_get_size; - ops.fill_mem_hdr = iwl_dump_ini_mem_fill_header; - ops.fill_range = iwl_dump_ini_dev_mem_iter; - iwl_dump_ini_mem(fwrt, data, reg, &ops); - break; - case IWL_FW_INI_REGION_PERIPHERY_MAC: - case IWL_FW_INI_REGION_PERIPHERY_PHY: - case IWL_FW_INI_REGION_PERIPHERY_AUX: - ops.get_num_of_ranges = iwl_dump_ini_mem_ranges; - ops.get_size = iwl_dump_ini_mem_get_size; - ops.fill_mem_hdr = iwl_dump_ini_mem_fill_header; - ops.fill_range = iwl_dump_ini_prph_iter; - iwl_dump_ini_mem(fwrt, data, reg, &ops); - break; - case IWL_FW_INI_REGION_DRAM_BUFFER: - ops.get_num_of_ranges = iwl_dump_ini_mon_dram_ranges; - ops.get_size = iwl_dump_ini_mon_dram_get_size; - ops.fill_mem_hdr = iwl_dump_ini_mon_dram_fill_header; - ops.fill_range = iwl_dump_ini_mon_dram_iter; - iwl_dump_ini_mem(fwrt, data, reg, &ops); - break; - case IWL_FW_INI_REGION_INTERNAL_BUFFER: - ops.get_num_of_ranges = iwl_dump_ini_mem_ranges; - ops.get_size = iwl_dump_ini_mon_smem_get_size; - ops.fill_mem_hdr = iwl_dump_ini_mon_smem_fill_header; - ops.fill_range = iwl_dump_ini_dev_mem_iter; - iwl_dump_ini_mem(fwrt, data, reg, &ops); - break; - case IWL_FW_INI_REGION_PAGING: - ops.fill_mem_hdr = iwl_dump_ini_mem_fill_header; - if (iwl_fw_dbg_is_paging_enabled(fwrt)) { - ops.get_num_of_ranges = - iwl_dump_ini_paging_ranges; - ops.get_size = iwl_dump_ini_paging_get_size; - ops.fill_range = iwl_dump_ini_paging_iter; - } else { - ops.get_num_of_ranges = - iwl_dump_ini_paging_gen2_ranges; - ops.get_size = - iwl_dump_ini_paging_gen2_get_size; - ops.fill_range = iwl_dump_ini_paging_gen2_iter; - } + if (size) + size += iwl_dump_ini_info(fwrt, trigger, list); - iwl_dump_ini_mem(fwrt, data, reg, &ops); - break; - case IWL_FW_INI_REGION_TXF: { - struct iwl_ini_txf_iter_data iter = { .init = true }; - void *fifo_iter = fwrt->dump.fifo_iter; - - fwrt->dump.fifo_iter = &iter; - ops.get_num_of_ranges = iwl_dump_ini_txf_ranges; - ops.get_size = iwl_dump_ini_txf_get_size; - ops.fill_mem_hdr = iwl_dump_ini_mem_fill_header; - ops.fill_range = iwl_dump_ini_txf_iter; - iwl_dump_ini_mem(fwrt, data, reg, &ops); - fwrt->dump.fifo_iter = fifo_iter; - break; - } - case IWL_FW_INI_REGION_RXF: - ops.get_num_of_ranges = iwl_dump_ini_rxf_ranges; - ops.get_size = iwl_dump_ini_rxf_get_size; - ops.fill_mem_hdr = iwl_dump_ini_mem_fill_header; - ops.fill_range = 
iwl_dump_ini_rxf_iter; - iwl_dump_ini_mem(fwrt, data, reg, &ops); - break; - case IWL_FW_INI_REGION_CSR: - ops.get_num_of_ranges = iwl_dump_ini_mem_ranges; - ops.get_size = iwl_dump_ini_mem_get_size; - ops.fill_mem_hdr = iwl_dump_ini_mem_fill_header; - ops.fill_range = iwl_dump_ini_csr_iter; - iwl_dump_ini_mem(fwrt, data, reg, &ops); - break; - case IWL_FW_INI_REGION_DRAM_IMR: - /* This is undefined yet */ - default: - break; - } - } + return size; } -static struct iwl_fw_error_dump_file * -iwl_fw_error_ini_dump_file(struct iwl_fw_runtime *fwrt, - enum iwl_fw_ini_trigger_id trig_id) +static u32 iwl_dump_ini_file_gen(struct iwl_fw_runtime *fwrt, + enum iwl_fw_ini_trigger_id trig_id, + struct list_head *list) { - int size; - struct iwl_fw_error_dump_data *dump_data; - struct iwl_fw_error_dump_file *dump_file; + struct iwl_fw_ini_dump_entry *entry; + struct iwl_fw_ini_dump_file_hdr *hdr; struct iwl_fw_ini_trigger *trigger; + u32 size; if (!iwl_fw_ini_trigger_on(fwrt, trig_id)) - return NULL; + return 0; trigger = fwrt->dump.active_trigs[trig_id].trig; + if (!trigger || !le32_to_cpu(trigger->num_regions)) + return 0; - size = iwl_fw_ini_get_trigger_len(fwrt, trigger); - if (!size) - return NULL; + entry = kmalloc(sizeof(*entry) + sizeof(*hdr), GFP_KERNEL); + if (!entry) + return 0; - size += sizeof(*dump_file); + entry->size = sizeof(*hdr); - dump_file = vzalloc(size); - if (!dump_file) - return NULL; + size = iwl_dump_ini_trigger(fwrt, trigger, list); + if (!size) { + kfree(entry); + return 0; + } - dump_file->barker = cpu_to_le32(IWL_FW_INI_ERROR_DUMP_BARKER); - dump_data = (void *)dump_file->data; - dump_file->file_len = cpu_to_le32(size); + hdr = (void *)entry->data; + hdr->barker = cpu_to_le32(IWL_FW_INI_ERROR_DUMP_BARKER); + hdr->file_len = cpu_to_le32(size + entry->size); - iwl_fw_ini_dump_trigger(fwrt, trigger, &dump_data); + list_add(&entry->list, list); - return dump_file; + return le32_to_cpu(hdr->file_len); } static void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) @@ -2058,27 +1983,44 @@ out: iwl_fw_free_dump_desc(fwrt); } +static void iwl_dump_ini_list_free(struct list_head *list) +{ + while (!list_empty(list)) { + struct iwl_fw_ini_dump_entry *entry = + list_entry(list->next, typeof(*entry), list); + + list_del(&entry->list); + kfree(entry); + } +} + static void iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt, u8 wk_idx) { enum iwl_fw_ini_trigger_id trig_id = fwrt->dump.wks[wk_idx].ini_trig_id; - struct iwl_fw_error_dump_file *dump_file; + struct list_head dump_list = LIST_HEAD_INIT(dump_list); struct scatterlist *sg_dump_data; u32 file_len; - dump_file = iwl_fw_error_ini_dump_file(fwrt, trig_id); - if (!dump_file) + file_len = iwl_dump_ini_file_gen(fwrt, trig_id, &dump_list); + if (!file_len) goto out; - file_len = le32_to_cpu(dump_file->file_len); - sg_dump_data = alloc_sgtable(file_len); if (sg_dump_data) { - sg_pcopy_from_buffer(sg_dump_data, sg_nents(sg_dump_data), - dump_file, file_len, 0); + struct iwl_fw_ini_dump_entry *entry; + int sg_entries = sg_nents(sg_dump_data); + u32 offs = 0; + + list_for_each_entry(entry, &dump_list, list) { + sg_pcopy_from_buffer(sg_dump_data, sg_entries, + entry->data, entry->size, offs); + offs += entry->size; + } dev_coredumpsg(fwrt->trans->dev, sg_dump_data, file_len, GFP_KERNEL); } - vfree(dump_file); + iwl_dump_ini_list_free(&dump_list); + out: fwrt->dump.wks[wk_idx].ini_trig_id = IWL_FW_TRIGGER_ID_INVALID; } @@ -2098,7 +2040,7 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt, u32 trig_type = 
le32_to_cpu(desc->trig_desc.type); int ret; - if (fwrt->trans->dbg.ini_valid) { + if (iwl_trans_dbg_ini_valid(fwrt->trans)) { ret = iwl_fw_dbg_ini_collect(fwrt, trig_type); if (!ret) iwl_fw_free_dump_desc(fwrt); @@ -2220,7 +2162,7 @@ int _iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt, active->trig->occurrences = cpu_to_le32(--occur); if (le32_to_cpu(active->trig->force_restart)) { - IWL_WARN(fwrt, "WRT: force restart: trigger %d fired.\n", id); + IWL_WARN(fwrt, "WRT: Force restart: trigger %d fired.\n", id); iwl_force_nmi(fwrt->trans); return 0; } @@ -2240,7 +2182,7 @@ int _iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt, fwrt->dump.wks[idx].ini_trig_id = id; - IWL_WARN(fwrt, "WRT: collecting data: ini trigger %d fired.\n", id); + IWL_WARN(fwrt, "WRT: Collecting data: ini trigger %d fired.\n", id); schedule_delayed_work(&fwrt->dump.wks[idx].wk, usecs_to_jiffies(delay)); @@ -2372,16 +2314,19 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx) goto out; } - iwl_fw_dbg_stop_recording(fwrt->trans, ¶ms); + if (iwl_fw_dbg_stop_restart_recording(fwrt, ¶ms, true)) { + IWL_ERR(fwrt, "Failed to stop DBGC recording, aborting dump\n"); + goto out; + } - IWL_DEBUG_FW_INFO(fwrt, "WRT: data collection start\n"); - if (fwrt->trans->dbg.ini_valid) + IWL_DEBUG_FW_INFO(fwrt, "WRT: Data collection start\n"); + if (iwl_trans_dbg_ini_valid(fwrt->trans)) iwl_fw_error_ini_dump(fwrt, wk_idx); else iwl_fw_error_dump(fwrt); - IWL_DEBUG_FW_INFO(fwrt, "WRT: data collection done\n"); + IWL_DEBUG_FW_INFO(fwrt, "WRT: Data collection done\n"); - iwl_fw_dbg_restart_recording(fwrt, ¶ms); + iwl_fw_dbg_stop_restart_recording(fwrt, ¶ms, false); out: clear_bit(wk_idx, &fwrt->dump.active_wks); @@ -2432,472 +2377,17 @@ void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt) } IWL_EXPORT_SYMBOL(iwl_fw_dbg_read_d3_debug_data); -static void iwl_fw_dbg_info_apply(struct iwl_fw_runtime *fwrt, - struct iwl_fw_ini_debug_info_tlv *dbg_info, - bool ext, enum iwl_fw_ini_apply_point pnt) -{ - u32 img_name_len = le32_to_cpu(dbg_info->img_name_len); - u32 dbg_cfg_name_len = le32_to_cpu(dbg_info->dbg_cfg_name_len); - - if (img_name_len != IWL_FW_INI_MAX_IMG_NAME_LEN) { - IWL_WARN(fwrt, - "WRT: ext=%d. Invalid image name length %d, expected %d\n", - ext, img_name_len, - IWL_FW_INI_MAX_IMG_NAME_LEN); - return; - } - - if (dbg_cfg_name_len != IWL_FW_INI_MAX_DBG_CFG_NAME_LEN) { - IWL_WARN(fwrt, - "WRT: ext=%d. 
Invalid debug cfg name length %d, expected %d\n", - ext, dbg_cfg_name_len, - IWL_FW_INI_MAX_DBG_CFG_NAME_LEN); - return; - } - - if (ext) { - memcpy(fwrt->dump.external_dbg_cfg_name, dbg_info->dbg_cfg_name, - sizeof(fwrt->dump.external_dbg_cfg_name)); - } else { - memcpy(fwrt->dump.img_name, dbg_info->img_name, - sizeof(fwrt->dump.img_name)); - memcpy(fwrt->dump.internal_dbg_cfg_name, dbg_info->dbg_cfg_name, - sizeof(fwrt->dump.internal_dbg_cfg_name)); - } -} - -static void -iwl_fw_dbg_buffer_allocation(struct iwl_fw_runtime *fwrt, u32 size) -{ - struct iwl_trans *trans = fwrt->trans; - void *virtual_addr = NULL; - dma_addr_t phys_addr; - - if (WARN_ON_ONCE(trans->dbg.num_blocks == - ARRAY_SIZE(trans->dbg.fw_mon))) - return; - - virtual_addr = - dma_alloc_coherent(fwrt->trans->dev, size, &phys_addr, - GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO | - __GFP_COMP); - - /* TODO: alloc fragments if needed */ - if (!virtual_addr) - IWL_ERR(fwrt, "Failed to allocate debug memory\n"); - - IWL_DEBUG_FW(trans, - "Allocated DRAM buffer[%d], size=0x%x\n", - trans->dbg.num_blocks, size); - - trans->dbg.fw_mon[trans->dbg.num_blocks].block = virtual_addr; - trans->dbg.fw_mon[trans->dbg.num_blocks].physical = phys_addr; - trans->dbg.fw_mon[trans->dbg.num_blocks].size = size; - trans->dbg.num_blocks++; -} - -static void iwl_fw_dbg_buffer_apply(struct iwl_fw_runtime *fwrt, - struct iwl_fw_ini_allocation_data *alloc, - enum iwl_fw_ini_apply_point pnt) -{ - struct iwl_trans *trans = fwrt->trans; - struct iwl_ldbg_config_cmd ldbg_cmd = { - .type = cpu_to_le32(BUFFER_ALLOCATION), - }; - struct iwl_buffer_allocation_cmd *cmd = &ldbg_cmd.buffer_allocation; - struct iwl_host_cmd hcmd = { - .id = LDBG_CONFIG_CMD, - .flags = CMD_ASYNC, - .data[0] = &ldbg_cmd, - .len[0] = sizeof(ldbg_cmd), - }; - int block_idx = trans->dbg.num_blocks; - u32 buf_location = le32_to_cpu(alloc->tlv.buffer_location); - - if (fwrt->trans->dbg.ini_dest == IWL_FW_INI_LOCATION_INVALID) - fwrt->trans->dbg.ini_dest = buf_location; - - if (buf_location != fwrt->trans->dbg.ini_dest) { - WARN(fwrt, - "WRT: attempt to override buffer location on apply point %d\n", - pnt); - - return; - } - - if (buf_location == IWL_FW_INI_LOCATION_SRAM_PATH) { - IWL_DEBUG_FW(trans, "WRT: applying SMEM buffer destination\n"); - /* set sram monitor by enabling bit 7 */ - iwl_set_bit(fwrt->trans, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM); - - return; - } - - if (buf_location != IWL_FW_INI_LOCATION_DRAM_PATH) - return; - - if (!alloc->is_alloc) { - iwl_fw_dbg_buffer_allocation(fwrt, - le32_to_cpu(alloc->tlv.size)); - if (block_idx == trans->dbg.num_blocks) - return; - alloc->is_alloc = 1; - } - - /* First block is assigned via registers / context info */ - if (trans->dbg.num_blocks == 1) - return; - - IWL_DEBUG_FW(trans, - "WRT: applying DRAM buffer[%d] destination\n", block_idx); - - cmd->num_frags = cpu_to_le32(1); - cmd->fragments[0].address = - cpu_to_le64(trans->dbg.fw_mon[block_idx].physical); - cmd->fragments[0].size = alloc->tlv.size; - cmd->allocation_id = alloc->tlv.allocation_id; - cmd->buffer_location = alloc->tlv.buffer_location; - - iwl_trans_send_cmd(trans, &hcmd); -} - -static void iwl_fw_dbg_send_hcmd(struct iwl_fw_runtime *fwrt, - struct iwl_ucode_tlv *tlv, - bool ext) -{ - struct iwl_fw_ini_hcmd_tlv *hcmd_tlv = (void *)&tlv->data[0]; - struct iwl_fw_ini_hcmd *data = &hcmd_tlv->hcmd; - u16 len = le32_to_cpu(tlv->length) - sizeof(*hcmd_tlv); - - struct iwl_host_cmd hcmd = { - .id = WIDE_ID(data->group, data->id), - .len = { len, }, - 
.data = { data->data, }, - }; - - /* currently the driver supports always on domain only */ - if (le32_to_cpu(hcmd_tlv->domain) != IWL_FW_INI_DBG_DOMAIN_ALWAYS_ON) - return; - - IWL_DEBUG_FW(fwrt, - "WRT: ext=%d. Sending host command id=0x%x, group=0x%x\n", - ext, data->id, data->group); - - iwl_trans_send_cmd(fwrt->trans, &hcmd); -} - -static void iwl_fw_dbg_update_regions(struct iwl_fw_runtime *fwrt, - struct iwl_fw_ini_region_tlv *tlv, - bool ext, enum iwl_fw_ini_apply_point pnt) -{ - void *iter = (void *)tlv->region_config; - int i, size = le32_to_cpu(tlv->num_regions); - const char *err_st = - "WRT: ext=%d. Invalid region %s %d for apply point %d\n"; - - for (i = 0; i < size; i++) { - struct iwl_fw_ini_region_cfg *reg = iter, **active; - int id = le32_to_cpu(reg->region_id); - u32 type = le32_to_cpu(reg->region_type); - - if (WARN(id >= ARRAY_SIZE(fwrt->dump.active_regs), err_st, ext, - "id", id, pnt)) - break; - - if (WARN(type == 0 || type >= IWL_FW_INI_REGION_NUM, err_st, - ext, "type", type, pnt)) - break; - - active = &fwrt->dump.active_regs[id]; - - if (*active) - IWL_WARN(fwrt->trans, - "WRT: ext=%d. Region id %d override\n", - ext, id); - - IWL_DEBUG_FW(fwrt, - "WRT: ext=%d. Activating region id %d\n", - ext, id); - - *active = reg; - - if (type == IWL_FW_INI_REGION_TXF || - type == IWL_FW_INI_REGION_RXF) - iter += le32_to_cpu(reg->fifos.num_of_registers) * - sizeof(__le32); - else if (type == IWL_FW_INI_REGION_DEVICE_MEMORY || - type == IWL_FW_INI_REGION_PERIPHERY_MAC || - type == IWL_FW_INI_REGION_PERIPHERY_PHY || - type == IWL_FW_INI_REGION_PERIPHERY_AUX || - type == IWL_FW_INI_REGION_INTERNAL_BUFFER || - type == IWL_FW_INI_REGION_PAGING || - type == IWL_FW_INI_REGION_CSR || - type == IWL_FW_INI_REGION_LMAC_ERROR_TABLE || - type == IWL_FW_INI_REGION_UMAC_ERROR_TABLE) - iter += le32_to_cpu(reg->internal.num_of_ranges) * - sizeof(__le32); - - iter += sizeof(*reg); - } -} - -static int iwl_fw_dbg_trig_realloc(struct iwl_fw_runtime *fwrt, - struct iwl_fw_ini_active_triggers *active, - u32 id, int size) -{ - void *ptr; - - if (size <= active->size) - return 0; - - ptr = krealloc(active->trig, size, GFP_KERNEL); - if (!ptr) { - IWL_ERR(fwrt, "WRT: Failed to allocate memory for trigger %d\n", - id); - return -ENOMEM; - } - active->trig = ptr; - active->size = size; - - return 0; -} - -static void iwl_fw_dbg_update_triggers(struct iwl_fw_runtime *fwrt, - struct iwl_fw_ini_trigger_tlv *tlv, - bool ext, - enum iwl_fw_ini_apply_point apply_point) -{ - int i, size = le32_to_cpu(tlv->num_triggers); - void *iter = (void *)tlv->trigger_config; - - for (i = 0; i < size; i++) { - struct iwl_fw_ini_trigger *trig = iter; - struct iwl_fw_ini_active_triggers *active; - int id = le32_to_cpu(trig->trigger_id); - u32 trig_regs_size = le32_to_cpu(trig->num_regions) * - sizeof(__le32); - - if (WARN(id >= ARRAY_SIZE(fwrt->dump.active_trigs), - "WRT: ext=%d. Invalid trigger id %d for apply point %d\n", - ext, id, apply_point)) - break; - - active = &fwrt->dump.active_trigs[id]; - - if (!active->active) { - size_t trig_size = sizeof(*trig) + trig_regs_size; - - IWL_DEBUG_FW(fwrt, - "WRT: ext=%d. 
Activating trigger %d\n", - ext, id); - - if (iwl_fw_dbg_trig_realloc(fwrt, active, id, - trig_size)) - goto next; - - memcpy(active->trig, trig, trig_size); - - } else { - u32 conf_override = - !(le32_to_cpu(trig->override_trig) & 0xff); - u32 region_override = - !(le32_to_cpu(trig->override_trig) & 0xff00); - u32 offset = 0; - u32 active_regs = - le32_to_cpu(active->trig->num_regions); - u32 new_regs = le32_to_cpu(trig->num_regions); - int mem_to_add = trig_regs_size; - - if (region_override) { - IWL_DEBUG_FW(fwrt, - "WRT: ext=%d. Trigger %d regions override\n", - ext, id); - - mem_to_add -= active_regs * sizeof(__le32); - } else { - IWL_DEBUG_FW(fwrt, - "WRT: ext=%d. Trigger %d regions appending\n", - ext, id); - - offset += active_regs; - new_regs += active_regs; - } - - if (iwl_fw_dbg_trig_realloc(fwrt, active, id, - active->size + mem_to_add)) - goto next; - - if (conf_override) { - IWL_DEBUG_FW(fwrt, - "WRT: ext=%d. Trigger %d configuration override\n", - ext, id); - - memcpy(active->trig, trig, sizeof(*trig)); - } - - memcpy(active->trig->data + offset, trig->data, - trig_regs_size); - active->trig->num_regions = cpu_to_le32(new_regs); - } - - /* Since zero means infinity - just set to -1 */ - if (!le32_to_cpu(active->trig->occurrences)) - active->trig->occurrences = cpu_to_le32(-1); - - active->active = true; - - if (id == IWL_FW_TRIGGER_ID_PERIODIC_TRIGGER) { - u32 collect_interval = le32_to_cpu(trig->trigger_data); - - /* the minimum allowed interval is 50ms */ - if (collect_interval < 50) { - collect_interval = 50; - trig->trigger_data = - cpu_to_le32(collect_interval); - } - - mod_timer(&fwrt->dump.periodic_trig, - jiffies + msecs_to_jiffies(collect_interval)); - } -next: - iter += sizeof(*trig) + trig_regs_size; - - } -} - -static void _iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt, - struct iwl_apply_point_data *data, - enum iwl_fw_ini_apply_point pnt, - bool ext) -{ - void *iter = data->data; - - while (iter && iter < data->data + data->size) { - struct iwl_ucode_tlv *tlv = iter; - void *ini_tlv = (void *)tlv->data; - u32 type = le32_to_cpu(tlv->type); - - switch (type) { - case IWL_UCODE_TLV_TYPE_DEBUG_INFO: - iwl_fw_dbg_info_apply(fwrt, ini_tlv, ext, pnt); - break; - case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION: { - struct iwl_fw_ini_allocation_data *buf_alloc = ini_tlv; - - if (pnt != IWL_FW_INI_APPLY_EARLY) { - IWL_ERR(fwrt, - "WRT: ext=%d. Invalid apply point %d for buffer allocation\n", - ext, pnt); - goto next; - } - - iwl_fw_dbg_buffer_apply(fwrt, ini_tlv, pnt); - iter += sizeof(buf_alloc->is_alloc); - break; - } - case IWL_UCODE_TLV_TYPE_HCMD: - if (pnt < IWL_FW_INI_APPLY_AFTER_ALIVE) { - IWL_ERR(fwrt, - "WRT: ext=%d. Invalid apply point %d for host command\n", - ext, pnt); - goto next; - } - iwl_fw_dbg_send_hcmd(fwrt, tlv, ext); - break; - case IWL_UCODE_TLV_TYPE_REGIONS: - iwl_fw_dbg_update_regions(fwrt, ini_tlv, ext, pnt); - break; - case IWL_UCODE_TLV_TYPE_TRIGGERS: - iwl_fw_dbg_update_triggers(fwrt, ini_tlv, ext, pnt); - break; - case IWL_UCODE_TLV_TYPE_DEBUG_FLOW: - break; - default: - WARN_ONCE(1, - "WRT: ext=%d. 
Invalid TLV 0x%x for apply point\n", - ext, type); - break; - } -next: - iter += sizeof(*tlv) + le32_to_cpu(tlv->length); - } -} - -static void iwl_fw_dbg_ini_reset_cfg(struct iwl_fw_runtime *fwrt) +void iwl_fw_dbg_stop_sync(struct iwl_fw_runtime *fwrt) { int i; - for (i = 0; i < IWL_FW_INI_MAX_REGION_ID; i++) - fwrt->dump.active_regs[i] = NULL; - - /* disable the triggers, used in recovery flow */ - for (i = 0; i < IWL_FW_TRIGGER_ID_NUM; i++) - fwrt->dump.active_trigs[i].active = false; - - memset(fwrt->dump.img_name, 0, - sizeof(fwrt->dump.img_name)); - memset(fwrt->dump.internal_dbg_cfg_name, 0, - sizeof(fwrt->dump.internal_dbg_cfg_name)); - memset(fwrt->dump.external_dbg_cfg_name, 0, - sizeof(fwrt->dump.external_dbg_cfg_name)); - - fwrt->trans->dbg.ini_dest = IWL_FW_INI_LOCATION_INVALID; -} - -void iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt, - enum iwl_fw_ini_apply_point apply_point) -{ - void *data = &fwrt->trans->dbg.apply_points[apply_point]; - - IWL_DEBUG_FW(fwrt, "WRT: enabling apply point %d\n", apply_point); - - if (apply_point == IWL_FW_INI_APPLY_EARLY) - iwl_fw_dbg_ini_reset_cfg(fwrt); - - _iwl_fw_dbg_apply_point(fwrt, data, apply_point, false); - - data = &fwrt->trans->dbg.apply_points_ext[apply_point]; - _iwl_fw_dbg_apply_point(fwrt, data, apply_point, true); -} -IWL_EXPORT_SYMBOL(iwl_fw_dbg_apply_point); - -void iwl_fwrt_stop_device(struct iwl_fw_runtime *fwrt) -{ - int i; - - del_timer(&fwrt->dump.periodic_trig); + iwl_dbg_tlv_del_timers(fwrt->trans); for (i = 0; i < IWL_FW_RUNTIME_DUMP_WK_NUM; i++) iwl_fw_dbg_collect_sync(fwrt, i); - iwl_trans_stop_device(fwrt->trans); -} -IWL_EXPORT_SYMBOL(iwl_fwrt_stop_device); - -void iwl_fw_dbg_periodic_trig_handler(struct timer_list *t) -{ - struct iwl_fw_runtime *fwrt; - enum iwl_fw_ini_trigger_id id = IWL_FW_TRIGGER_ID_PERIODIC_TRIGGER; - int ret; - typeof(fwrt->dump) *dump_ptr = container_of(t, typeof(fwrt->dump), - periodic_trig); - - fwrt = container_of(dump_ptr, typeof(*fwrt), dump); - - ret = _iwl_fw_dbg_ini_collect(fwrt, id); - if (!ret || ret == -EBUSY) { - struct iwl_fw_ini_trigger *trig = - fwrt->dump.active_trigs[id].trig; - u32 occur = le32_to_cpu(trig->occurrences); - u32 collect_interval = le32_to_cpu(trig->trigger_data); - - if (!occur) - return; - - mod_timer(&fwrt->dump.periodic_trig, - jiffies + msecs_to_jiffies(collect_interval)); - } + iwl_fw_dbg_stop_restart_recording(fwrt, NULL, true); } +IWL_EXPORT_SYMBOL(iwl_fw_dbg_stop_sync); #define FSEQ_REG(x) { .addr = (x), .str = #x, } @@ -2937,3 +2427,92 @@ void iwl_fw_error_print_fseq_regs(struct iwl_fw_runtime *fwrt) iwl_trans_release_nic_access(trans, &flags); } IWL_EXPORT_SYMBOL(iwl_fw_error_print_fseq_regs); + +static int iwl_fw_dbg_suspend_resume_hcmd(struct iwl_trans *trans, bool suspend) +{ + struct iwl_dbg_suspend_resume_cmd cmd = { + .operation = suspend ? 
+ cpu_to_le32(DBGC_SUSPEND_CMD) : + cpu_to_le32(DBGC_RESUME_CMD), + }; + struct iwl_host_cmd hcmd = { + .id = WIDE_ID(DEBUG_GROUP, DBGC_SUSPEND_RESUME), + .data[0] = &cmd, + .len[0] = sizeof(cmd), + }; + + return iwl_trans_send_cmd(trans, &hcmd); +} + +static void iwl_fw_dbg_stop_recording(struct iwl_trans *trans, + struct iwl_fw_dbg_params *params) +{ + if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) { + iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100); + return; + } + + if (params) { + params->in_sample = iwl_read_umac_prph(trans, DBGC_IN_SAMPLE); + params->out_ctrl = iwl_read_umac_prph(trans, DBGC_OUT_CTRL); + } + + iwl_write_umac_prph(trans, DBGC_IN_SAMPLE, 0); + /* wait for the DBGC to finish writing the internal buffer to DRAM to + * avoid halting the HW while writing + */ + usleep_range(700, 1000); + iwl_write_umac_prph(trans, DBGC_OUT_CTRL, 0); +} + +static int iwl_fw_dbg_restart_recording(struct iwl_trans *trans, + struct iwl_fw_dbg_params *params) +{ + if (!params) + return -EIO; + + if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) { + iwl_clear_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100); + iwl_clear_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x1); + iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x1); + } else { + iwl_write_umac_prph(trans, DBGC_IN_SAMPLE, params->in_sample); + iwl_write_umac_prph(trans, DBGC_OUT_CTRL, params->out_ctrl); + } + + return 0; +} + +int iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt, + struct iwl_fw_dbg_params *params, + bool stop) +{ + int ret = 0; + + /* if the FW crashed or no debug monitor cfg was given, there is + * no point in changing the recording state + */ + if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status) || + (!fwrt->trans->dbg.dest_tlv && + fwrt->trans->dbg.ini_dest == IWL_FW_INI_LOCATION_INVALID)) + return 0; + + if (fw_has_capa(&fwrt->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_DBG_SUSPEND_RESUME_CMD_SUPP)) + ret = iwl_fw_dbg_suspend_resume_hcmd(fwrt->trans, stop); + else if (stop) + iwl_fw_dbg_stop_recording(fwrt->trans, params); + else + ret = iwl_fw_dbg_restart_recording(fwrt->trans, params); +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (!ret) { + if (stop) + fwrt->trans->dbg.rec_on = false; + else + iwl_fw_set_dbg_rec_on(fwrt); + } +#endif + + return ret; +} +IWL_EXPORT_SYMBOL(iwl_fw_dbg_stop_restart_recording); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h index a8459ac71b2c..e3b5dd34643f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h @@ -202,7 +202,7 @@ _iwl_fw_dbg_trigger_on(struct iwl_fw_runtime *fwrt, { struct iwl_fw_dbg_trigger_tlv *trig; - if (fwrt->trans->dbg.ini_valid) + if (iwl_trans_dbg_ini_valid(fwrt->trans)) return NULL; if (!iwl_fw_dbg_trigger_enabled(fwrt->fw, id)) @@ -229,8 +229,9 @@ iwl_fw_ini_trigger_on(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_trigger *trig; u32 usec; - if (!fwrt->trans->dbg.ini_valid || id == IWL_FW_TRIGGER_ID_INVALID || - id >= IWL_FW_TRIGGER_ID_NUM || !fwrt->dump.active_trigs[id].active) + if (!iwl_trans_dbg_ini_valid(fwrt->trans) || + id == IWL_FW_TRIGGER_ID_INVALID || id >= IWL_FW_TRIGGER_ID_NUM || + !fwrt->dump.active_trigs[id].active) return false; trig = fwrt->dump.active_trigs[id].trig; @@ -262,69 +263,9 @@ _iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt, _iwl_fw_dbg_trigger_simple_stop((fwrt), (wdev), \ iwl_fw_dbg_get_trigger((fwrt)->fw,\ (trig))) - -static inline void -_iwl_fw_dbg_stop_recording(struct iwl_trans *trans, -
struct iwl_fw_dbg_params *params) -{ - if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) { - iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100); - return; - } - - if (params) { - params->in_sample = iwl_read_umac_prph(trans, DBGC_IN_SAMPLE); - params->out_ctrl = iwl_read_umac_prph(trans, DBGC_OUT_CTRL); - } - - iwl_write_umac_prph(trans, DBGC_IN_SAMPLE, 0); - /* wait for the DBGC to finish writing the internal buffer to DRAM to - * avoid halting the HW while writing - */ - usleep_range(700, 1000); - iwl_write_umac_prph(trans, DBGC_OUT_CTRL, 0); -#ifdef CONFIG_IWLWIFI_DEBUGFS - trans->dbg.rec_on = false; -#endif -} - -static inline void -iwl_fw_dbg_stop_recording(struct iwl_trans *trans, - struct iwl_fw_dbg_params *params) -{ - /* if the FW crashed or not debug monitor cfg was given, there is - * no point in stopping - */ - if (test_bit(STATUS_FW_ERROR, &trans->status) || - (!trans->dbg.dest_tlv && - trans->dbg.ini_dest == IWL_FW_INI_LOCATION_INVALID)) - return; - - if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) { - IWL_ERR(trans, - "WRT: unsupported device family %d for debug stop recording\n", - trans->cfg->device_family); - return; - } - _iwl_fw_dbg_stop_recording(trans, params); -} - -static inline void -_iwl_fw_dbg_restart_recording(struct iwl_trans *trans, - struct iwl_fw_dbg_params *params) -{ - if (WARN_ON(!params)) - return; - - if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) { - iwl_clear_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100); - iwl_clear_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x1); - iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x1); - } else { - iwl_write_umac_prph(trans, DBGC_IN_SAMPLE, params->in_sample); - iwl_write_umac_prph(trans, DBGC_OUT_CTRL, params->out_ctrl); - } -} +int iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt, + struct iwl_fw_dbg_params *params, + bool stop); #ifdef CONFIG_IWLWIFI_DEBUGFS static inline void iwl_fw_set_dbg_rec_on(struct iwl_fw_runtime *fwrt) @@ -336,30 +277,6 @@ static inline void iwl_fw_set_dbg_rec_on(struct iwl_fw_runtime *fwrt) } #endif -static inline void -iwl_fw_dbg_restart_recording(struct iwl_fw_runtime *fwrt, - struct iwl_fw_dbg_params *params) -{ - /* if the FW crashed or not debug monitor cfg was given, there is - * no point in restarting - */ - if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status) || - (!fwrt->trans->dbg.dest_tlv && - fwrt->trans->dbg.ini_dest == IWL_FW_INI_LOCATION_INVALID)) - return; - - if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) { - IWL_ERR(fwrt, - "WRT: unsupported device family %d for debug restart recording\n", - fwrt->trans->cfg->device_family); - return; - } - _iwl_fw_dbg_restart_recording(fwrt->trans, params); -#ifdef CONFIG_IWLWIFI_DEBUGFS - iwl_fw_set_dbg_rec_on(fwrt); -#endif -} - static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt) { fwrt->dump.conf = FW_DBG_INVALID; @@ -385,7 +302,7 @@ static inline bool iwl_fw_dbg_is_d3_debug_enabled(struct iwl_fw_runtime *fwrt) static inline bool iwl_fw_dbg_is_paging_enabled(struct iwl_fw_runtime *fwrt) { return iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PAGING) && - !fwrt->trans->cfg->gen2 && + !fwrt->trans->trans_cfg->gen2 && fwrt->cur_fw_img < IWL_UCODE_TYPE_MAX && fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size && fwrt->fw_paging_db[0].fw_paging_block; @@ -397,24 +314,13 @@ static inline void iwl_fw_flush_dumps(struct iwl_fw_runtime *fwrt) { int i; - del_timer(&fwrt->dump.periodic_trig); + iwl_dbg_tlv_del_timers(fwrt->trans); for (i = 0; i < IWL_FW_RUNTIME_DUMP_WK_NUM; i++) { 
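/* flush, don't cancel: let an already-running dump finish before its
 * trigger id is reset
 */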
flush_delayed_work(&fwrt->dump.wks[i].wk); fwrt->dump.wks[i].ini_trig_id = IWL_FW_TRIGGER_ID_INVALID; } } -static inline void iwl_fw_cancel_dumps(struct iwl_fw_runtime *fwrt) -{ - int i; - - del_timer(&fwrt->dump.periodic_trig); - for (i = 0; i < IWL_FW_RUNTIME_DUMP_WK_NUM; i++) { - cancel_delayed_work_sync(&fwrt->dump.wks[i].wk); - fwrt->dump.wks[i].ini_trig_id = IWL_FW_TRIGGER_ID_INVALID; - } -} - #ifdef CONFIG_IWLWIFI_DEBUGFS static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt) { @@ -451,10 +357,7 @@ static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) {} #endif /* CONFIG_IWLWIFI_DEBUGFS */ -void iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt, - enum iwl_fw_ini_apply_point apply_point); - -void iwl_fwrt_stop_device(struct iwl_fw_runtime *fwrt); +void iwl_fw_dbg_stop_sync(struct iwl_fw_runtime *fwrt); static inline void iwl_fw_lmac1_set_alive_err_table(struct iwl_trans *trans, u32 lmac_error_event_table) @@ -478,7 +381,7 @@ static inline void iwl_fw_umac_set_alive_err_table(struct iwl_trans *trans, static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt) { - if (fwrt->trans->dbg.ini_valid && fwrt->trans->dbg.hw_error) { + if (iwl_trans_dbg_ini_valid(fwrt->trans) && fwrt->trans->dbg.hw_error) { _iwl_fw_dbg_ini_collect(fwrt, IWL_FW_TRIGGER_ID_FW_HW_ERROR); fwrt->trans->dbg.hw_error = false; } else { @@ -486,8 +389,6 @@ static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt) } } -void iwl_fw_dbg_periodic_trig_handler(struct timer_list *t); - void iwl_fw_error_print_fseq_regs(struct iwl_fw_runtime *fwrt); static inline void iwl_fwrt_update_fw_versions(struct iwl_fw_runtime *fwrt, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h index 00a45ea85b69..2e763678dbdb 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h @@ -288,6 +288,28 @@ struct iwl_fw_error_dump_mem { #define IWL_INI_DUMP_INFO_TYPE BIT(31) /** + * struct iwl_fw_ini_dump_entry + * @list: list of dump entries + * @size: size of the data + * @data: entry data + */ +struct iwl_fw_ini_dump_entry { + struct list_head list; + u32 size; + u8 data[]; +} __packed; + +/** + * struct iwl_fw_ini_dump_file_hdr - header of dump file + * @barker: must be %IWL_FW_INI_ERROR_DUMP_BARKER + * @file_len: the length of the whole file, including the header + */ +struct iwl_fw_ini_dump_file_hdr { + __le32 barker; + __le32 file_len; +} __packed; + +/** * struct iwl_fw_ini_fifo_hdr - fifo range header * @fifo_num: the fifo number.
In case of umac rx fifo, set BIT(31) to * distinguish between lmac and umac rx fifos @@ -301,10 +323,10 @@ struct iwl_fw_ini_fifo_hdr { /** * struct iwl_fw_ini_error_dump_range - range of memory * @range_data_size: the size of this range, in bytes - * @internal_base_addr - base address of internal memory range - * @dram_base_addr - base address of dram monitor range - * @page_num - page number of memory range - * @fifo_hdr - fifo header of memory range + * @internal_base_addr: base address of internal memory range + * @dram_base_addr: base address of dram monitor range + * @page_num: page number of memory range + * @fifo_hdr: fifo header of memory range * @data: the actual memory */ struct iwl_fw_ini_error_dump_range { @@ -432,10 +454,10 @@ struct iwl_fw_error_dump_rb { /** * struct iwl_fw_ini_monitor_dump - ini monitor dump - * @header - header of the region - * @write_ptr - write pointer position in the buffer - * @cycle_cnt - cycles count - * @ranges - the memory ranges of this this region + * @header: header of the region + * @write_ptr: write pointer position in the buffer + * @cycle_cnt: cycles count + * @ranges: the memory ranges of this region */ struct iwl_fw_ini_monitor_dump { struct iwl_fw_ini_error_dump_header header; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h index 0c38e7392b61..423cc0cf8e78 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h @@ -157,8 +157,7 @@ enum iwl_ucode_tlv_type { IWL_UCODE_TLV_TYPE_HCMD = IWL_UCODE_TLV_DEBUG_BASE + 2, IWL_UCODE_TLV_TYPE_REGIONS = IWL_UCODE_TLV_DEBUG_BASE + 3, IWL_UCODE_TLV_TYPE_TRIGGERS = IWL_UCODE_TLV_DEBUG_BASE + 4, - IWL_UCODE_TLV_TYPE_DEBUG_FLOW = IWL_UCODE_TLV_DEBUG_BASE + 5, - IWL_UCODE_TLV_DEBUG_MAX = IWL_UCODE_TLV_TYPE_DEBUG_FLOW, + IWL_UCODE_TLV_DEBUG_MAX = IWL_UCODE_TLV_TYPE_TRIGGERS, /* TLVs 0x1000-0x2000 are for internal driver usage */ IWL_UCODE_TLV_FW_DBG_DUMP_LST = 0x1000, @@ -323,6 +322,7 @@ enum iwl_ucode_tlv_api { IWL_UCODE_TLV_API_FTM_RTT_ACCURACY = (__force iwl_ucode_tlv_api_t)54, IWL_UCODE_TLV_API_SAR_TABLE_VER = (__force iwl_ucode_tlv_api_t)55, IWL_UCODE_TLV_API_ADWELL_HB_DEF_N_AP = (__force iwl_ucode_tlv_api_t)57, + IWL_UCODE_TLV_API_SCAN_EXT_CHAN_VER = (__force iwl_ucode_tlv_api_t)58, NUM_IWL_UCODE_TLV_API #ifdef __CHECKER__ @@ -441,9 +441,11 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA = (__force iwl_ucode_tlv_capa_t)44, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2 = (__force iwl_ucode_tlv_capa_t)45, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD = (__force iwl_ucode_tlv_capa_t)46, - IWL_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS = (__force iwl_ucode_tlv_capa_t)48, IWL_UCODE_TLV_CAPA_FTM_CALIBRATED = (__force iwl_ucode_tlv_capa_t)47, + IWL_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS = (__force iwl_ucode_tlv_capa_t)48, IWL_UCODE_TLV_CAPA_CS_MODIFY = (__force iwl_ucode_tlv_capa_t)49, + IWL_UCODE_TLV_CAPA_SET_LTR_GEN2 = (__force iwl_ucode_tlv_capa_t)50, + IWL_UCODE_TLV_CAPA_SET_PPAG = (__force iwl_ucode_tlv_capa_t)52, /* set 2 */ IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64, @@ -465,6 +467,8 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_LED_CMD_SUPPORT = (__force iwl_ucode_tlv_capa_t)88, IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT = (__force iwl_ucode_tlv_capa_t)89, IWL_UCODE_TLV_CAPA_CSI_REPORTING = (__force iwl_ucode_tlv_capa_t)90, + IWL_UCODE_TLV_CAPA_DBG_SUSPEND_RESUME_CMD_SUPP = (__force iwl_ucode_tlv_capa_t)92, + IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP = (__force
iwl_ucode_tlv_capa_t)93, /* set 3 */ IWL_UCODE_TLV_CAPA_MLME_OFFLOAD = (__force iwl_ucode_tlv_capa_t)96, @@ -521,6 +525,10 @@ enum iwl_fw_phy_cfg { FW_PHY_CFG_TX_CHAIN = 0xf << FW_PHY_CFG_TX_CHAIN_POS, FW_PHY_CFG_RX_CHAIN_POS = 20, FW_PHY_CFG_RX_CHAIN = 0xf << FW_PHY_CFG_RX_CHAIN_POS, + FW_PHY_CFG_CHAIN_SAD_POS = 23, + FW_PHY_CFG_CHAIN_SAD_ENABLED = 0x1 << FW_PHY_CFG_CHAIN_SAD_POS, + FW_PHY_CFG_CHAIN_SAD_ANT_A = 0x2 << FW_PHY_CFG_CHAIN_SAD_POS, + FW_PHY_CFG_CHAIN_SAD_ANT_B = 0x4 << FW_PHY_CFG_CHAIN_SAD_POS, FW_PHY_CFG_SHARED_CLK = BIT(31), }; @@ -965,4 +973,19 @@ struct iwl_fw_cmd_version { u8 notif_ver; } __packed; +static inline size_t _iwl_tlv_array_len(const struct iwl_ucode_tlv *tlv, + size_t fixed_size, size_t var_size) +{ + size_t var_len = le32_to_cpu(tlv->length) - fixed_size; + + if (WARN_ON(var_len % var_size)) + return 0; + + return var_len / var_size; +} + +#define iwl_tlv_array_len(_tlv_ptr, _struct_ptr, _memb) \ + _iwl_tlv_array_len((_tlv_ptr), sizeof(*(_struct_ptr)), \ + sizeof(_struct_ptr->_memb[0])) + #endif /* __iwl_fw_file_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h index 18ca5f152be6..039576d71276 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/img.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h @@ -228,15 +228,6 @@ struct iwl_fw_dbg { }; /** - * @tlv: the buffer allocation tlv - * @is_alloc: indicates if the buffer was already allocated - */ -struct iwl_fw_ini_allocation_data { - struct iwl_fw_ini_allocation_tlv tlv; - u32 is_alloc; -} __packed; - -/** * struct iwl_fw_ini_active_triggers * @active: is this trigger active * @size: allocated memory size of the trigger diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c index c16d6e126e3c..ba00d162ce72 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/init.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c @@ -81,8 +81,6 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans, INIT_DELAYED_WORK(&fwrt->dump.wks[i].wk, iwl_fw_error_dump_wk); } iwl_fwrt_dbgfs_register(fwrt, dbgfs_dir); - timer_setup(&fwrt->dump.periodic_trig, - iwl_fw_dbg_periodic_trig_handler, 0); } IWL_EXPORT_SYMBOL(iwl_fw_runtime_init); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/paging.c b/drivers/net/wireless/intel/iwlwifi/fw/paging.c index 9b8dd7fe7112..2bd76bd9dfa5 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/paging.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/paging.c @@ -8,7 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -322,7 +322,7 @@ int iwl_init_paging(struct iwl_fw_runtime *fwrt, enum iwl_ucode_type type) const struct fw_img *fw = &fwrt->fw->img[type]; int ret; - if (fwrt->trans->cfg->gen2) + if (fwrt->trans->trans_cfg->gen2) return 0; /* diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h index 406ef73992c1..be436c18a047 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h @@ -64,7 +64,6 @@ #include "iwl-trans.h" #include "img.h" #include "fw/api/debug.h" -#include "fw/api/dbg-tlv.h" #include "fw/api/paging.h" #include "iwl-eeprom-parse.h" @@ -92,6 +91,20 @@ struct iwl_fwrt_shared_mem_cfg { #define IWL_FW_RUNTIME_DUMP_WK_NUM 5 /** + * struct iwl_txf_iter_data - Tx fifo iterator data struct + * @fifo: fifo number + * @lmac: lmac number + * @fifo_size: fifo size + * @internal_txf: non-zero if fifo is internal Tx fifo + */ +struct iwl_txf_iter_data { + int fifo; + int lmac; + u32 fifo_size; + u8 internal_txf; +}; + +/** * struct iwl_fw_runtime - runtime data for firmware * @fw: firmware image * @cfg: NIC configuration @@ -144,8 +157,8 @@ struct iwl_fw_runtime { struct iwl_fw_ini_active_triggers active_trigs[IWL_FW_TRIGGER_ID_NUM]; u32 lmac_err_id[MAX_NUM_LMAC]; u32 umac_err_id; - void *fifo_iter; - struct timer_list periodic_trig; + + struct iwl_txf_iter_data txf_iter_data; u8 img_name[IWL_FW_INI_MAX_IMG_NAME_LEN]; u8 internal_dbg_cfg_name[IWL_FW_INI_MAX_DBG_CFG_NAME_LEN]; @@ -190,6 +203,10 @@ static inline void iwl_fw_runtime_free(struct iwl_fw_runtime *fwrt) kfree(active->trig); active->trig = NULL; } + + iwl_dbg_tlv_del_timers(fwrt->trans); + for (i = 0; i < IWL_FW_RUNTIME_DUMP_WK_NUM; i++) + cancel_delayed_work_sync(&fwrt->dump.wks[i].wk); } void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/smem.c b/drivers/net/wireless/intel/iwlwifi/fw/smem.c index 557ee47bffd8..409b2dd854ac 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/smem.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/smem.c @@ -151,7 +151,7 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt) } pkt = cmd.resp_pkt; - if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) + if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) iwl_parse_shared_mem_22000(fwrt, pkt); else iwl_parse_shared_mem(fwrt, pkt); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 6c04f8223aff..214495a7165f 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -161,7 +161,8 @@ static inline u8 num_of_ant(u8 mask) !!((mask) & ANT_C); } -/* +/** + * struct iwl_base_params - params not likely to change within a device family * @max_ll_items: max number of OTP blocks * @shadow_ram_support: shadow support for OTP memory * @led_compensation: compensate on the led on/off time per HW according @@ -331,7 +332,36 @@ struct iwl_csr_params { }; /** + * struct iwl_cfg_trans_params - information needed to start the trans + * + * These values cannot be changed when multiple configs are used for a + * single PCI ID, because they are needed before the HW REV or RFID + * can be read.
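+ * In other words, the transport must be able to come up knowing only the
+ * PCI ID, so everything required for that lives in this struct.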
+ * + * @base_params: pointer to basic parameters + * @csr: csr flags and addresses that are different across devices + * @device_family: the device family + * @umac_prph_offset: offset to add to UMAC periphery address + * @rf_id: need to read rf_id to determine the firmware image + * @use_tfh: use TFH + * @gen2: 22000 and on transport operation + * @mq_rx_supported: multi-queue rx support + */ +struct iwl_cfg_trans_params { + const struct iwl_base_params *base_params; + const struct iwl_csr_params *csr; + enum iwl_device_family device_family; + u32 umac_prph_offset; + u32 rf_id:1, + use_tfh:1, + gen2:1, + mq_rx_supported:1, + bisr_workaround:1; +}; + +/** * struct iwl_cfg + * @trans: the trans-specific configuration part * @name: Official name of the device * @fw_name_pre: Firmware filename prefix. The api version and extension * (.ucode) will be added to filename before loading from disk. The @@ -346,10 +376,10 @@ struct iwl_csr_params { * @nvm_ver: NVM version * @nvm_calib_ver: NVM calibration version * @lib: pointer to the lib ops - * @base_params: pointer to basic parameters * @ht_params: point to ht parameters * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off) * @rx_with_siso_diversity: 1x1 device with rx antenna diversity + * @tx_with_siso_diversity: 1x1 device with tx antenna diversity * @internal_wimax_coex: internal wifi/wimax combo device * @high_temp: Is this NIC is designated to be in high temperature. * @host_interrupt_operation_mode: device needs host interrupt operation @@ -358,7 +388,6 @@ struct iwl_csr_params { * @mac_addr_from_csr: read HW address from CSR registers * @features: hw features, any combination of feature_whitelist * @pwr_tx_backoffs: translation table between power limits and backoffs - * @csr: csr flags and addresses that are different across devices * @max_rx_agg_size: max RX aggregation size of the ADDBA request/response * @max_tx_agg_size: max TX aggregation size of the ADDBA request/response * @max_ht_ampdu_factor: the exponent of the max length of A-MPDU that the @@ -371,18 +400,14 @@ struct iwl_csr_params { * @dccm2_len: length of the second DCCM * @smem_offset: offset from which the SMEM begins * @smem_len: the length of SMEM - * @mq_rx_supported: multi-queue rx support * @vht_mu_mimo_supported: VHT MU-MIMO support - * @rf_id: need to read rf_id to determine the firmware image * @integrated: discrete or integrated - * @gen2: 22000 and on transport operation * @cdb: CDB support * @nvm_type: see &enum iwl_nvm_type * @d3_debug_data_base_addr: base address where D3 debug data is stored * @d3_debug_data_length: length of the D3 debug data * @bisr_workaround: BISR hardware workaround (for 22260 series devices) * @min_txq_size: minimum number of slots required in a TX queue - * @umac_prph_offset: offset to add to UMAC periphery address * @uhb_supported: ultra high band channels supported * @min_256_ba_txq_size: minimum number of slots required in a TX queue which * supports 256 BA aggregation @@ -392,19 +417,16 @@ struct iwl_csr_params { * and/or the uCode API version instead. 
*/ struct iwl_cfg { + struct iwl_cfg_trans_params trans; /* params specific to an individual device within a device family */ const char *name; const char *fw_name_pre; - /* params not likely to change within a device family */ - const struct iwl_base_params *base_params; /* params likely to change within a device family */ const struct iwl_ht_params *ht_params; const struct iwl_eeprom_params *eeprom_params; const struct iwl_pwr_tx_backoff *pwr_tx_backoffs; const char *default_nvm_file_C_step; const struct iwl_tt_params *thermal_params; - const struct iwl_csr_params *csr; - enum iwl_device_family device_family; enum iwl_led_mode led_mode; enum iwl_nvm_type nvm_type; u32 max_data_size; @@ -420,6 +442,7 @@ struct iwl_cfg { u16 nvm_ver; u16 nvm_calib_ver; u32 rx_with_siso_diversity:1, + tx_with_siso_diversity:1, bt_shared_single_ant:1, internal_wimax_coex:1, host_interrupt_operation_mode:1, @@ -428,15 +451,10 @@ struct iwl_cfg { lp_xtal_workaround:1, disable_dummy_notification:1, apmg_not_supported:1, - mq_rx_supported:1, vht_mu_mimo_supported:1, - rf_id:1, integrated:1, - use_tfh:1, - gen2:1, cdb:1, dbgc_supported:1, - bisr_workaround:1, uhb_supported:1; u8 valid_tx_ant; u8 valid_rx_ant; @@ -453,7 +471,6 @@ struct iwl_cfg { u32 d3_debug_data_base_addr; u32 d3_debug_data_length; u32 min_txq_size; - u32 umac_prph_offset; u32 fw_mon_smem_write_ptr_addr; u32 fw_mon_smem_write_ptr_msk; u32 fw_mon_smem_cycle_cnt_ptr_addr; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c index fcaec410b3be..3d7f8ff8ef58 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c @@ -60,149 +60,112 @@ *****************************************************************************/ #include <linux/firmware.h> +#include "iwl-drv.h" #include "iwl-trans.h" #include "iwl-dbg-tlv.h" - -void iwl_fw_dbg_copy_tlv(struct iwl_trans *trans, struct iwl_ucode_tlv *tlv, - bool ext) +#include "fw/dbg.h" +#include "fw/runtime.h" + +/** + * enum iwl_dbg_tlv_type - debug TLV types + * @IWL_DBG_TLV_TYPE_DEBUG_INFO: debug info TLV + * @IWL_DBG_TLV_TYPE_BUF_ALLOC: buffer allocation TLV + * @IWL_DBG_TLV_TYPE_HCMD: host command TLV + * @IWL_DBG_TLV_TYPE_REGION: region TLV + * @IWL_DBG_TLV_TYPE_TRIGGER: trigger TLV + * @IWL_DBG_TLV_TYPE_NUM: number of debug TLVs + */ +enum iwl_dbg_tlv_type { + IWL_DBG_TLV_TYPE_DEBUG_INFO = + IWL_UCODE_TLV_TYPE_DEBUG_INFO - IWL_UCODE_TLV_DEBUG_BASE, + IWL_DBG_TLV_TYPE_BUF_ALLOC, + IWL_DBG_TLV_TYPE_HCMD, + IWL_DBG_TLV_TYPE_REGION, + IWL_DBG_TLV_TYPE_TRIGGER, + IWL_DBG_TLV_TYPE_NUM, +}; + +/** + * struct iwl_dbg_tlv_ver_data - debug TLV version struct + * @min_ver: min version supported + * @max_ver: max version supported + */ +struct iwl_dbg_tlv_ver_data { + int min_ver; + int max_ver; +}; + +static const struct iwl_dbg_tlv_ver_data +dbg_ver_table[IWL_DBG_TLV_TYPE_NUM] = { + [IWL_DBG_TLV_TYPE_DEBUG_INFO] = {.min_ver = 1, .max_ver = 1,}, + [IWL_DBG_TLV_TYPE_BUF_ALLOC] = {.min_ver = 1, .max_ver = 1,}, + [IWL_DBG_TLV_TYPE_HCMD] = {.min_ver = 1, .max_ver = 1,}, + [IWL_DBG_TLV_TYPE_REGION] = {.min_ver = 1, .max_ver = 1,}, + [IWL_DBG_TLV_TYPE_TRIGGER] = {.min_ver = 1, .max_ver = 1,}, +}; + +static bool iwl_dbg_tlv_ver_support(struct iwl_ucode_tlv *tlv) { - struct iwl_apply_point_data *data; - struct iwl_fw_ini_header *header = (void *)&tlv->data[0]; - u32 apply_point = le32_to_cpu(header->apply_point); - - int copy_size = le32_to_cpu(tlv->length) + sizeof(*tlv); - int offset_size = copy_size; - - 
if (le32_to_cpu(header->tlv_version) != 1) - return; - - if (WARN_ONCE(apply_point >= IWL_FW_INI_APPLY_NUM, - "Invalid apply point id %d\n", apply_point)) - return; + struct iwl_fw_ini_header *hdr = (void *)&tlv->data[0]; + u32 type = le32_to_cpu(tlv->type); + u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE; + u32 ver = le32_to_cpu(hdr->tlv_version); - if (ext) - data = &trans->dbg.apply_points_ext[apply_point]; - else - data = &trans->dbg.apply_points[apply_point]; + if (ver < dbg_ver_table[tlv_idx].min_ver || + ver > dbg_ver_table[tlv_idx].max_ver) + return false; - /* add room for is_alloc field in &iwl_fw_ini_allocation_data struct */ - if (le32_to_cpu(tlv->type) == IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION) { - struct iwl_fw_ini_allocation_data *buf_alloc = - (void *)tlv->data; - - offset_size += sizeof(buf_alloc->is_alloc); - } - - /* - * Make sure we still have room to copy this TLV. Offset points to the - * location the last copy ended. - */ - if (WARN_ONCE(data->offset + offset_size > data->size, - "Not enough memory for apply point %d\n", - apply_point)) - return; - - memcpy(data->data + data->offset, (void *)tlv, copy_size); - data->offset += offset_size; + return true; } -void iwl_alloc_dbg_tlv(struct iwl_trans *trans, size_t len, const u8 *data, +void iwl_dbg_tlv_alloc(struct iwl_trans *trans, struct iwl_ucode_tlv *tlv, bool ext) { - struct iwl_ucode_tlv *tlv; - u32 size[IWL_FW_INI_APPLY_NUM] = {0}; - int i; - - while (len >= sizeof(*tlv)) { - u32 tlv_len, tlv_type, apply; - struct iwl_fw_ini_header *hdr; - - len -= sizeof(*tlv); - tlv = (void *)data; - - tlv_len = le32_to_cpu(tlv->length); - tlv_type = le32_to_cpu(tlv->type); - - if (len < tlv_len) - return; - - len -= ALIGN(tlv_len, 4); - data += sizeof(*tlv) + ALIGN(tlv_len, 4); - - if (tlv_type < IWL_UCODE_TLV_DEBUG_BASE || - tlv_type > IWL_UCODE_TLV_DEBUG_MAX) - continue; - - hdr = (void *)&tlv->data[0]; - apply = le32_to_cpu(hdr->apply_point); - - if (le32_to_cpu(hdr->tlv_version) != 1) - continue; - - IWL_DEBUG_FW(trans, "WRT: read TLV 0x%x, apply point %d\n", - le32_to_cpu(tlv->type), apply); - - if (WARN_ON(apply >= IWL_FW_INI_APPLY_NUM)) - continue; - - /* add room for is_alloc field in &iwl_fw_ini_allocation_data - * struct - */ - if (tlv_type == IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION) { - struct iwl_fw_ini_allocation_data *buf_alloc = - (void *)tlv->data; - - size[apply] += sizeof(buf_alloc->is_alloc); - } - - size[apply] += sizeof(*tlv) + tlv_len; + struct iwl_fw_ini_header *hdr = (void *)&tlv->data[0]; + u32 type = le32_to_cpu(tlv->type); + u32 pnt = le32_to_cpu(hdr->apply_point); + u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE; + enum iwl_ini_cfg_state *cfg_state = ext ? 
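		/* ext: TLV came from the external iwl-dbg-tlv.ini file
		 * rather than from the firmware file itself
		 */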
+ &trans->dbg.external_ini_cfg : &trans->dbg.internal_ini_cfg; + + IWL_DEBUG_FW(trans, "WRT: read TLV 0x%x, apply point %d\n", + type, pnt); + + if (tlv_idx >= IWL_DBG_TLV_TYPE_NUM) { + IWL_ERR(trans, "WRT: Unsupported TLV 0x%x\n", type); + goto out_err; } - for (i = 0; i < ARRAY_SIZE(size); i++) { - void *mem; - - if (!size[i]) - continue; + if (!iwl_dbg_tlv_ver_support(tlv)) { + IWL_ERR(trans, "WRT: Unsupported TLV 0x%x version %u\n", type, + le32_to_cpu(hdr->tlv_version)); + goto out_err; + } - mem = kzalloc(size[i], GFP_KERNEL); + if (*cfg_state == IWL_INI_CFG_STATE_NOT_LOADED) + *cfg_state = IWL_INI_CFG_STATE_LOADED; - if (!mem) { - IWL_ERR(trans, "No memory for apply point %d\n", i); - return; - } + return; - if (ext) { - trans->dbg.apply_points_ext[i].data = mem; - trans->dbg.apply_points_ext[i].size = size[i]; - } else { - trans->dbg.apply_points[i].data = mem; - trans->dbg.apply_points[i].size = size[i]; - } - - trans->dbg.ini_valid = true; - } +out_err: + *cfg_state = IWL_INI_CFG_STATE_CORRUPTED; } -void iwl_fw_dbg_free(struct iwl_trans *trans) +void iwl_dbg_tlv_del_timers(struct iwl_trans *trans) { - int i; - - for (i = 0; i < ARRAY_SIZE(trans->dbg.apply_points); i++) { - kfree(trans->dbg.apply_points[i].data); - trans->dbg.apply_points[i].size = 0; - trans->dbg.apply_points[i].offset = 0; + /* will be used later */ +} +IWL_EXPORT_SYMBOL(iwl_dbg_tlv_del_timers); - kfree(trans->dbg.apply_points_ext[i].data); - trans->dbg.apply_points_ext[i].size = 0; - trans->dbg.apply_points_ext[i].offset = 0; - } +void iwl_dbg_tlv_free(struct iwl_trans *trans) +{ + /* will be used again later */ } -static int iwl_parse_fw_dbg_tlv(struct iwl_trans *trans, const u8 *data, - size_t len) +static int iwl_dbg_tlv_parse_bin(struct iwl_trans *trans, const u8 *data, + size_t len) { struct iwl_ucode_tlv *tlv; - enum iwl_ucode_tlv_type tlv_type; u32 tlv_len; while (len >= sizeof(*tlv)) { @@ -210,7 +173,6 @@ static int iwl_parse_fw_dbg_tlv(struct iwl_trans *trans, const u8 *data, tlv = (void *)data; tlv_len = le32_to_cpu(tlv->length); - tlv_type = le32_to_cpu(tlv->type); if (len < tlv_len) { IWL_ERR(trans, "invalid TLV len: %zd/%u\n", @@ -220,39 +182,33 @@ static int iwl_parse_fw_dbg_tlv(struct iwl_trans *trans, const u8 *data, len -= ALIGN(tlv_len, 4); data += sizeof(*tlv) + ALIGN(tlv_len, 4); - switch (tlv_type) { - case IWL_UCODE_TLV_TYPE_DEBUG_INFO: - case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION: - case IWL_UCODE_TLV_TYPE_HCMD: - case IWL_UCODE_TLV_TYPE_REGIONS: - case IWL_UCODE_TLV_TYPE_TRIGGERS: - case IWL_UCODE_TLV_TYPE_DEBUG_FLOW: - iwl_fw_dbg_copy_tlv(trans, tlv, true); - break; - default: - WARN_ONCE(1, "Invalid TLV %x\n", tlv_type); - break; - } + iwl_dbg_tlv_alloc(trans, tlv, true); } return 0; } -void iwl_load_fw_dbg_tlv(struct device *dev, struct iwl_trans *trans) +void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans) { const struct firmware *fw; int res; - if (trans->dbg.external_ini_loaded || !iwlwifi_mod_params.enable_ini) + if (!iwlwifi_mod_params.enable_ini) return; res = request_firmware(&fw, "iwl-dbg-tlv.ini", dev); if (res) return; - iwl_alloc_dbg_tlv(trans, fw->size, fw->data, true); - iwl_parse_fw_dbg_tlv(trans, fw->data, fw->size); + iwl_dbg_tlv_parse_bin(trans, fw->data, fw->size); - trans->dbg.external_ini_loaded = true; release_firmware(fw); } + +void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt, + enum iwl_fw_ini_time_point tp_id, + union iwl_dbg_tlv_tp_data *tp_data) +{ + /* will be used later */ +} +IWL_EXPORT_SYMBOL(iwl_dbg_tlv_time_point); diff --git 
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
index 222cd789e07a..e257ad358c94 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
@@ -5,7 +5,7 @@
 *
 * GPL LICENSE SUMMARY
 *
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -28,7 +28,7 @@
 *
 * BSD LICENSE
 *
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -66,22 +66,32 @@
 /**
 * struct iwl_apply_point_data
- * @data: start address of this apply point data
- * @size total size of the data
- * @offset: current offset of the copied data
+ * @list: list to go through the TLVs of the apply point
+ * @tlv: a debug TLV
 */
 struct iwl_apply_point_data {
-	void *data;
-	int size;
-	int offset;
+	struct list_head list;
+	struct iwl_ucode_tlv tlv;
+};
+
+/**
+ * union iwl_dbg_tlv_tp_data - data that is given in a time point
+ * @fw_pkt: a packet received from the FW
+ */
+union iwl_dbg_tlv_tp_data {
+	struct iwl_rx_packet *fw_pkt;
 };
 
 struct iwl_trans;
-void iwl_load_fw_dbg_tlv(struct device *dev, struct iwl_trans *trans);
-void iwl_fw_dbg_free(struct iwl_trans *trans);
-void iwl_fw_dbg_copy_tlv(struct iwl_trans *trans, struct iwl_ucode_tlv *tlv,
-			 bool ext);
-void iwl_alloc_dbg_tlv(struct iwl_trans *trans, size_t len, const u8 *data,
+struct iwl_fw_runtime;
+
+void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans);
+void iwl_dbg_tlv_free(struct iwl_trans *trans);
+void iwl_dbg_tlv_alloc(struct iwl_trans *trans, struct iwl_ucode_tlv *tlv,
 		       bool ext);
+void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
+			    enum iwl_fw_ini_time_point tp_id,
+			    union iwl_dbg_tlv_tp_data *tp_data);
+void iwl_dbg_tlv_del_timers(struct iwl_trans *trans);
 
 #endif /* __iwl_dbg_tlv_h__*/
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h
index cba958eb5186..fc8bc212ee84 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h
@@ -75,7 +75,6 @@ static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans,
 
 #include <linux/tracepoint.h>
 #include <linux/device.h>
-#include "iwl-trans.h"
 
 
 #if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) || defined(__CHECKER__)
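The header change above replaces the flat per-apply-point buffer (data/size/offset) with a list node that embeds the TLV, so the driver no longer needs an up-front total size before copying. A hedged sketch of the same storage idea using a hand-rolled singly-linked list; tlv_node and tlv_list_add are invented names standing in for struct list_head plumbing:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct tlv_node {
	struct tlv_node *next;
	uint32_t type;
	uint32_t length;
	uint8_t data[]; /* payload copied inline, like the embedded tlv */
};

/* copy one TLV into its own right-sized node and push it on the list */
static struct tlv_node *tlv_list_add(struct tlv_node **head, uint32_t type,
				     const void *payload, uint32_t length)
{
	struct tlv_node *node = malloc(sizeof(*node) + length);

	if (!node)
		return NULL;
	node->type = type;
	node->length = length;
	memcpy(node->data, payload, length);
	node->next = *head;
	*head = node;
	return node;
}

int main(void)
{
	struct tlv_node *head = NULL;

	tlv_list_add(&head, 1, "abc", 3);
	while (head) {
		struct tlv_node *next = head->next;

		free(head); /* per-node free replaces one big kfree() */
		head = next;
	}
	return 0;
}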
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 38672dd5aae9..ff0519ea00a5 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -215,7 +215,7 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
 	const struct iwl_cfg *cfg = drv->trans->cfg;
 	char tag[8];
 
-	if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
+	if (drv->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
 	    (CSR_HW_REV_STEP(drv->trans->hw_rev) != SILICON_B_STEP &&
 	     CSR_HW_REV_STEP(drv->trans->hw_rev) != SILICON_C_STEP)) {
 		IWL_ERR(drv,
@@ -647,9 +647,6 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
 
 	len -= sizeof(*ucode);
 
-	if (iwlwifi_mod_params.enable_ini)
-		iwl_alloc_dbg_tlv(drv->trans, len, data, false);
-
 	while (len >= sizeof(*tlv)) {
 		len -= sizeof(*tlv);
 		tlv = (void *)data;
@@ -1123,7 +1120,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
 			if (tlv_len != sizeof(*dbg_ptrs))
 				goto invalid_tlv_len;
-			if (drv->trans->cfg->device_family <
+			if (drv->trans->trans_cfg->device_family <
 			    IWL_DEVICE_FAMILY_22000)
 				break;
 			drv->trans->dbg.umac_error_event_table =
@@ -1139,7 +1136,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
 			if (tlv_len != sizeof(*dbg_ptrs))
 				goto invalid_tlv_len;
-			if (drv->trans->cfg->device_family <
+			if (drv->trans->trans_cfg->device_family <
 			    IWL_DEVICE_FAMILY_22000)
 				break;
 			drv->trans->dbg.lmac_error_event_table[0] =
@@ -1154,9 +1151,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
 		case IWL_UCODE_TLV_TYPE_HCMD:
 		case IWL_UCODE_TLV_TYPE_REGIONS:
 		case IWL_UCODE_TLV_TYPE_TRIGGERS:
-		case IWL_UCODE_TLV_TYPE_DEBUG_FLOW:
 			if (iwlwifi_mod_params.enable_ini)
-				iwl_fw_dbg_copy_tlv(drv->trans, tlv, false);
+				iwl_dbg_tlv_alloc(drv->trans, tlv, false);
 			break;
 		case IWL_UCODE_TLV_CMD_VERSIONS:
 			if (tlv_len % sizeof(struct iwl_fw_cmd_version)) {
@@ -1526,14 +1522,14 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
 		fw->init_evtlog_size = (pieces->init_evtlog_size - 16)/12;
 	else
 		fw->init_evtlog_size =
-			drv->trans->cfg->base_params->max_event_log_size;
+			drv->trans->trans_cfg->base_params->max_event_log_size;
 	fw->init_errlog_ptr = pieces->init_errlog_ptr;
 	fw->inst_evtlog_ptr = pieces->inst_evtlog_ptr;
 	if (pieces->inst_evtlog_size)
 		fw->inst_evtlog_size = (pieces->inst_evtlog_size - 16)/12;
 	else
 		fw->inst_evtlog_size =
-			drv->trans->cfg->base_params->max_event_log_size;
+			drv->trans->trans_cfg->base_params->max_event_log_size;
 	fw->inst_errlog_ptr = pieces->inst_errlog_ptr;
 
 	/*
@@ -1640,7 +1636,7 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
 	init_completion(&drv->request_firmware_complete);
 	INIT_LIST_HEAD(&drv->list);
 
-	iwl_load_fw_dbg_tlv(drv->trans->dev, drv->trans);
+	iwl_dbg_tlv_load_bin(drv->trans->dev, drv->trans);
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 	/* Create the device debugfs entries. */
@@ -1662,8 +1658,8 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
 err_fw:
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 	debugfs_remove_recursive(drv->dbgfs_drv);
+	iwl_dbg_tlv_free(drv->trans);
 #endif
-	iwl_fw_dbg_free(drv->trans);
 	kfree(drv);
err:
 	return ERR_PTR(ret);
@@ -1693,7 +1689,7 @@ void iwl_drv_stop(struct iwl_drv *drv)
 	debugfs_remove_recursive(drv->dbgfs_drv);
 #endif
 
-	iwl_fw_dbg_free(drv->trans);
+	iwl_dbg_tlv_free(drv->trans);
 
 	kfree(drv);
 }
@@ -1704,8 +1700,6 @@ struct iwl_mod_params iwlwifi_mod_params = {
 	.fw_restart = true,
 	.bt_coex_active = true,
 	.power_level = IWL_POWER_INDEX_1,
-	.d0i3_disable = true,
-	.d0i3_timeout = 1000,
 	.uapsd_disable = IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT,
 	/* the rest are 0 by default */
 };
@@ -1823,9 +1817,6 @@ MODULE_PARM_DESC(antenna_coupling,
 module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, 0444);
 MODULE_PARM_DESC(nvm_file, "NVM file name");
 
-module_param_named(d0i3_disable, iwlwifi_mod_params.d0i3_disable, bool, 0444);
-MODULE_PARM_DESC(d0i3_disable, "disable d0i3 functionality (default: Y)");
-
 module_param_named(lar_disable, iwlwifi_mod_params.lar_disable, bool, 0444);
 MODULE_PARM_DESC(lar_disable, "disable LAR functionality (default: N)");
 
@@ -1873,9 +1864,6 @@ module_param_named(fw_monitor, iwlwifi_mod_params.fw_monitor, bool, 0444);
 MODULE_PARM_DESC(fw_monitor,
 		 "firmware monitor - to debug FW (default: false - needs lots of memory)");
 
-module_param_named(d0i3_timeout, iwlwifi_mod_params.d0i3_timeout, uint, 0444);
-MODULE_PARM_DESC(d0i3_timeout, "Timeout to D0i3 entry when idle (ms)");
-
 module_param_named(disable_11ac, iwlwifi_mod_params.disable_11ac, bool, 0444);
 MODULE_PARM_DESC(disable_11ac, "Disable VHT capabilities (default: false)");
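iwl_drv_start() above still loads the optional external debug config through iwl_dbg_tlv_load_bin(): fetch the blob, parse it in one pass, release it, and silently skip if it is absent. A userspace sketch of that flow, with load_blob as an invented stand-in for request_firmware() (assumption: reading a local file is an adequate analogue):

#include <stdio.h>
#include <stdlib.h>

static unsigned char *load_blob(const char *name, size_t *size)
{
	FILE *f = fopen(name, "rb");
	unsigned char *buf = NULL;
	long n;

	if (!f)
		return NULL;
	if (fseek(f, 0, SEEK_END) == 0 && (n = ftell(f)) > 0 &&
	    fseek(f, 0, SEEK_SET) == 0 && (buf = malloc(n)) &&
	    fread(buf, 1, n, f) == (size_t)n) {
		*size = n;
	} else {
		free(buf);
		buf = NULL;
	}
	fclose(f);
	return buf;
}

int main(void)
{
	size_t size;
	unsigned char *blob = load_blob("iwl-dbg-tlv.ini", &size);

	if (!blob)   /* like the driver: quietly skip if not provided */
		return 0;
	/* hand the whole buffer to a walker such as walk_tlvs() above */
	free(blob);  /* the release_firmware() equivalent */
	return 0;
}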
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
index 04338c3a6205..cf7e2a9232e5 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
@@ -7,7 +7,7 @@
 *
 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -29,7 +29,7 @@
 *
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -728,12 +728,13 @@ int iwl_init_sband_channels(struct iwl_nvm_data *data,
 #define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
 #define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
 
-void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
+void iwl_init_ht_hw_capab(struct iwl_trans *trans,
 			  struct iwl_nvm_data *data,
 			  struct ieee80211_sta_ht_cap *ht_info,
 			  enum nl80211_band band,
 			  u8 tx_chains, u8 rx_chains)
 {
+	const struct iwl_cfg *cfg = trans->cfg;
 	int max_bit_rate = 0;
 
 	tx_chains = hweight8(tx_chains);
@@ -765,7 +766,7 @@ void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
 	if (cfg->ht_params->ldpc)
 		ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
 
-	if ((cfg->mq_rx_supported &&
+	if ((trans->trans_cfg->mq_rx_supported &&
 	     iwlwifi_mod_params.amsdu_size == IWL_AMSDU_DEF) ||
 	    iwlwifi_mod_params.amsdu_size >= IWL_AMSDU_8K)
 		ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
@@ -805,10 +806,11 @@ void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
 	}
 }
 
-static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
+static void iwl_init_sbands(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 			    struct iwl_nvm_data *data,
 			    const u8 *eeprom, size_t eeprom_size)
 {
+	struct device *dev = trans->dev;
 	int n_channels = iwl_init_channel_map(dev, cfg, data,
 					      eeprom, eeprom_size);
 	int n_used = 0;
@@ -820,7 +822,7 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
 	sband->n_bitrates = N_RATES_24;
 	n_used += iwl_init_sband_channels(data, sband, n_channels,
 					  NL80211_BAND_2GHZ);
-	iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_2GHZ,
+	iwl_init_ht_hw_capab(trans, data, &sband->ht_cap, NL80211_BAND_2GHZ,
 			     data->valid_tx_ant, data->valid_rx_ant);
 
 	sband = &data->bands[NL80211_BAND_5GHZ];
@@ -829,7 +831,7 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
 	sband->n_bitrates = N_RATES_52;
 	n_used += iwl_init_sband_channels(data, sband, n_channels,
 					  NL80211_BAND_5GHZ);
-	iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_5GHZ,
+	iwl_init_ht_hw_capab(trans, data, &sband->ht_cap, NL80211_BAND_5GHZ,
 			     data->valid_tx_ant, data->valid_rx_ant);
 
 	if (n_channels != n_used)
@@ -840,10 +842,11 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
 /* EEPROM data functions */
 
 struct iwl_nvm_data *
-iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg,
+iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 		      const u8 *eeprom, size_t eeprom_size)
 {
 	struct iwl_nvm_data *data;
+	struct device *dev = trans->dev;
 	const void *tmp;
 	u16 radio_cfg, sku;
 
@@ -918,7 +921,7 @@ iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg,
 		goto err_free;
 	}
 
-	iwl_init_sbands(dev, cfg, data, eeprom, eeprom_size);
+	iwl_init_sbands(trans, cfg, data, eeprom, eeprom_size);
 
 	return data;
 
err_free:
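The eeprom-parse hunks above are one instance of the pattern this series applies everywhere: helpers that took (struct device *, const struct iwl_cfg *) now take the transport object and derive both locally, so later users of trans->trans_cfg don't have to widen every call chain again. An illustrative sketch of the before/after shape; every type here is a stand-in, not the driver's:

#include <stddef.h>

struct device;                       /* opaque, as in the kernel */
struct cfg { int mq_rx_supported; };

struct trans {
	struct device *dev;
	const struct cfg *cfg;       /* device-specific config */
	const struct cfg *trans_cfg; /* bus/transport-specific config */
};

/* old shape: every new dependency meant another parameter per caller */
void init_sbands_old(struct device *dev, const struct cfg *cfg);

/* new shape: one handle; callees derive what they need locally */
static void init_sbands_new(const struct trans *trans)
{
	struct device *dev = trans->dev;    /* existing users keep their */
	const struct cfg *cfg = trans->cfg; /* short local aliases       */

	(void)dev;
	(void)cfg;
	/* ...and trans->trans_cfg is reachable without a new argument */
}

int main(void)
{
	struct cfg c = { 1 };
	struct trans t = { NULL, &c, &c };

	init_sbands_new(&t);
	return 0;
}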
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
index 2375d300a7cd..03a748cc98fa 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
@@ -116,14 +116,14 @@ struct iwl_nvm_data {
 * later with iwl_free_nvm_data().
 */
 struct iwl_nvm_data *
-iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg,
+iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 		      const u8 *eeprom, size_t eeprom_size);
 
 int iwl_init_sband_channels(struct iwl_nvm_data *data,
 			    struct ieee80211_supported_band *sband,
 			    int n_channels, enum nl80211_band band);
 
-void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
+void iwl_init_ht_hw_capab(struct iwl_trans *trans,
 			  struct iwl_nvm_data *data,
 			  struct ieee80211_sta_ht_cap *ht_info,
 			  enum nl80211_band band,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c
index 82e87192119e..ad6dc4497437 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c
@@ -6,7 +6,7 @@
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -27,7 +27,7 @@
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -193,7 +193,7 @@ static int iwl_init_otp_access(struct iwl_trans *trans)
 {
 	int ret;
 
-	ret = iwl_finish_nic_init(trans);
+	ret = iwl_finish_nic_init(trans, trans->trans_cfg);
 	if (ret)
 		return ret;
 
@@ -207,7 +207,7 @@ static int iwl_init_otp_access(struct iwl_trans *trans)
 	 * CSR auto clock gate disable bit -
	 * this is only applicable for HW with OTP shadow RAM
	 */
-	if (trans->cfg->base_params->shadow_ram_support)
+	if (trans->trans_cfg->base_params->shadow_ram_support)
 		iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
 			    CSR_RESET_LINK_PWR_MGMT_DISABLED);
 
@@ -328,7 +328,7 @@ static int iwl_find_otp_image(struct iwl_trans *trans,
 		}
 		/* more in the link list, continue */
 		usedblocks++;
-	} while (usedblocks <= trans->cfg->base_params->max_ll_items);
+	} while (usedblocks <= trans->trans_cfg->base_params->max_ll_items);
 
 	/* OTP has no valid blocks */
 	IWL_DEBUG_EEPROM(trans->dev, "OTP has no valid blocks\n");
@@ -361,7 +361,7 @@ int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size)
 	if (nvm_is_otp < 0)
 		return nvm_is_otp;
 
-	sz = trans->cfg->base_params->eeprom_size;
+	sz = trans->trans_cfg->base_params->eeprom_size;
 	IWL_DEBUG_EEPROM(trans->dev, "NVM size = %d\n", sz);
 
 	e = kmalloc(sz, GFP_KERNEL);
@@ -396,7 +396,7 @@ int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size)
 			  CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK |
 			  CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
 		/* traversing the linked list if no shadow ram supported */
-		if (!trans->cfg->base_params->shadow_ram_support) {
+		if (!trans->trans_cfg->base_params->shadow_ram_support) {
 			ret = iwl_find_otp_image(trans, &validblockaddr);
 			if (ret)
 				goto err_unlock;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index c6a534303936..0c12df558240 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -7,7 +7,7 @@
 *
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -29,7 +29,7 @@
 *
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -127,7 +127,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
					     unsigned int chnl)
 {
-	if (trans->cfg->use_tfh) {
+	if (trans->trans_cfg->use_tfh) {
 		WARN_ON_ONCE(chnl >= 64);
 		return TFH_TFDQ_CBB_TABLE + 8 * chnl;
 	}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
index a704e25af810..1b7414bf7bef 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
@@ -304,10 +304,10 @@ IWL_EXPORT_SYMBOL(iwl_clear_bits_prph);
 
 void iwl_force_nmi(struct iwl_trans *trans)
 {
-	if (trans->cfg->device_family < IWL_DEVICE_FAMILY_9000)
+	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000)
 		iwl_write_prph(trans, DEVICE_SET_NMI_REG,
 			       DEVICE_SET_NMI_VAL_DRV);
-	else if (trans->cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+	else if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
 		iwl_write_umac_prph(trans, UREG_NIC_SET_NMI_DRIVER,
 				UREG_NIC_SET_NMI_DRIVER_NMI_FROM_DRIVER_MSK);
 	else
@@ -458,7 +458,7 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf)
 		FH_TSSR_TX_ERROR_REG
 	};
 
-	if (trans->cfg->mq_rx_supported)
+	if (trans->trans_cfg->mq_rx_supported)
 		return iwl_dump_rfh(trans, buf);
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -492,11 +492,12 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf)
 	return 0;
 }
 
-int iwl_finish_nic_init(struct iwl_trans *trans)
+int iwl_finish_nic_init(struct iwl_trans *trans,
+			const struct iwl_cfg_trans_params *cfg_trans)
 {
 	int err;
 
-	if (trans->cfg->bisr_workaround) {
+	if (cfg_trans->bisr_workaround) {
 		/* ensure the TOP FSM isn't still in previous reset */
 		mdelay(2);
 	}
@@ -506,9 +507,9 @@ int iwl_finish_nic_init(struct iwl_trans *trans)
	 * D0U* --> D0A* (powered-up active) state.
	 */
 	iwl_set_bit(trans, CSR_GP_CNTRL,
-		    BIT(trans->cfg->csr->flag_init_done));
+		    BIT(cfg_trans->csr->flag_init_done));
 
-	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+	if (cfg_trans->device_family == IWL_DEVICE_FAMILY_8000)
 		udelay(2);
 
 	/*
@@ -517,13 +518,13 @@ int iwl_finish_nic_init(struct iwl_trans *trans)
	 * and accesses to uCode SRAM.
	 */
 	err = iwl_poll_bit(trans, CSR_GP_CNTRL,
-			   BIT(trans->cfg->csr->flag_mac_clock_ready),
-			   BIT(trans->cfg->csr->flag_mac_clock_ready),
+			   BIT(cfg_trans->csr->flag_mac_clock_ready),
+			   BIT(cfg_trans->csr->flag_mac_clock_ready),
 			   25000);
 	if (err < 0)
 		IWL_DEBUG_INFO(trans, "Failed to wake NIC\n");
 
-	if (trans->cfg->bisr_workaround) {
+	if (cfg_trans->bisr_workaround) {
 		/* ensure BISR shift has finished */
 		udelay(200);
 	}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.h b/drivers/net/wireless/intel/iwlwifi/iwl-io.h
index 920e2146ea3f..f8e4f0f5de0c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.h
@@ -99,7 +99,8 @@ void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
 void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
 
 void iwl_force_nmi(struct iwl_trans *trans);
-int iwl_finish_nic_init(struct iwl_trans *trans);
+int iwl_finish_nic_init(struct iwl_trans *trans,
+			const struct iwl_cfg_trans_params *cfg_trans);
 
 /* Error handling */
 int iwl_dump_fh(struct iwl_trans *trans, char **buf);
@@ -111,35 +112,38 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf);
 */
 static inline u32 iwl_umac_prph(struct iwl_trans *trans, u32 ofs)
 {
-	return ofs + trans->cfg->umac_prph_offset;
+	return ofs + trans->cfg->trans.umac_prph_offset;
 }
 
 static inline u32 iwl_read_umac_prph_no_grab(struct iwl_trans *trans, u32 ofs)
 {
-	return iwl_read_prph_no_grab(trans, ofs + trans->cfg->umac_prph_offset);
+	return iwl_read_prph_no_grab(trans, ofs +
+				     trans->cfg->trans.umac_prph_offset);
 }
 
 static inline u32 iwl_read_umac_prph(struct iwl_trans *trans, u32 ofs)
 {
-	return iwl_read_prph(trans, ofs + trans->cfg->umac_prph_offset);
+	return iwl_read_prph(trans, ofs + trans->cfg->trans.umac_prph_offset);
 }
 
 static inline void iwl_write_umac_prph_no_grab(struct iwl_trans *trans, u32 ofs,
					       u32 val)
 {
-	iwl_write_prph_no_grab(trans, ofs + trans->cfg->umac_prph_offset, val);
+	iwl_write_prph_no_grab(trans, ofs + trans->cfg->trans.umac_prph_offset,
+			       val);
 }
 
 static inline void iwl_write_umac_prph(struct iwl_trans *trans, u32 ofs,
				       u32 val)
 {
-	iwl_write_prph(trans, ofs + trans->cfg->umac_prph_offset, val);
+	iwl_write_prph(trans, ofs + trans->cfg->trans.umac_prph_offset, val);
 }
 
 static inline int iwl_poll_umac_prph_bit(struct iwl_trans *trans, u32 addr,
					 u32 bits, u32 mask, int timeout)
 {
-	return iwl_poll_prph_bit(trans, addr + trans->cfg->umac_prph_offset,
+	return iwl_poll_prph_bit(trans, addr +
+				 trans->cfg->trans.umac_prph_offset,
 				 bits, mask, timeout);
 }
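iwl_finish_nic_init() above ends with iwl_poll_bit() waiting up to 25000 us for the MAC clock-ready bit. A self-contained sketch of that poll-until-mask-matches-or-timeout pattern, with a fake register and stub delay in place of MMIO; read_reg and udelay_us are invented names:

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t fake_reg = 0x1; /* pretend clock-ready is set */

static uint32_t read_reg(uint32_t ofs) { (void)ofs; return fake_reg; }
static void udelay_us(unsigned int us) { (void)us; /* no-op stub */ }

/* wait until (reg & mask) == (bits & mask), polling in 10 us steps */
static int poll_bit(uint32_t ofs, uint32_t bits, uint32_t mask,
		    int timeout_us)
{
	int t = 0;

	do {
		if ((read_reg(ofs) & mask) == (bits & mask))
			return t; /* like the driver: elapsed time */
		udelay_us(10);
		t += 10;
	} while (t < timeout_us);

	return -1; /* the driver returns -ETIMEDOUT here */
}

int main(void)
{
	/* 25000 us budget, mirroring iwl_finish_nic_init() */
	printf("%d\n", poll_bit(0, 0x1, 0x1, 25000));
	return 0;
}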
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
index 0cae2ef9b9df..ebea3f308b5d 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
@@ -6,7 +6,7 @@
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -27,7 +27,7 @@
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -115,9 +115,6 @@ enum iwl_uapsd_disable {
 * @nvm_file: specifies an external NVM file
 * @uapsd_disable: disable U-APSD, see &enum iwl_uapsd_disable, default =
 *	IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT
- * @d0i3_disable: disable d0i3, default = 1,
- * @d0i3_timeout: time to wait after no refs are taken before
- *	entering D0i3 (in msecs)
 * @lar_disable: disable LAR (regulatory), default = 0
 * @fw_monitor: allow to use firmware monitor
 * @disable_11ac: disable VHT capabilities, default = false.
@@ -139,8 +136,6 @@ struct iwl_mod_params {
 	int antenna_coupling;
 	char *nvm_file;
 	u32 uapsd_disable;
-	bool d0i3_disable;
-	unsigned int d0i3_timeout;
 	bool lar_disable;
 	bool fw_monitor;
 	bool disable_11ac;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index d87a6bb3e456..c8972f6e38ba 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -393,11 +393,12 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
 	return n_channels;
 }
 
-static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
+static void iwl_init_vht_hw_capab(struct iwl_trans *trans,
 				  struct iwl_nvm_data *data,
 				  struct ieee80211_sta_vht_cap *vht_cap,
 				  u8 tx_chains, u8 rx_chains)
 {
+	const struct iwl_cfg *cfg = trans->cfg;
 	int num_rx_ants = num_of_ant(rx_chains);
 	int num_tx_ants = num_of_ant(tx_chains);
 	unsigned int max_ampdu_exponent = (cfg->max_vht_ampdu_exponent ?:
@@ -434,14 +435,14 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
 
 	switch (iwlwifi_mod_params.amsdu_size) {
 	case IWL_AMSDU_DEF:
-		if (cfg->mq_rx_supported)
+		if (trans->trans_cfg->mq_rx_supported)
 			vht_cap->cap |=
 				IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
 		else
 			vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
 		break;
 	case IWL_AMSDU_2K:
-		if (cfg->mq_rx_supported)
+		if (trans->trans_cfg->mq_rx_supported)
 			vht_cap->cap |=
 				IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
 		else
@@ -669,11 +670,13 @@ static void iwl_init_he_hw_capab(struct ieee80211_supported_band *sband,
 	}
 }
 
-static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
+static void iwl_init_sbands(struct iwl_trans *trans,
 			    struct iwl_nvm_data *data,
 			    const void *nvm_ch_flags, u8 tx_chains,
 			    u8 rx_chains, u32 sbands_flags, bool v4)
 {
+	struct device *dev = trans->dev;
+	const struct iwl_cfg *cfg = trans->cfg;
 	int n_channels;
 	int n_used = 0;
 	struct ieee80211_supported_band *sband;
@@ -686,7 +689,7 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
 	sband->n_bitrates = N_RATES_24;
 	n_used += iwl_init_sband_channels(data, sband, n_channels,
 					  NL80211_BAND_2GHZ);
-	iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_2GHZ,
+	iwl_init_ht_hw_capab(trans, data, &sband->ht_cap, NL80211_BAND_2GHZ,
 			     tx_chains, rx_chains);
 
 	if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
@@ -698,10 +701,10 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
 	sband->n_bitrates = N_RATES_52;
 	n_used += iwl_init_sband_channels(data, sband, n_channels,
 					  NL80211_BAND_5GHZ);
-	iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_5GHZ,
+	iwl_init_ht_hw_capab(trans, data, &sband->ht_cap, NL80211_BAND_5GHZ,
 			     tx_chains, rx_chains);
 	if (data->sku_cap_11ac_enable && !iwlwifi_mod_params.disable_11ac)
-		iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap,
+		iwl_init_vht_hw_capab(trans, data, &sband->vht_cap,
 				      tx_chains, rx_chains);
 
 	if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
@@ -793,10 +796,10 @@ static void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
 {
 	__le32 mac_addr0 =
 		cpu_to_le32(iwl_read32(trans,
-				       trans->cfg->csr->mac_addr0_strap));
+				       trans->trans_cfg->csr->mac_addr0_strap));
 	__le32 mac_addr1 =
 		cpu_to_le32(iwl_read32(trans,
-				       trans->cfg->csr->mac_addr1_strap));
+				       trans->trans_cfg->csr->mac_addr1_strap));
 
 	iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
 	/*
@@ -807,9 +810,9 @@ static void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
 		return;
 
 	mac_addr0 = cpu_to_le32(iwl_read32(trans,
-					   trans->cfg->csr->mac_addr0_otp));
+					   trans->trans_cfg->csr->mac_addr0_otp));
 	mac_addr1 = cpu_to_le32(iwl_read32(trans,
-					   trans->cfg->csr->mac_addr1_otp));
+					   trans->trans_cfg->csr->mac_addr1_otp));
 
 	iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
 }
@@ -896,7 +899,7 @@ static int iwl_set_hw_address(struct iwl_trans *trans,
 }
 
 static bool
-iwl_nvm_no_wide_in_5ghz(struct device *dev, const struct iwl_cfg *cfg,
+iwl_nvm_no_wide_in_5ghz(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 			const __be16 *nvm_hw)
 {
 	/*
@@ -908,7 +911,7 @@ iwl_nvm_no_wide_in_5ghz(struct device *dev, const struct iwl_cfg *cfg,
	 * in 5GHz otherwise the FW will throw a sysassert when we try
	 * to use them.
	 */
-	if (cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+	if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) {
 		/*
		 * Unlike the other sections in the NVM, the hw
		 * section uses big-endian.
@@ -917,7 +920,7 @@ iwl_nvm_no_wide_in_5ghz(struct device *dev, const struct iwl_cfg *cfg,
 		u8 sku = (subsystem_id & 0x1e) >> 1;
 
 		if (sku == 5 || sku == 9) {
-			IWL_DEBUG_EEPROM(dev,
+			IWL_DEBUG_EEPROM(trans->dev,
 					 "disabling wide channels in 5GHz (0x%0x %d)\n",
 					 subsystem_id, sku);
 			return true;
@@ -934,7 +937,6 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 		   const __le16 *mac_override, const __le16 *phy_sku,
 		   u8 tx_chains, u8 rx_chains, bool lar_fw_supported)
 {
-	struct device *dev = trans->dev;
 	struct iwl_nvm_data *data;
 	bool lar_enabled;
 	u32 sku, radio_cfg;
@@ -942,7 +944,11 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 	u16 lar_config;
 	const __le16 *ch_section;
 
-	if (cfg->nvm_type != IWL_NVM_EXT)
+	if (cfg->uhb_supported)
+		data = kzalloc(struct_size(data, channels,
+					   IWL_NVM_NUM_CHANNELS_UHB),
+			       GFP_KERNEL);
+	else if (cfg->nvm_type != IWL_NVM_EXT)
 		data = kzalloc(struct_size(data, channels,
 					   IWL_NVM_NUM_CHANNELS),
 			       GFP_KERNEL);
@@ -1012,10 +1018,10 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 	if (lar_fw_supported && lar_enabled)
 		sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;
 
-	if (iwl_nvm_no_wide_in_5ghz(dev, cfg, nvm_hw))
+	if (iwl_nvm_no_wide_in_5ghz(trans, cfg, nvm_hw))
 		sbands_flags |= IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ;
 
-	iwl_init_sbands(dev, cfg, data, ch_section, tx_chains, rx_chains,
+	iwl_init_sbands(trans, data, ch_section, tx_chains, rx_chains,
 			sbands_flags, false);
 	data->calib_version = 255;
 
@@ -1066,11 +1072,6 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
 	return flags;
 }
 
-struct regdb_ptrs {
-	struct ieee80211_wmm_rule *rule;
-	u32 token;
-};
-
 struct ieee80211_regdomain *
 iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
 		       int num_of_ch, __le32 *channels, u16 fw_mcc,
@@ -1082,7 +1083,6 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
 	const u16 *nvm_chan;
 	struct ieee80211_regdomain *regd, *copy_rd;
 	struct ieee80211_reg_rule *rule;
-	struct regdb_ptrs *regdb_ptrs;
 	enum nl80211_band band;
 	int center_freq, prev_center_freq = 0;
 	int valid_rules = 0;
@@ -1114,12 +1114,6 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
 	if (!regd)
 		return ERR_PTR(-ENOMEM);
 
-	regdb_ptrs = kcalloc(num_of_ch, sizeof(*regdb_ptrs), GFP_KERNEL);
-	if (!regdb_ptrs) {
-		copy_rd = ERR_PTR(-ENOMEM);
-		goto out;
-	}
-
 	/* set alpha2 from FW. */
 	regd->alpha2[0] = fw_mcc >> 8;
 	regd->alpha2[1] = fw_mcc & 0xff;
@@ -1191,8 +1185,6 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
 	if (!copy_rd)
 		copy_rd = ERR_PTR(-ENOMEM);
 
-out:
-	kfree(regdb_ptrs);
 	kfree(regd);
 	return copy_rd;
 }
@@ -1311,7 +1303,7 @@ int iwl_read_external_nvm(struct iwl_trans *trans,
 			 le32_to_cpu(dword_buff[3]));
 
 		/* nvm file validation, dword_buff[2] holds the file version */
-		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
+		if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
 		    CSR_HW_REV_STEP(trans->hw_rev) == SILICON_C_STEP &&
 		    le32_to_cpu(dword_buff[2]) < 0xE4A) {
 			ret = -EFAULT;
@@ -1495,7 +1487,7 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
 	channel_profile = v4 ? (void *)rsp->regulatory.channel_profile :
 			  (void *)rsp_v3->regulatory.channel_profile;
 
-	iwl_init_sbands(trans->dev, trans->cfg, nvm,
+	iwl_init_sbands(trans, nvm,
 			channel_profile,
 			nvm->valid_tx_ant & fw->valid_tx_ant,
 			nvm->valid_rx_ant & fw->valid_rx_ant,
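The UHB branch added to iwl_parse_nvm_data() above sizes the allocation with struct_size(), which accounts for the flexible channels[] array without open-coded multiplication. A userspace sketch of the same idea; nvm_data and NUM_CHANNELS_UHB are illustrative stand-ins, and a hand-rolled overflow check replaces the kernel helper:

#include <stdint.h>
#include <stdlib.h>

struct nvm_data {
	uint32_t sku;
	uint16_t channels[]; /* flexible array member */
};

#define NUM_CHANNELS_UHB 256

/* userspace stand-in for the kernel's struct_size() */
static size_t struct_size_chan(size_t n)
{
	size_t extra = n * sizeof(uint16_t);

	if (n && extra / n != sizeof(uint16_t))
		return SIZE_MAX; /* overflow: let the allocator fail */
	return sizeof(struct nvm_data) + extra;
}

int main(void)
{
	/* one allocation covers the header plus all channel entries */
	struct nvm_data *data = calloc(1, struct_size_chan(NUM_CHANNELS_UHB));

	if (!data)
		return 1;
	data->channels[NUM_CHANNELS_UHB - 1] = 0; /* in bounds */
	free(data);
	return 0;
}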
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
index cbd1a8eed620..3008a5246be8 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
@@ -8,7 +8,7 @@
 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
 * Copyright(c) 2015 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
 * Copyright(c) 2015 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -140,9 +140,6 @@ struct iwl_cfg;
 * @nic_config: configure NIC, called before firmware is started.
 *	May sleep
 * @wimax_active: invoked when WiMax becomes active. May sleep
- * @enter_d0i3: configure the fw to enter d0i3. return 1 to indicate d0i3
- *	entrance is aborted (e.g. due to held reference). May sleep.
- * @exit_d0i3: configure the fw to exit d0i3. May sleep.
 */
 struct iwl_op_mode_ops {
 	struct iwl_op_mode *(*start)(struct iwl_trans *trans,
@@ -164,8 +161,6 @@ struct iwl_op_mode_ops {
 	void (*cmd_queue_full)(struct iwl_op_mode *op_mode);
 	void (*nic_config)(struct iwl_op_mode *op_mode);
 	void (*wimax_active)(struct iwl_op_mode *op_mode);
-	int (*enter_d0i3)(struct iwl_op_mode *op_mode);
-	int (*exit_d0i3)(struct iwl_op_mode *op_mode);
 };
 
 int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops);
@@ -258,22 +253,4 @@ static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode)
 	op_mode->ops->wimax_active(op_mode);
 }
 
-static inline int iwl_op_mode_enter_d0i3(struct iwl_op_mode *op_mode)
-{
-	might_sleep();
-
-	if (!op_mode->ops->enter_d0i3)
-		return 0;
-	return op_mode->ops->enter_d0i3(op_mode);
-}
-
-static inline int iwl_op_mode_exit_d0i3(struct iwl_op_mode *op_mode)
-{
-	might_sleep();
-
-	if (!op_mode->ops->exit_d0i3)
-		return 0;
-	return op_mode->ops->exit_d0i3(op_mode);
-}
-
 #endif /* __iwl_op_mode_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 8d930bfe0727..f47e0f97acf8 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -451,6 +451,8 @@ enum {
 
 #define UREG_DOORBELL_TO_ISR6		0xA05C04
 #define UREG_DOORBELL_TO_ISR6_NMI_BIT	BIT(0)
+#define UREG_DOORBELL_TO_ISR6_SUSPEND	BIT(18)
+#define UREG_DOORBELL_TO_ISR6_RESUME	BIT(19)
 
 #define FSEQ_ERROR_CODE			0xA340C8
 #define FSEQ_TOP_INIT_VERSION		0xA34038
@@ -460,4 +462,7 @@ enum {
 #define FSEQ_ALIVE_TOKEN		0xA340F0
 #define FSEQ_CNVI_ID			0xA3408C
 #define FSEQ_CNVR_ID			0xA34090
+
+#define IWL_D3_SLEEP_STATUS_SUSPEND	0xD3
+#define IWL_D3_SLEEP_STATUS_RESUME	0xD0
 #endif /* __iwl_prph_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
index 727f73e0b3f1..28bdc9a9617e 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
@@ -66,7 +66,6 @@
 struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
				  struct device *dev,
-				  const struct iwl_cfg *cfg,
				  const struct iwl_trans_ops *ops)
 {
 	struct iwl_trans *trans;
@@ -84,7 +83,6 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
 #endif
 
 	trans->dev = dev;
-	trans->cfg = cfg;
 	trans->ops = ops;
 	trans->num_rx_queues = 1;
 
@@ -202,17 +200,3 @@ int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans)
 	return 0;
 }
 IWL_EXPORT_SYMBOL(iwl_cmd_groups_verify_sorted);
-
-void iwl_trans_ref(struct iwl_trans *trans)
-{
-	if (trans->ops->ref)
-		trans->ops->ref(trans);
-}
-IWL_EXPORT_SYMBOL(iwl_trans_ref);
-
-void iwl_trans_unref(struct iwl_trans *trans)
-{
-	if (trans->ops->unref)
-		trans->ops->unref(trans);
-}
-IWL_EXPORT_SYMBOL(iwl_trans_unref);
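The removed iwl_op_mode_enter_d0i3() wrappers above show the optional-callback convention these vtables use, and the same convention survives in iwl_trans_ops now that d3_suspend() returns an error code: a NULL hook is treated as success, checked in exactly one inline wrapper. A sketch of that convention with illustrative names:

#include <stdio.h>

struct trans;

struct trans_ops {
	int (*d3_suspend)(struct trans *t, int test, int reset);
	/* other, mandatory hooks omitted */
};

struct trans {
	const struct trans_ops *ops;
};

static int trans_d3_suspend(struct trans *t, int test, int reset)
{
	if (!t->ops->d3_suspend)
		return 0; /* transport has no D3 hook: nothing to do */

	return t->ops->d3_suspend(t, test, reset); /* may now fail */
}

int main(void)
{
	static const struct trans_ops no_d3 = { 0 };
	struct trans t = { &no_d3 };

	printf("%d\n", trans_d3_suspend(&t, 0, 0)); /* prints 0 */
	return 0;
}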
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 0f8aeb111b0e..a31408188ed0 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -159,13 +159,6 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
 * @CMD_ASYNC: Return right away and don't wait for the response
 * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of
 *	the response. The caller needs to call iwl_free_resp when done.
- * @CMD_HIGH_PRIO: The command is high priority - it goes to the front of the
- *	command queue, but after other high priority commands. Valid only
- *	with CMD_ASYNC.
- * @CMD_SEND_IN_IDLE: The command should be sent even when the trans is idle.
- * @CMD_MAKE_TRANS_IDLE: The command response should mark the trans as idle.
- * @CMD_WAKE_UP_TRANS: The command response should wake up the trans
- *	(i.e. mark it as non-idle).
 * @CMD_WANT_ASYNC_CALLBACK: the op_mode's async callback function must be
 *	called after this command completes. Valid only with CMD_ASYNC.
 */
@@ -173,11 +166,7 @@ enum CMD_MODE {
 	CMD_ASYNC		= BIT(0),
 	CMD_WANT_SKB		= BIT(1),
 	CMD_SEND_IN_RFKILL	= BIT(2),
-	CMD_HIGH_PRIO		= BIT(3),
-	CMD_SEND_IN_IDLE	= BIT(4),
-	CMD_MAKE_TRANS_IDLE	= BIT(5),
-	CMD_WAKE_UP_TRANS	= BIT(6),
-	CMD_WANT_ASYNC_CALLBACK	= BIT(7),
+	CMD_WANT_ASYNC_CALLBACK	= BIT(3),
 };
 
 #define DEF_CMD_PAYLOAD_SIZE 320
@@ -463,9 +452,8 @@ struct iwl_trans_rxq_dma_data {
 *
 * All the handlers MUST be implemented
 *
- * @start_hw: starts the HW. If low_power is true, the NIC needs to be taken
- *	out of a low power state. From that point on, the HW can send
- *	interrupts. May sleep.
+ * @start_hw: starts the HW. From that point on, the HW can send interrupts.
+ *	May sleep.
 * @op_mode_leave: Turn off the HW RF kill indication if on
 *	May sleep
 * @start_fw: allocates and inits all the resources for the transport
@@ -475,9 +463,8 @@ struct iwl_trans_rxq_dma_data {
 *	the SCD base address in SRAM, then provide it here, or 0 otherwise.
 *	May sleep
 * @stop_device: stops the whole device (embedded CPU put to reset) and stops
- *	the HW. If low_power is true, the NIC will be put in low power state.
- *	From that point on, the HW will be stopped but will still issue an
- *	interrupt if the HW RF kill switch is triggered.
+ *	the HW. From that point on, the HW will be stopped but will still issue
+ *	an interrupt if the HW RF kill switch is triggered.
 *	This callback must do the right thing and not crash even if %start_hw()
 *	was called but not &start_fw(). May sleep.
 * @d3_suspend: put the device into the correct mode for WoWLAN during
@@ -535,11 +522,6 @@ struct iwl_trans_rxq_dma_data {
 * @release_nic_access: let the NIC go to sleep. The "flags" parameter
 *	must be the same one that was sent before to the grab_nic_access.
 * @set_bits_mask - set SRAM register according to value and mask.
- * @ref: grab a reference to the transport/FW layers, disallowing
- *	certain low power states
- * @unref: release a reference previously taken with @ref. Note that
- *	initially the reference count is 1, making an initial @unref
- *	necessary to allow low power states.
 * @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last
 *	TX'ed commands and similar. The buffer will be vfree'd by the caller.
 *	Note that the transport must fill in the proper file headers.
@@ -548,14 +530,14 @@ struct iwl_trans_rxq_dma_data {
 */
 struct iwl_trans_ops {
 
-	int (*start_hw)(struct iwl_trans *iwl_trans, bool low_power);
+	int (*start_hw)(struct iwl_trans *iwl_trans);
 	void (*op_mode_leave)(struct iwl_trans *iwl_trans);
 	int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
 			bool run_in_rfkill);
 	void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
-	void (*stop_device)(struct iwl_trans *trans, bool low_power);
+	void (*stop_device)(struct iwl_trans *trans);
 
-	void (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset);
+	int (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset);
 	int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
 			 bool test, bool reset);
 
@@ -566,6 +548,8 @@ struct iwl_trans_ops {
 	void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
 			struct sk_buff_head *skbs);
 
+	void (*set_q_ptrs)(struct iwl_trans *trans, int queue, int ptr);
+
 	bool (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
			   const struct iwl_trans_txq_scd_cfg *cfg,
			   unsigned int queue_wdg_timeout);
@@ -607,8 +591,6 @@ struct iwl_trans_ops {
 				   unsigned long *flags);
 	void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
			      u32 value);
-	void (*ref)(struct iwl_trans *trans);
-	void (*unref)(struct iwl_trans *trans);
 	int  (*suspend)(struct iwl_trans *trans);
 	void (*resume)(struct iwl_trans *trans);
 
@@ -632,9 +614,6 @@ enum iwl_trans_state {
 /**
 * DOC: Platform power management
 *
- * There are two types of platform power management: system-wide
- * (WoWLAN) and runtime.
- *
 * In system-wide power management the entire platform goes into a low
 * power state (e.g. idle or suspend to RAM) at the same time and the
 * device is configured as a wakeup source for the entire platform.
@@ -643,54 +622,46 @@ enum iwl_trans_state {
 * put the platform in low power mode). The device's behavior in this
 * mode is dictated by the wake-on-WLAN configuration.
 *
- * In runtime power management, only the devices which are themselves
- * idle enter a low power state. This is done at runtime, which means
- * that the entire system is still running normally. This mode is
- * usually triggered automatically by the device driver and requires
- * the ability to enter and exit the low power modes in a very short
- * time, so there is not much impact in usability.
- *
 * The terms used for the device's behavior are as follows:
 *
 *	- D0: the device is fully powered and the host is awake;
 *	- D3: the device is in low power mode and only reacts to
 *		specific events (e.g. magic-packet received or scan
 *		results found);
- *	- D0I3: the device is in low power mode and reacts to any
- *		activity (e.g. RX);
 *
 * These terms reflect the power modes in the firmware and are not to
- * be confused with the physical device power state. The NIC can be
- * in D0I3 mode even if, for instance, the PCI device is in D3 state.
+ * be confused with the physical device power state.
 */
 
/**
 * enum iwl_plat_pm_mode - platform power management mode
 *
 * This enumeration describes the device's platform power management
- * behavior when in idle mode (i.e. runtime power management) or when
- * in system-wide suspend (i.e WoWLAN).
+ * behavior when in system-wide suspend (i.e. WoWLAN).
 *
 * @IWL_PLAT_PM_MODE_DISABLED: power management is disabled for this
- *	device. At runtime, this means that nothing happens and the
- *	device always remains in active. In system-wide suspend mode,
- *	it means that the all connections will be closed automatically
- *	by mac80211 before the platform is suspended.
+ *	device. In system-wide suspend mode, it means that all the
+ *	connections will be closed automatically by mac80211 before
+ *	the platform is suspended.
 * @IWL_PLAT_PM_MODE_D3: the device goes into D3 mode (i.e. WoWLAN).
- *	For runtime power management, this mode is not officially
- *	supported.
- * @IWL_PLAT_PM_MODE_D0I3: the device goes into D0I3 mode.
 */
 enum iwl_plat_pm_mode {
 	IWL_PLAT_PM_MODE_DISABLED,
 	IWL_PLAT_PM_MODE_D3,
-	IWL_PLAT_PM_MODE_D0I3,
 };
 
-/* Max time to wait for trans to become idle/non-idle on d0i3
- * enter/exit (in msecs).
+/**
+ * enum iwl_ini_cfg_state
+ * @IWL_INI_CFG_STATE_NOT_LOADED: no debug cfg was given
+ * @IWL_INI_CFG_STATE_LOADED: debug cfg was found and loaded
+ * @IWL_INI_CFG_STATE_CORRUPTED: debug cfg was found and some of the TLVs
+ *	are corrupted. The rest of the debug TLVs will still be used
 */
-#define IWL_TRANS_IDLE_TIMEOUT 2000
+enum iwl_ini_cfg_state {
+	IWL_INI_CFG_STATE_NOT_LOADED,
+	IWL_INI_CFG_STATE_LOADED,
+	IWL_INI_CFG_STATE_CORRUPTED,
+};
 
 /* Max time to wait for nmi interrupt */
 #define IWL_TRANS_NMI_TIMEOUT (HZ / 4)
@@ -733,8 +704,8 @@ struct iwl_self_init_dram {
 * @umac_error_event_table: addr of umac error table
 * @error_event_table_tlv_status: bitmap that indicates what error table
 *	pointers were received via TLV. Uses enum &iwl_error_event_table_status
- * @external_ini_loaded: indicates if an external ini cfg was given
- * @ini_valid: indicates if debug ini mode is on
+ * @internal_ini_cfg: internal debug cfg state. Uses &enum iwl_ini_cfg_state
+ * @external_ini_cfg: external debug cfg state. Uses &enum iwl_ini_cfg_state
 * @num_blocks: number of blocks in fw_mon
 * @fw_mon: address of the buffers for firmware monitor
 * @hw_error: equals true if hw error interrupt was received from the FW
@@ -752,14 +723,11 @@ struct iwl_trans_debug {
 	u32 umac_error_event_table;
 	unsigned int error_event_table_tlv_status;
 
-	bool external_ini_loaded;
-	bool ini_valid;
-
-	struct iwl_apply_point_data apply_points[IWL_FW_INI_APPLY_NUM];
-	struct iwl_apply_point_data apply_points_ext[IWL_FW_INI_APPLY_NUM];
+	enum iwl_ini_cfg_state internal_ini_cfg;
+	enum iwl_ini_cfg_state external_ini_cfg;
 
 	int num_blocks;
-	struct iwl_dram_data fw_mon[IWL_FW_INI_APPLY_NUM];
+	struct iwl_dram_data fw_mon[IWL_FW_INI_ALLOCATION_NUM];
 
 	bool hw_error;
 	enum iwl_fw_ini_buffer_location ini_dest;
@@ -770,6 +738,7 @@ struct iwl_trans_debug {
 *
 * @ops - pointer to iwl_trans_ops
 * @op_mode - pointer to the op_mode
 * @trans_cfg: the trans-specific configuration part
 * @cfg - pointer to the configuration
 * @drv - pointer to iwl_drv
 * @status: a bit-mask of transport status flags
@@ -797,13 +766,11 @@ struct iwl_trans_debug {
 * @system_pm_mode: the system-wide power management mode in use.
 *	This mode is set dynamically, depending on the WoWLAN values
 *	configured from the userspace at runtime.
- * @runtime_pm_mode: the runtime power management mode in use.  This
- *	mode is set during the initialization phase and is not
- *	supposed to change during runtime.
 */
 struct iwl_trans {
 	const struct iwl_trans_ops *ops;
 	struct iwl_op_mode *op_mode;
+	const struct iwl_cfg_trans_params *trans_cfg;
 	const struct iwl_cfg *cfg;
 	struct iwl_drv *drv;
 	enum iwl_trans_state state;
@@ -844,8 +811,6 @@ struct iwl_trans {
 	struct iwl_self_init_dram init_dram;
 
 	enum iwl_plat_pm_mode system_pm_mode;
-	enum iwl_plat_pm_mode runtime_pm_mode;
-	bool suspending;
 
 	/* pointer to trans specific struct */
 	/*Ensure that this pointer will always be aligned to sizeof pointer */
@@ -864,16 +829,11 @@ static inline void iwl_trans_configure(struct iwl_trans *trans,
 	WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
 }
 
-static inline int _iwl_trans_start_hw(struct iwl_trans *trans, bool low_power)
+static inline int iwl_trans_start_hw(struct iwl_trans *trans)
 {
 	might_sleep();
 
-	return trans->ops->start_hw(trans, low_power);
-}
-
-static inline int iwl_trans_start_hw(struct iwl_trans *trans)
-{
-	return trans->ops->start_hw(trans, true);
+	return trans->ops->start_hw(trans);
 }
 
 static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
@@ -909,27 +869,23 @@ static inline int iwl_trans_start_fw(struct iwl_trans *trans,
 	return trans->ops->start_fw(trans, fw, run_in_rfkill);
 }
 
-static inline void _iwl_trans_stop_device(struct iwl_trans *trans,
-					  bool low_power)
+static inline void iwl_trans_stop_device(struct iwl_trans *trans)
 {
 	might_sleep();
 
-	trans->ops->stop_device(trans, low_power);
+	trans->ops->stop_device(trans);
 
 	trans->state = IWL_TRANS_NO_FW;
 }
 
-static inline void iwl_trans_stop_device(struct iwl_trans *trans)
-{
-	_iwl_trans_stop_device(trans, true);
-}
-
-static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test,
-					bool reset)
+static inline int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test,
				       bool reset)
 {
 	might_sleep();
-	if (trans->ops->d3_suspend)
-		trans->ops->d3_suspend(trans, test, reset);
+	if (!trans->ops->d3_suspend)
+		return 0;
+
+	return trans->ops->d3_suspend(trans, test, reset);
 }
 
 static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
@@ -1004,6 +960,17 @@ static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
 	trans->ops->reclaim(trans, queue, ssn, skbs);
 }
 
+static inline void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue,
+					int ptr)
+{
+	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
+		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
+		return;
+	}
+
+	trans->ops->set_q_ptrs(trans, queue, ptr);
+}
+
 static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
					 bool configure_scd)
 {
@@ -1261,16 +1228,19 @@ static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
 		trans->ops->sync_nmi(trans);
 }
 
+static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
+{
+	return trans->dbg.internal_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED ||
+		trans->dbg.external_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED;
+}
+
 /*****************************************************
 * transport helper functions
 *****************************************************/
 struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
				  struct device *dev,
-				  const struct iwl_cfg *cfg,
				  const struct iwl_trans_ops *ops);
 void iwl_trans_free(struct iwl_trans *trans);
-void iwl_trans_ref(struct iwl_trans *trans);
-void iwl_trans_unref(struct iwl_trans *trans);
 
 /*****************************************************
 * driver (transport) register/unregister functions
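iwl-trans.h above turns two loosely related bools into a pair of tri-state enum iwl_ini_cfg_state fields, and iwl_trans_dbg_ini_valid() derives validity from them. A small sketch of that state model, assuming (as the kernel-doc comment says) that a CORRUPTED config still counts as loaded:

#include <stdbool.h>
#include <stdio.h>

enum ini_cfg_state {
	INI_CFG_STATE_NOT_LOADED,
	INI_CFG_STATE_LOADED,
	INI_CFG_STATE_CORRUPTED, /* partly usable: remaining TLVs apply */
};

struct dbg {
	enum ini_cfg_state internal_ini_cfg;
	enum ini_cfg_state external_ini_cfg;
};

/* "ini valid" = either source progressed past NOT_LOADED */
static bool dbg_ini_valid(const struct dbg *d)
{
	return d->internal_ini_cfg != INI_CFG_STATE_NOT_LOADED ||
	       d->external_ini_cfg != INI_CFG_STATE_NOT_LOADED;
}

int main(void)
{
	struct dbg d = { INI_CFG_STATE_NOT_LOADED, INI_CFG_STATE_CORRUPTED };

	printf("%d\n", dbg_ini_valid(&d)); /* 1: corrupted still counts */
	return 0;
}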
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
index 915b172da57a..60aff2ecec12 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
@@ -153,5 +153,6 @@
 #define IWL_MVM_FTM_INITIATOR_DYNACK		true
 #define IWL_MVM_D3_DEBUG			false
 #define IWL_MVM_USE_TWT				false
+#define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA	10
 
 #endif /* __MVM_CONSTANTS_H */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index cec40855a641..86c2c587e755 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -735,40 +735,16 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
 	return 0;
 }
 
-static void
-iwl_mvm_iter_d0i3_ap_keys(struct iwl_mvm *mvm,
-			  struct ieee80211_vif *vif,
-			  void (*iter)(struct ieee80211_hw *hw,
-				       struct ieee80211_vif *vif,
-				       struct ieee80211_sta *sta,
-				       struct ieee80211_key_conf *key,
-				       void *data),
-			  void *data)
-{
-	struct ieee80211_sta *ap_sta;
-
-	rcu_read_lock();
-
-	ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[mvm->d0i3_ap_sta_id]);
-	if (IS_ERR_OR_NULL(ap_sta))
-		goto out;
-
-	ieee80211_iter_keys_rcu(mvm->hw, vif, iter, data);
-out:
-	rcu_read_unlock();
-}
-
-int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
-				     struct ieee80211_vif *vif,
-				     bool d0i3,
-				     u32 cmd_flags)
+static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
+					    struct ieee80211_vif *vif,
+					    u32 cmd_flags)
 {
 	struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
 	struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
 	bool unified = fw_has_capa(&mvm->fw->ucode_capa,
				   IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
 	struct wowlan_key_data key_data = {
-		.configure_keys = !d0i3 && !unified,
+		.configure_keys = !unified,
 		.use_rsc_tsc = false,
 		.tkip = &tkip_cmd,
 		.use_tkip = false,
@@ -784,25 +760,16 @@ int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
	 * if we have to configure keys, call ieee80211_iter_keys(),
	 * as we need non-atomic context in order to take the
	 * required locks.
-	 * for the d0i3 we can't use ieee80211_iter_keys(), as
-	 * taking (almost) any mutex might result in deadlock.
	 */
-	if (!d0i3) {
-		/*
-		 * Note that currently we don't propagate cmd_flags
-		 * to the iterator. In case of key_data.configure_keys,
-		 * all the configured commands are SYNC, and
-		 * iwl_mvm_wowlan_program_keys() will take care of
-		 * locking/unlocking mvm->mutex.
-		 */
-		ieee80211_iter_keys(mvm->hw, vif,
-				    iwl_mvm_wowlan_program_keys,
-				    &key_data);
-	} else {
-		iwl_mvm_iter_d0i3_ap_keys(mvm, vif,
-					  iwl_mvm_wowlan_program_keys,
-					  &key_data);
-	}
+	/*
+	 * Note that currently we don't propagate cmd_flags
+	 * to the iterator. In case of key_data.configure_keys,
+	 * all the configured commands are SYNC, and
+	 * iwl_mvm_wowlan_program_keys() will take care of
+	 * locking/unlocking mvm->mutex.
+	 */
+	ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_wowlan_program_keys,
			    &key_data);
 
 	if (key_data.error) {
 		ret = -EIO;
@@ -830,7 +797,7 @@ int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
 	}
 
 	/* configure rekey data only if offloaded rekey is supported (d3) */
-	if (mvmvif->rekey_data.valid && !d0i3) {
+	if (mvmvif->rekey_data.valid) {
 		memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd));
 		memcpy(kek_kck_cmd.kck, mvmvif->rekey_data.kck,
		       NL80211_KCK_LEN);
@@ -864,6 +831,8 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
 	bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
					 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
 
+	mvm->offload_tid = wowlan_config_cmd->offloading_tid;
+
 	if (!unified_image) {
 		ret = iwl_mvm_switch_to_d3(mvm);
 		if (ret)
@@ -881,8 +850,7 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
	 * that isn't really a problem though.
	 */
 	mutex_unlock(&mvm->mutex);
-	ret = iwl_mvm_wowlan_config_key_params(mvm, vif, false,
-					       CMD_ASYNC);
+	ret = iwl_mvm_wowlan_config_key_params(mvm, vif, CMD_ASYNC);
 	mutex_lock(&mvm->mutex);
 	if (ret)
 		return ret;
@@ -936,6 +904,8 @@ iwl_mvm_netdetect_config(struct iwl_mvm *mvm,
 		wowlan_config_cmd.wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
 
+	wowlan_config_cmd.sta_id = mvm->aux_sta.sta_id;
+
 	ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
				   sizeof(wowlan_config_cmd),
				   &wowlan_config_cmd);
@@ -1043,6 +1013,8 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
 	} else {
 		struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
 
+		wowlan_config_cmd.sta_id = mvmvif->ap_sta_id;
+
 		ap_sta = rcu_dereference_protected(
			mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
			lockdep_is_held(&mvm->mutex));
@@ -1082,8 +1054,8 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
	 * recording before entering D3. In later devices the FW stops the
	 * recording automatically.
	 */
-	if (mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_9000)
-		iwl_fw_dbg_stop_recording(mvm->trans, NULL);
+	if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000)
+		iwl_fw_dbg_stop_restart_recording(&mvm->fwrt, NULL, true);
 
 	/* must be last -- this switches firmware state */
 	ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd);
@@ -1100,13 +1072,12 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
 
 	clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
 
-	iwl_trans_d3_suspend(mvm->trans, test, !unified_image);
+	ret = iwl_trans_d3_suspend(mvm->trans, test, !unified_image);
 out:
 	if (ret < 0) {
 		iwl_mvm_free_nd(mvm);
 
 		if (!unified_image) {
-			iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
 			if (mvm->fw_restart > 0) {
 				mvm->fw_restart--;
 				ieee80211_restart_hw(mvm->hw);
@@ -1119,37 +1090,12 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
 	return ret;
 }
 
-static int iwl_mvm_enter_d0i3_sync(struct iwl_mvm *mvm)
-{
-	struct iwl_notification_wait wait_d3;
-	static const u16 d3_notif[] = { D3_CONFIG_CMD };
-	int ret;
-
-	iwl_init_notification_wait(&mvm->notif_wait, &wait_d3,
-				   d3_notif, ARRAY_SIZE(d3_notif),
-				   NULL, NULL);
-
-	ret = iwl_mvm_enter_d0i3(mvm->hw->priv);
-	if (ret)
-		goto remove_notif;
-
-	ret = iwl_wait_notification(&mvm->notif_wait, &wait_d3, HZ);
-	WARN_ON_ONCE(ret);
-	return ret;
-
-remove_notif:
-	iwl_remove_notification(&mvm->notif_wait, &wait_d3);
-	return ret;
-}
-
 int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
 {
 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 	struct iwl_trans *trans = mvm->trans;
 	int ret;
 
-	/* make sure the d0i3 exit work is not pending */
-	flush_work(&mvm->d0i3_exit_work);
 	iwl_mvm_pause_tcm(mvm, true);
 
 	iwl_fw_runtime_suspend(&mvm->fwrt);
@@ -1158,25 +1104,6 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
 	if (ret)
 		return ret;
 
-	if (wowlan->any) {
-		trans->system_pm_mode = IWL_PLAT_PM_MODE_D0I3;
-
-		if (iwl_mvm_enter_d0i3_on_suspend(mvm)) {
-			ret = iwl_mvm_enter_d0i3_sync(mvm);
-
-			if (ret)
-				return ret;
-		}
-
-		mutex_lock(&mvm->d0i3_suspend_mutex);
-		__set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
-		mutex_unlock(&mvm->d0i3_suspend_mutex);
-
-		iwl_trans_d3_suspend(trans, false, false);
-
-		return 0;
-	}
-
 	trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
 
 	return __iwl_mvm_suspend(hw, wowlan, false);
@@ -1735,6 +1662,13 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
 			mvm_ap_sta->tid_data[i].seq_number = seq;
 	}
 
+	if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
+		i = mvm->offload_tid;
+		iwl_trans_set_q_ptrs(mvm->trans,
+				     mvm_ap_sta->tid_data[i].txq_id,
+				     mvm_ap_sta->tid_data[i].seq_number >> 4);
+	}
+
 	/* now we have all the data we need, unlock to avoid mac80211 issues */
 	mutex_unlock(&mvm->mutex);
 
@@ -1752,30 +1686,6 @@ out_unlock:
 	return false;
 }
 
-void iwl_mvm_d0i3_update_keys(struct iwl_mvm *mvm,
-			      struct ieee80211_vif *vif,
-			      struct iwl_wowlan_status *status)
-{
-	struct iwl_mvm_d3_gtk_iter_data gtkdata = {
-		.mvm = mvm,
-		.status = status,
-	};
-
-	/*
-	 * rekey handling requires taking locks that can't be taken now.
-	 * however, d0i3 doesn't offload rekey, so we're fine.
-	 */
-	if (WARN_ON_ONCE(status->num_of_gtk_rekeys))
-		return;
-
-	/* find last GTK that we used initially, if any */
-	gtkdata.find_phase = true;
-	iwl_mvm_iter_d0i3_ap_keys(mvm, vif, iwl_mvm_d3_update_keys, &gtkdata);
-
-	gtkdata.find_phase = false;
-	iwl_mvm_iter_d0i3_ap_keys(mvm, vif, iwl_mvm_d3_update_keys, &gtkdata);
-}
-
 #define ND_QUERY_BUF_LEN (sizeof(struct iwl_scan_offload_profile_match) * \
			  IWL_SCAN_MAX_PROFILES)
@@ -2024,15 +1934,6 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
 	if (IS_ERR_OR_NULL(vif))
 		goto err;
 
-	ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !unified_image);
-	if (ret)
-		goto err;
-
-	if (d3_status != IWL_D3_STATUS_ALIVE) {
-		IWL_INFO(mvm, "Device was reset during suspend\n");
-		goto err;
-	}
-
 	iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);
 
 	if (iwl_mvm_check_rt_status(mvm, vif)) {
@@ -2044,6 +1945,15 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
 		goto err;
 	}
 
+	ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !unified_image);
+	if (ret)
+		goto err;
+
+	if (d3_status != IWL_D3_STATUS_ALIVE) {
+		IWL_INFO(mvm, "Device was reset during suspend\n");
+		goto err;
+	}
+
 	if (d0i3_first) {
 		ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
 		if (ret < 0) {
@@ -2059,6 +1969,9 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
	 */
 	iwl_mvm_update_changed_regdom(mvm);
 
+	/* Re-configure PPAG settings */
+	iwl_mvm_ppag_send_cmd(mvm);
+
 	if (!unified_image)
 		/* Re-configure default SAR profile */
 		iwl_mvm_sar_select_profile(mvm, 1, 1);
@@ -2115,14 +2028,6 @@ out:
	 * 2. We are using a unified image but had an error while exiting D3
	 */
 	set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
-	/*
-	 * When switching images we return 1, which causes mac80211
-	 * to do a reconfig with IEEE80211_RECONFIG_TYPE_RESTART.
-	 * This type of reconfig calls iwl_mvm_restart_complete(),
-	 * where we unref the IWL_MVM_REF_UCODE_DOWN, so we need
-	 * to take the reference here.
-	 */
-	iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
 
 	return 1;
 }
@@ -2134,53 +2039,12 @@ static int iwl_mvm_resume_d3(struct iwl_mvm *mvm)
 	return __iwl_mvm_resume(mvm, false);
 }
 
-static int iwl_mvm_resume_d0i3(struct iwl_mvm *mvm)
-{
-	bool exit_now;
-	enum iwl_d3_status d3_status;
-	struct iwl_trans *trans = mvm->trans;
-
-	iwl_trans_d3_resume(trans, &d3_status, false, false);
-
-	/*
-	 * make sure to clear D0I3_DEFER_WAKEUP before
-	 * calling iwl_trans_resume(), which might wait
-	 * for d0i3 exit completion.
-	 */
-	mutex_lock(&mvm->d0i3_suspend_mutex);
-	__clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
-	exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
-					&mvm->d0i3_suspend_flags);
-	mutex_unlock(&mvm->d0i3_suspend_mutex);
-	if (exit_now) {
-		IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n");
-		_iwl_mvm_exit_d0i3(mvm);
-	}
-
-	iwl_trans_resume(trans);
-
-	if (iwl_mvm_enter_d0i3_on_suspend(mvm)) {
-		int ret = iwl_mvm_exit_d0i3(mvm->hw->priv);
-
-		if (ret)
-			return ret;
-		/*
-		 * d0i3 exit will be deferred until reconfig_complete.
-		 * make sure there we are out of d0i3.
-		 */
-	}
-	return 0;
-}
-
 int iwl_mvm_resume(struct ieee80211_hw *hw)
 {
 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 	int ret;
 
-	if (mvm->trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3)
-		ret = iwl_mvm_resume_d0i3(mvm);
-	else
-		ret = iwl_mvm_resume_d3(mvm);
+	ret = iwl_mvm_resume_d3(mvm);
 
 	mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
PRINT_MVM_REF(IWL_MVM_REF_EXIT_WORK); - PRINT_MVM_REF(IWL_MVM_REF_PROTECT_CSA); - PRINT_MVM_REF(IWL_MVM_REF_FW_DBG_COLLECT); - PRINT_MVM_REF(IWL_MVM_REF_INIT_UCODE); - PRINT_MVM_REF(IWL_MVM_REF_SENDING_CMD); - PRINT_MVM_REF(IWL_MVM_REF_RX); - - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_dbgfs_d0i3_refs_write(struct iwl_mvm *mvm, char *buf, - size_t count, loff_t *ppos) -{ - unsigned long value; - int ret; - bool taken; - - ret = kstrtoul(buf, 10, &value); - if (ret < 0) - return ret; - - mutex_lock(&mvm->mutex); - - taken = mvm->refs[IWL_MVM_REF_USER]; - if (value == 1 && !taken) - iwl_mvm_ref(mvm, IWL_MVM_REF_USER); - else if (value == 0 && taken) - iwl_mvm_unref(mvm, IWL_MVM_REF_USER); - else - ret = -EINVAL; - - mutex_unlock(&mvm->mutex); - - if (ret < 0) - return ret; - return count; -} - #define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \ _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm) #define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \ @@ -1692,21 +1596,14 @@ iwl_dbgfs_prph_reg_read(struct file *file, int pos = 0; char buf[32]; const size_t bufsz = sizeof(buf); - int ret; if (!mvm->dbgfs_prph_reg_addr) return -EINVAL; - ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_READ); - if (ret) - return ret; - pos += scnprintf(buf + pos, bufsz - pos, "Reg 0x%x: (0x%x)\n", mvm->dbgfs_prph_reg_addr, iwl_read_prph(mvm->trans, mvm->dbgfs_prph_reg_addr)); - iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_READ); - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } @@ -1716,7 +1613,6 @@ iwl_dbgfs_prph_reg_write(struct iwl_mvm *mvm, char *buf, { u8 args; u32 value; - int ret; args = sscanf(buf, "%i %i", &mvm->dbgfs_prph_reg_addr, &value); /* if we only want to set the reg address - nothing more to do */ @@ -1727,13 +1623,8 @@ iwl_dbgfs_prph_reg_write(struct iwl_mvm *mvm, char *buf, if (args != 2) return -EINVAL; - ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_WRITE); - if (ret) - return ret; - iwl_write_prph(mvm->trans, mvm->dbgfs_prph_reg_addr, value); - iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE); out: return count; } @@ -1867,6 +1758,38 @@ iwl_dbgfs_uapsd_noagg_bssids_read(struct file *file, char __user *user_buf, return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } +static ssize_t +iwl_dbgfs_ltr_config_write(struct iwl_mvm *mvm, + char *buf, size_t count, loff_t *ppos) +{ + int ret; + struct iwl_ltr_config_cmd ltr_config = {0}; + + if (!iwl_mvm_firmware_running(mvm)) + return -EIO; + + if (sscanf(buf, "%x,%x,%x,%x,%x,%x,%x", + <r_config.flags, + <r_config.static_long, + <r_config.static_short, + <r_config.ltr_cfg_values[0], + <r_config.ltr_cfg_values[1], + <r_config.ltr_cfg_values[2], + <r_config.ltr_cfg_values[3]) != 7) { + return -EINVAL; + } + + mutex_lock(&mvm->mutex); + ret = iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0, sizeof(ltr_config), + <r_config); + mutex_unlock(&mvm->mutex); + + if (ret) + IWL_ERR(mvm, "failed to send ltr configuration cmd\n"); + + return ret ?: count; +} + MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64); /* Device wide debugfs entries */ @@ -1892,7 +1815,6 @@ MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10); MVM_DEBUGFS_WRITE_FILE_OPS(bt_tx_prio, 10); MVM_DEBUGFS_WRITE_FILE_OPS(bt_force_ant, 10); MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8); -MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8); MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8); MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 64); MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl, @@ -1916,6 +1838,8 @@ MVM_DEBUGFS_READ_WRITE_STA_FILE_OPS(amsdu_len, 16); 
MVM_DEBUGFS_READ_WRITE_FILE_OPS(he_sniffer_params, 32); +MVM_DEBUGFS_WRITE_FILE_OPS(ltr_config, 512); + static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { @@ -2091,7 +2015,6 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) MVM_DEBUGFS_ADD_FILE(bt_force_ant, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(prph_reg, mvm->debugfs_dir, 0600); - MVM_DEBUGFS_ADD_FILE(d0i3_refs, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, 0200); @@ -2104,6 +2027,9 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) #endif MVM_DEBUGFS_ADD_FILE(he_sniffer_params, mvm->debugfs_dir, 0600); + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_LTR_GEN2)) + MVM_DEBUGFS_ADD_FILE(ltr_config, mvm->debugfs_dir, 0200); + debugfs_create_bool("enable_scan_iteration_notif", 0600, mvm->debugfs_dir, &mvm->scan_iter_notif_enabled); debugfs_create_bool("drop_bcn_ap_mode", 0600, mvm->debugfs_dir, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 5de54d1559dd..014eca6596e2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -357,13 +357,14 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_ALIVE_TIMEOUT); - if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) IWL_ERR(mvm, "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n", iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS), iwl_read_umac_prph(trans, UMAG_SB_CPU_2_STATUS)); - else if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) + else if (trans->trans_cfg->device_family >= + IWL_DEVICE_FAMILY_8000) IWL_ERR(mvm, "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n", iwl_read_prph(trans, SB_CPU_1_STATUS), @@ -430,7 +431,7 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) iwl_wait_init_complete, NULL); - iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_EARLY); + iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL); /* Will also start the device */ ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR); @@ -438,7 +439,8 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret); goto error; } - iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_AFTER_ALIVE); + iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE, + NULL); /* Send init config command to mark that we are sending NVM access * commands @@ -557,7 +559,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) goto remove_notif; } - if (mvm->cfg->device_family < IWL_DEVICE_FAMILY_8000) { + if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) { ret = iwl_mvm_send_bt_init_conf(mvm); if (ret) goto remove_notif; @@ -1002,6 +1004,113 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0, sizeof(cmd), &cmd); } +static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm) +{ + union acpi_object *wifi_pkg, *data, *enabled; + int i, j, ret, tbl_rev; + int idx = 2; + + mvm->ppag_table.enabled = cpu_to_le32(0); + data = iwl_acpi_get_object(mvm->dev, 
ACPI_PPAG_METHOD); + if (IS_ERR(data)) + return PTR_ERR(data); + + wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data, + ACPI_PPAG_WIFI_DATA_SIZE, &tbl_rev); + + if (IS_ERR(wifi_pkg) || tbl_rev != 0) { + ret = PTR_ERR(wifi_pkg); + goto out_free; + } + + enabled = &wifi_pkg->package.elements[1]; + if (enabled->type != ACPI_TYPE_INTEGER || + (enabled->integer.value != 0 && enabled->integer.value != 1)) { + ret = -EINVAL; + goto out_free; + } + + mvm->ppag_table.enabled = cpu_to_le32(enabled->integer.value); + if (!mvm->ppag_table.enabled) { + ret = 0; + goto out_free; + } + + /* + * read, verify gain values and save them into the PPAG table. + * first sub-band (j=0) corresponds to Low-Band (2.4GHz), and the + * following sub-bands to High-Band (5GHz). + */ + for (i = 0; i < ACPI_PPAG_NUM_CHAINS; i++) { + for (j = 0; j < ACPI_PPAG_NUM_SUB_BANDS; j++) { + union acpi_object *ent; + + ent = &wifi_pkg->package.elements[idx++]; + if (ent->type != ACPI_TYPE_INTEGER || + (j == 0 && ent->integer.value > ACPI_PPAG_MAX_LB) || + (j == 0 && ent->integer.value < ACPI_PPAG_MIN_LB) || + (j != 0 && ent->integer.value > ACPI_PPAG_MAX_HB) || + (j != 0 && ent->integer.value < ACPI_PPAG_MIN_HB)) { + mvm->ppag_table.enabled = cpu_to_le32(0); + ret = -EINVAL; + goto out_free; + } + mvm->ppag_table.gain[i][j] = ent->integer.value; + } + } + ret = 0; +out_free: + kfree(data); + return ret; +} + +int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm) +{ + int i, j, ret; + + if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) { + IWL_DEBUG_RADIO(mvm, + "PPAG capability not supported by FW, command not sent.\n"); + return 0; + } + + IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n"); + IWL_DEBUG_RADIO(mvm, "PPAG is %s\n", + mvm->ppag_table.enabled ? "enabled" : "disabled"); + + for (i = 0; i < ACPI_PPAG_NUM_CHAINS; i++) { + for (j = 0; j < ACPI_PPAG_NUM_SUB_BANDS; j++) { + IWL_DEBUG_RADIO(mvm, + "PPAG table: chain[%d] band[%d]: gain = %d\n", + i, j, mvm->ppag_table.gain[i][j]); + } + } + + ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP, + PER_PLATFORM_ANT_GAIN_CMD), + 0, sizeof(mvm->ppag_table), + &mvm->ppag_table); + if (ret < 0) + IWL_ERR(mvm, "failed to send PER_PLATFORM_ANT_GAIN_CMD (%d)\n", + ret); + + return ret; +} + +static int iwl_mvm_ppag_init(struct iwl_mvm *mvm) +{ + int ret; + + ret = iwl_mvm_get_ppag_table(mvm); + if (ret < 0) { + IWL_DEBUG_RADIO(mvm, + "PPAG BIOS table invalid or unavailable. (%d)\n", + ret); + return 0; + } + return iwl_mvm_ppag_send_cmd(mvm); +} + #else /* CONFIG_ACPI */ static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm) { @@ -1033,6 +1142,16 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm) { return -ENOENT; } + +int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm) +{ + return -ENOENT; +} + +static int iwl_mvm_ppag_init(struct iwl_mvm *mvm) +{ + return -ENOENT; +} #endif /* CONFIG_ACPI */ void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags) @@ -1140,17 +1259,13 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm) return ret; } - /* - * Stop and start the transport without entering low power - * mode. This will save the state of other components on the - * device that are triggered by the INIT firwmare (MFUART). 
- */ - _iwl_trans_stop_device(mvm->trans, false); - ret = _iwl_trans_start_hw(mvm->trans, false); + iwl_fw_dbg_stop_sync(&mvm->fwrt); + iwl_trans_stop_device(mvm->trans); + ret = iwl_trans_start_hw(mvm->trans); if (ret) return ret; - iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_EARLY); + iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL); mvm->rfkill_safe_init_done = false; ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR); @@ -1159,7 +1274,8 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm) mvm->rfkill_safe_init_done = true; - iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_AFTER_ALIVE); + iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE, + NULL); return iwl_init_paging(&mvm->fwrt, mvm->fwrt.cur_fw_img); } @@ -1169,6 +1285,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) int ret, i; struct ieee80211_channel *chan; struct cfg80211_chan_def chandef; + struct ieee80211_supported_band *sband = NULL; lockdep_assert_held(&mvm->mutex); @@ -1191,7 +1308,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) if (ret) IWL_ERR(mvm, "Failed to initialize Smart Fifo\n"); - if (!mvm->trans->dbg.ini_valid) { + if (!iwl_trans_dbg_ini_valid(mvm->trans)) { mvm->fwrt.dump.conf = FW_DBG_INVALID; /* if we have a destination, assume EARLY START */ if (mvm->fw->dbg.dest_tlv) @@ -1219,7 +1336,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) goto error; /* Init RSS configuration */ - if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) { + if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) { ret = iwl_configure_rxq(mvm); if (ret) { IWL_ERR(mvm, "Failed to configure RX queues: %d\n", @@ -1246,9 +1363,11 @@ int iwl_mvm_up(struct iwl_mvm *mvm) /* reset quota debouncing buffer - 0xff will yield invalid data */ memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd)); - ret = iwl_mvm_send_dqa_cmd(mvm); - if (ret) - goto error; + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DQA_SUPPORT)) { + ret = iwl_mvm_send_dqa_cmd(mvm); + if (ret) + goto error; + } /* Add auxiliary station for scanning */ ret = iwl_mvm_add_aux_sta(mvm); @@ -1256,7 +1375,15 @@ int iwl_mvm_up(struct iwl_mvm *mvm) goto error; /* Add all the PHY contexts */ - chan = &mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[0]; + i = 0; + while (!sband && i < NUM_NL80211_BANDS) + sband = mvm->hw->wiphy->bands[i++]; + + if (WARN_ON_ONCE(!sband)) + goto error; + + chan = &sband->channels[0]; + cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT); for (i = 0; i < NUM_PHY_CTX; i++) { /* @@ -1270,7 +1397,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm) goto error; } -#ifdef CONFIG_THERMAL if (iwl_mvm_is_tt_in_fw(mvm)) { /* in order to give the responsibility of ct-kill and * TX backoff to FW we need to send empty temperature reporting @@ -1282,6 +1408,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) iwl_mvm_tt_tx_backoff(mvm, 0); } +#ifdef CONFIG_THERMAL /* TODO: read the budget from BIOS / Platform NVM */ /* @@ -1294,12 +1421,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm) if (ret) goto error; } -#else - /* Initialize tx backoffs to the minimal possible */ - iwl_mvm_tt_tx_backoff(mvm, 0); #endif - WARN_ON(iwl_mvm_config_ltr(mvm)); + if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_LTR_GEN2)) + WARN_ON(iwl_mvm_config_ltr(mvm)); ret = iwl_mvm_power_update_device(mvm); if (ret) @@ -1323,16 +1448,16 @@ int iwl_mvm_up(struct iwl_mvm *mvm) goto error; } - /* allow FW/transport low power modes if not during restart */ - if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) - 
iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN); - if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) iwl_mvm_send_recovery_cmd(mvm, ERROR_RECOVERY_UPDATE_DB); if (iwl_acpi_get_eckv(mvm->dev, &mvm->ext_clock_valid)) IWL_DEBUG_INFO(mvm, "ECKV table doesn't exist in BIOS\n"); + ret = iwl_mvm_ppag_init(mvm); + if (ret) + goto error; + ret = iwl_mvm_sar_init(mvm); if (ret == 0) { ret = iwl_mvm_sar_geo_init(mvm); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/led.c b/drivers/net/wireless/intel/iwlwifi/mvm/led.c index 4348bb00e761..d104da9170ca 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/led.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/led.c @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -29,7 +29,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -156,7 +156,7 @@ void iwl_mvm_leds_sync(struct iwl_mvm *mvm) * if we control through the register, we're doing it * even when the firmware isn't up, so no need to sync */ - if (mvm->cfg->device_family < IWL_DEVICE_FAMILY_8000) + if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) return; iwl_mvm_led_set(mvm, mvm->led.brightness > 0); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index fe776e35b9d0..9c417dd06291 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -1431,6 +1431,9 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, else if (rx_missed_bcon_since_rx > IWL_MVM_MISSED_BEACONS_THRESHOLD) ieee80211_beacon_loss(vif); + iwl_dbg_tlv_time_point(&mvm->fwrt, + IWL_FW_INI_TIME_POINT_MISSED_BEACONS, NULL); + trigger = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), FW_DBG_TRIGGER_MISSED_BEACONS); if (!trigger) @@ -1447,8 +1450,6 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, rx_missed_bcon >= stop_trig_missed_bcon) iwl_fw_dbg_collect_trig(&mvm->fwrt, trigger, NULL); - iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_MISSED_BEACONS); - out: rcu_read_unlock(); } @@ -1594,7 +1595,9 @@ void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm, RCU_INIT_POINTER(mvm->csa_vif, NULL); return; case NL80211_IFTYPE_STATION: - iwl_mvm_csa_client_absent(mvm, vif); + if (!fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) + iwl_mvm_csa_client_absent(mvm, vif); cancel_delayed_work(&mvmvif->csa_work); ieee80211_chswitch_done(vif, true); break; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index a7bc00d1296f..cd1b10042fbf 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -213,91 +213,6 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, struct ieee80211_sta *sta, struct ieee80211_key_conf *key); -void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type) -{ - if (!iwl_mvm_is_d0i3_supported(mvm)) - return; - - 
IWL_DEBUG_RPM(mvm, "Take mvm reference - type %d\n", ref_type); - spin_lock_bh(&mvm->refs_lock); - mvm->refs[ref_type]++; - spin_unlock_bh(&mvm->refs_lock); - iwl_trans_ref(mvm->trans); -} - -void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type) -{ - if (!iwl_mvm_is_d0i3_supported(mvm)) - return; - - IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type); - spin_lock_bh(&mvm->refs_lock); - if (WARN_ON(!mvm->refs[ref_type])) { - spin_unlock_bh(&mvm->refs_lock); - return; - } - mvm->refs[ref_type]--; - spin_unlock_bh(&mvm->refs_lock); - iwl_trans_unref(mvm->trans); -} - -static void iwl_mvm_unref_all_except(struct iwl_mvm *mvm, - enum iwl_mvm_ref_type except_ref) -{ - int i, j; - - if (!iwl_mvm_is_d0i3_supported(mvm)) - return; - - spin_lock_bh(&mvm->refs_lock); - for (i = 0; i < IWL_MVM_REF_COUNT; i++) { - if (except_ref == i || !mvm->refs[i]) - continue; - - IWL_DEBUG_RPM(mvm, "Cleanup: remove mvm ref type %d (%d)\n", - i, mvm->refs[i]); - for (j = 0; j < mvm->refs[i]; j++) - iwl_trans_unref(mvm->trans); - mvm->refs[i] = 0; - } - spin_unlock_bh(&mvm->refs_lock); -} - -bool iwl_mvm_ref_taken(struct iwl_mvm *mvm) -{ - int i; - bool taken = false; - - if (!iwl_mvm_is_d0i3_supported(mvm)) - return true; - - spin_lock_bh(&mvm->refs_lock); - for (i = 0; i < IWL_MVM_REF_COUNT; i++) { - if (mvm->refs[i]) { - taken = true; - break; - } - } - spin_unlock_bh(&mvm->refs_lock); - - return taken; -} - -int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type) -{ - iwl_mvm_ref(mvm, ref_type); - - if (!wait_event_timeout(mvm->d0i3_exit_waitq, - !test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status), - HZ)) { - WARN_ON_ONCE(1); - iwl_mvm_unref(mvm, ref_type); - return -EIO; - } - - return 0; -} - static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm) { int i; @@ -485,7 +400,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) * for older devices. We also don't see this issue on any newer * devices. */ - if (mvm->cfg->device_family >= IWL_DEVICE_FAMILY_9000) + if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) ieee80211_hw_set(hw, TX_AMSDU); ieee80211_hw_set(hw, TX_FRAG_LIST); @@ -762,12 +677,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; #ifdef CONFIG_PM_SLEEP - if (iwl_mvm_is_d0i3_supported(mvm) && - device_can_wakeup(mvm->trans->dev)) { - mvm->wowlan.flags = WIPHY_WOWLAN_ANY; - hw->wiphy->wowlan = &mvm->wowlan; - } - if ((unified || mvm->fw->img[IWL_UCODE_WOWLAN].num_sec) && mvm->trans->ops->d3_suspend && mvm->trans->ops->d3_resume && @@ -833,46 +742,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) return ret; } -static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm, - struct ieee80211_sta *sta, - struct sk_buff *skb) -{ - struct iwl_mvm_sta *mvmsta; - bool defer = false; - - /* - * double check the IN_D0I3 flag both before and after - * taking the spinlock, in order to prevent taking - * the spinlock when not needed. - */ - if (likely(!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))) - return false; - - spin_lock(&mvm->d0i3_tx_lock); - /* - * testing the flag again ensures the skb dequeue - * loop (on d0i3 exit) hasn't run yet. 
- */ - if (!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) - goto out; - - mvmsta = iwl_mvm_sta_from_mac80211(sta); - if (mvmsta->sta_id == IWL_MVM_INVALID_STA || - mvmsta->sta_id != mvm->d0i3_ap_sta_id) - goto out; - - __skb_queue_tail(&mvm->d0i3_tx, skb); - - /* trigger wakeup */ - iwl_mvm_ref(mvm, IWL_MVM_REF_TX); - iwl_mvm_unref(mvm, IWL_MVM_REF_TX); - - defer = true; -out: - spin_unlock(&mvm->d0i3_tx_lock); - return defer; -} - static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb) @@ -917,8 +786,6 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, } if (sta) { - if (iwl_mvm_defer_tx(mvm, sta, skb)) - return; if (iwl_mvm_tx_skb(mvm, skb, sta)) goto drop; return; @@ -1086,7 +953,6 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; - bool tx_agg_ref = false; struct ieee80211_sta *sta = params->sta; enum ieee80211_ampdu_mlme_action action = params->action; u16 tid = params->tid; @@ -1101,31 +967,6 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, if (!(mvm->nvm_data->sku_cap_11n_enable)) return -EACCES; - /* return from D0i3 before starting a new Tx aggregation */ - switch (action) { - case IEEE80211_AMPDU_TX_START: - case IEEE80211_AMPDU_TX_STOP_CONT: - case IEEE80211_AMPDU_TX_STOP_FLUSH: - case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: - case IEEE80211_AMPDU_TX_OPERATIONAL: - /* - * for tx start, wait synchronously until D0i3 exit to - * get the correct sequence number for the tid. - * additionally, some other ampdu actions use direct - * target access, which is not handled automatically - * by the trans layer (unlike commands), so wait for - * d0i3 exit in these cases as well. - */ - ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_TX_AGG); - if (ret) - return ret; - - tx_agg_ref = true; - break; - default: - break; - } - mutex_lock(&mvm->mutex); switch (action) { @@ -1186,13 +1027,6 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, } mutex_unlock(&mvm->mutex); - /* - * If the tid is marked as started, we won't use it for offloaded - * traffic on the next D0i3 entry. It's safe to unref. - */ - if (tx_agg_ref) - iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG); - return ret; } @@ -1216,11 +1050,6 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac, static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) { - /* cleanup all stale references (scan, roc), but keep the - * ucode_down ref until reconfig is complete - */ - iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN); - iwl_mvm_stop_device(mvm); mvm->cur_aid = 0; @@ -1242,7 +1071,6 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm); mvm->p2p_device_vif = NULL; - mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA; iwl_mvm_reset_phy_ctxts(mvm); memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table)); @@ -1251,9 +1079,6 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) ieee80211_wake_queues(mvm->hw); - /* clear any stale d0i3 state */ - clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status); - mvm->vif_count = 0; mvm->rx_ba_sessions = 0; mvm->fwrt.dump.conf = FW_DBG_INVALID; @@ -1278,18 +1103,13 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm) clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status); /* Clean up some internal and mac80211 state on restart */ iwl_mvm_restart_cleanup(mvm); - } else { - /* Hold the reference to prevent runtime suspend while - * the start procedure runs. 
It's a bit confusing - * that the UCODE_DOWN reference is taken, but it just - * means "UCODE is not UP yet". ( TODO: rename this - * reference). - */ - iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); } ret = iwl_mvm_up(mvm); - iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_POST_INIT); + iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_POST_INIT, + NULL); + iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_PERIODIC, + NULL); if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { /* Something went wrong - we need to finish some cleanup @@ -1297,9 +1117,6 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm) * would do. */ clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); -#ifdef CONFIG_PM - iwl_mvm_d0i3_enable_tx(mvm, NULL); -#endif } return ret; @@ -1310,19 +1127,6 @@ static int iwl_mvm_mac_start(struct ieee80211_hw *hw) struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; - /* Some hw restart cleanups must not hold the mutex */ - if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { - /* - * Make sure we are out of d0i3. This is needed - * to make sure the reference accounting is correct - * (and there is no stale d0i3_exit_work). - */ - wait_event_timeout(mvm->d0i3_exit_waitq, - !test_bit(IWL_MVM_STATUS_IN_D0I3, - &mvm->status), - HZ); - } - mutex_lock(&mvm->mutex); ret = __iwl_mvm_mac_start(mvm); mutex_unlock(&mvm->mutex); @@ -1337,17 +1141,12 @@ static void iwl_mvm_restart_complete(struct iwl_mvm *mvm) mutex_lock(&mvm->mutex); clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); -#ifdef CONFIG_PM - iwl_mvm_d0i3_enable_tx(mvm, NULL); -#endif + ret = iwl_mvm_update_quotas(mvm, true, NULL); if (ret) IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n", ret); - /* allow transport/FW low power modes */ - iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN); - iwl_mvm_send_recovery_cmd(mvm, ERROR_RECOVERY_END_OF_RECOVERY); /* @@ -1359,17 +1158,6 @@ static void iwl_mvm_restart_complete(struct iwl_mvm *mvm) mutex_unlock(&mvm->mutex); } -static void iwl_mvm_resume_complete(struct iwl_mvm *mvm) -{ - if (iwl_mvm_is_d0i3_supported(mvm) && - iwl_mvm_enter_d0i3_on_suspend(mvm)) - WARN_ONCE(!wait_event_timeout(mvm->d0i3_exit_waitq, - !test_bit(IWL_MVM_STATUS_IN_D0I3, - &mvm->status), - HZ), - "D0i3 exit on resume timed out\n"); -} - static void iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw, enum ieee80211_reconfig_type reconfig_type) @@ -1381,7 +1169,6 @@ iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw, iwl_mvm_restart_complete(mvm); break; case IEEE80211_RECONFIG_TYPE_SUSPEND: - iwl_mvm_resume_complete(mvm); break; } } @@ -1443,7 +1230,6 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - flush_work(&mvm->d0i3_exit_work); flush_work(&mvm->async_handlers_wk); flush_work(&mvm->add_stream_wk); @@ -1457,7 +1243,6 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw) */ clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); - iwl_fw_cancel_dumps(&mvm->fwrt); cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork); cancel_delayed_work_sync(&mvm->scan_timeout_dwork); iwl_fw_free_dump_desc(&mvm->fwrt); @@ -1543,15 +1328,20 @@ static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, goto out_unlock; } - iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false); + if (!fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) + iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false); iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); - ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0); - if (ret) 
- goto out_unlock; + if (!fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) { + ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0); + if (ret) + goto out_unlock; - iwl_mvm_stop_session_protection(mvm, vif); + iwl_mvm_stop_session_protection(mvm, vif); + } } mvmvif->ps_disabled = false; @@ -1612,15 +1402,6 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, RCU_INIT_POINTER(mvmvif->probe_resp_data, NULL); /* - * make sure D0i3 exit is completed, otherwise a target access - * during tx queue configuration could be done when still in - * D0i3 state. - */ - ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_ADD_IF); - if (ret) - return ret; - - /* * Not much to do here. The stack will not allow interface * types or combinations that we didn't advertise, so we * don't really have to check the types. @@ -1755,8 +1536,6 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, out_unlock: mutex_unlock(&mvm->mutex); - iwl_mvm_unref(mvm, IWL_MVM_REF_ADD_IF); - return ret; } @@ -2254,6 +2033,10 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, flags = 0; + /* Block 26-tone RU OFDMA transmissions */ + if (mvmvif->he_ru_2mhz_block) + flags |= STA_CTXT_HE_RU_2MHZ_BLOCK; + /* HTC flags */ if (sta->he_cap.he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_HTC_HE) @@ -2520,7 +2303,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, iwl_mvm_sf_update(mvm, vif, false); iwl_mvm_power_vif_assoc(mvm, vif); if (vif->p2p) { - iwl_mvm_ref(mvm, IWL_MVM_REF_P2P_CLIENT); iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_PROT, IEEE80211_SMPS_DYNAMIC); @@ -2556,9 +2338,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, IWL_ERR(mvm, "failed to remove AP station\n"); - if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id) - mvm->d0i3_ap_sta_id = - IWL_MVM_INVALID_STA; mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; } @@ -2567,9 +2346,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, if (ret) IWL_ERR(mvm, "failed to update quotas\n"); - if (vif->p2p) - iwl_mvm_unref(mvm, IWL_MVM_REF_P2P_CLIENT); - /* this will take the cleared BSSID from bss_conf */ ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); if (ret) @@ -2657,14 +2433,6 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret, i; - /* - * iwl_mvm_mac_ctxt_add() might read directly from the device - * (the system time), so make sure it is available. 
- */ - ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_START_AP); - if (ret) - return ret; - mutex_lock(&mvm->mutex); /* Send the beacon template */ @@ -2760,8 +2528,6 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, if (vif->p2p && mvm->p2p_device_vif) iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL); - iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS); - iwl_mvm_bt_coex_vif_change(mvm); /* we don't support TDLS during DCM */ @@ -2783,7 +2549,6 @@ out_remove: iwl_mvm_mac_ctxt_remove(mvm, vif); out_unlock: mutex_unlock(&mvm->mutex); - iwl_mvm_unref(mvm, IWL_MVM_REF_START_AP); return ret; } @@ -2821,8 +2586,6 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw, iwl_mvm_bt_coex_vif_change(mvm); - iwl_mvm_unref(mvm, IWL_MVM_REF_AP_IBSS); - /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ if (vif->p2p && mvm->p2p_device_vif) iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL); @@ -2896,14 +2659,6 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw, { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - /* - * iwl_mvm_bss_info_changed_station() might call - * iwl_mvm_protect_session(), which reads directly from - * the device (the system time), so make sure it is available. - */ - if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_BSS_CHANGED)) - return; - mutex_lock(&mvm->mutex); if (changes & BSS_CHANGED_IDLE && !bss_conf->idle) @@ -2927,7 +2682,6 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw, } mutex_unlock(&mvm->mutex); - iwl_mvm_unref(mvm, IWL_MVM_REF_BSS_CHANGED); } static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw, @@ -3205,6 +2959,51 @@ iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm, peer_addr, action); } +struct iwl_mvm_he_obss_narrow_bw_ru_data { + bool tolerated; +}; + +static void iwl_mvm_check_he_obss_narrow_bw_ru_iter(struct wiphy *wiphy, + struct cfg80211_bss *bss, + void *_data) +{ + struct iwl_mvm_he_obss_narrow_bw_ru_data *data = _data; + const struct element *elem; + + elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY, bss->ies->data, + bss->ies->len); + + if (!elem || elem->datalen < 10 || + !(elem->data[10] & + WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT)) { + data->tolerated = false; + } +} + +static void iwl_mvm_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_he_obss_narrow_bw_ru_data iter_data = { + .tolerated = true, + }; + + if (!(vif->bss_conf.chandef.chan->flags & IEEE80211_CHAN_RADAR)) { + mvmvif->he_ru_2mhz_block = false; + return; + } + + cfg80211_bss_iter(hw->wiphy, &vif->bss_conf.chandef, + iwl_mvm_check_he_obss_narrow_bw_ru_iter, + &iter_data); + + /* + * If there is at least one AP on radar channel that cannot + * tolerate 26-tone RU UL OFDMA transmissions using HE TB PPDU. 
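+	 * we must block such transmissions, which is what
+	 * he_ru_2mhz_block records below.
+	 *
+	 * Note (observation on the code above, not part of the original
+	 * patch): the iterator reads elem->data[10], which requires
+	 * elem->datalen >= 11, so its "elem->datalen < 10" bound is off
+	 * by one -- datalen == 10 passes the check while data[10] is
+	 * already out of range. A minimal corrected guard would be:
+	 *
+	 *	if (!elem || elem->datalen <= 10 ||
+	 *	    !(elem->data[10] &
+	 *	      WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT))
+	 *		data->tolerated = false;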
+ */ + mvmvif->he_ru_2mhz_block = !iter_data.tolerated; +} + static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, @@ -3306,6 +3105,11 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, iwl_mvm_cfg_he_sta(mvm, vif, mvm_sta->sta_id); } else if (vif->type == NL80211_IFTYPE_STATION) { vif->bss_conf.he_support = sta->he_cap.has_he; + + mvmvif->he_ru_2mhz_block = false; + if (sta->he_cap.has_he) + iwl_mvm_check_he_obss_narrow_bw_ru(hw, vif); + iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); } @@ -3447,13 +3251,6 @@ static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw, u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS; u32 min_duration = IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS; - /* - * iwl_mvm_protect_session() reads directly from the device - * (the system time), so make sure it is available. - */ - if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PREPARE_TX)) - return; - if (req_duration > duration) duration = req_duration; @@ -3461,8 +3258,6 @@ static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw, /* Try really hard to protect the session and hear a beacon */ iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false); mutex_unlock(&mvm->mutex); - - iwl_mvm_unref(mvm, IWL_MVM_REF_PREPARE_TX); } static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw, @@ -3537,7 +3332,7 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, switch (key->cipher) { case WLAN_CIPHER_SUITE_TKIP: - if (!mvm->trans->cfg->gen2) { + if (!mvm->trans->trans_cfg->gen2) { key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; } else if (vif->type == NL80211_IFTYPE_STATION) { @@ -4045,7 +3840,8 @@ out_unlock: return ret; } -static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw) +static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); @@ -4277,23 +4073,12 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) { u32 duration = 3 * vif->bss_conf.beacon_int; - - /* iwl_mvm_protect_session() reads directly from the - * device (the system time), so make sure it is - * available. - */ - ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_CSA); - if (ret) - goto out_remove_binding; - /* Protect the session to make sure we hear the first * beacon on the new channel. */ iwl_mvm_protect_session(mvm, vif, duration, duration, vif->bss_conf.beacon_int / 2, true); - - iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_CSA); } iwl_mvm_update_quotas(mvm, false, NULL); @@ -4643,6 +4428,42 @@ static int iwl_mvm_schedule_client_csa(struct iwl_mvm *mvm, 0, sizeof(cmd), &cmd); } +static int iwl_mvm_old_pre_chan_sw_sta(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_channel_switch *chsw) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + u32 apply_time; + + /* Schedule the time event to a bit before beacon 1, + * to make sure we're in the new channel when the + * GO/AP arrives. In case count <= 1 immediately schedule the + * TE (this might result with some packet loss or connection + * loss). 
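+	 *
+	 * Worked example (illustrative numbers, not taken from this
+	 * patch): with beacon_int = 100 TU, count = 3 and a client
+	 * switch-time constant of 30 TU, this schedules the event at
+	 *
+	 *	device_timestamp + (100 * (3 - 1) - 30) * 1024 usec
+	 *	  = device_timestamp + ~174 ms,
+	 *
+	 * i.e. a bit before beacon 1, as described above
+	 * (1 TU = 1024 usec, hence the factor of 1024).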
+ */ + if (chsw->count <= 1) + apply_time = 0; + else + apply_time = chsw->device_timestamp + + ((vif->bss_conf.beacon_int * (chsw->count - 1) - + IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024); + + if (chsw->block_tx) + iwl_mvm_csa_client_absent(mvm, vif); + + if (mvmvif->bf_data.bf_enabled) { + int ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); + + if (ret) + return ret; + } + + iwl_mvm_schedule_csa_period(mvm, vif, vif->bss_conf.beacon_int, + apply_time); + + return 0; +} + #define IWL_MAX_CSA_BLOCK_TX 1500 static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, @@ -4651,7 +4472,6 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct ieee80211_vif *csa_vif; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - u32 apply_time; int ret; mutex_lock(&mvm->mutex); @@ -4695,21 +4515,7 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, break; case NL80211_IFTYPE_STATION: - /* Schedule the time event to a bit before beacon 1, - * to make sure we're in the new channel when the - * GO/AP arrives. In case count <= 1 immediately schedule the - * TE (this might result with some packet loss or connection - * loss). - */ - if (chsw->count <= 1) - apply_time = 0; - else - apply_time = chsw->device_timestamp + - ((vif->bss_conf.beacon_int * (chsw->count - 1) - - IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024); - if (chsw->block_tx) { - iwl_mvm_csa_client_absent(mvm, vif); /* * In case of undetermined / long time with immediate * quiet monitor status to gracefully disconnect @@ -4721,19 +4527,14 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, msecs_to_jiffies(IWL_MAX_CSA_BLOCK_TX)); } - if (mvmvif->bf_data.bf_enabled) { - ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); + if (!fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) { + ret = iwl_mvm_old_pre_chan_sw_sta(mvm, vif, chsw); if (ret) goto out_unlock; - } - - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) + } else { iwl_mvm_schedule_client_csa(mvm, vif, chsw); - else - iwl_mvm_schedule_csa_period(mvm, vif, - vif->bss_conf.beacon_int, - apply_time); + } mvmvif->csa_count = chsw->count; mvmvif->csa_misbehave = false; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index a263cc629d75..843d00bf2bd5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -258,38 +258,6 @@ enum iwl_mvm_smps_type_request { NUM_IWL_MVM_SMPS_REQ, }; -enum iwl_mvm_ref_type { - IWL_MVM_REF_UCODE_DOWN, - IWL_MVM_REF_SCAN, - IWL_MVM_REF_ROC, - IWL_MVM_REF_ROC_AUX, - IWL_MVM_REF_P2P_CLIENT, - IWL_MVM_REF_AP_IBSS, - IWL_MVM_REF_USER, - IWL_MVM_REF_TX, - IWL_MVM_REF_TX_AGG, - IWL_MVM_REF_ADD_IF, - IWL_MVM_REF_START_AP, - IWL_MVM_REF_BSS_CHANGED, - IWL_MVM_REF_PREPARE_TX, - IWL_MVM_REF_PROTECT_TDLS, - IWL_MVM_REF_CHECK_CTKILL, - IWL_MVM_REF_PRPH_READ, - IWL_MVM_REF_PRPH_WRITE, - IWL_MVM_REF_NMI, - IWL_MVM_REF_TM_CMD, - IWL_MVM_REF_EXIT_WORK, - IWL_MVM_REF_PROTECT_CSA, - IWL_MVM_REF_FW_DBG_COLLECT, - IWL_MVM_REF_INIT_UCODE, - IWL_MVM_REF_SENDING_CMD, - IWL_MVM_REF_RX, - - /* update debugfs.c when changing this */ - - IWL_MVM_REF_COUNT, -}; - enum iwl_bt_force_ant_mode { BT_FORCE_ANT_DIS = 0, BT_FORCE_ANT_AUTO, @@ -504,6 +472,9 @@ struct iwl_mvm_vif { /* we can only have 2 GTK + 2 IGTK active at a time */ struct ieee80211_key_conf *ap_early_keys[4]; + + /* 26-tone RU OFDMA 
transmissions should be blocked */ + bool he_ru_2mhz_block; }; static inline struct iwl_mvm_vif * @@ -617,11 +588,6 @@ struct iwl_mvm_frame_stats { int last_frame_idx; }; -enum { - D0I3_DEFER_WAKEUP, - D0I3_PENDING_WAKEUP, -}; - #define IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE 0xff #define IWL_MVM_DEBUG_SET_TEMPERATURE_MIN -100 #define IWL_MVM_DEBUG_SET_TEMPERATURE_MAX 200 @@ -695,6 +661,12 @@ struct iwl_mvm_tcm { * @valid: reordering is valid for this queue * @lock: protect reorder buffer internal state * @mvm: mvm pointer, needed for frame timer context + * @consec_oldsn_drops: consecutive drops due to old SN + * @consec_oldsn_ampdu_gp2: A-MPDU GP2 timestamp to track + * when to apply old SN consecutive drop workaround + * @consec_oldsn_prev_drop: track whether or not an MPDU + * that was single/part of the previous A-MPDU was + * dropped due to old SN */ struct iwl_mvm_reorder_buffer { u16 head_sn; @@ -708,6 +680,9 @@ struct iwl_mvm_reorder_buffer { bool valid; spinlock_t lock; struct iwl_mvm *mvm; + unsigned int consec_oldsn_drops; + u32 consec_oldsn_ampdu_gp2; + unsigned int consec_oldsn_prev_drop:1; } ____cacheline_aligned_in_smp; /** @@ -1011,10 +986,6 @@ struct iwl_mvm { unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)]; u8 fw_key_deleted[STA_KEY_MAX_NUM]; - /* references taken by the driver and spinlock protecting them */ - spinlock_t refs_lock; - u8 refs[IWL_MVM_REF_COUNT]; - u8 vif_count; struct ieee80211_vif __rcu *vif_id_to_mac[NUM_MAC_INDEX_DRIVER]; @@ -1039,6 +1010,7 @@ struct iwl_mvm { struct ieee80211_channel **nd_channels; int n_nd_channels; bool net_detect; + u8 offload_tid; #ifdef CONFIG_IWLWIFI_DEBUGFS bool d3_wake_sysassert; bool d3_test_active; @@ -1048,17 +1020,6 @@ struct iwl_mvm { #endif #endif - /* d0i3 */ - u8 d0i3_ap_sta_id; - bool d0i3_offloading; - struct work_struct d0i3_exit_work; - struct sk_buff_head d0i3_tx; - /* protect d0i3_suspend_flags */ - struct mutex d0i3_suspend_mutex; - unsigned long d0i3_suspend_flags; - /* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */ - spinlock_t d0i3_tx_lock; - wait_queue_head_t d0i3_exit_waitq; wait_queue_head_t rx_sync_waitq; /* BT-Coex */ @@ -1184,6 +1145,8 @@ struct iwl_mvm { struct iwl_mvm_sar_profile sar_profiles[ACPI_SAR_PROFILE_NUM]; struct iwl_mvm_geo_profile geo_profiles[ACPI_NUM_GEO_PROFILES]; u32 geo_rev; + struct iwl_ppag_table_cmd ppag_table; + u32 ppag_rev; #endif }; @@ -1201,7 +1164,6 @@ struct iwl_mvm { * @IWL_MVM_STATUS_ROC_RUNNING: remain-on-channel is running * @IWL_MVM_STATUS_HW_RESTART_REQUESTED: HW restart was requested * @IWL_MVM_STATUS_IN_HW_RESTART: HW restart is active - * @IWL_MVM_STATUS_IN_D0I3: NIC is in D0i3 * @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running * @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running * @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA @@ -1212,7 +1174,6 @@ enum iwl_mvm_status { IWL_MVM_STATUS_ROC_RUNNING, IWL_MVM_STATUS_HW_RESTART_REQUESTED, IWL_MVM_STATUS_IN_HW_RESTART, - IWL_MVM_STATUS_IN_D0I3, IWL_MVM_STATUS_ROC_AUX_RUNNING, IWL_MVM_STATUS_FIRMWARE_RUNNING, IWL_MVM_STATUS_NEED_FLUSH_P2P, @@ -1291,13 +1252,6 @@ iwl_mvm_rcu_dereference_vif_id(struct iwl_mvm *mvm, u8 vif_id, bool rcu) lockdep_is_held(&mvm->mutex)); } -static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm) -{ - return !iwlwifi_mod_params.d0i3_disable && - fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_D0I3_SUPPORT); -} - static inline bool iwl_mvm_is_adaptive_dwell_supported(struct iwl_mvm *mvm) { return 
fw_has_api(&mvm->fw->ucode_capa, @@ -1333,19 +1287,6 @@ static inline bool iwl_mvm_is_short_beacon_notif_supported(struct iwl_mvm *mvm) IWL_UCODE_TLV_API_SHORT_BEACON_NOTIF); } -static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm) -{ - /* For now we only use this mode to differentiate between - * slave transports, which handle D0i3 entry in suspend by - * themselves in conjunction with runtime PM D0i3. So, this - * function is used to check whether we need to do anything - * when entering suspend or if the transport layer has already - * done it. - */ - return (mvm->trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3) && - (mvm->trans->runtime_pm_mode != IWL_PLAT_PM_MODE_D0I3); -} - static inline bool iwl_mvm_is_dqa_data_queue(struct iwl_mvm *mvm, u8 queue) { return (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE) && @@ -1424,13 +1365,13 @@ static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm) static inline bool iwl_mvm_has_new_tx_api(struct iwl_mvm *mvm) { /* TODO - replace with TLV once defined */ - return mvm->trans->cfg->use_tfh; + return mvm->trans->trans_cfg->use_tfh; } static inline bool iwl_mvm_has_unified_ucode(struct iwl_mvm *mvm) { /* TODO - better define this */ - return mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000; + return mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000; } static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm) @@ -1455,7 +1396,13 @@ static inline bool iwl_mvm_cdb_scan_api(struct iwl_mvm *mvm) * but then there's a little bit of code in scan that won't make * any sense... */ - return mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000; + return mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000; +} + +static inline bool iwl_mvm_is_scan_ext_chan_supported(struct iwl_mvm *mvm) +{ + return fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_SCAN_EXT_CHAN_VER); } static inline bool iwl_mvm_has_new_rx_stats_api(struct iwl_mvm *mvm) @@ -1487,7 +1434,6 @@ iwl_mvm_get_agg_status(struct iwl_mvm *mvm, void *tx_resp) static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm) { -#ifdef CONFIG_THERMAL /* these two TLV are redundant since the responsibility to CT-kill by * FW happens only after we send at least one command of * temperature THs report. 
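 * In other words (clarifying note, not in the original source): the
 * firmware only takes over CT-kill once the driver has sent at least
 * one temperature-thresholds report, so the helper requires both
 * capability bits before treating thermal handling as firmware-owned.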
@@ -1496,9 +1442,6 @@ static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm) IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW) && fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT); -#else /* CONFIG_THERMAL */ - return false; -#endif /* CONFIG_THERMAL */ } static inline bool iwl_mvm_is_ctdp_supported(struct iwl_mvm *mvm) @@ -1663,6 +1606,8 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue); void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue); +void iwl_mvm_rx_bar_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, + struct iwl_rx_cmd_buffer *rxb, int queue); int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask, const u8 *data, u32 count, bool async); void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi, @@ -1863,30 +1808,9 @@ void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw, extern const struct file_operations iwl_dbgfs_d3_test_ops; struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm); #ifdef CONFIG_PM -int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - bool host_awake, - u32 cmd_flags); -void iwl_mvm_d0i3_update_keys(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct iwl_wowlan_status *status); void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif); #else -static inline int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - bool host_awake, - u32 cmd_flags) -{ - return 0; -} - -static inline void iwl_mvm_d0i3_update_keys(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct iwl_wowlan_status *status) -{ -} - static inline void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { @@ -1900,19 +1824,6 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm, bool offload_ns, u32 cmd_flags); -/* D0i3 */ -void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type); -void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type); -int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type); -bool iwl_mvm_ref_taken(struct iwl_mvm *mvm); - -#ifdef CONFIG_PM -void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq); -int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode); -int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode); -int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm); -#endif - /* BT Coex */ int iwl_mvm_send_bt_init_conf(struct iwl_mvm *mvm); void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, @@ -1943,9 +1854,6 @@ iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif, struct iwl_beacon_filter_cmd *cmd) {} #endif -int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - bool enable, u32 flags); int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 flags); @@ -2025,7 +1933,7 @@ void iwl_mvm_vif_set_low_latency(struct iwl_mvm_vif *mvmvif, bool set, */ static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm) { - return ((BIT(mvm->cfg->base_params->num_of_queues) - 1) & + return ((BIT(mvm->trans->trans_cfg->base_params->num_of_queues) - 1) & ~BIT(IWL_MVM_DQA_CMD_QUEUE)); } @@ -2034,7 +1942,8 @@ static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm) lockdep_assert_held(&mvm->mutex); iwl_fw_cancel_timestamp(&mvm->fwrt); clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); - iwl_fwrt_stop_device(&mvm->fwrt); + 
iwl_fw_dbg_stop_sync(&mvm->fwrt); + iwl_trans_stop_device(mvm->trans); iwl_free_fw_paging(&mvm->fwrt); iwl_fw_dump_conf_clear(&mvm->fwrt); } @@ -2154,6 +2063,7 @@ void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm, int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b); int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm); +int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm); #ifdef CONFIG_IWLWIFI_DEBUGFS void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index a9bb43a2f27b..945c1ea5cda8 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -249,7 +249,7 @@ static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section, while (ret == length) { /* Check no memory assumptions fail and cause an overflow */ if ((size_read + offset + length) > - mvm->cfg->base_params->eeprom_size) { + mvm->trans->trans_cfg->base_params->eeprom_size) { IWL_ERR(mvm, "EEPROM size is too small for NVM\n"); return -ENOBUFS; } @@ -372,7 +372,7 @@ int iwl_nvm_init(struct iwl_mvm *mvm) /* Read From FW NVM */ IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n"); - nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size, + nvm_buffer = kmalloc(mvm->trans->trans_cfg->base_params->eeprom_size, GFP_KERNEL); if (!nvm_buffer) return -ENOMEM; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 4888054dc3d8..3acbd5b7ab4b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -173,7 +173,7 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode) * unrelated errors. Need to further investigate this, but for now * we'll separate cases. */ - if (mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_8000) + if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI; if (iwl_fw_dbg_is_d3_debug_enabled(&mvm->fwrt)) @@ -389,6 +389,7 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { HCMD_NAME(SCAN_ITERATION_COMPLETE_UMAC), HCMD_NAME(REPLY_RX_PHY_CMD), HCMD_NAME(REPLY_RX_MPDU_CMD), + HCMD_NAME(BAR_FRAME_RELEASE), HCMD_NAME(FRAME_RELEASE), HCMD_NAME(BA_NOTIF), HCMD_NAME(MCC_UPDATE_CMD), @@ -414,6 +415,7 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { HCMD_NAME(SCAN_ITERATION_COMPLETE), HCMD_NAME(D0I3_END_CMD), HCMD_NAME(LTR_CONFIG), + HCMD_NAME(LDBG_CONFIG_CMD), }; /* Please keep this array *SORTED* by hex value. 
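 * (For reference, not part of the original source: each HCMD_NAME(x)
 *  entry records the numeric command ID together with its stringified
 *  name, so these tables are what let the driver print human-readable
 *  command names; the sorting rule exists because lookups are done by
 *  binary search, as noted below.)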
@@ -465,6 +467,8 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = { * Access is done through binary search */ static const struct iwl_hcmd_names iwl_mvm_debug_names[] = { + HCMD_NAME(DBGC_SUSPEND_RESUME), + HCMD_NAME(BUFFER_ALLOCATION), HCMD_NAME(MFU_ASSERT_DUMP_NTF), }; @@ -514,9 +518,6 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = { /* this forward declaration can avoid to export the function */ static void iwl_mvm_async_handlers_wk(struct work_struct *wk); -#ifdef CONFIG_PM -static void iwl_mvm_d0i3_exit_work(struct work_struct *wk); -#endif static u32 iwl_mvm_min_backoff(struct iwl_mvm *mvm) { @@ -564,23 +565,16 @@ unlock: static int iwl_mvm_fwrt_dump_start(void *ctx) { struct iwl_mvm *mvm = ctx; - int ret = 0; mutex_lock(&mvm->mutex); - ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_FW_DBG_COLLECT); - if (ret) - mutex_unlock(&mvm->mutex); - - return ret; + return 0; } static void iwl_mvm_fwrt_dump_end(void *ctx) { struct iwl_mvm *mvm = ctx; - iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT); - mutex_unlock(&mvm->mutex); } @@ -672,7 +666,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, if (iwl_mvm_has_new_rx_api(mvm)) { op_mode->ops = &iwl_mvm_ops_mq; trans->rx_mpdu_cmd_hdr_size = - (trans->cfg->device_family >= + (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560) ? sizeof(struct iwl_rx_mpdu_desc) : IWL_RX_DESC_SIZE_V1; @@ -700,7 +694,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mvm->drop_bcn_ap_mode = true; mutex_init(&mvm->mutex); - mutex_init(&mvm->d0i3_suspend_mutex); spin_lock_init(&mvm->async_handlers_lock); INIT_LIST_HEAD(&mvm->time_event_list); INIT_LIST_HEAD(&mvm->aux_roc_te_list); @@ -710,18 +703,11 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk); INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk); -#ifdef CONFIG_PM - INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work); -#endif INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work); INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk); INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk); INIT_LIST_HEAD(&mvm->add_stream_txqs); - spin_lock_init(&mvm->d0i3_tx_lock); - spin_lock_init(&mvm->refs_lock); - skb_queue_head_init(&mvm->d0i3_tx); - init_waitqueue_head(&mvm->d0i3_exit_waitq); init_waitqueue_head(&mvm->rx_sync_waitq); atomic_set(&mvm->queue_sync_counter, 0); @@ -744,7 +730,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, trans_cfg.no_reclaim_cmds = no_reclaim_cmds; trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds); - if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) + if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560) rb_size_default = IWL_AMSDU_2K; else rb_size_default = IWL_AMSDU_4K; @@ -768,12 +754,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, trans_cfg.rx_buf_size = rb_size_default; } - BUILD_BUG_ON(sizeof(struct iwl_ldbg_config_cmd) != - LDBG_CFG_COMMAND_SIZE); - trans->wide_cmd_header = true; trans_cfg.bc_table_dword = - mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_22560; + mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_22560; trans_cfg.command_groups = iwl_mvm_groups; trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups); @@ -832,13 +815,11 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, goto out_free; mutex_lock(&mvm->mutex); - iwl_mvm_ref(mvm, 
IWL_MVM_REF_INIT_UCODE); err = iwl_run_init_mvm_ucode(mvm, true); if (err && err != -ERFKILL) iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER); if (!iwlmvm_mod_params.init_dbg || !err) iwl_mvm_stop_device(mvm); - iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE); mutex_unlock(&mvm->mutex); if (err < 0) { IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err); @@ -870,11 +851,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, else memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx)); - /* The transport always starts with a taken reference, we can - * release it now if d0i3 is supported */ - if (iwl_mvm_is_d0i3_supported(mvm)) - iwl_trans_unref(mvm->trans); - iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx); return op_mode; @@ -898,13 +874,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); int i; - /* If d0i3 is supported, we have released the reference that - * the transport started with, so we should take it back now - * that we are leaving. - */ - if (iwl_mvm_is_d0i3_supported(mvm)) - iwl_trans_ref(mvm->trans); - iwl_mvm_leds_exit(mvm); iwl_mvm_thermal_exit(mvm); @@ -931,7 +900,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) iwl_fw_runtime_free(&mvm->fwrt); mutex_destroy(&mvm->mutex); - mutex_destroy(&mvm->d0i3_suspend_mutex); ieee80211_free_hw(mvm->hw); } @@ -1020,7 +988,10 @@ static void iwl_mvm_rx_common(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { int i; + union iwl_dbg_tlv_tp_data tp_data = { .fw_pkt = pkt }; + iwl_dbg_tlv_time_point(&mvm->fwrt, + IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF, &tp_data); iwl_mvm_rx_check_trigger(mvm, pkt); /* @@ -1091,6 +1062,8 @@ static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode, iwl_mvm_rx_queue_notif(mvm, napi, rxb, 0); else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)) iwl_mvm_rx_frame_release(mvm, napi, rxb, 0); + else if (cmd == WIDE_ID(LEGACY_GROUP, BAR_FRAME_RELEASE)) + iwl_mvm_rx_bar_frame_release(mvm, napi, rxb, 0); else if (cmd == WIDE_ID(DATA_PATH_GROUP, RX_NO_DATA_NOTIF)) iwl_mvm_rx_monitor_no_data(mvm, napi, rxb, 0); else @@ -1234,8 +1207,7 @@ static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) * Stop the device if we run OPERATIONAL firmware or if we are in the * middle of the calibrations. 
*/ - return state && (mvm->fwrt.cur_fw_img != IWL_UCODE_INIT || - rfkill_safe_init_done); + return state && rfkill_safe_init_done; } static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) @@ -1267,7 +1239,7 @@ static void iwl_mvm_reprobe_wk(struct work_struct *wk) void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) { iwl_abort_notification_waits(&mvm->notif_wait); - del_timer(&mvm->fwrt.dump.periodic_trig); + iwl_dbg_tlv_del_timers(mvm->trans); /* * This is a bit racy, but worst case we tell mac80211 about @@ -1319,9 +1291,6 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) } else if (mvm->fwrt.cur_fw_img == IWL_UCODE_REGULAR && mvm->hw_registered && !test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) { - /* don't let the transport/FW power down */ - iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); - if (mvm->fw->ucode_capa.error_log_size) { u32 src_size = mvm->fw->ucode_capa.error_log_size; u32 src_addr = mvm->fw->ucode_capa.error_log_addr; @@ -1363,422 +1332,6 @@ static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode) iwl_mvm_nic_restart(mvm, true); } -#ifdef CONFIG_PM -struct iwl_d0i3_iter_data { - struct iwl_mvm *mvm; - struct ieee80211_vif *connected_vif; - u8 ap_sta_id; - u8 vif_count; - u8 offloading_tid; - bool disable_offloading; -}; - -static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct iwl_d0i3_iter_data *iter_data) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm_sta *mvmsta; - u32 available_tids = 0; - u8 tid; - - if (WARN_ON(vif->type != NL80211_IFTYPE_STATION || - mvmvif->ap_sta_id == IWL_MVM_INVALID_STA)) - return false; - - mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id); - if (!mvmsta) - return false; - - spin_lock_bh(&mvmsta->lock); - for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { - struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; - - /* - * in case of pending tx packets, don't use this tid - * for offloading in order to prevent reuse of the same - * qos seq counters. - */ - if (iwl_mvm_tid_queued(mvm, tid_data)) - continue; - - if (tid_data->state != IWL_AGG_OFF) - continue; - - available_tids |= BIT(tid); - } - spin_unlock_bh(&mvmsta->lock); - - /* - * disallow protocol offloading if we have no available tid - * (with no pending frames and no active aggregation, - * as we don't handle "holes" properly - the scheduler needs the - * frame's seq number and TFD index to match) - */ - if (!available_tids) - return true; - - /* for simplicity, just use the first available tid */ - iter_data->offloading_tid = ffs(available_tids) - 1; - return false; -} - -static void iwl_mvm_enter_d0i3_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - struct iwl_d0i3_iter_data *data = _data; - struct iwl_mvm *mvm = data->mvm; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE; - - IWL_DEBUG_RPM(mvm, "entering D0i3 - vif %pM\n", vif->addr); - if (vif->type != NL80211_IFTYPE_STATION || - !vif->bss_conf.assoc) - return; - - /* - * in case of pending tx packets or active aggregations, - * avoid offloading features in order to prevent reuse of - * the same qos seq counters. 
- */ - if (iwl_mvm_disallow_offloading(mvm, vif, data)) - data->disable_offloading = true; - - iwl_mvm_update_d0i3_power_mode(mvm, vif, true, flags); - iwl_mvm_send_proto_offload(mvm, vif, data->disable_offloading, - false, flags); - - /* - * on init/association, mvm already configures POWER_TABLE_CMD - * and REPLY_MCAST_FILTER_CMD, so currently don't - * reconfigure them (we might want to use different - * params later on, though). - */ - data->ap_sta_id = mvmvif->ap_sta_id; - data->vif_count++; - - /* - * no new commands can be sent at this stage, so it's safe - * to save the vif pointer during d0i3 entrance. - */ - data->connected_vif = vif; -} - -static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm, - struct iwl_wowlan_config_cmd *cmd, - struct iwl_d0i3_iter_data *iter_data) -{ - struct ieee80211_sta *ap_sta; - struct iwl_mvm_sta *mvm_ap_sta; - - if (iter_data->ap_sta_id == IWL_MVM_INVALID_STA) - return; - - rcu_read_lock(); - - ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[iter_data->ap_sta_id]); - if (IS_ERR_OR_NULL(ap_sta)) - goto out; - - mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta); - cmd->is_11n_connection = ap_sta->ht_cap.ht_supported; - cmd->offloading_tid = iter_data->offloading_tid; - cmd->flags = ENABLE_L3_FILTERING | ENABLE_NBNS_FILTERING | - ENABLE_DHCP_FILTERING | ENABLE_STORE_BEACON; - /* - * The d0i3 uCode takes care of the nonqos counters, - * so configure only the qos seq ones. - */ - iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, cmd); -out: - rcu_read_unlock(); -} - -int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode) -{ - struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE; - int ret; - struct iwl_d0i3_iter_data d0i3_iter_data = { - .mvm = mvm, - }; - struct iwl_wowlan_config_cmd wowlan_config_cmd = { - .wakeup_filter = cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME | - IWL_WOWLAN_WAKEUP_BEACON_MISS | - IWL_WOWLAN_WAKEUP_LINK_CHANGE), - }; - struct iwl_d3_manager_config d3_cfg_cmd = { - .min_sleep_time = cpu_to_le32(1000), - .wakeup_flags = cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR), - }; - - IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n"); - - if (WARN_ON_ONCE(mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)) - return -EINVAL; - - set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status); - - /* - * iwl_mvm_ref_sync takes a reference before checking the flag. 
- * so by checking there is no held reference we prevent a state - * in which iwl_mvm_ref_sync continues successfully while we - * configure the firmware to enter d0i3 - */ - if (iwl_mvm_ref_taken(mvm)) { - IWL_DEBUG_RPM(mvm->trans, "abort d0i3 due to taken ref\n"); - clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status); - wake_up(&mvm->d0i3_exit_waitq); - return 1; - } - - ieee80211_iterate_active_interfaces_atomic(mvm->hw, - IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_enter_d0i3_iterator, - &d0i3_iter_data); - if (d0i3_iter_data.vif_count == 1) { - mvm->d0i3_ap_sta_id = d0i3_iter_data.ap_sta_id; - mvm->d0i3_offloading = !d0i3_iter_data.disable_offloading; - } else { - WARN_ON_ONCE(d0i3_iter_data.vif_count > 1); - mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA; - mvm->d0i3_offloading = false; - } - - iwl_mvm_pause_tcm(mvm, true); - /* make sure we have no running tx while configuring the seqno */ - synchronize_net(); - - /* Flush the hw queues, in case something got queued during entry */ - /* TODO new tx api */ - if (iwl_mvm_has_new_tx_api(mvm)) { - WARN_ONCE(1, "d0i3: Need to implement flush TX queue\n"); - } else { - ret = iwl_mvm_flush_tx_path(mvm, iwl_mvm_flushable_queues(mvm), - flags); - if (ret) - return ret; - } - - /* configure wowlan configuration only if needed */ - if (mvm->d0i3_ap_sta_id != IWL_MVM_INVALID_STA) { - /* wake on beacons only if beacon storing isn't supported */ - if (!fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_BEACON_STORING)) - wowlan_config_cmd.wakeup_filter |= - cpu_to_le32(IWL_WOWLAN_WAKEUP_BCN_FILTERING); - - iwl_mvm_wowlan_config_key_params(mvm, - d0i3_iter_data.connected_vif, - true, flags); - - iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd, - &d0i3_iter_data); - - ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags, - sizeof(wowlan_config_cmd), - &wowlan_config_cmd); - if (ret) - return ret; - } - - return iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD, - flags | CMD_MAKE_TRANS_IDLE, - sizeof(d3_cfg_cmd), &d3_cfg_cmd); -} - -static void iwl_mvm_exit_d0i3_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - struct iwl_mvm *mvm = _data; - u32 flags = CMD_ASYNC | CMD_HIGH_PRIO; - - IWL_DEBUG_RPM(mvm, "exiting D0i3 - vif %pM\n", vif->addr); - if (vif->type != NL80211_IFTYPE_STATION || - !vif->bss_conf.assoc) - return; - - iwl_mvm_update_d0i3_power_mode(mvm, vif, false, flags); -} - -struct iwl_mvm_d0i3_exit_work_iter_data { - struct iwl_mvm *mvm; - struct iwl_wowlan_status *status; - u32 wakeup_reasons; -}; - -static void iwl_mvm_d0i3_exit_work_iter(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - struct iwl_mvm_d0i3_exit_work_iter_data *data = _data; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - u32 reasons = data->wakeup_reasons; - - /* consider only the relevant station interface */ - if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc || - data->mvm->d0i3_ap_sta_id != mvmvif->ap_sta_id) - return; - - if (reasons & IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH) - iwl_mvm_connection_loss(data->mvm, vif, "D0i3"); - else if (reasons & IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON) - ieee80211_beacon_loss(vif); - else - iwl_mvm_d0i3_update_keys(data->mvm, vif, data->status); -} - -void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq) -{ - struct ieee80211_sta *sta = NULL; - struct iwl_mvm_sta *mvm_ap_sta; - int i; - bool wake_queues = false; - - lockdep_assert_held(&mvm->mutex); - - spin_lock_bh(&mvm->d0i3_tx_lock); - - if (mvm->d0i3_ap_sta_id == IWL_MVM_INVALID_STA) - goto out; - - 
IWL_DEBUG_RPM(mvm, "re-enqueue packets\n"); - - /* get the sta in order to update seq numbers and re-enqueue skbs */ - sta = rcu_dereference_protected( - mvm->fw_id_to_mac_id[mvm->d0i3_ap_sta_id], - lockdep_is_held(&mvm->mutex)); - - if (IS_ERR_OR_NULL(sta)) { - sta = NULL; - goto out; - } - - if (mvm->d0i3_offloading && qos_seq) { - /* update qos seq numbers if offloading was enabled */ - mvm_ap_sta = iwl_mvm_sta_from_mac80211(sta); - for (i = 0; i < IWL_MAX_TID_COUNT; i++) { - u16 seq = le16_to_cpu(qos_seq[i]); - /* firmware stores last-used one, we store next one */ - seq += 0x10; - mvm_ap_sta->tid_data[i].seq_number = seq; - } - } -out: - /* re-enqueue (or drop) all packets */ - while (!skb_queue_empty(&mvm->d0i3_tx)) { - struct sk_buff *skb = __skb_dequeue(&mvm->d0i3_tx); - - if (!sta || iwl_mvm_tx_skb(mvm, skb, sta)) - ieee80211_free_txskb(mvm->hw, skb); - - /* if the skb_queue is not empty, we need to wake queues */ - wake_queues = true; - } - clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status); - wake_up(&mvm->d0i3_exit_waitq); - mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA; - if (wake_queues) - ieee80211_wake_queues(mvm->hw); - - spin_unlock_bh(&mvm->d0i3_tx_lock); -} - -static void iwl_mvm_d0i3_exit_work(struct work_struct *wk) -{ - struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, d0i3_exit_work); - struct iwl_mvm_d0i3_exit_work_iter_data iter_data = { - .mvm = mvm, - }; - - struct iwl_wowlan_status *status; - u32 wakeup_reasons = 0; - __le16 *qos_seq = NULL; - - mutex_lock(&mvm->mutex); - - status = iwl_mvm_send_wowlan_get_status(mvm); - if (IS_ERR_OR_NULL(status)) { - /* set to NULL so we don't need to check before kfree'ing */ - status = NULL; - goto out; - } - - wakeup_reasons = le32_to_cpu(status->wakeup_reasons); - qos_seq = status->qos_seq_ctr; - - IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons); - - iter_data.wakeup_reasons = wakeup_reasons; - iter_data.status = status; - ieee80211_iterate_active_interfaces(mvm->hw, - IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_d0i3_exit_work_iter, - &iter_data); -out: - iwl_mvm_d0i3_enable_tx(mvm, qos_seq); - - IWL_DEBUG_INFO(mvm, "d0i3 exit completed (wakeup reasons: 0x%x)\n", - wakeup_reasons); - - /* qos_seq might point inside resp_pkt, so free it only now */ - kfree(status); - - /* the FW might have updated the regdomain */ - iwl_mvm_update_changed_regdom(mvm); - - iwl_mvm_resume_tcm(mvm); - iwl_mvm_unref(mvm, IWL_MVM_REF_EXIT_WORK); - mutex_unlock(&mvm->mutex); -} - -int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm) -{ - u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE | - CMD_WAKE_UP_TRANS; - int ret; - - IWL_DEBUG_RPM(mvm, "MVM exiting D0i3\n"); - - if (WARN_ON_ONCE(mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)) - return -EINVAL; - - mutex_lock(&mvm->d0i3_suspend_mutex); - if (test_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags)) { - IWL_DEBUG_RPM(mvm, "Deferring d0i3 exit until resume\n"); - __set_bit(D0I3_PENDING_WAKEUP, &mvm->d0i3_suspend_flags); - mutex_unlock(&mvm->d0i3_suspend_mutex); - return 0; - } - mutex_unlock(&mvm->d0i3_suspend_mutex); - - ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL); - if (ret) - goto out; - - ieee80211_iterate_active_interfaces_atomic(mvm->hw, - IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_exit_d0i3_iterator, - mvm); -out: - schedule_work(&mvm->d0i3_exit_work); - return ret; -} - -int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode) -{ - struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - - iwl_mvm_ref(mvm, IWL_MVM_REF_EXIT_WORK); - return _iwl_mvm_exit_d0i3(mvm); -} - -#define 
IWL_MVM_D0I3_OPS \ - .enter_d0i3 = iwl_mvm_enter_d0i3, \ - .exit_d0i3 = iwl_mvm_exit_d0i3, -#else /* CONFIG_PM */ -#define IWL_MVM_D0I3_OPS -#endif /* CONFIG_PM */ - #define IWL_MVM_COMMON_OPS \ /* these could be differentiated */ \ .async_cb = iwl_mvm_async_cb, \ @@ -1789,7 +1342,6 @@ int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode) .nic_error = iwl_mvm_nic_error, \ .cmd_queue_full = iwl_mvm_cmd_queue_full, \ .nic_config = iwl_mvm_nic_config, \ - IWL_MVM_D0I3_OPS \ /* as we only register one, these MUST be common! */ \ .start = iwl_op_mode_mvm_start, \ .stop = iwl_op_mode_mvm_stop diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c index 86e40bae57e3..0243dbe8ac49 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c @@ -289,8 +289,17 @@ void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt) if (ctxt->ref == 0) { struct ieee80211_channel *chan; struct cfg80211_chan_def chandef; + struct ieee80211_supported_band *sband = NULL; + enum nl80211_band band = NL80211_BAND_2GHZ; + + while (!sband && band < NUM_NL80211_BANDS) + sband = mvm->hw->wiphy->bands[band++]; + + if (WARN_ON(!sband)) + return; + + chan = &sband->channels[0]; - chan = &mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[0]; cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT); iwl_mvm_phy_ctxt_changed(mvm, ctxt, &chandef, 1, 1); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c index 36f5fa1ee793..22136e4832ea 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c @@ -127,12 +127,11 @@ int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm, static void iwl_mvm_beacon_filter_set_cqm_params(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct iwl_beacon_filter_cmd *cmd, - bool d0i3) + struct iwl_beacon_filter_cmd *cmd) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - if (vif->bss_conf.cqm_rssi_thold && !d0i3) { + if (vif->bss_conf.cqm_rssi_thold) { cmd->bf_energy_delta = cpu_to_le32(vif->bss_conf.cqm_rssi_hyst); /* fw uses an absolute value for this */ @@ -849,8 +848,7 @@ iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif, static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_beacon_filter_cmd *cmd, - u32 cmd_flags, - bool d0i3) + u32 cmd_flags) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; @@ -859,13 +857,11 @@ static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm, vif->type != NL80211_IFTYPE_STATION || vif->p2p) return 0; - iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd, d0i3); - if (!d0i3) - iwl_mvm_beacon_filter_debugfs_parameters(vif, cmd); + iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd); + iwl_mvm_beacon_filter_debugfs_parameters(vif, cmd); ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd, cmd_flags); - /* don't change bf_enabled in case of temporary d0i3 configuration */ - if (!ret && !d0i3) + if (!ret) mvmvif->bf_data.bf_enabled = true; return ret; @@ -880,12 +876,12 @@ int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm, .bf_enable_beacon_filter = cpu_to_le32(1), }; - return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, flags, false); + return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, flags); } static int _iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - u32 flags, bool d0i3) + u32 flags) 
{ struct iwl_beacon_filter_cmd cmd = {}; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); @@ -896,8 +892,7 @@ static int _iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, flags); - /* don't change bf_enabled in case of temporary d0i3 configuration */ - if (!ret && !d0i3) + if (!ret) mvmvif->bf_data.bf_enabled = false; return ret; @@ -907,7 +902,7 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 flags) { - return _iwl_mvm_disable_beacon_filter(mvm, vif, flags, false); + return _iwl_mvm_disable_beacon_filter(mvm, vif, flags); } static int iwl_mvm_power_set_ps(struct iwl_mvm *mvm) @@ -958,7 +953,7 @@ static int iwl_mvm_power_set_ba(struct iwl_mvm *mvm, !vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif)); - return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, 0, false); + return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, 0); } int iwl_mvm_power_update_ps(struct iwl_mvm *mvm) @@ -1022,58 +1017,3 @@ int iwl_mvm_power_update_mac(struct iwl_mvm *mvm) return 0; } - -int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - bool enable, u32 flags) -{ - int ret; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mac_power_cmd cmd = {}; - - if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) - return 0; - - if (!vif->bss_conf.assoc) - return 0; - - iwl_mvm_power_build_cmd(mvm, vif, &cmd, !enable); - - iwl_mvm_power_log(mvm, &cmd); -#ifdef CONFIG_IWLWIFI_DEBUGFS - memcpy(&mvmvif->mac_pwr_cmd, &cmd, sizeof(cmd)); -#endif - ret = iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, flags, - sizeof(cmd), &cmd); - if (ret) - return ret; - - /* configure beacon filtering */ - if (mvmvif != mvm->bf_allowed_vif) - return 0; - - if (enable) { - struct iwl_beacon_filter_cmd cmd_bf = { - IWL_BF_CMD_CONFIG_D0I3, - .bf_enable_beacon_filter = cpu_to_le32(1), - }; - /* - * When beacon storing is supported - disable beacon filtering - * altogether - the latest beacon will be sent when exiting d0i3 - */ - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_BEACON_STORING)) - ret = _iwl_mvm_disable_beacon_filter(mvm, vif, flags, - true); - else - ret = _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd_bf, - flags, true); - } else { - if (mvmvif->bf_data.bf_enabled) - ret = iwl_mvm_enable_beacon_filter(mvm, vif, flags); - else - ret = iwl_mvm_disable_beacon_filter(mvm, vif, flags); - } - - return ret; -} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c index 08b67812e94e..8f50e2b121bd 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c @@ -193,7 +193,7 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta, int i, highest_mcs; for (i = 0; i < sta->rx_nss; i++) { - if (i == MAX_NSS) + if (i == IWL_TLC_NSS_MAX) break; highest_mcs = rs_fw_vht_highest_rx_mcs_index(vht_cap, i + 1); @@ -204,9 +204,10 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta, if (sta->bandwidth == IEEE80211_STA_RX_BW_20) supp &= ~BIT(IWL_TLC_MNG_HT_RATE_MCS9); - cmd->ht_rates[i][0] = cpu_to_le16(supp); + cmd->ht_rates[i][IWL_TLC_HT_BW_NONE_160] = cpu_to_le16(supp); if (sta->bandwidth == IEEE80211_STA_RX_BW_160) - cmd->ht_rates[i][1] = cmd->ht_rates[i][0]; + cmd->ht_rates[i][IWL_TLC_HT_BW_160] = + cmd->ht_rates[i][IWL_TLC_HT_BW_NONE_160]; } } @@ -241,7 +242,7 @@ rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta, 
le16_to_cpu(sband->iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_160); int i; - for (i = 0; i < sta->rx_nss && i < MAX_NSS; i++) { + for (i = 0; i < sta->rx_nss && i < IWL_TLC_NSS_MAX; i++) { u16 _mcs_160 = (mcs_160 >> (2 * i)) & 0x3; u16 _mcs_80 = (mcs_80 >> (2 * i)) & 0x3; u16 _tx_mcs_160 = (tx_mcs_160 >> (2 * i)) & 0x3; @@ -255,7 +256,7 @@ rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta, } if (_mcs_80 > _tx_mcs_80) _mcs_80 = _tx_mcs_80; - cmd->ht_rates[i][0] = + cmd->ht_rates[i][IWL_TLC_HT_BW_NONE_160] = cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_80)); /* If one side doesn't support - mark both as not supporting */ @@ -266,7 +267,7 @@ rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta, } if (_mcs_160 > _tx_mcs_160) _mcs_160 = _tx_mcs_160; - cmd->ht_rates[i][1] = + cmd->ht_rates[i][IWL_TLC_HT_BW_160] = cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_160)); } } @@ -300,8 +301,10 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta, rs_fw_vht_set_enabled_rates(sta, vht_cap, cmd); } else if (ht_cap->ht_supported) { cmd->mode = IWL_TLC_MNG_MODE_HT; - cmd->ht_rates[0][0] = cpu_to_le16(ht_cap->mcs.rx_mask[0]); - cmd->ht_rates[1][0] = cpu_to_le16(ht_cap->mcs.rx_mask[1]); + cmd->ht_rates[IWL_TLC_NSS_1][IWL_TLC_HT_BW_NONE_160] = + cpu_to_le16(ht_cap->mcs.rx_mask[0]); + cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_HT_BW_NONE_160] = + cpu_to_le16(ht_cap->mcs.rx_mask[1]); } } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index d3f04acfbacb..42d525e46e80 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -3079,11 +3079,7 @@ static void __iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv; - /* Treat uninitialized rate scaling data same as non-existing. */ - if (!lq_sta) { - IWL_DEBUG_RATE(mvm, "Station rate scaling not created yet.\n"); - return; - } else if (!lq_sta->pers.drv) { + if (!lq_sta->pers.drv) { IWL_DEBUG_RATE(mvm, "Rate scaling not initialized yet.\n"); return; } @@ -3342,7 +3338,7 @@ static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm, if (num_of_ant(ant) == 1) lq_cmd->single_stream_ant_msk = ant; - if (!mvm->trans->cfg->gen2) + if (!mvm->trans->trans_cfg->gen2) lq_cmd->agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF; else lq_cmd->agg_frame_cnt_limit = @@ -4127,10 +4123,6 @@ static void rs_drv_add_sta_debugfs(void *mvm, void *priv_sta, MVM_DEBUGFS_ADD_FILE_RS(ss_force, dir, 0600); } - -void rs_remove_sta_debugfs(void *mvm, void *mvm_sta) -{ -} #endif /* @@ -4158,7 +4150,6 @@ static const struct rate_control_ops rs_mvm_ops_drv = { .rate_update = rs_drv_rate_update, #ifdef CONFIG_MAC80211_DEBUGFS .add_sta_debugfs = rs_drv_add_sta_debugfs, - .remove_sta_debugfs = rs_remove_sta_debugfs, #endif .capa = RATE_CTRL_CAPA_VHT_EXT_NSS_BW, }; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index 160b0db27103..0ad8ed23a455 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -29,6 +30,8 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -349,7 +352,6 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, u32 rate_n_flags; u32 rx_pkt_status; u8 crypt_len = 0; - bool take_ref; phy_info = &mvm->last_phy_info; rx_res = (struct iwl_rx_mpdu_res_start *)pkt->data; @@ -557,22 +559,8 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, ieee80211_is_probe_resp(hdr->frame_control))) rx_status->boottime_ns = ktime_get_boottime_ns(); - /* Take a reference briefly to kick off a d0i3 entry delay so - * we can handle bursts of RX packets without toggling the - * state too often. But don't do this for beacons if we are - * going to idle because the beacon filtering changes we make - * cause the firmware to send us collateral beacons. */ - take_ref = !(test_bit(STATUS_TRANS_GOING_IDLE, &mvm->trans->status) && - ieee80211_is_beacon(hdr->frame_control)); - - if (take_ref) - iwl_mvm_ref(mvm, IWL_MVM_REF_RX); - iwl_mvm_pass_packet_to_mac80211(mvm, sta, napi, skb, hdr, len, crypt_len, rxb); - - if (take_ref) - iwl_mvm_unref(mvm, IWL_MVM_REF_RX); } struct iwl_mvm_stat_data { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 854edd7d7103..77b03b757193 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -349,7 +349,7 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, !(status & IWL_RX_MPDU_RES_STATUS_TTAK_OK)) return 0; - if (mvm->trans->cfg->gen2 && + if (mvm->trans->trans_cfg->gen2 && !(status & RX_MPDU_RES_STATUS_MIC_OK)) stats->flag |= RX_FLAG_MMIC_ERROR; @@ -366,7 +366,7 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, if (pkt_flags & FH_RSCSR_RADA_EN) { stats->flag |= RX_FLAG_ICV_STRIPPED; - if (mvm->trans->cfg->gen2) + if (mvm->trans->trans_cfg->gen2) stats->flag |= RX_FLAG_MMIC_STRIPPED; } @@ -377,8 +377,16 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, stats->flag |= RX_FLAG_DECRYPTED; return 0; default: - /* Expected in monitor (not having the keys) */ - if (!mvm->monitor_on) + /* + * Sometimes we can get frames that were not decrypted + * because the firmware didn't have the keys yet. This can + * happen right after the connection, when multicast frames can + * arrive before the GTK is installed. + * Silently drop those frames. + * Also drop un-decrypted frames in monitor mode. 
+ */ + if (!is_multicast_ether_addr(hdr->addr1) && + !mvm->monitor_on && net_ratelimit()) IWL_ERR(mvm, "Unhandled alg: 0x%x\n", status); } @@ -781,6 +789,55 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi, wake_up(&mvm->rx_sync_waitq); } +static void iwl_mvm_oldsn_workaround(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, int tid, + struct iwl_mvm_reorder_buffer *buffer, + u32 reorder, u32 gp2, int queue) +{ + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + + if (gp2 != buffer->consec_oldsn_ampdu_gp2) { + /* we have a new (A-)MPDU ... */ + + /* + * reset counter to 0 if we didn't have any oldsn in + * the last A-MPDU (as detected by GP2 being identical) + */ + if (!buffer->consec_oldsn_prev_drop) + buffer->consec_oldsn_drops = 0; + + /* either way, update our tracking state */ + buffer->consec_oldsn_ampdu_gp2 = gp2; + } else if (buffer->consec_oldsn_prev_drop) { + /* + * tracking state didn't change, and we had an old SN + * indication before - do nothing in this case, we + * already noted this one down and are waiting for the + * next A-MPDU (by GP2) + */ + return; + } + + /* return unless this MPDU has old SN */ + if (!(reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN)) + return; + + /* update state */ + buffer->consec_oldsn_prev_drop = 1; + buffer->consec_oldsn_drops++; + + /* if limit is reached, send del BA and reset state */ + if (buffer->consec_oldsn_drops == IWL_MVM_AMPDU_CONSEC_DROPS_DELBA) { + IWL_WARN(mvm, + "reached %d old SN frames from %pM on queue %d, stopping BA session on TID %d\n", + IWL_MVM_AMPDU_CONSEC_DROPS_DELBA, + sta->addr, queue, tid); + ieee80211_stop_rx_ba_session(mvmsta->vif, BIT(tid), sta->addr); + buffer->consec_oldsn_prev_drop = 0; + buffer->consec_oldsn_drops = 0; + } +} + /* * Returns true if the MPDU was buffered\dropped, false if it should be passed * to upper layer. 
@@ -792,6 +849,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, struct sk_buff *skb, struct iwl_rx_mpdu_desc *desc) { + struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); struct ieee80211_hdr *hdr = iwl_mvm_skb_get_hdr(skb); struct iwl_mvm_sta *mvm_sta; struct iwl_mvm_baid_data *baid_data; @@ -894,6 +952,9 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, min_sn, IWL_MVM_RELEASE_SEND_RSS_SYNC); } + iwl_mvm_oldsn_workaround(mvm, sta, tid, buffer, reorder, + rx_status->device_timestamp, queue); + /* drop any oudated packets */ if (ieee80211_sn_less(sn, buffer->head_sn)) goto drop; @@ -1504,7 +1565,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))) return; - if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) { + if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560) { rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags); channel = desc->v3.channel; gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise); @@ -1605,7 +1666,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, if (likely(!(phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) { u64 tsf_on_air_rise; - if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) + if (mvm->trans->trans_cfg->device_family >= + IWL_DEVICE_FAMILY_22560) tsf_on_air_rise = le64_to_cpu(desc->v3.tsf_on_air_rise); else tsf_on_air_rise = le64_to_cpu(desc->v1.tsf_on_air_rise); @@ -1731,7 +1793,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; - if (mvm->trans->cfg->device_family == + if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_9000) { iwl_mvm_flip_address(hdr->addr3); @@ -1960,3 +2022,42 @@ void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, le16_to_cpu(release->nssn), queue, 0); } + +void iwl_mvm_rx_bar_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, + struct iwl_rx_cmd_buffer *rxb, int queue) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_bar_frame_release *release = (void *)pkt->data; + unsigned int baid = le32_get_bits(release->ba_info, + IWL_BAR_FRAME_RELEASE_BAID_MASK); + unsigned int nssn = le32_get_bits(release->ba_info, + IWL_BAR_FRAME_RELEASE_NSSN_MASK); + unsigned int sta_id = le32_get_bits(release->sta_tid, + IWL_BAR_FRAME_RELEASE_STA_MASK); + unsigned int tid = le32_get_bits(release->sta_tid, + IWL_BAR_FRAME_RELEASE_TID_MASK); + struct iwl_mvm_baid_data *baid_data; + + if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID || + baid >= ARRAY_SIZE(mvm->baid_map))) + return; + + rcu_read_lock(); + baid_data = rcu_dereference(mvm->baid_map[baid]); + if (!baid_data) { + IWL_DEBUG_RX(mvm, + "Got valid BAID %d but not allocated, invalid BAR release!\n", + baid); + goto out; + } + + if (WARN(tid != baid_data->tid || sta_id != baid_data->sta_id, + "baid 0x%x is mapped to sta:%d tid:%d, but BAR release received for sta:%d tid:%d\n", + baid, baid_data->sta_id, baid_data->tid, sta_id, + tid)) + goto out; + + iwl_mvm_release_frames_from_notif(mvm, napi, baid, nssn, queue, 0); +out: + rcu_read_unlock(); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index c284e6975b1b..f6b3045badbd 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -77,7 +77,10 @@ #define IWL_SCAN_DWELL_FRAGMENTED 44 #define IWL_SCAN_DWELL_EXTENDED 90 #define IWL_SCAN_NUM_OF_FRAGS 
3 +#define IWL_SCAN_LAST_2_4_CHN 14 +#define IWL_SCAN_BAND_5_2 0 +#define IWL_SCAN_BAND_2_4 1 /* adaptive dwell max budget time [TU] for full scan */ #define IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300 @@ -512,7 +515,6 @@ void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR; ieee80211_scan_completed(mvm->hw, &info); - iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); cancel_delayed_work(&mvm->scan_timeout_dwork); iwl_mvm_resume_tcm(mvm); } else { @@ -957,11 +959,24 @@ static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm, return flags; } +static void +iwl_mvm_scan_set_legacy_probe_req(struct iwl_scan_probe_req_v1 *p_req, + struct iwl_scan_probe_req *src_p_req) +{ + int i; + + p_req->mac_header = src_p_req->mac_header; + for (i = 0; i < SCAN_NUM_BAND_PROBE_DATA_V_1; i++) + p_req->band_data[i] = src_p_req->band_data[i]; + p_req->common_data = src_p_req->common_data; + memcpy(p_req->buf, src_p_req->buf, sizeof(p_req->buf)); +} + static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_scan_params *params) { struct iwl_scan_req_lmac *cmd = mvm->scan_cmd; - struct iwl_scan_probe_req *preq = + struct iwl_scan_probe_req_v1 *preq = (void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) * mvm->fw->ucode_capa.n_scan_channels); u32 ssid_bitmap = 0; @@ -1031,7 +1046,7 @@ static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, iwl_mvm_lmac_scan_cfg_channels(mvm, params->channels, params->n_channels, ssid_bitmap, cmd); - *preq = params->preq; + iwl_mvm_scan_set_legacy_probe_req(preq, ¶ms->preq); return 0; } @@ -1205,7 +1220,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) cmd_size = sizeof(struct iwl_scan_config); else cmd_size = sizeof(struct iwl_scan_config_v1); - cmd_size += mvm->fw->ucode_capa.n_scan_channels; + cmd_size += num_channels; cfg = kzalloc(cmd_size, GFP_KERNEL); if (!cfg) @@ -1385,9 +1400,17 @@ iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm, for (i = 0; i < n_channels; i++) { channel_cfg[i].flags = cpu_to_le32(ssid_bitmap); - channel_cfg[i].channel_num = channels[i]->hw_value; - channel_cfg[i].iter_count = 1; - channel_cfg[i].iter_interval = 0; + channel_cfg[i].v1.channel_num = channels[i]->hw_value; + if (iwl_mvm_is_scan_ext_chan_supported(mvm)) { + channel_cfg[i].v2.band = + channels[i]->hw_value <= IWL_SCAN_LAST_2_4_CHN ? 
+ IWL_SCAN_BAND_2_4 : IWL_SCAN_BAND_5_2; + channel_cfg[i].v2.iter_count = 1; + channel_cfg[i].v2.iter_interval = 0; + } else { + channel_cfg[i].v1.iter_count = 1; + channel_cfg[i].v1.iter_interval = 0; + } } } @@ -1477,9 +1500,12 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_scan_req_umac *cmd = mvm->scan_cmd; struct iwl_scan_umac_chan_param *chan_param; void *cmd_data = iwl_mvm_get_scan_req_umac_data(mvm); - struct iwl_scan_req_umac_tail *sec_part = cmd_data + - sizeof(struct iwl_scan_channel_cfg_umac) * - mvm->fw->ucode_capa.n_scan_channels; + void *sec_part = cmd_data + sizeof(struct iwl_scan_channel_cfg_umac) * + mvm->fw->ucode_capa.n_scan_channels; + struct iwl_scan_req_umac_tail_v2 *tail_v2 = + (struct iwl_scan_req_umac_tail_v2 *)sec_part; + struct iwl_scan_req_umac_tail_v1 *tail_v1; + struct iwl_ssid_ie *direct_scan; int uid, i; u32 ssid_bitmap = 0; u8 channel_flags = 0; @@ -1541,18 +1567,12 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, chan_param->flags = channel_flags; chan_param->count = params->n_channels; - iwl_scan_build_ssids(params, sec_part->direct_scan, &ssid_bitmap); - - iwl_mvm_umac_scan_cfg_channels(mvm, params->channels, - params->n_channels, ssid_bitmap, - cmd_data); - for (i = 0; i < params->n_scan_plans; i++) { struct cfg80211_sched_scan_plan *scan_plan = ¶ms->scan_plans[i]; - sec_part->schedule[i].iter_count = scan_plan->iterations; - sec_part->schedule[i].interval = + tail_v2->schedule[i].iter_count = scan_plan->iterations; + tail_v2->schedule[i].interval = cpu_to_le16(scan_plan->interval); } @@ -1562,12 +1582,24 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, * For example, when regular scan is requested the driver sets one scan * plan with one iteration. 
*/ - if (!sec_part->schedule[i - 1].iter_count) - sec_part->schedule[i - 1].iter_count = 0xff; + if (!tail_v2->schedule[i - 1].iter_count) + tail_v2->schedule[i - 1].iter_count = 0xff; - sec_part->delay = cpu_to_le16(params->delay); - sec_part->preq = params->preq; + tail_v2->delay = cpu_to_le16(params->delay); + if (iwl_mvm_is_scan_ext_chan_supported(mvm)) { + tail_v2->preq = params->preq; + direct_scan = tail_v2->direct_scan; + } else { + tail_v1 = (struct iwl_scan_req_umac_tail_v1 *)sec_part; + iwl_mvm_scan_set_legacy_probe_req(&tail_v1->preq, + ¶ms->preq); + direct_scan = tail_v1->direct_scan; + } + iwl_scan_build_ssids(params, direct_scan, &ssid_bitmap); + iwl_mvm_umac_scan_cfg_channels(mvm, params->channels, + params->n_channels, ssid_bitmap, + cmd_data); return 0; } @@ -1758,7 +1790,6 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n"); mvm->scan_status |= IWL_MVM_SCAN_REGULAR; mvm->scan_vif = iwl_mvm_vif_from_mac80211(vif); - iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN); schedule_delayed_work(&mvm->scan_timeout_dwork, msecs_to_jiffies(SCAN_TIMEOUT)); @@ -1884,7 +1915,6 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, memcpy(info.tsf_bssid, mvm->scan_vif->bssid, ETH_ALEN); ieee80211_scan_completed(mvm->hw, &info); mvm->scan_vif = NULL; - iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); cancel_delayed_work(&mvm->scan_timeout_dwork); iwl_mvm_resume_tcm(mvm); } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) { @@ -1909,8 +1939,6 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, mvm->last_ebs_successful = false; mvm->scan_uid_status[uid] = 0; - - iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_SCAN_COMPLETE); } void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, @@ -1999,6 +2027,7 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type) int iwl_mvm_scan_size(struct iwl_mvm *mvm) { int base_size = IWL_SCAN_REQ_UMAC_SIZE_V1; + int tail_size; if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm)) base_size = IWL_SCAN_REQ_UMAC_SIZE_V8; @@ -2007,16 +2036,21 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm) else if (iwl_mvm_cdb_scan_api(mvm)) base_size = IWL_SCAN_REQ_UMAC_SIZE_V6; - if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { + if (iwl_mvm_is_scan_ext_chan_supported(mvm)) + tail_size = sizeof(struct iwl_scan_req_umac_tail_v2); + else + tail_size = sizeof(struct iwl_scan_req_umac_tail_v1); + return base_size + sizeof(struct iwl_scan_channel_cfg_umac) * mvm->fw->ucode_capa.n_scan_channels + - sizeof(struct iwl_scan_req_umac_tail); - + tail_size; + } return sizeof(struct iwl_scan_req_lmac) + sizeof(struct iwl_scan_channel_cfg_lmac) * mvm->fw->ucode_capa.n_scan_channels + - sizeof(struct iwl_scan_probe_req); + sizeof(struct iwl_scan_probe_req_v1); } /* @@ -2099,10 +2133,6 @@ out: mvm->scan_status &= ~type; if (type == IWL_MVM_SCAN_REGULAR) { - /* Since the rx handler won't do anything now, we have - * to release the scan reference here. 
- */ - iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); cancel_delayed_work(&mvm->scan_timeout_dwork); if (notify) { struct cfg80211_scan_info info = { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 10f18536dd0d..0bedba4c61f2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -67,14 +67,6 @@ #include "sta.h" #include "rs.h" -static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm); - -static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, - u32 sta_id, - struct ieee80211_key_conf *key, bool mcast, - u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags, - u8 key_offset, bool mfp); - /* * New version of ADD_STA_sta command added new fields at the end of the * structure, so sending the size of the relevant API's structure is enough to @@ -1612,7 +1604,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color); mvm_sta->vif = vif; - if (!mvm->trans->cfg->gen2) + if (!mvm->trans->trans_cfg->gen2) mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF; else mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF; @@ -1895,10 +1887,6 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, /* unassoc - go ahead - remove the AP STA now */ mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; - - /* clear d0i3_ap_sta_id if no longer relevant */ - if (mvm->d0i3_ap_sta_id == sta_id) - mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA; } /* @@ -1966,8 +1954,8 @@ static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue, u8 sta_id, u8 fifo) { unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ? - mvm->cfg->base_params->wd_timeout : - IWL_WATCHDOG_DISABLED; + mvm->trans->trans_cfg->base_params->wd_timeout : + IWL_WATCHDOG_DISABLED; if (iwl_mvm_has_new_tx_api(mvm)) { int tvqm_queue = @@ -2771,13 +2759,6 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, spin_lock_bh(&mvmsta->lock); - /* possible race condition - we entered D0i3 while starting agg */ - if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) { - spin_unlock_bh(&mvmsta->lock); - IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n"); - return -EIO; - } - /* * Note the possible cases: * 1. An enabled TXQ - TXQ needs to become agg'ed @@ -2832,7 +2813,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, * to align the wrap around of ssn so we compare relevant values. */ normalized_ssn = tid_data->ssn; - if (mvm->trans->cfg->gen2) + if (mvm->trans->trans_cfg->gen2) normalized_ssn &= 0xff; if (normalized_ssn == tid_data->next_reclaimed) { @@ -3872,7 +3853,7 @@ u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data) * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need * to align the wrap around of ssn so we compare relevant values. 
*/ - if (mvm->trans->cfg->gen2) + if (mvm->trans->trans_cfg->gen2) sn &= 0xff; return ieee80211_sn_sub(sn, tid_data->next_reclaimed); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c index 9df21a8d1fc1..1851719e9f4b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c @@ -205,19 +205,10 @@ void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw, struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int; - /* - * iwl_mvm_protect_session() reads directly from the device - * (the system time), so make sure it is available. - */ - if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_TDLS)) - return; - mutex_lock(&mvm->mutex); /* Protect the session to hear the TDLS setup response on the channel */ iwl_mvm_protect_session(mvm, vif, duration, duration, 100, true); mutex_unlock(&mvm->mutex); - - iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_TDLS); } static const char * diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index 4d34e5ab1bff..a06bc63fb516 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -106,10 +106,8 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk) * in the case that the time event actually completed in the firmware * (which is handled in iwl_mvm_te_handle_notif). */ - if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) - iwl_mvm_unref(mvm, IWL_MVM_REF_ROC); - if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) - iwl_mvm_unref(mvm, IWL_MVM_REF_ROC_AUX); + clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status); + clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status); synchronize_net(); @@ -357,7 +355,6 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm, if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) { set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status); - iwl_mvm_ref(mvm, IWL_MVM_REF_ROC); ieee80211_ready_on_channel(mvm->hw); } else if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) { iwl_mvm_te_handle_notify_csa(mvm, te_data, notif); @@ -405,7 +402,6 @@ static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm, } else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) { set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status); te_data->running = true; - iwl_mvm_ref(mvm, IWL_MVM_REF_ROC_AUX); ieee80211_ready_on_channel(mvm->hw); /* Start TE */ } else { IWL_DEBUG_TE(mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index 0b3e5c99d316..32a708301cfc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014, 2019 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * @@ -27,7 +27,7 @@ * * BSD LICENSE * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2012 - 2014, 2019 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * All rights reserved. 
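The sta.c hunks above (and the matching tx.c hunk further down) repeat one idiom: on trans_cfg->gen2 devices the firmware's next_reclaimed index is only 8 bits wide, so the driver folds the 12-bit 802.11 SSN to 8 bits before comparing. A standalone sketch of that normalization follows; it is not part of the patch, the sample values are hypothetical, and sn_sub() is a local stand-in for mac80211's ieee80211_sn_sub():

/* standalone model of the gen2 SSN normalization; cc -o ssn ssn.c */
#include <stdint.h>
#include <stdio.h>

/* stand-in for ieee80211_sn_sub(): distance in the 12-bit SN space */
static uint16_t sn_sub(uint16_t sn1, uint16_t sn2)
{
	return (sn1 - sn2) & 0xfff;
}

int main(void)
{
	uint16_t ssn = 0x305;		/* hypothetical 12-bit SSN */
	uint16_t next_reclaimed = 0x05;	/* gen2 FW keeps only 8 bits */
	int gen2 = 1;			/* pretend trans_cfg->gen2 */
	uint16_t sn = ssn;

	if (gen2)
		sn &= 0xff;	/* align the wrap-around before comparing */

	/* prints 0: 0x305 and 0x05 agree once folded to 8 bits */
	printf("queued: %u\n", sn_sub(sn, next_reclaimed));
	return 0;
}

Without the mask, sn_sub(0x305, 0x05) would report 0x300 frames still queued, which is why both sta.c call sites apply it before the comparison.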
@@ -298,16 +298,8 @@ static void check_exit_ctkill(struct work_struct *work) if (__iwl_mvm_mac_start(mvm)) goto reschedule; - /* make sure the device is available for direct read/writes */ - if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_CHECK_CTKILL)) { - __iwl_mvm_mac_stop(mvm); - goto reschedule; - } - ret = iwl_mvm_get_temp(mvm, &temp); - iwl_mvm_unref(mvm, IWL_MVM_REF_CHECK_CTKILL); - __iwl_mvm_mac_stop(mvm); if (ret) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 6ac114a393cc..8a059da7a1fa 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -546,7 +546,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, hdr->frame_control); } - if (mvm->trans->cfg->device_family >= + if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560) { struct iwl_tx_cmd_gen3 *cmd = (void *)dev_cmd->payload; @@ -1169,8 +1169,9 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, schedule_work(&mvm->add_stream_wk); } - IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id, - tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number)); + IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x len %d\n", + mvmsta->sta_id, tid, txq_id, + IEEE80211_SEQ_TO_SN(seq_number), skb->len); /* From now on, we cannot access info->control */ iwl_mvm_skb_prepare_status(skb, dev_cmd); @@ -1271,7 +1272,7 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm, * to align the wrap around of ssn so we compare relevant values. */ normalized_ssn = tid_data->ssn; - if (mvm->trans->cfg->gen2) + if (mvm->trans->trans_cfg->gen2) normalized_ssn &= 0xff; if (normalized_ssn != tid_data->next_reclaimed) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index b8e20a01c192..8686107da116 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -88,17 +88,11 @@ int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd) * the mutex, this ensures we don't try to send two * (or more) synchronous commands at a time. */ - if (!(cmd->flags & CMD_ASYNC)) { + if (!(cmd->flags & CMD_ASYNC)) lockdep_assert_held(&mvm->mutex); - if (!(cmd->flags & CMD_SEND_IN_IDLE)) - iwl_mvm_ref(mvm, IWL_MVM_REF_SENDING_CMD); - } ret = iwl_trans_send_cmd(mvm->trans, cmd); - if (!(cmd->flags & (CMD_ASYNC | CMD_SEND_IN_IDLE))) - iwl_mvm_unref(mvm, IWL_MVM_REF_SENDING_CMD); - /* * If the caller wants the SKB, then don't hide any problems, the * caller might access the response buffer which will be NULL if @@ -537,7 +531,7 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u8 lmac_num) /* reset the device */ iwl_trans_sw_reset(trans); - err = iwl_finish_nic_init(trans); + err = iwl_finish_nic_init(trans, trans->trans_cfg); if (err) return; } @@ -945,8 +939,9 @@ unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm, { struct iwl_fw_dbg_trigger_tlv *trigger; struct iwl_fw_dbg_trigger_txq_timer *txq_timer; - unsigned int default_timeout = - cmd_q ? IWL_DEF_WD_TIMEOUT : mvm->cfg->base_params->wd_timeout; + unsigned int default_timeout = cmd_q ? 
+ IWL_DEF_WD_TIMEOUT : + mvm->trans->trans_cfg->base_params->wd_timeout; if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) { /* @@ -990,7 +985,7 @@ unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm, return default_timeout; default: WARN_ON(1); - return mvm->cfg->base_params->wd_timeout; + return mvm->trans->trans_cfg->base_params->wd_timeout; } } @@ -1436,7 +1431,7 @@ u32 iwl_mvm_get_systime(struct iwl_mvm *mvm) { u32 reg_addr = DEVICE_SYSTEM_TIME_REG; - if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000 && + if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000 && mvm->trans->cfg->gp2_reg_addr) reg_addr = mvm->trans->cfg->gp2_reg_addr; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c index 5e86783d616b..75fa8a6aafee 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c @@ -96,7 +96,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, cpu_to_le64(trans_pcie->rxq->bd_dma); /* Configure debug, for integration */ - if (!trans->dbg.ini_valid) + if (!iwl_trans_dbg_ini_valid(trans)) iwl_pcie_alloc_fw_monitor(trans, 0); if (trans->dbg.num_blocks) { prph_sc_ctrl->hwm_cfg.hwm_base_addr = @@ -180,7 +180,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL, CSR_AUTO_FUNC_BOOT_ENA); - if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1); else iwl_set_bit(trans, CSR_GP_CNTRL, CSR_AUTO_FUNC_INIT); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 3b12e7ad35e1..e29c47744ef5 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -65,7 +65,6 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> -#include <linux/pm_runtime.h> #include <linux/pci.h> #include <linux/acpi.h> @@ -73,6 +72,7 @@ #include "iwl-trans.h" #include "iwl-drv.h" +#include "iwl-prph.h" #include "internal.h" #define IWL_PCI_DEVICE(dev, subdev, cfg) \ @@ -994,15 +994,22 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); const struct iwl_cfg *cfg_7265d __maybe_unused = NULL; struct iwl_trans *iwl_trans; + unsigned long flags; int ret; - if (WARN_ONCE(!cfg->csr, "CSR addresses aren't configured\n")) - return -EINVAL; - - iwl_trans = iwl_trans_pcie_alloc(pdev, ent, cfg); + iwl_trans = iwl_trans_pcie_alloc(pdev, ent, &cfg->trans); if (IS_ERR(iwl_trans)) return PTR_ERR(iwl_trans); + /* the trans_cfg should never change, so set it now */ + iwl_trans->trans_cfg = &cfg->trans; + + if (WARN_ONCE(!iwl_trans->trans_cfg->csr, + "CSR addresses aren't configured\n")) { + ret = -EINVAL; + goto out_free_trans; + } + #if IS_ENABLED(CONFIG_IWLMVM) /* * special-case 7265D, it has the same PCI IDs. 
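The iwl_pci_probe() hunks below stop rewriting iwl_trans->cfg piecemeal and instead pick the final cfg from the chip-ID field of CSR_HW_RF_ID, assigning it once at the end. A standalone model of that dispatch is sketched here; it is illustrative only: the mask and the RF-ID values are invented for the example, and the real extraction is CSR_HW_RF_ID_TYPE_CHIP_ID() with struct iwl_cfg targets such as iwl22000_2ax_cfg_jf:

/* standalone model of the RF-ID based cfg selection; cc -o rfid rfid.c */
#include <stdint.h>
#include <stdio.h>

/* hypothetical chip-ID field, modeled on CSR_HW_RF_ID_TYPE_CHIP_ID() */
#define EXAMPLE_CHIP_ID(rf_id)	((rf_id) & 0x00FFF000u)

/* hypothetical RF IDs standing in for CSR_HW_RF_ID_TYPE_JF/HR */
#define EXAMPLE_RF_ID_JF	0x00105000u
#define EXAMPLE_RF_ID_HR	0x0010A000u

static const char *example_pick_cfg(uint32_t hw_rf_id)
{
	switch (EXAMPLE_CHIP_ID(hw_rf_id)) {
	case EXAMPLE_CHIP_ID(EXAMPLE_RF_ID_JF):
		return "JF cfg";
	case EXAMPLE_CHIP_ID(EXAMPLE_RF_ID_HR):
		return "HR cfg";
	default:
		return "unrecognized RF ID";	/* probe fails with -EINVAL */
	}
}

int main(void)
{
	/* step/dash bits in the low nibbles are masked off */
	printf("%s\n", example_pick_cfg(EXAMPLE_RF_ID_HR | 0x10u));
	return 0;
}

Keeping the selection in a local cfg and assigning iwl_trans->cfg only once ("now set the real cfg we decided to use") avoids the half-initialized state the removed per-branch assignments could leave behind.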
@@ -1018,29 +1025,70 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) else if (cfg == &iwl7265_n_cfg) cfg_7265d = &iwl7265d_n_cfg; if (cfg_7265d && - (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) { + (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) cfg = cfg_7265d; - iwl_trans->cfg = cfg_7265d; - } - if (iwl_trans->cfg->rf_id && cfg == &iwl22000_2ac_cfg_hr_cdb && - iwl_trans->hw_rev != CSR_HW_REV_TYPE_HR_CDB) { - u32 rf_id_chp = CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id); - u32 jf_chp_id = CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF); - u32 hr_chp_id = CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR); - - if (rf_id_chp == jf_chp_id) { - if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ) - cfg = &iwl9560_2ac_cfg_qnj_jf_b0; - else - cfg = &iwl22000_2ac_cfg_jf; - } else if (rf_id_chp == hr_chp_id) { - if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ) - cfg = &iwl22000_2ax_cfg_qnj_hr_a0; - else - cfg = &iwl22000_2ac_cfg_hr; + iwl_trans->hw_rf_id = iwl_read32(iwl_trans, CSR_HW_RF_ID); + + if (cfg == &iwlax210_2ax_cfg_so_hr_a0) { + if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_TY) { + cfg = &iwlax210_2ax_cfg_ty_gf_a0; + } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) == + CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) { + cfg = &iwlax210_2ax_cfg_so_jf_a0; + } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) == + CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF)) { + cfg = &iwlax211_2ax_cfg_so_gf_a0; + } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) == + CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF4)) { + cfg = &iwlax411_2ax_cfg_so_gf4_a0; + } + } else if (cfg == &iwl_ax101_cfg_qu_hr) { + if ((CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) == + CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) && + iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) || + (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) == + CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR1))) { + cfg = &iwl22000_2ax_cfg_qnj_hr_b0; + } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) == + CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) { + cfg = &iwl_ax101_cfg_qu_hr; + } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) == + CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) { + cfg = &iwl22000_2ax_cfg_jf; + } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) == + CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HRCDB)) { + IWL_ERR(iwl_trans, "RF ID HRCDB is not supported\n"); + return -EINVAL; + } else { + IWL_ERR(iwl_trans, "Unrecognized RF ID 0x%08x\n", + CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id)); + return -EINVAL; + } + } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) == + CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) && + ((cfg != &iwl_ax200_cfg_cc && + cfg != &killer1650x_2ax_cfg && + cfg != &killer1650w_2ax_cfg && + cfg != &iwl_ax201_cfg_quz_hr) || + iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) { + u32 hw_status; + + hw_status = iwl_read_prph(iwl_trans, UMAG_GEN_HW_STATUS); + if (CSR_HW_RF_STEP(iwl_trans->hw_rf_id) == SILICON_B_STEP) + /* + * b step fw is the same for physical card and fpga + */ + cfg = &iwl22000_2ax_cfg_qnj_hr_b0; + else if ((hw_status & UMAG_GEN_HW_IS_FPGA) && + CSR_HW_RF_STEP(iwl_trans->hw_rf_id) == SILICON_A_STEP) { + cfg = &iwl22000_2ax_cfg_qnj_hr_a0_f0; + } else { + /* + * a step no FPGA + */ + cfg = &iwl22000_2ac_cfg_hr; } - iwl_trans->cfg = cfg; } /* @@ -1050,22 +1098,18 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * thing to do to support Qu 
C-step. */ if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QU_C0) { - if (iwl_trans->cfg == &iwl_ax101_cfg_qu_hr) - iwl_trans->cfg = &iwl_ax101_cfg_qu_c0_hr_b0; - else if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr) - iwl_trans->cfg = &iwl_ax201_cfg_qu_c0_hr_b0; - else if (iwl_trans->cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0) - iwl_trans->cfg = &iwl9461_2ac_cfg_qu_c0_jf_b0; - else if (iwl_trans->cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0) - iwl_trans->cfg = &iwl9462_2ac_cfg_qu_c0_jf_b0; - else if (iwl_trans->cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0) - iwl_trans->cfg = &iwl9560_2ac_cfg_qu_c0_jf_b0; - else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0) - iwl_trans->cfg = &iwl9560_2ac_160_cfg_qu_c0_jf_b0; - else if (iwl_trans->cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0) - iwl_trans->cfg = &killer1650s_2ax_cfg_qu_c0_hr_b0; - else if (iwl_trans->cfg == &killer1650i_2ax_cfg_qu_b0_hr_b0) - iwl_trans->cfg = &killer1650i_2ax_cfg_qu_c0_hr_b0; + if (cfg == &iwl_ax101_cfg_qu_hr) + cfg = &iwl_ax101_cfg_qu_c0_hr_b0; + else if (cfg == &iwl_ax201_cfg_qu_hr) + cfg = &iwl_ax201_cfg_qu_c0_hr_b0; + else if (cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0) + cfg = &iwl9461_2ac_cfg_qu_c0_jf_b0; + else if (cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0) + cfg = &iwl9462_2ac_cfg_qu_c0_jf_b0; + else if (cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0) + cfg = &iwl9560_2ac_cfg_qu_c0_jf_b0; + else if (cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0) + cfg = &iwl9560_2ac_160_cfg_qu_c0_jf_b0; } /* same thing for QuZ... */ @@ -1085,6 +1129,23 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } #endif + /* now set the real cfg we decided to use */ + iwl_trans->cfg = cfg; + + if (iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000 && + iwl_trans_grab_nic_access(iwl_trans, &flags)) { + u32 hw_step; + + hw_step = iwl_read_umac_prph_no_grab(iwl_trans, WFPM_CTRL_REG); + hw_step |= ENABLE_WFPM; + iwl_write_umac_prph_no_grab(iwl_trans, WFPM_CTRL_REG, hw_step); + hw_step = iwl_read_prph_no_grab(iwl_trans, CNVI_AUX_MISC_CHIP); + hw_step = (hw_step >> HW_STEP_LOCATION_BITS) & 0xF; + if (hw_step == 0x3) + iwl_trans->hw_rev = (iwl_trans->hw_rev & 0xFFFFFFF3) | + (SILICON_C_STEP << 2); + iwl_trans_release_nic_access(iwl_trans, &flags); + } pci_set_drvdata(pdev, iwl_trans); iwl_trans->drv = iwl_drv_start(iwl_trans); @@ -1097,25 +1158,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* register transport layer debugfs here */ iwl_trans_pcie_dbgfs_register(iwl_trans); - /* if RTPM is in use, enable it in our device */ - if (iwl_trans->runtime_pm_mode != IWL_PLAT_PM_MODE_DISABLED) { - /* We explicitly set the device to active here to - * clear contingent errors. - */ - pm_runtime_set_active(&pdev->dev); - - pm_runtime_set_autosuspend_delay(&pdev->dev, - iwlwifi_mod_params.d0i3_timeout); - pm_runtime_use_autosuspend(&pdev->dev); - - /* We are not supposed to call pm_runtime_allow() by - * ourselves, but let userspace enable runtime PM via - * sysfs. However, since we don't enable this from - * userspace yet, we need to allow/forbid() ourselves. - */ - pm_runtime_allow(&pdev->dev); - } - /* The PCI device starts with a reference taken and we are * supposed to release it here. 
But to simplify the * interaction with the opmode, we don't do it now, but let @@ -1133,15 +1175,6 @@ static void iwl_pci_remove(struct pci_dev *pdev) { struct iwl_trans *trans = pci_get_drvdata(pdev); - /* if RTPM was in use, restore it to the state before probe */ - if (trans->runtime_pm_mode != IWL_PLAT_PM_MODE_DISABLED) { - /* We should not call forbid here, but we do for now. - * Check the comment to pm_runtime_allow() in - * iwl_pci_probe(). - */ - pm_runtime_forbid(trans->dev); - } - iwl_drv_stop(trans->drv); iwl_trans_pcie_free(trans); @@ -1199,164 +1232,9 @@ static int iwl_pci_resume(struct device *device) return 0; } -int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - int ret; - - if (test_bit(STATUS_FW_ERROR, &trans->status)) - return 0; - - set_bit(STATUS_TRANS_GOING_IDLE, &trans->status); - - /* config the fw */ - ret = iwl_op_mode_enter_d0i3(trans->op_mode); - if (ret == 1) { - IWL_DEBUG_RPM(trans, "aborting d0i3 entrance\n"); - clear_bit(STATUS_TRANS_GOING_IDLE, &trans->status); - return -EBUSY; - } - if (ret) - goto err; - - ret = wait_event_timeout(trans_pcie->d0i3_waitq, - test_bit(STATUS_TRANS_IDLE, &trans->status), - msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT)); - if (!ret) { - IWL_ERR(trans, "Timeout entering D0i3\n"); - ret = -ETIMEDOUT; - goto err; - } - - clear_bit(STATUS_TRANS_GOING_IDLE, &trans->status); - - return 0; -err: - clear_bit(STATUS_TRANS_GOING_IDLE, &trans->status); - iwl_trans_fw_error(trans); - return ret; -} - -int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - int ret; - - /* sometimes a D0i3 entry is not followed through */ - if (!test_bit(STATUS_TRANS_IDLE, &trans->status)) - return 0; - - /* config the fw */ - ret = iwl_op_mode_exit_d0i3(trans->op_mode); - if (ret) - goto err; - - /* we clear STATUS_TRANS_IDLE only when D0I3_END command is completed */ - - ret = wait_event_timeout(trans_pcie->d0i3_waitq, - !test_bit(STATUS_TRANS_IDLE, &trans->status), - msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT)); - if (!ret) { - IWL_ERR(trans, "Timeout exiting D0i3\n"); - ret = -ETIMEDOUT; - goto err; - } - - return 0; -err: - clear_bit(STATUS_TRANS_IDLE, &trans->status); - iwl_trans_fw_error(trans); - return ret; -} - -#ifdef CONFIG_IWLWIFI_PCIE_RTPM -static int iwl_pci_runtime_suspend(struct device *device) -{ - struct pci_dev *pdev = to_pci_dev(device); - struct iwl_trans *trans = pci_get_drvdata(pdev); - int ret; - - IWL_DEBUG_RPM(trans, "entering runtime suspend\n"); - - if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) { - ret = iwl_pci_fw_enter_d0i3(trans); - if (ret < 0) - return ret; - } - - trans->system_pm_mode = IWL_PLAT_PM_MODE_D0I3; - - iwl_trans_d3_suspend(trans, false, false); - - return 0; -} - -static int iwl_pci_runtime_resume(struct device *device) -{ - struct pci_dev *pdev = to_pci_dev(device); - struct iwl_trans *trans = pci_get_drvdata(pdev); - enum iwl_d3_status d3_status; - - IWL_DEBUG_RPM(trans, "exiting runtime suspend (resume)\n"); - - iwl_trans_d3_resume(trans, &d3_status, false, false); - - if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) - return iwl_pci_fw_exit_d0i3(trans); - - return 0; -} - -static int iwl_pci_system_prepare(struct device *device) -{ - struct pci_dev *pdev = to_pci_dev(device); - struct iwl_trans *trans = pci_get_drvdata(pdev); - - IWL_DEBUG_RPM(trans, "preparing for system suspend\n"); - - /* This is called before entering system suspend and before - * the 
runtime resume is called. Set the suspending flag to - * prevent the wakelock from being taken. - */ - trans->suspending = true; - - /* Wake the device up from runtime suspend before going to - * platform suspend. This is needed because we don't know - * whether wowlan any is set and, if it's not, mac80211 will - * disconnect (in which case, we can't be in D0i3). - */ - pm_runtime_resume(device); - - return 0; -} - -static void iwl_pci_system_complete(struct device *device) -{ - struct pci_dev *pdev = to_pci_dev(device); - struct iwl_trans *trans = pci_get_drvdata(pdev); - - IWL_DEBUG_RPM(trans, "completing system suspend\n"); - - /* This is called as a counterpart to the prepare op. It is - * called either when suspending fails or when suspend - * completed successfully. Now there's no risk of grabbing - * the wakelock anymore, so we can release the suspending - * flag. - */ - trans->suspending = false; -} -#endif /* CONFIG_IWLWIFI_PCIE_RTPM */ - static const struct dev_pm_ops iwl_dev_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(iwl_pci_suspend, iwl_pci_resume) -#ifdef CONFIG_IWLWIFI_PCIE_RTPM - SET_RUNTIME_PM_OPS(iwl_pci_runtime_suspend, - iwl_pci_runtime_resume, - NULL) - .prepare = iwl_pci_system_prepare, - .complete = iwl_pci_system_complete, -#endif /* CONFIG_IWLWIFI_PCIE_RTPM */ }; #define IWL_PM_OPS (&iwl_dev_pm_ops) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index 9f5d0fc839fe..1047d48beaa5 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -253,7 +253,8 @@ struct iwl_dma_ptr { */ static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index) { - return ++index & (trans->cfg->base_params->max_tfd_queue_size - 1); + return ++index & + (trans->trans_cfg->base_params->max_tfd_queue_size - 1); } /** @@ -263,7 +264,7 @@ static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index) static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans, struct iwl_rxq *rxq) { - if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) { + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560) { __le16 *rb_stts = rxq->rb_stts; return READ_ONCE(*rb_stts); @@ -280,7 +281,8 @@ static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans, */ static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index) { - return --index & (trans->cfg->base_params->max_tfd_queue_size - 1); + return --index & + (trans->trans_cfg->base_params->max_tfd_queue_size - 1); } struct iwl_cmd_meta { @@ -556,9 +558,10 @@ struct iwl_trans_pcie { void __iomem *hw_base; bool ucode_write_complete; + bool sx_complete; wait_queue_head_t ucode_write_waitq; wait_queue_head_t wait_command_queue; - wait_queue_head_t d0i3_waitq; + wait_queue_head_t sx_waitq; u8 page_offs, dev_cmd_offs; @@ -581,7 +584,6 @@ struct iwl_trans_pcie { /*protect hw register */ spinlock_t reg_lock; bool cmd_hold_nic_awake; - bool ref_cmd_in_flight; #ifdef CONFIG_IWLWIFI_DEBUGFS struct cont_rec fw_mon_data; @@ -635,9 +637,10 @@ iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie) * Convention: trans API functions: iwl_trans_pcie_XXX * Other functions: iwl_pcie_XXX */ -struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, - const struct pci_device_id *ent, - const struct iwl_cfg *cfg); +struct iwl_trans +*iwl_trans_pcie_alloc(struct pci_dev *pdev, + const struct pci_device_id *ent, + const struct iwl_cfg_trans_params *cfg_trans); void iwl_trans_pcie_free(struct 
iwl_trans *trans); /***************************************************** @@ -697,6 +700,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb); void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, struct sk_buff_head *skbs); +void iwl_trans_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr); void iwl_trans_pcie_tx_reset(struct iwl_trans *trans); void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie, struct iwl_txq *txq, u16 byte_cnt, @@ -705,7 +709,7 @@ void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie, static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd, u8 idx) { - if (trans->cfg->use_tfh) { + if (trans->trans_cfg->use_tfh) { struct iwl_tfh_tfd *tfd = _tfd; struct iwl_tfh_tb *tb = &tfd->tbs[idx]; @@ -911,7 +915,7 @@ static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans, { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - if (trans->cfg->use_tfh) + if (trans->trans_cfg->use_tfh) idx = iwl_pcie_get_cmd_index(txq, idx); return txq->tfds + trans_pcie->tfd_size * idx; @@ -955,7 +959,7 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) MSIX_HW_INT_CAUSES_REG_RF_KILL); } - if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_9000) { + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) { /* * On 9000-series devices this bit isn't enabled by default, so * when we power down the device we need set the bit to allow it @@ -1045,7 +1049,7 @@ static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans, static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans) { - return (trans->dbg.dest_tlv || trans->dbg.ini_valid); + return (trans->dbg.dest_tlv || iwl_trans_dbg_ini_valid(trans)); } void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state); @@ -1058,9 +1062,6 @@ void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans); static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { } #endif -int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans); -int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans); - void iwl_pcie_rx_allocator_work(struct work_struct *data); /* common functions that are used by gen2 transport */ @@ -1113,10 +1114,11 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_cmd *dev_cmd, int txq_id); int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); -void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, - bool low_power); -void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power); +void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans); +void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans); void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id); void iwl_pcie_gen2_tx_free(struct iwl_trans *trans); void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans); +void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans, + bool test, bool reset); #endif /* __iwl_trans_int_pcie_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index a2d709642b2a..19dd075f2f63 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -200,12 +200,12 @@ static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr) */ int iwl_pcie_rx_stop(struct iwl_trans *trans) { - if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) { + if (trans->trans_cfg->device_family >= 
IWL_DEVICE_FAMILY_22560) { /* TODO: remove this for 22560 once fw does it */ iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0); return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_GEN3, RXF_DMA_IDLE, RXF_DMA_IDLE, 1000); - } else if (trans->cfg->mq_rx_supported) { + } else if (trans->trans_cfg->mq_rx_supported) { iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0); return iwl_poll_prph_bit(trans, RFH_GEN_STATUS, RXF_DMA_IDLE, RXF_DMA_IDLE, 1000); @@ -232,7 +232,7 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, * 1. shadow registers aren't enabled * 2. there is a chance that the NIC is asleep */ - if (!trans->cfg->base_params->shadow_reg_enable && + if (!trans->trans_cfg->base_params->shadow_reg_enable && test_bit(STATUS_TPOWER_PMI, &trans->status)) { reg = iwl_read32(trans, CSR_UCODE_DRV_GP1); @@ -240,18 +240,18 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n", reg); iwl_set_bit(trans, CSR_GP_CNTRL, - BIT(trans->cfg->csr->flag_mac_access_req)); + BIT(trans->trans_cfg->csr->flag_mac_access_req)); rxq->need_update = true; return; } } rxq->write_actual = round_down(rxq->write, 8); - if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560) + if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22560) iwl_write32(trans, HBUS_TARG_WRPTR, (rxq->write_actual | ((FIRST_RX_QUEUE + rxq->id) << 16))); - else if (trans->cfg->mq_rx_supported) + else if (trans->trans_cfg->mq_rx_supported) iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id), rxq->write_actual); else @@ -279,7 +279,7 @@ static void iwl_pcie_restock_bd(struct iwl_trans *trans, struct iwl_rxq *rxq, struct iwl_rx_mem_buffer *rxb) { - if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) { + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560) { struct iwl_rx_transfer_desc *bd = rxq->bd; BUILD_BUG_ON(sizeof(*bd) != 2 * sizeof(u64)); @@ -405,7 +405,7 @@ static void iwl_pcie_rxsq_restock(struct iwl_trans *trans, static void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq) { - if (trans->cfg->mq_rx_supported) + if (trans->trans_cfg->mq_rx_supported) iwl_pcie_rxmq_restock(trans, rxq); else iwl_pcie_rxsq_restock(trans, rxq); @@ -682,7 +682,7 @@ static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td) if (use_rx_td) return sizeof(*rx_td); else - return trans->cfg->mq_rx_supported ? sizeof(__le64) : + return trans->trans_cfg->mq_rx_supported ? sizeof(__le64) : sizeof(__le32); } @@ -690,7 +690,7 @@ static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans, struct iwl_rxq *rxq) { struct device *dev = trans->dev; - bool use_rx_td = (trans->cfg->device_family >= + bool use_rx_td = (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560); int free_size = iwl_pcie_free_bd_size(trans, use_rx_td); @@ -712,7 +712,7 @@ static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans, rxq->used_bd_dma = 0; rxq->used_bd = NULL; - if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560) + if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_22560) return; if (rxq->tr_tail) @@ -735,13 +735,13 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans, struct device *dev = trans->dev; int i; int free_size; - bool use_rx_td = (trans->cfg->device_family >= + bool use_rx_td = (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560); size_t rb_stts_size = use_rx_td ? 
sizeof(__le16) : sizeof(struct iwl_rb_status); spin_lock_init(&rxq->lock); - if (trans->cfg->mq_rx_supported) + if (trans->trans_cfg->mq_rx_supported) rxq->queue_size = MQ_RX_TABLE_SIZE; else rxq->queue_size = RX_QUEUE_SIZE; @@ -757,7 +757,7 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans, if (!rxq->bd) goto err; - if (trans->cfg->mq_rx_supported) { + if (trans->trans_cfg->mq_rx_supported) { rxq->used_bd = dma_alloc_coherent(dev, (use_rx_td ? sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size, &rxq->used_bd_dma, @@ -807,7 +807,7 @@ int iwl_pcie_rx_alloc(struct iwl_trans *trans) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rb_allocator *rba = &trans_pcie->rba; int i, ret; - size_t rb_stts_size = trans->cfg->device_family >= + size_t rb_stts_size = trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560 ? sizeof(__le16) : sizeof(struct iwl_rb_status); @@ -1074,8 +1074,8 @@ int _iwl_pcie_rx_init(struct iwl_trans *trans) rxq->read = 0; rxq->write = 0; rxq->write_actual = 0; - memset(rxq->rb_stts, 0, - (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ? + memset(rxq->rb_stts, 0, (trans->trans_cfg->device_family >= + IWL_DEVICE_FAMILY_22560) ? sizeof(__le16) : sizeof(struct iwl_rb_status)); iwl_pcie_rx_init_rxb_lists(rxq); @@ -1088,7 +1088,7 @@ int _iwl_pcie_rx_init(struct iwl_trans *trans) } /* move the pool to the default queue and allocator ownerships */ - queue_size = trans->cfg->mq_rx_supported ? + queue_size = trans->trans_cfg->mq_rx_supported ? MQ_RX_NUM_RBDS : RX_QUEUE_SIZE; allocator_pool_size = trans->num_rx_queues * (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC); @@ -1120,7 +1120,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) if (ret) return ret; - if (trans->cfg->mq_rx_supported) + if (trans->trans_cfg->mq_rx_supported) iwl_pcie_rx_mq_hw_init(trans); else iwl_pcie_rx_hw_init(trans, trans_pcie->rxq); @@ -1151,7 +1151,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rb_allocator *rba = &trans_pcie->rba; int i; - size_t rb_stts_size = trans->cfg->device_family >= + size_t rb_stts_size = trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560 ? 
sizeof(__le16) : sizeof(struct iwl_rb_status); @@ -1347,7 +1347,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, } page_stolen |= rxcb._page_stolen; - if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560) break; offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN); } @@ -1392,14 +1392,14 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans, BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32); - if (!trans->cfg->mq_rx_supported) { + if (!trans->trans_cfg->mq_rx_supported) { rxb = rxq->queue[i]; rxq->queue[i] = NULL; return rxb; } /* used_bd is a 32/16 bit but only 12 are used to retrieve the vid */ - if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560) vid = le16_to_cpu(rxq->cd[i].rbid) & 0x0FFF; else vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF; @@ -1515,7 +1515,7 @@ out: /* Backtrack one entry */ rxq->read = i; /* update cr tail with the rxq read pointer */ - if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560) *rxq->cr_tail = cpu_to_le16(r); spin_unlock(&rxq->lock); @@ -1597,7 +1597,7 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) return; } - for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { + for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { if (!trans_pcie->txq[i]) continue; del_timer(&trans_pcie->txq[i]->stuck_timer); @@ -1838,7 +1838,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) if (inta & CSR_INT_BIT_ALIVE) { IWL_DEBUG_ISR(trans, "Alive interrupt\n"); isr_stats->alive++; - if (trans->cfg->gen2) { + if (trans->trans_cfg->gen2) { /* * We can restock, since firmware configured * the RFH @@ -2179,13 +2179,13 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) { IWL_DEBUG_ISR(trans, "Alive interrupt\n"); isr_stats->alive++; - if (trans->cfg->gen2) { + if (trans->trans_cfg->gen2) { /* We can restock, since firmware configured the RFH */ iwl_pcie_rxmq_restock(trans, trans_pcie->rxq); } } - if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560 && + if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22560 && inta_hw & MSIX_HW_INT_CAUSES_REG_IPC) { /* Reflect IML transfer status */ int res = iwl_read32(trans, CSR_IML_RESP_ADDR); @@ -2196,12 +2196,23 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) iwl_pcie_irq_handle_error(trans); } } else if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) { - /* uCode wakes up after power-down sleep */ - IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); - iwl_pcie_rxq_check_wrptr(trans); - iwl_pcie_txq_check_wrptrs(trans); + u32 sleep_notif = + le32_to_cpu(trans_pcie->prph_info->sleep_notif); + if (sleep_notif == IWL_D3_SLEEP_STATUS_SUSPEND || + sleep_notif == IWL_D3_SLEEP_STATUS_RESUME) { + IWL_DEBUG_ISR(trans, + "Sx interrupt: sleep notification = 0x%x\n", + sleep_notif); + trans_pcie->sx_complete = true; + wake_up(&trans_pcie->sx_waitq); + } else { + /* uCode wakes up after power-down sleep */ + IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); + iwl_pcie_rxq_check_wrptr(trans); + iwl_pcie_txq_check_wrptrs(trans); - isr_stats->wakeup++; + isr_stats->wakeup++; + } } if (inta_hw & MSIX_HW_INT_CAUSES_REG_IML) { diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index 8d17e68577fd..df8455f14e4d 100644 --- 
a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -92,7 +92,7 @@ int iwl_pcie_gen2_apm_init(struct iwl_trans *trans) iwl_pcie_apm_config(trans); - ret = iwl_finish_nic_init(trans); + ret = iwl_finish_nic_init(trans, trans->trans_cfg); if (ret) return ret; @@ -133,10 +133,10 @@ static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave) * D0A* (powered-up Active) --> D0U* (Uninitialized) state. */ iwl_clear_bit(trans, CSR_GP_CNTRL, - BIT(trans->cfg->csr->flag_init_done)); + BIT(trans->trans_cfg->csr->flag_init_done)); } -void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power) +void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -147,9 +147,6 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power) trans_pcie->is_down = true; - /* Stop dbgc before stopping device */ - iwl_fw_dbg_stop_recording(trans, NULL); - /* tell the device to stop sending interrupts */ iwl_disable_interrupts(trans); @@ -171,14 +168,14 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power) } iwl_pcie_ctxt_info_free_paging(trans); - if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560) iwl_pcie_ctxt_info_gen3_free(trans); else iwl_pcie_ctxt_info_free(trans); /* Make sure (redundant) we've released our request to stay awake */ iwl_clear_bit(trans, CSR_GP_CNTRL, - BIT(trans->cfg->csr->flag_mac_access_req)); + BIT(trans->trans_cfg->csr->flag_mac_access_req)); /* Stop the device, and put it in low power state */ iwl_pcie_gen2_apm_stop(trans, false); @@ -218,7 +215,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power) iwl_pcie_prepare_card_hw(trans); } -void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power) +void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool was_in_rfkill; @@ -226,7 +223,7 @@ void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power) mutex_lock(&trans_pcie->mutex); trans_pcie->opmode_down = true; was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status); - _iwl_trans_pcie_gen2_stop_device(trans, low_power); + _iwl_trans_pcie_gen2_stop_device(trans); iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill); mutex_unlock(&trans_pcie->mutex); } @@ -343,7 +340,7 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, goto out; } - if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560) ret = iwl_pcie_ctxt_info_gen3_init(trans, fw); else ret = iwl_pcie_ctxt_info_init(trans, fw); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index db62c8314603..5ab87a8dc907 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -69,7 +69,6 @@ #include <linux/bitops.h> #include <linux/gfp.h> #include <linux/vmalloc.h> -#include <linux/pm_runtime.h> #include <linux/module.h> #include <linux/wait.h> @@ -185,8 +184,8 @@ out: static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans) { /* Reset entire device - do controller reset (results in SHRD_HW_RST) */ - iwl_set_bit(trans, trans->cfg->csr->addr_sw_reset, - BIT(trans->cfg->csr->flag_sw_reset)); + 
iwl_set_bit(trans, trans->trans_cfg->csr->addr_sw_reset, + BIT(trans->trans_cfg->csr->flag_sw_reset)); usleep_range(5000, 6000); } @@ -216,8 +215,7 @@ static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans, for (power = max_power; power >= min_power; power--) { size = BIT(power); cpu_addr = dma_alloc_coherent(trans->dev, size, &phys, - GFP_KERNEL | __GFP_NOWARN | - __GFP_ZERO | __GFP_COMP); + GFP_KERNEL | __GFP_NOWARN); if (!cpu_addr) continue; @@ -343,7 +341,7 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans) */ /* Disable L0S exit timer (platform NMI Work/Around) */ - if (trans->cfg->device_family < IWL_DEVICE_FAMILY_8000) + if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS, CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); @@ -367,10 +365,10 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans) iwl_pcie_apm_config(trans); /* Configure analog phase-lock-loop before activating to D0A */ - if (trans->cfg->base_params->pll_cfg) + if (trans->trans_cfg->base_params->pll_cfg) iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL); - ret = iwl_finish_nic_init(trans); + ret = iwl_finish_nic_init(trans, trans->trans_cfg); if (ret) return ret; @@ -442,7 +440,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans) iwl_trans_pcie_sw_reset(trans); - ret = iwl_finish_nic_init(trans); + ret = iwl_finish_nic_init(trans, trans->trans_cfg); if (WARN_ON(ret)) { /* Release XTAL ON request */ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, @@ -492,7 +490,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans) * D0A* (powered-up Active) --> D0U* (Uninitialized) state. */ iwl_clear_bit(trans, CSR_GP_CNTRL, - BIT(trans->cfg->csr->flag_init_done)); + BIT(trans->trans_cfg->csr->flag_init_done)); /* Activates XTAL resources monitor */ __iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG, @@ -514,12 +512,12 @@ void iwl_pcie_apm_stop_master(struct iwl_trans *trans) int ret; /* stop device's busmaster DMA activity */ - iwl_set_bit(trans, trans->cfg->csr->addr_sw_reset, - BIT(trans->cfg->csr->flag_stop_master)); + iwl_set_bit(trans, trans->trans_cfg->csr->addr_sw_reset, + BIT(trans->trans_cfg->csr->flag_stop_master)); - ret = iwl_poll_bit(trans, trans->cfg->csr->addr_sw_reset, - BIT(trans->cfg->csr->flag_master_dis), - BIT(trans->cfg->csr->flag_master_dis), 100); + ret = iwl_poll_bit(trans, trans->trans_cfg->csr->addr_sw_reset, + BIT(trans->trans_cfg->csr->flag_master_dis), + BIT(trans->trans_cfg->csr->flag_master_dis), 100); if (ret < 0) IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n"); @@ -535,10 +533,11 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave) iwl_pcie_apm_init(trans); /* inform ME that we are leaving */ - if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) + if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG, APMG_PCIDEV_STT_VAL_WAKE_ME); - else if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) { + else if (trans->trans_cfg->device_family >= + IWL_DEVICE_FAMILY_8000) { iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, CSR_RESET_LINK_PWR_MGMT_DISABLED); iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, @@ -568,7 +567,7 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave) * D0A* (powered-up Active) --> D0U* (Uninitialized) state. 
*/ iwl_clear_bit(trans, CSR_GP_CNTRL, - BIT(trans->cfg->csr->flag_init_done)); + BIT(trans->trans_cfg->csr->flag_init_done)); } static int iwl_pcie_nic_init(struct iwl_trans *trans) @@ -595,7 +594,7 @@ static int iwl_pcie_nic_init(struct iwl_trans *trans) if (iwl_pcie_tx_init(trans)) return -ENOMEM; - if (trans->cfg->base_params->shadow_reg_enable) { + if (trans->trans_cfg->base_params->shadow_reg_enable) { /* enable shadow regs in HW */ iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF); IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n"); @@ -833,7 +832,7 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans, iwl_enable_interrupts(trans); - if (trans->cfg->use_tfh) { + if (trans->trans_cfg->use_tfh) { if (cpu == 1) iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS, 0xFFFF); @@ -898,12 +897,12 @@ void iwl_pcie_apply_destination(struct iwl_trans *trans) const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg.dest_tlv; int i; - if (trans->dbg.ini_valid) { + if (iwl_trans_dbg_ini_valid(trans)) { if (!trans->dbg.num_blocks) return; IWL_DEBUG_FW(trans, - "WRT: applying DRAM buffer[0] destination\n"); + "WRT: Applying DRAM buffer[0] destination\n"); iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2, trans->dbg.fw_mon[0].physical >> MON_BUFF_SHIFT_VER2); @@ -965,7 +964,7 @@ monitor: iwl_write_prph(trans, le32_to_cpu(dest->base_reg), trans->dbg.fw_mon[0].physical >> dest->base_shift); - if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000) iwl_write_prph(trans, le32_to_cpu(dest->end_reg), (trans->dbg.fw_mon[0].physical + trans->dbg.fw_mon[0].size - 256) >> @@ -1007,7 +1006,7 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans, /* supported for 7000 only for the moment */ if (iwlwifi_mod_params.fw_monitor && - trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) { + trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) { iwl_pcie_alloc_fw_monitor(trans, 0); if (trans->dbg.fw_mon[0].size) { @@ -1136,7 +1135,7 @@ static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE; int i, arr_size = - (trans->cfg->device_family != IWL_DEVICE_FAMILY_22560) ? + (trans->trans_cfg->device_family != IWL_DEVICE_FAMILY_22560) ? ARRAY_SIZE(causes_list) : ARRAY_SIZE(causes_list_v2); /* @@ -1146,7 +1145,8 @@ static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans) */ for (i = 0; i < arr_size; i++) { struct iwl_causes_list *causes = - (trans->cfg->device_family != IWL_DEVICE_FAMILY_22560) ? + (trans->trans_cfg->device_family != + IWL_DEVICE_FAMILY_22560) ? 
causes_list : causes_list_v2; iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val); @@ -1190,7 +1190,7 @@ void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie) struct iwl_trans *trans = trans_pcie->trans; if (!trans_pcie->msix_enabled) { - if (trans->cfg->mq_rx_supported && + if (trans->trans_cfg->mq_rx_supported && test_bit(STATUS_DEVICE_ENABLED, &trans->status)) iwl_write_umac_prph(trans, UREG_CHICK, UREG_CHICK_MSI_ENABLE); @@ -1231,7 +1231,7 @@ static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie) trans_pcie->hw_mask = trans_pcie->hw_init_mask; } -static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) +static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -1242,9 +1242,6 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) trans_pcie->is_down = true; - /* Stop dbgc before stopping device */ - iwl_fw_dbg_stop_recording(trans, NULL); - /* tell the device to stop sending interrupts */ iwl_disable_interrupts(trans); @@ -1274,7 +1271,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) /* Make sure (redundant) we've released our request to stay awake */ iwl_clear_bit(trans, CSR_GP_CNTRL, - BIT(trans->cfg->csr->flag_mac_access_req)); + BIT(trans->trans_cfg->csr->flag_mac_access_req)); /* Stop the device, and put it in low power state */ iwl_pcie_apm_stop(trans, false); @@ -1401,7 +1398,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); /* Load the given image to the HW */ - if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000) ret = iwl_pcie_load_given_ucode_8000(trans, fw); else ret = iwl_pcie_load_given_ucode(trans, fw); @@ -1451,7 +1448,7 @@ void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans, iwl_trans_pcie_rf_kill(trans, hw_rfkill); } -static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) +static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool was_in_rfkill; @@ -1459,7 +1456,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) mutex_lock(&trans_pcie->mutex); trans_pcie->opmode_down = true; was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status); - _iwl_trans_pcie_stop_device(trans, low_power); + _iwl_trans_pcie_stop_device(trans); iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill); mutex_unlock(&trans_pcie->mutex); } @@ -1474,22 +1471,16 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state) IWL_WARN(trans, "reporting RF_KILL (radio %s)\n", state ? 
"disabled" : "enabled"); if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) { - if (trans->cfg->gen2) - _iwl_trans_pcie_gen2_stop_device(trans, true); + if (trans->trans_cfg->gen2) + _iwl_trans_pcie_gen2_stop_device(trans); else - _iwl_trans_pcie_stop_device(trans, true); + _iwl_trans_pcie_stop_device(trans); } } -static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, - bool reset) +void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans, + bool test, bool reset) { - if (!reset) { - /* Enable persistence mode to avoid reset */ - iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_PERSIST_MODE); - } - iwl_disable_interrupts(trans); /* @@ -1504,9 +1495,9 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, iwl_pcie_synchronize_irqs(trans); iwl_clear_bit(trans, CSR_GP_CNTRL, - BIT(trans->cfg->csr->flag_mac_access_req)); + BIT(trans->trans_cfg->csr->flag_mac_access_req)); iwl_clear_bit(trans, CSR_GP_CNTRL, - BIT(trans->cfg->csr->flag_init_done)); + BIT(trans->trans_cfg->csr->flag_init_done)); if (reset) { /* @@ -1520,6 +1511,42 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, iwl_pcie_set_pwr(trans, true); } +static int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, + bool reset) +{ + int ret; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + /* + * Family IWL_DEVICE_FAMILY_AX210 and above persist mode is set by FW. + */ + if (!reset && trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) { + /* Enable persistence mode to avoid reset */ + iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_PERSIST_MODE); + } + + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6, + UREG_DOORBELL_TO_ISR6_SUSPEND); + + ret = wait_event_timeout(trans_pcie->sx_waitq, + trans_pcie->sx_complete, 2 * HZ); + /* + * Invalidate it toward resume. + */ + trans_pcie->sx_complete = false; + + if (!ret) { + IWL_ERR(trans, "Timeout entering D3\n"); + return -ETIMEDOUT; + } + } + iwl_pcie_d3_complete_suspend(trans, test, reset); + + return 0; +} + static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status, bool test, bool reset) @@ -1531,13 +1558,13 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, if (test) { iwl_enable_interrupts(trans); *status = IWL_D3_STATUS_ALIVE; - return 0; + goto out; } iwl_set_bit(trans, CSR_GP_CNTRL, - BIT(trans->cfg->csr->flag_mac_access_req)); + BIT(trans->trans_cfg->csr->flag_mac_access_req)); - ret = iwl_finish_nic_init(trans); + ret = iwl_finish_nic_init(trans, trans->trans_cfg); if (ret) return ret; @@ -1557,7 +1584,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, if (!reset) { iwl_clear_bit(trans, CSR_GP_CNTRL, - BIT(trans->cfg->csr->flag_mac_access_req)); + BIT(trans->trans_cfg->csr->flag_mac_access_req)); } else { iwl_trans_pcie_tx_reset(trans); @@ -1578,17 +1605,38 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, else *status = IWL_D3_STATUS_ALIVE; +out: + if (*status == IWL_D3_STATUS_ALIVE && + trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + trans_pcie->sx_complete = false; + iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6, + UREG_DOORBELL_TO_ISR6_RESUME); + + ret = wait_event_timeout(trans_pcie->sx_waitq, + trans_pcie->sx_complete, 2 * HZ); + /* + * Invalidate it toward next suspend. 
+ */ + trans_pcie->sx_complete = false; + + if (!ret) { + IWL_ERR(trans, "Timeout exiting D3\n"); + return -ETIMEDOUT; + } + } return 0; } -static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev, - struct iwl_trans *trans) +static void +iwl_pcie_set_interrupt_capa(struct pci_dev *pdev, + struct iwl_trans *trans, + const struct iwl_cfg_trans_params *cfg_trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int max_irqs, num_irqs, i, ret; u16 pci_cmd; - if (!trans->cfg->mq_rx_supported) + if (!cfg_trans->mq_rx_supported) goto enable_msi; max_irqs = min_t(u32, num_online_cpus() + 2, IWL_MAX_RX_HW_QUEUES); @@ -1709,7 +1757,7 @@ static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans) { u32 hpm, wprot; - switch (trans->cfg->device_family) { + switch (trans->trans_cfg->device_family) { case IWL_DEVICE_FAMILY_9000: wprot = PREG_PRPH_WPROT_9000; break; @@ -1736,7 +1784,7 @@ static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans) return 0; } -static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) +static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int err; @@ -1772,20 +1820,16 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) /* ...rfkill can call stop_device and set it false if needed */ iwl_pcie_check_hw_rf_kill(trans); - /* Make sure we sync here, because we'll need full access later */ - if (low_power) - pm_runtime_resume(trans->dev); - return 0; } -static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) +static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int ret; mutex_lock(&trans_pcie->mutex); - ret = _iwl_trans_pcie_start_hw(trans, low_power); + ret = _iwl_trans_pcie_start_hw(trans); mutex_unlock(&trans_pcie->mutex); return ret; @@ -1828,7 +1872,7 @@ static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs) static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans) { - if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560) return 0x00FFFFFF; else return 0x000FFFFF; @@ -1899,7 +1943,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans) iwl_pcie_synchronize_irqs(trans); - if (trans->cfg->gen2) + if (trans->trans_cfg->gen2) iwl_pcie_gen2_tx_free(trans); else iwl_pcie_tx_free(trans); @@ -1981,8 +2025,8 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, /* this bit wakes up the NIC */ __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, - BIT(trans->cfg->csr->flag_mac_access_req)); - if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) + BIT(trans->trans_cfg->csr->flag_mac_access_req)); + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000) udelay(2); /* @@ -2006,8 +2050,8 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, * and do not save/restore SRAM when power cycling. 
*/ ret = iwl_poll_bit(trans, CSR_GP_CNTRL, - BIT(trans->cfg->csr->flag_val_mac_access_en), - (BIT(trans->cfg->csr->flag_mac_clock_ready) | + BIT(trans->trans_cfg->csr->flag_val_mac_access_en), + (BIT(trans->trans_cfg->csr->flag_mac_clock_ready) | CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000); if (unlikely(ret < 0)) { u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL); @@ -2089,7 +2133,7 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans, goto out; __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, - BIT(trans->cfg->csr->flag_mac_access_req)); + BIT(trans->trans_cfg->csr->flag_mac_access_req)); /* * Above we read the CSR_GP_CNTRL register, which will flush * any previous writes, but we need the write that clears the @@ -2196,7 +2240,7 @@ static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int i; - for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { + for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { struct iwl_txq *txq = trans_pcie->txq[i]; if (i == trans_pcie->cmd_queue) @@ -2227,7 +2271,7 @@ void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq) bool active; u8 fifo; - if (trans->cfg->use_tfh) { + if (trans->trans_cfg->use_tfh) { IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id, txq->read_ptr, txq->write_ptr); /* TODO: access new SCD registers and dump them */ @@ -2244,10 +2288,10 @@ void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq) jiffies_to_msecs(txq->wd_timeout), txq->read_ptr, txq->write_ptr, iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & - (trans->cfg->base_params->max_tfd_queue_size - 1), - iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) & - (trans->cfg->base_params->max_tfd_queue_size - 1), - iwl_read_direct32(trans, FH_TX_TRB_REG(fifo))); + (trans->trans_cfg->base_params->max_tfd_queue_size - 1), + iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) & + (trans->trans_cfg->base_params->max_tfd_queue_size - 1), + iwl_read_direct32(trans, FH_TX_TRB_REG(fifo))); } static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue, @@ -2335,7 +2379,9 @@ static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm) int ret = 0; /* waiting for all the tx frames complete might take a while */ - for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) { + for (cnt = 0; + cnt < trans->trans_cfg->base_params->num_of_queues; + cnt++) { if (cnt == trans_pcie->cmd_queue) continue; @@ -2363,37 +2409,6 @@ static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg, spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); } -static void iwl_trans_pcie_ref(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - - if (iwlwifi_mod_params.d0i3_disable) - return; - - pm_runtime_get(&trans_pcie->pci_dev->dev); - -#ifdef CONFIG_PM - IWL_DEBUG_RPM(trans, "runtime usage count: %d\n", - atomic_read(&trans_pcie->pci_dev->dev.power.usage_count)); -#endif /* CONFIG_PM */ -} - -static void iwl_trans_pcie_unref(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - - if (iwlwifi_mod_params.d0i3_disable) - return; - - pm_runtime_mark_last_busy(&trans_pcie->pci_dev->dev); - pm_runtime_put_autosuspend(&trans_pcie->pci_dev->dev); - -#ifdef CONFIG_PM - IWL_DEBUG_RPM(trans, "runtime usage count: %d\n", - atomic_read(&trans_pcie->pci_dev->dev.power.usage_count)); -#endif /* CONFIG_PM */ -} - static 
const char *get_csr_string(int cmd) { #define IWL_CMD(x) case x: return #x @@ -2510,7 +2525,8 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file, int ret; size_t bufsz; - bufsz = sizeof(char) * 75 * trans->cfg->base_params->num_of_queues; + bufsz = sizeof(char) * 75 * + trans->trans_cfg->base_params->num_of_queues; if (!trans_pcie->txq_memory) return -EAGAIN; @@ -2519,7 +2535,9 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file, if (!buf) return -ENOMEM; - for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) { + for (cnt = 0; + cnt < trans->trans_cfg->base_params->num_of_queues; + cnt++) { txq = trans_pcie->txq[cnt]; pos += scnprintf(buf + pos, bufsz - pos, "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n", @@ -2989,7 +3007,7 @@ static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans, (*data)->len = cpu_to_le32(fh_regs_len); val = (void *)(*data)->data; - if (!trans->cfg->gen2) + if (!trans->trans_cfg->gen2) for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND; i += sizeof(u32)) *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i)); @@ -3037,7 +3055,7 @@ iwl_trans_pcie_dump_pointers(struct iwl_trans *trans, { u32 base, base_high, write_ptr, write_ptr_val, wrap_cnt; - if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { base = DBGC_CUR_DBGBUF_BASE_ADDR_LSB; base_high = DBGC_CUR_DBGBUF_BASE_ADDR_MSB; write_ptr = DBGC_CUR_DBGBUF_STATUS; @@ -3057,7 +3075,7 @@ iwl_trans_pcie_dump_pointers(struct iwl_trans *trans, cpu_to_le32(iwl_read_prph(trans, wrap_cnt)); fw_mon_data->fw_mon_base_ptr = cpu_to_le32(iwl_read_prph(trans, base)); - if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { fw_mon_data->fw_mon_base_high_ptr = cpu_to_le32(iwl_read_prph(trans, base_high)); write_ptr_val &= DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK; @@ -3074,8 +3092,8 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans, if (trans->dbg.dest_tlv || (trans->dbg.num_blocks && - (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 || - trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) { + (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000 || + trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) { struct iwl_fw_error_dump_fw_mon *fw_mon_data; (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR); @@ -3158,7 +3176,7 @@ static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len) trans->dbg.dest_tlv->end_shift; /* Make "end" point to the actual end */ - if (trans->cfg->device_family >= + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000 || trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) end += (1 << trans->dbg.dest_tlv->end_shift); @@ -3184,7 +3202,7 @@ static struct iwl_trans_dump_data u32 len, num_rbs = 0, monitor_len = 0; int i, ptr; bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) && - !trans->cfg->mq_rx_supported && + !trans->trans_cfg->mq_rx_supported && dump_mask & BIT(IWL_FW_ERROR_DUMP_RB); if (!dump_mask) @@ -3209,7 +3227,7 @@ static struct iwl_trans_dump_data /* FH registers */ if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) { - if (trans->cfg->gen2) + if (trans->trans_cfg->gen2) len += sizeof(*data) + (iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2) - iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2)); @@ -3233,7 +3251,7 @@ static struct iwl_trans_dump_data } /* Paged memory for gen2 HW */ - if (trans->cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) + 
if (trans->trans_cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) for (i = 0; i < trans->init_dram.paging_cnt; i++) len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_paging) + @@ -3288,7 +3306,8 @@ static struct iwl_trans_dump_data len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs); /* Paged memory for gen2 HW */ - if (trans->cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) { + if (trans->trans_cfg->gen2 && + dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) { for (i = 0; i < trans->init_dram.paging_cnt; i++) { struct iwl_fw_error_dump_paging *paging; u32 page_len = trans->init_dram.paging[i].size; @@ -3315,18 +3334,11 @@ static struct iwl_trans_dump_data #ifdef CONFIG_PM_SLEEP static int iwl_trans_pcie_suspend(struct iwl_trans *trans) { - if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 && - (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3)) - return iwl_pci_fw_enter_d0i3(trans); - return 0; } static void iwl_trans_pcie_resume(struct iwl_trans *trans) { - if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 && - (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3)) - iwl_pci_fw_exit_d0i3(trans); } #endif /* CONFIG_PM_SLEEP */ @@ -3345,8 +3357,6 @@ static void iwl_trans_pcie_resume(struct iwl_trans *trans) .grab_nic_access = iwl_trans_pcie_grab_nic_access, \ .release_nic_access = iwl_trans_pcie_release_nic_access, \ .set_bits_mask = iwl_trans_pcie_set_bits_mask, \ - .ref = iwl_trans_pcie_ref, \ - .unref = iwl_trans_pcie_unref, \ .dump_data = iwl_trans_pcie_dump_data, \ .d3_suspend = iwl_trans_pcie_d3_suspend, \ .d3_resume = iwl_trans_pcie_d3_resume, \ @@ -3400,6 +3410,8 @@ static const struct iwl_trans_ops trans_ops_pcie_gen2 = { .tx = iwl_trans_pcie_gen2_tx, .reclaim = iwl_trans_pcie_reclaim, + .set_q_ptrs = iwl_trans_pcie_set_q_ptrs, + .txq_alloc = iwl_trans_pcie_dyn_txq_alloc, .txq_free = iwl_trans_pcie_dyn_txq_free, .wait_txq_empty = iwl_trans_pcie_wait_txq_empty, @@ -3410,8 +3422,8 @@ static const struct iwl_trans_ops trans_ops_pcie_gen2 = { }; struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, - const struct pci_device_id *ent, - const struct iwl_cfg *cfg) + const struct pci_device_id *ent, + const struct iwl_cfg_trans_params *cfg_trans) { struct iwl_trans_pcie *trans_pcie; struct iwl_trans *trans; @@ -3421,12 +3433,13 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, if (ret) return ERR_PTR(ret); - if (cfg->gen2) + if (cfg_trans->gen2) trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), - &pdev->dev, cfg, &trans_ops_pcie_gen2); + &pdev->dev, &trans_ops_pcie_gen2); else trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), - &pdev->dev, cfg, &trans_ops_pcie); + &pdev->dev, &trans_ops_pcie); + if (!trans) return ERR_PTR(-ENOMEM); @@ -3445,7 +3458,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, } trans_pcie->debug_rfkill = -1; - if (!cfg->base_params->pcie_l1_allowed) { + if (!cfg_trans->base_params->pcie_l1_allowed) { /* * W/A - seems to solve weird behavior. We need to remove this * if we don't want to stay in L1 all the time. This wastes a @@ -3458,7 +3471,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, trans_pcie->def_rx_queue = 0; - if (cfg->use_tfh) { + if (cfg_trans->use_tfh) { addr_size = 64; trans_pcie->max_tbs = IWL_TFH_NUM_TBS; trans_pcie->tfd_size = sizeof(struct iwl_tfh_tfd); @@ -3520,9 +3533,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, * "dash" value). To keep hw_rev backwards compatible - we'll store it * in the old format. 
*/ - if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) { - unsigned long flags; - + if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_8000) { trans->hw_rev = (trans->hw_rev & 0xfff0) | (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2); @@ -3536,94 +3547,15 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, * in-order to recognize C step driver should read chip version * id located at the AUX bus MISC address space. */ - ret = iwl_finish_nic_init(trans); + ret = iwl_finish_nic_init(trans, cfg_trans); if (ret) goto out_no_pci; - if (iwl_trans_grab_nic_access(trans, &flags)) { - u32 hw_step; - - hw_step = iwl_read_umac_prph_no_grab(trans, - WFPM_CTRL_REG); - hw_step |= ENABLE_WFPM; - iwl_write_umac_prph_no_grab(trans, WFPM_CTRL_REG, - hw_step); - hw_step = iwl_read_prph_no_grab(trans, - CNVI_AUX_MISC_CHIP); - hw_step = (hw_step >> HW_STEP_LOCATION_BITS) & 0xF; - if (hw_step == 0x3) - trans->hw_rev = (trans->hw_rev & 0xFFFFFFF3) | - (SILICON_C_STEP << 2); - iwl_trans_release_nic_access(trans, &flags); - } } IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", trans->hw_rev); -#if IS_ENABLED(CONFIG_IWLMVM) - trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID); - - if (cfg == &iwlax210_2ax_cfg_so_hr_a0) { - if (trans->hw_rev == CSR_HW_REV_TYPE_TY) { - trans->cfg = &iwlax210_2ax_cfg_ty_gf_a0; - } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == - CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) { - trans->cfg = &iwlax210_2ax_cfg_so_jf_a0; - } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == - CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF)) { - trans->cfg = &iwlax211_2ax_cfg_so_gf_a0; - } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == - CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF4)) { - trans->cfg = &iwlax411_2ax_cfg_so_gf4_a0; - } - } else if (cfg == &iwl_ax101_cfg_qu_hr) { - if ((CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == - CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) && - trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) || - (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == - CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR1))) { - trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0; - } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == - CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) { - trans->cfg = &iwl_ax101_cfg_qu_hr; - } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == - CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) { - trans->cfg = &iwl22000_2ax_cfg_jf; - } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == - CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HRCDB)) { - IWL_ERR(trans, "RF ID HRCDB is not supported\n"); - ret = -EINVAL; - goto out_no_pci; - } else { - IWL_ERR(trans, "Unrecognized RF ID 0x%08x\n", - CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id)); - ret = -EINVAL; - goto out_no_pci; - } - } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == - CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) && - trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) { - u32 hw_status; - - hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS); - if (CSR_HW_RF_STEP(trans->hw_rf_id) == SILICON_B_STEP) - /* - * b step fw is the same for physical card and fpga - */ - trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0; - else if ((hw_status & UMAG_GEN_HW_IS_FPGA) && - CSR_HW_RF_STEP(trans->hw_rf_id) == SILICON_A_STEP) { - trans->cfg = &iwl22000_2ax_cfg_qnj_hr_a0_f0; - } else { - /* - * a step no FPGA - */ - trans->cfg = &iwl22000_2ac_cfg_hr; - } - } -#endif - - iwl_pcie_set_interrupt_capa(pdev, trans); + iwl_pcie_set_interrupt_capa(pdev, trans, cfg_trans); trans->hw_id = (pdev->device << 16) + 
pdev->subsystem_device; snprintf(trans->hw_id_str, sizeof(trans->hw_id_str), "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device); @@ -3631,7 +3563,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, /* Initialize the wait queue for commands */ init_waitqueue_head(&trans_pcie->wait_command_queue); - init_waitqueue_head(&trans_pcie->d0i3_waitq); + init_waitqueue_head(&trans_pcie->sx_waitq); if (trans_pcie->msix_enabled) { ret = iwl_pcie_init_msix_handler(pdev, trans_pcie); @@ -3657,12 +3589,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, WQ_HIGHPRI | WQ_UNBOUND, 1); INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work); -#ifdef CONFIG_IWLWIFI_PCIE_RTPM - trans->runtime_pm_mode = IWL_PLAT_PM_MODE_D0I3; -#else - trans->runtime_pm_mode = IWL_PLAT_PM_MODE_DISABLED; -#endif /* CONFIG_IWLWIFI_PCIE_RTPM */ - #ifdef CONFIG_IWLWIFI_DEBUGFS trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED; mutex_init(&trans_pcie->fw_mon_data.mutex); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index 9ef6b8fe03c1..8894027429d6 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -50,7 +50,6 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ -#include <linux/pm_runtime.h> #include <net/tso.h> #include <linux/tcp.h> @@ -114,7 +113,7 @@ void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie, */ num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1; - if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) { + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560) { /* Starting from 22560, the HW expects bytes */ WARN_ON(trans_pcie->bc_table_dword); WARN_ON(len > 0x3FFF); @@ -548,7 +547,7 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans, memset(tfd, 0, sizeof(*tfd)); - if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560) + if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_22560) len = sizeof(struct iwl_tx_cmd_gen2); else len = sizeof(struct iwl_tx_cmd_gen3); @@ -630,7 +629,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, return -1; } - if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) { + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560) { struct iwl_tx_cmd_gen3 *tx_cmd_gen3 = (void *)dev_cmd->payload; @@ -647,12 +646,8 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, iwl_pcie_gen2_get_num_tbs(trans, tfd)); /* start timer if queue currently empty */ - if (txq->read_ptr == txq->write_ptr) { - if (txq->wd_timeout) - mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); - IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", txq->id); - iwl_trans_ref(trans); - } + if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) + mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); /* Tell device the write index *just past* this latest filled TFD */ txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr); @@ -897,12 +892,6 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); spin_lock_irqsave(&trans_pcie->reg_lock, flags); - if (!(cmd->flags & CMD_SEND_IN_IDLE) && - !trans_pcie->ref_cmd_in_flight) { - trans_pcie->ref_cmd_in_flight = true; - IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n"); - iwl_trans_ref(trans); 
- } /* Increment and update queue's write index */ txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr); iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq); @@ -936,16 +925,6 @@ static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans, IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str); - if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) { - ret = wait_event_timeout(trans_pcie->d0i3_waitq, - pm_runtime_active(&trans_pcie->pci_dev->dev), - msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT)); - if (!ret) { - IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n"); - return -ETIMEDOUT; - } - } - cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd); if (cmd_idx < 0) { ret = cmd_idx; @@ -1070,23 +1049,6 @@ void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id) } iwl_pcie_gen2_free_tfd(trans, txq); txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr); - - if (txq->read_ptr == txq->write_ptr) { - unsigned long flags; - - spin_lock_irqsave(&trans_pcie->reg_lock, flags); - if (txq_id != trans_pcie->cmd_queue) { - IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n", - txq->id); - iwl_trans_unref(trans); - } else if (trans_pcie->ref_cmd_in_flight) { - trans_pcie->ref_cmd_in_flight = false; - IWL_DEBUG_RPM(trans, - "clear ref_cmd_in_flight\n"); - iwl_trans_unref(trans); - } - spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); - } } while (!skb_queue_empty(&txq->overflow_q)) { @@ -1167,7 +1129,7 @@ int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans, if (!txq) return -ENOMEM; ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl, - (trans->cfg->device_family >= + (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560) ? sizeof(struct iwl_gen3_bc_tbl) : sizeof(struct iwlagn_scd_bc_tbl)); @@ -1231,7 +1193,7 @@ int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans, txq->id = qid; trans_pcie->txq[qid] = txq; - wr_ptr &= (trans->cfg->base_params->max_tfd_queue_size - 1); + wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1); /* Place first TFD at index corresponding to start sequence number */ txq->read_ptr = wr_ptr; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 2f0ba7ef53b8..4806a04cec8c 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -65,7 +65,6 @@ #include <linux/ieee80211.h> #include <linux/slab.h> #include <linux/sched.h> -#include <linux/pm_runtime.h> #include <net/ip6_checksum.h> #include <net/tso.h> @@ -114,17 +113,17 @@ int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q) * If q->n_window is smaller than max_tfd_queue_size, there is no need * to reserve any queue entries for this purpose. */ - if (q->n_window < trans->cfg->base_params->max_tfd_queue_size) + if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size) max = q->n_window; else - max = trans->cfg->base_params->max_tfd_queue_size - 1; + max = trans->trans_cfg->base_params->max_tfd_queue_size - 1; /* * max_tfd_queue_size is a power of 2, so the following is equivalent to * modulo by max_tfd_queue_size and is well defined. */ used = (q->write_ptr - q->read_ptr) & - (trans->cfg->base_params->max_tfd_queue_size - 1); + (trans->trans_cfg->base_params->max_tfd_queue_size - 1); if (WARN_ON(used > max)) return 0; @@ -293,7 +292,7 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, * 2. NIC is woken up for CMD regardless of shadow outside this function * 3. 
there is a chance that the NIC is asleep */ - if (!trans->cfg->base_params->shadow_reg_enable && + if (!trans->trans_cfg->base_params->shadow_reg_enable && txq_id != trans_pcie->cmd_queue && test_bit(STATUS_TPOWER_PMI, &trans->status)) { /* @@ -307,7 +306,7 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n", txq_id, reg); iwl_set_bit(trans, CSR_GP_CNTRL, - BIT(trans->cfg->csr->flag_mac_access_req)); + BIT(trans->trans_cfg->csr->flag_mac_access_req)); txq->need_update = true; return; } @@ -328,7 +327,7 @@ void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int i; - for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { + for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { struct iwl_txq *txq = trans_pcie->txq[i]; if (!test_bit(i, trans_pcie->queue_used)) @@ -347,7 +346,7 @@ static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_trans *trans, void *_tfd, u8 idx) { - if (trans->cfg->use_tfh) { + if (trans->trans_cfg->use_tfh) { struct iwl_tfh_tfd *tfd = _tfd; struct iwl_tfh_tb *tb = &tfd->tbs[idx]; @@ -390,7 +389,7 @@ static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd, static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_trans *trans, void *_tfd) { - if (trans->cfg->use_tfh) { + if (trans->trans_cfg->use_tfh) { struct iwl_tfh_tfd *tfd = _tfd; return le16_to_cpu(tfd->num_tbs) & 0x1f; @@ -437,7 +436,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans, meta->tbs = 0; - if (trans->cfg->use_tfh) { + if (trans->trans_cfg->use_tfh) { struct iwl_tfh_tfd *tfd_fh = (void *)tfd; tfd_fh->num_tbs = 0; @@ -525,14 +524,14 @@ int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); size_t tfd_sz = trans_pcie->tfd_size * - trans->cfg->base_params->max_tfd_queue_size; + trans->trans_cfg->base_params->max_tfd_queue_size; size_t tb0_buf_sz; int i; if (WARN_ON(txq->entries || txq->tfds)) return -EINVAL; - if (trans->cfg->use_tfh) + if (trans->trans_cfg->use_tfh) tfd_sz = trans_pcie->tfd_size * slots_num; timer_setup(&txq->stuck_timer, iwl_pcie_txq_stuck_timer, 0); @@ -591,7 +590,8 @@ int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, bool cmd_queue) { int ret; - u32 tfd_queue_max_size = trans->cfg->base_params->max_tfd_queue_size; + u32 tfd_queue_max_size = + trans->trans_cfg->base_params->max_tfd_queue_size; txq->need_update = false; @@ -639,20 +639,14 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans) lockdep_assert_held(&trans_pcie->reg_lock); - if (trans_pcie->ref_cmd_in_flight) { - trans_pcie->ref_cmd_in_flight = false; - IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n"); - iwl_trans_unref(trans); - } - - if (!trans->cfg->base_params->apmg_wake_up_wa) + if (!trans->trans_cfg->base_params->apmg_wake_up_wa) return; if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) return; trans_pcie->cmd_hold_nic_awake = false; __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, - BIT(trans->cfg->csr->flag_mac_access_req)); + BIT(trans->trans_cfg->csr->flag_mac_access_req)); } /* @@ -683,13 +677,8 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) unsigned long flags; spin_lock_irqsave(&trans_pcie->reg_lock, flags); - if (txq_id != trans_pcie->cmd_queue) { - IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n", - txq->id); - iwl_trans_unref(trans); - } else { + if (txq_id 
== trans_pcie->cmd_queue) iwl_pcie_clear_cmd_in_flight(trans); - } spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); } } @@ -737,7 +726,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) if (txq->tfds) { dma_free_coherent(dev, trans_pcie->tfd_size * - trans->cfg->base_params->max_tfd_queue_size, + trans->trans_cfg->base_params->max_tfd_queue_size, txq->tfds, txq->dma_addr); txq->dma_addr = 0; txq->tfds = NULL; @@ -759,7 +748,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - int nq = trans->cfg->base_params->num_of_queues; + int nq = trans->trans_cfg->base_params->num_of_queues; int chan; u32 reg_val; int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) - @@ -786,7 +775,7 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) /* The chain extension of the SCD doesn't work well. This feature is * enabled by default by the HW, so we need to disable it manually. */ - if (trans->cfg->base_params->scd_chain_ext_wa) + if (trans->trans_cfg->base_params->scd_chain_ext_wa) iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue, @@ -808,7 +797,7 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); /* Enable L1-Active */ - if (trans->cfg->device_family < IWL_DEVICE_FAMILY_8000) + if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG, APMG_PCIDEV_STT_VAL_L1_ACT_DIS); } @@ -822,13 +811,13 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans) * we should never get here in gen2 trans mode return early to avoid * having invalid accesses */ - if (WARN_ON_ONCE(trans->cfg->gen2)) + if (WARN_ON_ONCE(trans->trans_cfg->gen2)) return; - for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; + for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; txq_id++) { struct iwl_txq *txq = trans_pcie->txq[txq_id]; - if (trans->cfg->use_tfh) + if (trans->trans_cfg->use_tfh) iwl_write_direct64(trans, FH_MEM_CBBC_QUEUE(trans, txq_id), txq->dma_addr); @@ -911,7 +900,7 @@ int iwl_pcie_tx_stop(struct iwl_trans *trans) return 0; /* Unmap DMA from host system and free skb's */ - for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; + for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; txq_id++) iwl_pcie_txq_unmap(trans, txq_id); @@ -933,7 +922,7 @@ void iwl_pcie_tx_free(struct iwl_trans *trans) /* Tx queues */ if (trans_pcie->txq_memory) { for (txq_id = 0; - txq_id < trans->cfg->base_params->num_of_queues; + txq_id < trans->trans_cfg->base_params->num_of_queues; txq_id++) { iwl_pcie_txq_free(trans, txq_id); trans_pcie->txq[txq_id] = NULL; @@ -957,9 +946,10 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans) int ret; int txq_id, slots_num; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - u16 bc_tbls_size = trans->cfg->base_params->num_of_queues; + u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues; - bc_tbls_size *= (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ? + bc_tbls_size *= (trans->trans_cfg->device_family >= + IWL_DEVICE_FAMILY_22560) ? 
sizeof(struct iwl_gen3_bc_tbl) : sizeof(struct iwlagn_scd_bc_tbl); @@ -984,8 +974,9 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans) goto error; } - trans_pcie->txq_memory = kcalloc(trans->cfg->base_params->num_of_queues, - sizeof(struct iwl_txq), GFP_KERNEL); + trans_pcie->txq_memory = + kcalloc(trans->trans_cfg->base_params->num_of_queues, + sizeof(struct iwl_txq), GFP_KERNEL); if (!trans_pcie->txq_memory) { IWL_ERR(trans, "Not enough memory for txq\n"); ret = -ENOMEM; @@ -993,7 +984,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans) } /* Alloc and init all Tx queues, including the command queue (#4/#9) */ - for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; + for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; txq_id++) { bool cmd_queue = (txq_id == trans_pcie->cmd_queue); @@ -1047,7 +1038,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans) spin_unlock(&trans_pcie->irq_lock); /* Alloc and init all Tx queues, including the command queue (#4/#9) */ - for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; + for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; txq_id++) { bool cmd_queue = (txq_id == trans_pcie->cmd_queue); @@ -1075,7 +1066,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans) } iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE); - if (trans->cfg->base_params->num_of_queues > 20) + if (trans->trans_cfg->base_params->num_of_queues > 20) iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_ENABLE_31_QUEUES); @@ -1147,7 +1138,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, IWL_ERR(trans, "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", __func__, txq_id, last_to_free, - trans->cfg->base_params->max_tfd_queue_size, + trans->trans_cfg->base_params->max_tfd_queue_size, txq->write_ptr, txq->read_ptr); goto out; } @@ -1170,7 +1161,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, txq->entries[read_ptr].skb = NULL; - if (!trans->cfg->use_tfh) + if (!trans->trans_cfg->use_tfh) iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq); iwl_pcie_txq_free_tfd(trans, txq); @@ -1225,20 +1216,28 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, txq->overflow_tx = false; } - if (txq->read_ptr == txq->write_ptr) { - IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", txq->id); - iwl_trans_unref(trans); - } - out: spin_unlock_bh(&txq->lock); } +/* Set wr_ptr of specific device and txq */ +void iwl_trans_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq = trans_pcie->txq[txq_id]; + + spin_lock_bh(&txq->lock); + + txq->write_ptr = ptr; + txq->read_ptr = txq->write_ptr; + + spin_unlock_bh(&txq->lock); +} + static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, const struct iwl_host_cmd *cmd) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - const struct iwl_cfg *cfg = trans->cfg; int ret; lockdep_assert_held(&trans_pcie->reg_lock); @@ -1247,32 +1246,25 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, if (test_bit(STATUS_TRANS_DEAD, &trans->status)) return -ENODEV; - if (!(cmd->flags & CMD_SEND_IN_IDLE) && - !trans_pcie->ref_cmd_in_flight) { - trans_pcie->ref_cmd_in_flight = true; - IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n"); - iwl_trans_ref(trans); - } - /* * wake up the NIC to make sure that the firmware will see the host * command - we 
will let the NIC sleep once all the host commands * returned. This needs to be done only on NICs that have * apmg_wake_up_wa set. */ - if (cfg->base_params->apmg_wake_up_wa && + if (trans->trans_cfg->base_params->apmg_wake_up_wa && !trans_pcie->cmd_hold_nic_awake) { __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, - BIT(cfg->csr->flag_mac_access_req)); + BIT(trans->trans_cfg->csr->flag_mac_access_req)); ret = iwl_poll_bit(trans, CSR_GP_CNTRL, - BIT(cfg->csr->flag_val_mac_access_en), - (BIT(cfg->csr->flag_mac_clock_ready) | + BIT(trans->trans_cfg->csr->flag_val_mac_access_en), + (BIT(trans->trans_cfg->csr->flag_mac_clock_ready) | CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000); if (ret < 0) { __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, - BIT(cfg->csr->flag_mac_access_req)); + BIT(trans->trans_cfg->csr->flag_mac_access_req)); IWL_ERR(trans, "Failed to wake NIC for hcmd\n"); return -EIO; } @@ -1302,12 +1294,12 @@ void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) idx = iwl_pcie_get_cmd_index(txq, idx); r = iwl_pcie_get_cmd_index(txq, txq->read_ptr); - if (idx >= trans->cfg->base_params->max_tfd_queue_size || + if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size || (!iwl_queue_used(txq, idx))) { WARN_ONCE(test_bit(txq_id, trans_pcie->queue_used), "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", __func__, txq_id, idx, - trans->cfg->base_params->max_tfd_queue_size, + trans->trans_cfg->base_params->max_tfd_queue_size, txq->write_ptr, txq->read_ptr); return; } @@ -1421,7 +1413,7 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, * this sad hardware issue. * This bug has been fixed on devices 9000 and up. */ - scd_bug = !trans->cfg->mq_rx_supported && + scd_bug = !trans->trans_cfg->mq_rx_supported && !((ssn - txq->write_ptr) & 0x3f) && (ssn != txq->write_ptr); if (scd_bug) @@ -1867,20 +1859,6 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans, wake_up(&trans_pcie->wait_command_queue); } - if (meta->flags & CMD_MAKE_TRANS_IDLE) { - IWL_DEBUG_INFO(trans, "complete %s - mark trans as idle\n", - iwl_get_cmd_string(trans, cmd->hdr.cmd)); - set_bit(STATUS_TRANS_IDLE, &trans->status); - wake_up(&trans_pcie->d0i3_waitq); - } - - if (meta->flags & CMD_WAKE_UP_TRANS) { - IWL_DEBUG_INFO(trans, "complete %s - clear trans idle flag\n", - iwl_get_cmd_string(trans, cmd->hdr.cmd)); - clear_bit(STATUS_TRANS_IDLE, &trans->status); - wake_up(&trans_pcie->d0i3_waitq); - } - meta->flags = 0; spin_unlock_bh(&txq->lock); @@ -1927,16 +1905,6 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans, IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", iwl_get_cmd_string(trans, cmd->id)); - if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) { - ret = wait_event_timeout(trans_pcie->d0i3_waitq, - pm_runtime_active(&trans_pcie->pci_dev->dev), - msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT)); - if (!ret) { - IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n"); - return -ETIMEDOUT; - } - } - cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd); if (cmd_idx < 0) { ret = cmd_idx; @@ -2504,22 +2472,18 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, wait_write_ptr = ieee80211_has_morefrags(fc); /* start timer if queue currently empty */ - if (txq->read_ptr == txq->write_ptr) { - if (txq->wd_timeout) { - /* - * If the TXQ is active, then set the timer, if not, - * set the timer in remainder so that the timer will - * be armed with the right value when the station will - * wake up. 
- */ - if (!txq->frozen) - mod_timer(&txq->stuck_timer, - jiffies + txq->wd_timeout); - else - txq->frozen_expiry_remainder = txq->wd_timeout; - } - IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", txq->id); - iwl_trans_ref(trans); + if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) { + /* + * If the TXQ is active, then set the timer, if not, + * set the timer in remainder so that the timer will + * be armed with the right value when the station will + * wake up. + */ + if (!txq->frozen) + mod_timer(&txq->stuck_timer, + jiffies + txq->wd_timeout); + else + txq->frozen_expiry_remainder = txq->wd_timeout; } /* Tell device the write index *just past* this latest filled TFD */ diff --git a/drivers/net/wireless/intersil/hostap/hostap_download.c b/drivers/net/wireless/intersil/hostap/hostap_download.c index 4507614a7c5a..8722000b6c27 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_download.c +++ b/drivers/net/wireless/intersil/hostap/hostap_download.c @@ -407,10 +407,8 @@ static int prism2_enable_genesis(local_info_t *local, int hcr) hcr); return 0; } else { - printk(KERN_DEBUG "Readback test failed, HCR 0x%02x " - "write %02x %02x %02x %02x read %02x %02x %02x %02x\n", - hcr, initseq[0], initseq[1], initseq[2], initseq[3], - readbuf[0], readbuf[1], readbuf[2], readbuf[3]); + printk(KERN_DEBUG "Readback test failed, HCR 0x%02x write %4ph read %4ph\n", + hcr, initseq, readbuf); return 1; } } diff --git a/drivers/net/wireless/intersil/hostap/hostap_plx.c b/drivers/net/wireless/intersil/hostap/hostap_plx.c index 943070d39b1e..841cfc68ce84 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_plx.c +++ b/drivers/net/wireless/intersil/hostap/hostap_plx.c @@ -352,8 +352,7 @@ static int prism2_plx_check_cis(void __iomem *attr_mem, int attr_len, /* read CIS; it is in even offsets in the beginning of attr_mem */ for (i = 0; i < CIS_MAX_LEN; i++) cis[i] = readb(attr_mem + 2 * i); - printk(KERN_DEBUG "%s: CIS: %02x %02x %02x %02x %02x %02x ...\n", - dev_info, cis[0], cis[1], cis[2], cis[3], cis[4], cis[5]); + printk(KERN_DEBUG "%s: CIS: %6ph ...\n", dev_info, cis); /* set reasonable defaults for Prism2 cards just in case CIS parsing * fails */ diff --git a/drivers/net/wireless/intersil/hostap/hostap_proc.c b/drivers/net/wireless/intersil/hostap/hostap_proc.c index 703d74cea3c2..6151d8db5924 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_proc.c +++ b/drivers/net/wireless/intersil/hostap/hostap_proc.c @@ -234,7 +234,7 @@ static int prism2_io_debug_proc_read(char *page, char **start, off_t off, { local_info_t *local = (local_info_t *) data; int head = local->io_debug_head; - int start_bytes, left, copy, copied; + int start_bytes, left, copy; if (off + count > PRISM2_IO_DEBUG_SIZE * 4) { *eof = 1; @@ -243,7 +243,6 @@ static int prism2_io_debug_proc_read(char *page, char **start, off_t off, count = PRISM2_IO_DEBUG_SIZE * 4 - off; } - copied = 0; start_bytes = (PRISM2_IO_DEBUG_SIZE - head) * 4; left = count; diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 772e54f0696f..635956024e88 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -2216,7 +2216,8 @@ static int mac80211_hwsim_roc(struct ieee80211_hw *hw, return 0; } -static int mac80211_hwsim_croc(struct ieee80211_hw *hw) +static int mac80211_hwsim_croc(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) { struct mac80211_hwsim_data *hwsim = hw->priv; @@ -2496,116 +2497,211 @@ out_err: nlmsg_free(mcast_skb); } -static const struct 
ieee80211_sband_iftype_data he_capa_2ghz = { - /* TODO: should we support other types, e.g., P2P?*/ - .types_mask = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP), - .he_cap = { - .has_he = true, - .he_cap_elem = { - .mac_cap_info[0] = - IEEE80211_HE_MAC_CAP0_HTC_HE, - .mac_cap_info[1] = - IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | - IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, - .mac_cap_info[2] = - IEEE80211_HE_MAC_CAP2_BSR | - IEEE80211_HE_MAC_CAP2_MU_CASCADING | - IEEE80211_HE_MAC_CAP2_ACK_EN, - .mac_cap_info[3] = - IEEE80211_HE_MAC_CAP3_OMI_CONTROL | - IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2, - .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU, - .phy_cap_info[1] = - IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK | - IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | - IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD | - IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS, - .phy_cap_info[2] = - IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | - IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ | - IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ | - IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO | - IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO, - - /* Leave all the other PHY capability bytes unset, as - * DCM, beam forming, RU and PPE threshold information - * are not supported - */ +static const struct ieee80211_sband_iftype_data he_capa_2ghz[] = { + { + /* TODO: should we support other types, e.g., P2P?*/ + .types_mask = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_AP), + .he_cap = { + .has_he = true, + .he_cap_elem = { + .mac_cap_info[0] = + IEEE80211_HE_MAC_CAP0_HTC_HE, + .mac_cap_info[1] = + IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | + IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, + .mac_cap_info[2] = + IEEE80211_HE_MAC_CAP2_BSR | + IEEE80211_HE_MAC_CAP2_MU_CASCADING | + IEEE80211_HE_MAC_CAP2_ACK_EN, + .mac_cap_info[3] = + IEEE80211_HE_MAC_CAP3_OMI_CONTROL | + IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2, + .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU, + .phy_cap_info[1] = + IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK | + IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | + IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD | + IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS, + .phy_cap_info[2] = + IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | + IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ | + IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ | + IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO | + IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO, + + /* Leave all the other PHY capability bytes + * unset, as DCM, beam forming, RU and PPE + * threshold information are not supported + */ + }, + .he_mcs_nss_supp = { + .rx_mcs_80 = cpu_to_le16(0xfffa), + .tx_mcs_80 = cpu_to_le16(0xfffa), + .rx_mcs_160 = cpu_to_le16(0xffff), + .tx_mcs_160 = cpu_to_le16(0xffff), + .rx_mcs_80p80 = cpu_to_le16(0xffff), + .tx_mcs_80p80 = cpu_to_le16(0xffff), + }, }, - .he_mcs_nss_supp = { - .rx_mcs_80 = cpu_to_le16(0xfffa), - .tx_mcs_80 = cpu_to_le16(0xfffa), - .rx_mcs_160 = cpu_to_le16(0xffff), - .tx_mcs_160 = cpu_to_le16(0xffff), - .rx_mcs_80p80 = cpu_to_le16(0xffff), - .tx_mcs_80p80 = cpu_to_le16(0xffff), + }, +#ifdef CONFIG_MAC80211_MESH + { + /* TODO: should we support other types, e.g., IBSS?*/ + .types_mask = BIT(NL80211_IFTYPE_MESH_POINT), + .he_cap = { + .has_he = true, + .he_cap_elem = { + .mac_cap_info[0] = + IEEE80211_HE_MAC_CAP0_HTC_HE, + .mac_cap_info[1] = + IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, + .mac_cap_info[2] = + IEEE80211_HE_MAC_CAP2_ACK_EN, + .mac_cap_info[3] = + IEEE80211_HE_MAC_CAP3_OMI_CONTROL | + 
IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2, + .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU, + .phy_cap_info[1] = + IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK | + IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | + IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD | + IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS, + .phy_cap_info[2] = 0, + + /* Leave all the other PHY capability bytes + * unset, as DCM, beam forming, RU and PPE + * threshold information are not supported + */ + }, + .he_mcs_nss_supp = { + .rx_mcs_80 = cpu_to_le16(0xfffa), + .tx_mcs_80 = cpu_to_le16(0xfffa), + .rx_mcs_160 = cpu_to_le16(0xffff), + .tx_mcs_160 = cpu_to_le16(0xffff), + .rx_mcs_80p80 = cpu_to_le16(0xffff), + .tx_mcs_80p80 = cpu_to_le16(0xffff), + }, }, }, +#endif }; -static const struct ieee80211_sband_iftype_data he_capa_5ghz = { - /* TODO: should we support other types, e.g., P2P?*/ - .types_mask = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP), - .he_cap = { - .has_he = true, - .he_cap_elem = { - .mac_cap_info[0] = - IEEE80211_HE_MAC_CAP0_HTC_HE, - .mac_cap_info[1] = - IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | - IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, - .mac_cap_info[2] = - IEEE80211_HE_MAC_CAP2_BSR | - IEEE80211_HE_MAC_CAP2_MU_CASCADING | - IEEE80211_HE_MAC_CAP2_ACK_EN, - .mac_cap_info[3] = - IEEE80211_HE_MAC_CAP3_OMI_CONTROL | - IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2, - .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU, - .phy_cap_info[0] = - IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | - IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G | - IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G, - .phy_cap_info[1] = - IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK | - IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | - IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD | - IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS, - .phy_cap_info[2] = - IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | - IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ | - IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ | - IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO | - IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO, - - /* Leave all the other PHY capability bytes unset, as - * DCM, beam forming, RU and PPE threshold information - * are not supported - */ +static const struct ieee80211_sband_iftype_data he_capa_5ghz[] = { + { + /* TODO: should we support other types, e.g., P2P?*/ + .types_mask = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_AP), + .he_cap = { + .has_he = true, + .he_cap_elem = { + .mac_cap_info[0] = + IEEE80211_HE_MAC_CAP0_HTC_HE, + .mac_cap_info[1] = + IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | + IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, + .mac_cap_info[2] = + IEEE80211_HE_MAC_CAP2_BSR | + IEEE80211_HE_MAC_CAP2_MU_CASCADING | + IEEE80211_HE_MAC_CAP2_ACK_EN, + .mac_cap_info[3] = + IEEE80211_HE_MAC_CAP3_OMI_CONTROL | + IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2, + .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU, + .phy_cap_info[0] = + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G, + .phy_cap_info[1] = + IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK | + IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | + IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD | + IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS, + .phy_cap_info[2] = + IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | + IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ | + IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ | + 
IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO | + IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO, + + /* Leave all the other PHY capability bytes + * unset, as DCM, beam forming, RU and PPE + * threshold information are not supported + */ + }, + .he_mcs_nss_supp = { + .rx_mcs_80 = cpu_to_le16(0xfffa), + .tx_mcs_80 = cpu_to_le16(0xfffa), + .rx_mcs_160 = cpu_to_le16(0xfffa), + .tx_mcs_160 = cpu_to_le16(0xfffa), + .rx_mcs_80p80 = cpu_to_le16(0xfffa), + .tx_mcs_80p80 = cpu_to_le16(0xfffa), + }, }, - .he_mcs_nss_supp = { - .rx_mcs_80 = cpu_to_le16(0xfffa), - .tx_mcs_80 = cpu_to_le16(0xfffa), - .rx_mcs_160 = cpu_to_le16(0xfffa), - .tx_mcs_160 = cpu_to_le16(0xfffa), - .rx_mcs_80p80 = cpu_to_le16(0xfffa), - .tx_mcs_80p80 = cpu_to_le16(0xfffa), + }, +#ifdef CONFIG_MAC80211_MESH + { + /* TODO: should we support other types, e.g., IBSS?*/ + .types_mask = BIT(NL80211_IFTYPE_MESH_POINT), + .he_cap = { + .has_he = true, + .he_cap_elem = { + .mac_cap_info[0] = + IEEE80211_HE_MAC_CAP0_HTC_HE, + .mac_cap_info[1] = + IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, + .mac_cap_info[2] = + IEEE80211_HE_MAC_CAP2_ACK_EN, + .mac_cap_info[3] = + IEEE80211_HE_MAC_CAP3_OMI_CONTROL | + IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2, + .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU, + .phy_cap_info[0] = + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G, + .phy_cap_info[1] = + IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK | + IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | + IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD | + IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS, + .phy_cap_info[2] = 0, + + /* Leave all the other PHY capability bytes + * unset, as DCM, beam forming, RU and PPE + * threshold information are not supported + */ + }, + .he_mcs_nss_supp = { + .rx_mcs_80 = cpu_to_le16(0xfffa), + .tx_mcs_80 = cpu_to_le16(0xfffa), + .rx_mcs_160 = cpu_to_le16(0xfffa), + .tx_mcs_160 = cpu_to_le16(0xfffa), + .rx_mcs_80p80 = cpu_to_le16(0xfffa), + .tx_mcs_80p80 = cpu_to_le16(0xfffa), + }, }, }, +#endif }; -static void mac80211_hswim_he_capab(struct ieee80211_supported_band *sband) +static void mac80211_hwsim_he_capab(struct ieee80211_supported_band *sband) { - if (sband->band == NL80211_BAND_2GHZ) + u16 n_iftype_data; + + if (sband->band == NL80211_BAND_2GHZ) { + n_iftype_data = ARRAY_SIZE(he_capa_2ghz); sband->iftype_data = - (struct ieee80211_sband_iftype_data *)&he_capa_2ghz; - else if (sband->band == NL80211_BAND_5GHZ) + (struct ieee80211_sband_iftype_data *)he_capa_2ghz; + } else if (sband->band == NL80211_BAND_5GHZ) { + n_iftype_data = ARRAY_SIZE(he_capa_5ghz); sband->iftype_data = - (struct ieee80211_sband_iftype_data *)&he_capa_5ghz; - else + (struct ieee80211_sband_iftype_data *)he_capa_5ghz; + } else { return; + } - sband->n_iftype_data = 1; + sband->n_iftype_data = n_iftype_data; } #ifdef CONFIG_MAC80211_MESH @@ -2805,12 +2901,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, ieee80211_hw_set(hw, SIGNAL_DBM); ieee80211_hw_set(hw, SUPPORTS_PS); ieee80211_hw_set(hw, TDLS_WIDER_BW); - - /* We only have SW crypto and only implement the A-MPDU API - * (but don't really build A-MPDUs) so can have extended key - * support - */ - ieee80211_hw_set(hw, EXT_KEY_ID_NATIVE); if (rctbl) ieee80211_hw_set(hw, SUPPORTS_RC_TABLE); ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID); @@ -2897,7 +2987,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, sband->ht_cap.mcs.rx_mask[1] = 0xff; 
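/*
 * A minimal sketch of the registration pattern the he_capa tables
 * above convert to: one ieee80211_sband_iftype_data entry per
 * interface-type group, with the count derived via ARRAY_SIZE() so
 * n_iftype_data can never drift from the table.  All names are taken
 * from the hwsim code above; only the 5 GHz branch is elided.
 */
sband->iftype_data =
	(struct ieee80211_sband_iftype_data *)he_capa_2ghz;
sband->n_iftype_data = ARRAY_SIZE(he_capa_2ghz);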
sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; - mac80211_hswim_he_capab(sband); + mac80211_hwsim_he_capab(sband); hw->wiphy->bands[band] = sband; } @@ -3233,6 +3323,7 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2, { struct mac80211_hwsim_data *data2; struct ieee80211_rx_status rx_status; + struct ieee80211_hdr *hdr; const u8 *dst; int frame_data_len; void *frame_data; @@ -3299,6 +3390,12 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2, rx_status.rate_idx = nla_get_u32(info->attrs[HWSIM_ATTR_RX_RATE]); rx_status.signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]); + hdr = (void *)skb->data; + + if (ieee80211_is_beacon(hdr->frame_control) || + ieee80211_is_probe_resp(hdr->frame_control)) + rx_status.boottime_ns = ktime_get_boottime_ns(); + memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); data2->rx_pkts++; data2->rx_bytes += skb->len; diff --git a/drivers/net/wireless/marvell/libertas/dev.h b/drivers/net/wireless/marvell/libertas/dev.h index 469134930026..4b6e05a8e5d5 100644 --- a/drivers/net/wireless/marvell/libertas/dev.h +++ b/drivers/net/wireless/marvell/libertas/dev.h @@ -58,8 +58,6 @@ struct lbs_private { #ifdef CONFIG_LIBERTAS_MESH struct lbs_mesh_stats mstats; uint16_t mesh_tlv; - u8 mesh_ssid[IEEE80211_MAX_SSID_LEN + 1]; - u8 mesh_ssid_len; u8 mesh_channel; #endif diff --git a/drivers/net/wireless/marvell/libertas/if_spi.c b/drivers/net/wireless/marvell/libertas/if_spi.c index 27067e79e83f..d07fe82c557e 100644 --- a/drivers/net/wireless/marvell/libertas/if_spi.c +++ b/drivers/net/wireless/marvell/libertas/if_spi.c @@ -766,19 +766,15 @@ static int if_spi_c2h_data(struct if_spi_card *card) /* Read the data from the WLAN module into our skb... */ err = spu_read(card, IF_SPI_DATA_RDWRPORT_REG, data, ALIGN(len, 4)); - if (err) - goto free_skb; + if (err) { + dev_kfree_skb(skb); + goto out; + } /* pass the SKB to libertas */ err = lbs_process_rxed_packet(card->priv, skb); - if (err) - goto free_skb; - - /* success */ - goto out; + /* lbs_process_rxed_packet() consumes the skb */ -free_skb: - dev_kfree_skb(skb); out: if (err) netdev_err(priv->dev, "%s: err=%d\n", __func__, err); diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c index afac2481909b..20436a289d5c 100644 --- a/drivers/net/wireless/marvell/libertas/if_usb.c +++ b/drivers/net/wireless/marvell/libertas/if_usb.c @@ -50,7 +50,8 @@ static const struct lbs_fw_table fw_table[] = { { MODEL_8388, "libertas/usb8388_v5.bin", NULL }, { MODEL_8388, "libertas/usb8388.bin", NULL }, { MODEL_8388, "usb8388.bin", NULL }, - { MODEL_8682, "libertas/usb8682.bin", NULL } + { MODEL_8682, "libertas/usb8682.bin", NULL }, + { 0, NULL, NULL } }; static const struct usb_device_id if_usb_table[] = { diff --git a/drivers/net/wireless/marvell/libertas/main.c b/drivers/net/wireless/marvell/libertas/main.c index 5968852b65a7..2233b59cdf44 100644 --- a/drivers/net/wireless/marvell/libertas/main.c +++ b/drivers/net/wireless/marvell/libertas/main.c @@ -1046,7 +1046,7 @@ int lbs_rtap_supported(struct lbs_private *priv) int lbs_start_card(struct lbs_private *priv) { struct net_device *dev = priv->dev; - int ret = -1; + int ret; /* poke the firmware */ ret = lbs_setup_firmware(priv); diff --git a/drivers/net/wireless/marvell/libertas/mesh.c b/drivers/net/wireless/marvell/libertas/mesh.c index 2315fdff56c2..2747c957d18c 100644 --- a/drivers/net/wireless/marvell/libertas/mesh.c +++ b/drivers/net/wireless/marvell/libertas/mesh.c @@ -86,6 +86,7 
@@ static int lbs_mesh_config_send(struct lbs_private *priv, static int lbs_mesh_config(struct lbs_private *priv, uint16_t action, uint16_t chan) { + struct wireless_dev *mesh_wdev; struct cmd_ds_mesh_config cmd; struct mrvl_meshie *ie; @@ -105,10 +106,17 @@ static int lbs_mesh_config(struct lbs_private *priv, uint16_t action, ie->val.active_protocol_id = MARVELL_MESH_PROTO_ID_HWMP; ie->val.active_metric_id = MARVELL_MESH_METRIC_ID; ie->val.mesh_capability = MARVELL_MESH_CAPABILITY; - ie->val.mesh_id_len = priv->mesh_ssid_len; - memcpy(ie->val.mesh_id, priv->mesh_ssid, priv->mesh_ssid_len); + + if (priv->mesh_dev) { + mesh_wdev = priv->mesh_dev->ieee80211_ptr; + ie->val.mesh_id_len = mesh_wdev->mesh_id_up_len; + memcpy(ie->val.mesh_id, mesh_wdev->ssid, + mesh_wdev->mesh_id_up_len); + } + ie->len = sizeof(struct mrvl_meshie_val) - - IEEE80211_MAX_SSID_LEN + priv->mesh_ssid_len; + IEEE80211_MAX_SSID_LEN + ie->val.mesh_id_len; + cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie_val)); break; case CMD_ACT_MESH_CONFIG_STOP: @@ -117,8 +125,8 @@ static int lbs_mesh_config(struct lbs_private *priv, uint16_t action, return -1; } lbs_deb_cmd("mesh config action %d type %x channel %d SSID %*pE\n", - action, priv->mesh_tlv, chan, priv->mesh_ssid_len, - priv->mesh_ssid); + action, priv->mesh_tlv, chan, ie->val.mesh_id_len, + ie->val.mesh_id); return __lbs_mesh_config_send(priv, &cmd, action, priv->mesh_tlv); } @@ -863,12 +871,6 @@ int lbs_init_mesh(struct lbs_private *priv) /* Stop meshing until interface is brought up */ lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_STOP, 1); - if (priv->mesh_tlv) { - sprintf(priv->mesh_ssid, "mesh"); - priv->mesh_ssid_len = 4; - ret = 1; - } - return ret; } @@ -997,6 +999,13 @@ static int lbs_add_mesh(struct lbs_private *priv) mesh_wdev->iftype = NL80211_IFTYPE_MESH_POINT; mesh_wdev->wiphy = priv->wdev->wiphy; + + if (priv->mesh_tlv) { + sprintf(mesh_wdev->ssid, "mesh"); + mesh_wdev->mesh_id_up_len = 4; + ret = 1; + } + mesh_wdev->netdev = mesh_dev; mesh_dev->ml_priv = priv; diff --git a/drivers/net/wireless/marvell/libertas/mesh.h b/drivers/net/wireless/marvell/libertas/mesh.h index dfe22c91aade..1561018f226f 100644 --- a/drivers/net/wireless/marvell/libertas/mesh.h +++ b/drivers/net/wireless/marvell/libertas/mesh.h @@ -24,8 +24,7 @@ void lbs_remove_mesh(struct lbs_private *priv); static inline bool lbs_mesh_activated(struct lbs_private *priv) { - /* Mesh SSID is only programmed after successful init */ - return priv->mesh_ssid_len != 0; + return !!priv->mesh_tlv; } int lbs_mesh_set_channel(struct lbs_private *priv, u8 channel); diff --git a/drivers/net/wireless/marvell/libertas_tf/cmd.c b/drivers/net/wireless/marvell/libertas_tf/cmd.c index 1eacca0d079b..a0b4c9debc11 100644 --- a/drivers/net/wireless/marvell/libertas_tf/cmd.c +++ b/drivers/net/wireless/marvell/libertas_tf/cmd.c @@ -65,7 +65,7 @@ static void lbtf_geo_init(struct lbtf_private *priv) break; } - for (ch = priv->range.start; ch < priv->range.end; ch++) + for (ch = range->start; ch < range->end; ch++) priv->channels[CHAN_TO_IDX(ch)].flags = 0; } diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c index 6c0e52eb8794..1aa93e7e9835 100644 --- a/drivers/net/wireless/marvell/mwifiex/init.c +++ b/drivers/net/wireless/marvell/mwifiex/init.c @@ -59,7 +59,7 @@ static void wakeup_timer_fn(struct timer_list *t) adapter->hw_status = MWIFIEX_HW_STATUS_RESET; mwifiex_cancel_all_pending_cmd(adapter); - if (adapter->if_ops.card_reset && !adapter->hs_activated) + if 
(adapter->if_ops.card_reset) adapter->if_ops.card_reset(adapter); } diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index b54f73e3d508..eff06d59e9df 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -150,10 +150,8 @@ static bool mwifiex_pcie_ok_to_access_hw(struct mwifiex_adapter *adapter) static int mwifiex_pcie_suspend(struct device *dev) { struct mwifiex_adapter *adapter; - struct pcie_service_card *card; - struct pci_dev *pdev = to_pci_dev(dev); + struct pcie_service_card *card = dev_get_drvdata(dev); - card = pci_get_drvdata(pdev); /* Might still be loading firmware */ wait_for_completion(&card->fw_done); @@ -195,10 +193,8 @@ static int mwifiex_pcie_suspend(struct device *dev) static int mwifiex_pcie_resume(struct device *dev) { struct mwifiex_adapter *adapter; - struct pcie_service_card *card; - struct pci_dev *pdev = to_pci_dev(dev); + struct pcie_service_card *card = dev_get_drvdata(dev); - card = pci_get_drvdata(pdev); if (!card->adapter) { dev_err(dev, "adapter structure is not valid\n"); diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c index 21dda385f6c6..593c594982cb 100644 --- a/drivers/net/wireless/marvell/mwifiex/scan.c +++ b/drivers/net/wireless/marvell/mwifiex/scan.c @@ -1244,7 +1244,7 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter, mwifiex_dbg(adapter, ERROR, "err: InterpretIE: in processing\t" "IE, bytes left < IE length\n"); - return -1; + return -EINVAL; } switch (element_id) { case WLAN_EID_SSID: diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c index 18e654dc34c6..09313047beed 100644 --- a/drivers/net/wireless/marvell/mwifiex/tdls.c +++ b/drivers/net/wireless/marvell/mwifiex/tdls.c @@ -731,7 +731,6 @@ mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv, u16 status_code, struct sk_buff *skb) { struct ieee80211_mgmt *mgmt; - u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; int ret; u16 capab; struct ieee80211_ht_cap *ht_cap; @@ -765,7 +764,7 @@ mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv, memmove(pos + ETH_ALEN, &mgmt->u.action.category, sizeof(mgmt->u.action.u.tdls_discover_resp)); /* init address 4 */ - memcpy(pos, bc_addr, ETH_ALEN); + eth_broadcast_addr(pos); ret = mwifiex_tdls_append_rates_ie(priv, skb); if (ret) { diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c index 27e3ff039c48..8f3d36a15e17 100644 --- a/drivers/net/wireless/mediatek/mt76/agg-rx.c +++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2018 Felix Fietkau <nbd@nbd.name> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include "mt76.h" @@ -34,8 +23,9 @@ mt76_aggr_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames, int idx) } static void -mt76_rx_aggr_release_frames(struct mt76_rx_tid *tid, struct sk_buff_head *frames, - u16 head) +mt76_rx_aggr_release_frames(struct mt76_rx_tid *tid, + struct sk_buff_head *frames, + u16 head) { int idx; @@ -74,15 +64,14 @@ mt76_rx_aggr_check_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames) for (idx = (tid->head + 1) % tid->size; idx != start && nframes; idx = (idx + 1) % tid->size) { - skb = tid->reorder_buf[idx]; if (!skb) continue; nframes--; - status = (struct mt76_rx_status *) skb->cb; - if (!time_after(jiffies, status->reorder_time + - REORDER_TIMEOUT)) + status = (struct mt76_rx_status *)skb->cb; + if (!time_after(jiffies, + status->reorder_time + REORDER_TIMEOUT)) continue; mt76_rx_aggr_release_frames(tid, frames, status->seqno); @@ -122,8 +111,8 @@ mt76_rx_aggr_reorder_work(struct work_struct *work) static void mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames) { - struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb; - struct ieee80211_bar *bar = (struct ieee80211_bar *) skb->data; + struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; + struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data; struct mt76_wcid *wcid = status->wcid; struct mt76_rx_tid *tid; u16 seqno; @@ -148,8 +137,8 @@ mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames) void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames) { - struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb; - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct mt76_wcid *wcid = status->wcid; struct ieee80211_sta *sta; struct mt76_rx_tid *tid; @@ -233,7 +222,8 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames) tid->nframes++; mt76_rx_aggr_release_head(tid, frames); - ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work, REORDER_TIMEOUT); + ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work, + REORDER_TIMEOUT); out: spin_unlock_bh(&tid->lock); diff --git a/drivers/net/wireless/mediatek/mt76/debugfs.c b/drivers/net/wireless/mediatek/mt76/debugfs.c index c6a9fe2aef9d..d95b73fd0d2b 100644 --- a/drivers/net/wireless/mediatek/mt76/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/debugfs.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include "mt76.h" diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index d8f61e540bfd..c747eb24581c 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/dma-mapping.h> @@ -442,6 +431,12 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid) mt76_dma_rx_cleanup(dev, q); mt76_dma_sync_idx(dev, q); mt76_dma_rx_fill(dev, q); + + if (!q->rx_head) + return; + + dev_kfree_skb(q->rx_head); + q->rx_head = NULL; } static void @@ -504,7 +499,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget) skb_reserve(skb, q->buf_offset); if (q == &dev->q_rx[MT_RXQ_MCU]) { - u32 *rxfce = (u32 *) skb->cb; + u32 *rxfce = (u32 *)skb->cb; *rxfce = info; } diff --git a/drivers/net/wireless/mediatek/mt76/dma.h b/drivers/net/wireless/mediatek/mt76/dma.h index 03dd2bafa4e8..e7c27697ef04 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.h +++ b/drivers/net/wireless/mediatek/mt76/dma.h @@ -1,17 +1,6 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef __MT76_DMA_H #define __MT76_DMA_H diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c index b7a49ae6b327..804224e81103 100644 --- a/drivers/net/wireless/mediatek/mt76/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/eeprom.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/of.h> #include <linux/of_net.h> diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index ec9efb79985f..1a2c143b34d0 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/of.h> #include "mt76.h" @@ -294,6 +283,8 @@ mt76_alloc_device(struct device *pdev, unsigned int size, init_waitqueue_head(&dev->tx_wait); skb_queue_head_init(&dev->status_list); + tasklet_init(&dev->tx_tasklet, mt76_tx_tasklet, (unsigned long)dev); + return dev; } EXPORT_SYMBOL_GPL(mt76_alloc_device); @@ -415,11 +406,6 @@ void mt76_set_channel(struct mt76_dev *dev) bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL; int timeout = HZ / 5; - if (offchannel) - set_bit(MT76_OFFCHANNEL, &dev->state); - else - clear_bit(MT76_OFFCHANNEL, &dev->state); - wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(dev), timeout); if (dev->drv->update_survey) @@ -487,9 +473,10 @@ void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid, if (!key) return; - if (key->cipher == WLAN_CIPHER_SUITE_CCMP) - wcid->rx_check_pn = true; + if (key->cipher != WLAN_CIPHER_SUITE_CCMP) + return; + wcid->rx_check_pn = true; for (i = 0; i < IEEE80211_NUM_TIDS; i++) { ieee80211_get_key_rx_seq(key, i, &seq); memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn)); @@ -497,12 +484,12 @@ void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid, } EXPORT_SYMBOL(mt76_wcid_key_setup); -struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb) +static struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb) { struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct mt76_rx_status mstat; - mstat = *((struct mt76_rx_status *) skb->cb); + mstat = *((struct mt76_rx_status *)skb->cb); memset(status, 0, sizeof(*status)); status->flag = mstat.flag; @@ -517,17 +504,18 @@ struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb) status->chains = mstat.chains; BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb)); - BUILD_BUG_ON(sizeof(status->chain_signal) != sizeof(mstat.chain_signal)); - memcpy(status->chain_signal, mstat.chain_signal, sizeof(mstat.chain_signal)); + BUILD_BUG_ON(sizeof(status->chain_signal) != + sizeof(mstat.chain_signal)); + memcpy(status->chain_signal, mstat.chain_signal, + 
sizeof(mstat.chain_signal)); return wcid_to_sta(mstat.wcid); } -EXPORT_SYMBOL(mt76_rx_convert); static int mt76_check_ccmp_pn(struct sk_buff *skb) { - struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb; + struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; struct mt76_wcid *wcid = status->wcid; struct ieee80211_hdr *hdr; int ret; @@ -543,7 +531,7 @@ mt76_check_ccmp_pn(struct sk_buff *skb) * Validate the first fragment both here and in mac80211 * All further fragments will be validated by mac80211 only. */ - hdr = (struct ieee80211_hdr *) skb->data; + hdr = (struct ieee80211_hdr *)skb->data; if (ieee80211_is_frag(hdr) && !ieee80211_is_first_frag(hdr->frame_control)) return 0; @@ -566,8 +554,8 @@ mt76_check_ccmp_pn(struct sk_buff *skb) static void mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb) { - struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb; - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_sta *sta; struct mt76_wcid *wcid = status->wcid; bool ps; @@ -576,13 +564,13 @@ mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb) if (ieee80211_is_pspoll(hdr->frame_control) && !wcid) { sta = ieee80211_find_sta_by_ifaddr(dev->hw, hdr->addr2, NULL); if (sta) - wcid = status->wcid = (struct mt76_wcid *) sta->drv_priv; + wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv; } if (!wcid || !wcid->sta) return; - sta = container_of((void *) wcid, struct ieee80211_sta, drv_priv); + sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv); if (status->signal <= 0) ewma_signal_add(&wcid->rssi, -status->signal); @@ -598,8 +586,8 @@ mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb) } if (ieee80211_has_morefrags(hdr->frame_control) || - !(ieee80211_is_mgmt(hdr->frame_control) || - ieee80211_is_data(hdr->frame_control))) + !(ieee80211_is_mgmt(hdr->frame_control) || + ieee80211_is_data(hdr->frame_control))) return; ps = ieee80211_has_pm(hdr->frame_control); @@ -628,7 +616,7 @@ mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb) if (!sta->txq[i]) continue; - mtxq = (struct mt76_txq *) sta->txq[i]->drv_priv; + mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv; if (!skb_queue_empty(&mtxq->retry_q)) ieee80211_schedule_txq(dev->hw, sta->txq[i]); } @@ -714,6 +702,9 @@ void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif, rcu_assign_pointer(dev->wcid[idx], NULL); synchronize_rcu(); + for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++) + mt76_rx_aggr_stop(dev, wcid, i); + if (dev->drv->sta_remove) dev->drv->sta_remove(dev, vif, sta); @@ -750,7 +741,7 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, dev->drv->sta_assoc(dev, vif, sta); if (old_state == IEEE80211_STA_NONE && - new_state == IEEE80211_STA_NOTEXIST) + new_state == IEEE80211_STA_NOTEXIST) mt76_sta_remove(dev, vif, sta); return 0; @@ -790,7 +781,7 @@ static void __mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif) { if (vif->csa_active && ieee80211_csa_is_complete(vif)) - ieee80211_csa_finish(vif); + ieee80211_csa_finish(vif); } void mt76_csa_finish(struct mt76_dev *dev) @@ -878,3 +869,20 @@ int mt76_get_rate(struct mt76_dev *dev, return 0; } EXPORT_SYMBOL_GPL(mt76_get_rate); + +void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + const u8 *mac) +{ + struct mt76_dev *dev = hw->priv; + + set_bit(MT76_SCANNING, &dev->state); +} 
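/*
 * mt76_sw_scan() and mt76_sw_scan_complete() only toggle the
 * MT76_SCANNING state bit, and are exported below so per-chip drivers
 * can plug them straight into their mac80211 callbacks.  A sketch of
 * the assumed driver-side wiring; the ops variable is illustrative
 * and not part of this patch:
 */
static const struct ieee80211_ops mt76_example_ops = {
	.sw_scan_start = mt76_sw_scan,
	.sw_scan_complete = mt76_sw_scan_complete,
};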
+EXPORT_SYMBOL_GPL(mt76_sw_scan); + +void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct mt76_dev *dev = hw->priv; + + clear_bit(MT76_SCANNING, &dev->state); +} +EXPORT_SYMBOL_GPL(mt76_sw_scan_complete); diff --git a/drivers/net/wireless/mediatek/mt76/mcu.c b/drivers/net/wireless/mediatek/mt76/mcu.c index dbb57b593a87..2a976688804d 100644 --- a/drivers/net/wireless/mediatek/mt76/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mcu.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2019 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "mt76.h" diff --git a/drivers/net/wireless/mediatek/mt76/mmio.c b/drivers/net/wireless/mediatek/mt76/mmio.c index 38368d19aa6f..1c974df1fe25 100644 --- a/drivers/net/wireless/mediatek/mt76/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mmio.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include "mt76.h" @@ -40,10 +29,16 @@ static u32 mt76_mmio_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val) return val; } -static void mt76_mmio_copy(struct mt76_dev *dev, u32 offset, const void *data, - int len) +static void mt76_mmio_write_copy(struct mt76_dev *dev, u32 offset, + const void *data, int len) { - __iowrite32_copy(dev->mmio.regs + offset, data, len >> 2); + __iowrite32_copy(dev->mmio.regs + offset, data, DIV_ROUND_UP(len, 4)); +} + +static void mt76_mmio_read_copy(struct mt76_dev *dev, u32 offset, + void *data, int len) +{ + __ioread32_copy(data, dev->mmio.regs + offset, DIV_ROUND_UP(len, 4)); } static int mt76_mmio_wr_rp(struct mt76_dev *dev, u32 base, @@ -89,7 +84,8 @@ void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs) .rr = mt76_mmio_rr, .rmw = mt76_mmio_rmw, .wr = mt76_mmio_wr, - .copy = mt76_mmio_copy, + .write_copy = mt76_mmio_write_copy, + .read_copy = mt76_mmio_read_copy, .wr_rp = mt76_mmio_wr_rp, .rd_rp = mt76_mmio_rd_rp, .type = MT76_BUS_MMIO, diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index 989386ecb5e4..570c159515a0 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -1,17 +1,6 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef __MT76_H @@ -49,8 +38,10 @@ struct mt76_bus_ops { u32 (*rr)(struct mt76_dev *dev, u32 offset); void (*wr)(struct mt76_dev *dev, u32 offset, u32 val); u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val); - void (*copy)(struct mt76_dev *dev, u32 offset, const void *data, - int len); + void (*write_copy)(struct mt76_dev *dev, u32 offset, const void *data, + int len); + void (*read_copy)(struct mt76_dev *dev, u32 offset, void *data, + int len); int (*wr_rp)(struct mt76_dev *dev, u32 base, const struct mt76_reg_pair *rp, int len); int (*rd_rp)(struct mt76_dev *dev, u32 base, @@ -213,6 +204,7 @@ struct mt76_wcid { u8 rx_check_pn; u8 rx_key_pn[IEEE80211_NUM_TIDS][6]; + u16 cipher; u32 tx_info; bool sw_iv; @@ -280,7 +272,6 @@ enum { MT76_STATE_MCU_RUNNING, MT76_SCANNING, MT76_RESET, - MT76_OFFCHANNEL, MT76_REMOVED, MT76_READING_STATS, }; @@ -390,7 +381,10 @@ enum mt76u_out_ep { #define MCU_RESP_URB_SIZE 1024 struct mt76_usb { struct mutex usb_ctrl_mtx; - u8 data[32]; + union { + u8 data[32]; + __le32 reg_val; + }; struct tasklet_struct rx_tasklet; struct delayed_work stat_work; @@ -496,6 +490,8 @@ struct mt76_dev { u8 csa_complete; + ktime_t survey_time; + u32 rxfilter; union { @@ -538,7 +534,8 @@ struct mt76_rx_status { #define __mt76_rr(dev, ...) (dev)->bus->rr((dev), __VA_ARGS__) #define __mt76_wr(dev, ...) (dev)->bus->wr((dev), __VA_ARGS__) #define __mt76_rmw(dev, ...) 
(dev)->bus->rmw((dev), __VA_ARGS__) -#define __mt76_wr_copy(dev, ...) (dev)->bus->copy((dev), __VA_ARGS__) +#define __mt76_wr_copy(dev, ...) (dev)->bus->write_copy((dev), __VA_ARGS__) +#define __mt76_rr_copy(dev, ...) (dev)->bus->read_copy((dev), __VA_ARGS__) #define __mt76_set(dev, offset, val) __mt76_rmw(dev, offset, 0, val) #define __mt76_clear(dev, offset, val) __mt76_rmw(dev, offset, val, 0) @@ -546,7 +543,8 @@ struct mt76_rx_status { #define mt76_rr(dev, ...) (dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__) #define mt76_wr(dev, ...) (dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__) #define mt76_rmw(dev, ...) (dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__) -#define mt76_wr_copy(dev, ...) (dev)->mt76.bus->copy(&((dev)->mt76), __VA_ARGS__) +#define mt76_wr_copy(dev, ...) (dev)->mt76.bus->write_copy(&((dev)->mt76), __VA_ARGS__) +#define mt76_rr_copy(dev, ...) (dev)->mt76.bus->read_copy(&((dev)->mt76), __VA_ARGS__) #define mt76_wr_rp(dev, ...) (dev)->mt76.bus->wr_rp(&((dev)->mt76), __VA_ARGS__) #define mt76_rd_rp(dev, ...) (dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__) @@ -675,7 +673,7 @@ static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb) { BUILD_BUG_ON(sizeof(struct mt76_tx_cb) > sizeof(IEEE80211_SKB_CB(skb)->status.status_driver_data)); - return ((void *) IEEE80211_SKB_CB(skb)->status.status_driver_data); + return ((void *)IEEE80211_SKB_CB(skb)->status.status_driver_data); } static inline void mt76_insert_hdr_pad(struct sk_buff *skb) @@ -710,6 +708,7 @@ void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta, bool send_bar); void mt76_txq_schedule(struct mt76_dev *dev, enum mt76_txq_id qid); void mt76_txq_schedule_all(struct mt76_dev *dev); +void mt76_tx_tasklet(unsigned long data); void mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta, u16 tids, int nframes, @@ -750,8 +749,6 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); -struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb); - int mt76_get_min_avg_rssi(struct mt76_dev *dev); int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, @@ -765,6 +762,10 @@ void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id); int mt76_get_rate(struct mt76_dev *dev, struct ieee80211_supported_band *sband, int idx, bool cck); +void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + const u8 *mac); +void mt76_sw_scan_complete(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); /* internal */ void mt76_tx_free(struct mt76_dev *dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7603/Kconfig index e108bf881ca8..6a0080f1d91c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/Kconfig +++ b/drivers/net/wireless/mediatek/mt76/mt7603/Kconfig @@ -5,6 +5,8 @@ config MT7603E depends on MAC80211 depends on PCI help - This adds support for MT7603E wireless PCIe devices and the WLAN core on - MT7628/MT7688 SoC devices + This adds support for MT7603E wireless PCIe devices and the WLAN core + on MT7628/MT7688 SoC devices. This family supports IEEE 802.11n 2x2 + to 300Mbps PHY rate. + To compile this driver as a module, choose M here.
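The write_copy/read_copy hooks above hand __iowrite32_copy()/__ioread32_copy() a count of 32-bit words, and the switch from len >> 2 to DIV_ROUND_UP(len, 4) makes that count round up instead of truncating, so a byte length that is not a multiple of four no longer loses its tail bytes. A minimal stand-alone sketch of the arithmetic (plain user-space C, not driver code; DIV_ROUND_UP is spelled out the way include/linux/kernel.h defines it, and the sample lengths are illustrative):

#include <stdio.h>

/* same shape as the kernel's DIV_ROUND_UP() */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	const int lens[] = { 4, 6, 30, 32 };

	for (size_t i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
		int len = lens[i];

		/* old: len >> 2 truncates; new: DIV_ROUND_UP rounds up */
		printf("len=%2d bytes -> old %d words, new %d words\n",
		       len, len >> 2, DIV_ROUND_UP(len, 4));
	}

	return 0;
}

For len = 30 the old expression copied only 7 words (28 bytes) and silently dropped the last two bytes, while the new one copies 8; rounding up can touch up to three bytes past the requested length, which is harmless for register windows sized in whole words.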
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c index 58e68fbdbf75..7a41cdf1c4ae 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ISC */ +// SPDX-License-Identifier: ISC #include "mt7603.h" diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/core.c b/drivers/net/wireless/mediatek/mt76/mt7603/core.c index e7ee58e3379c..e5af4f3389cc 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/core.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/core.c @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ISC */ +// SPDX-License-Identifier: ISC #include "mt7603.h" diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c index a1bc3103cbe9..5942fe76c6e9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ISC */ +// SPDX-License-Identifier: ISC #include "mt7603.h" diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c index 58dc511f93c5..24d82a20d046 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ISC */ +// SPDX-License-Identifier: ISC #include "mt7603.h" #include "mac.h" @@ -63,7 +63,7 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb) txd[0] = cpu_to_le32(val); sta = container_of(priv, struct ieee80211_sta, drv_priv); - hdr = (struct ieee80211_hdr *) &skb->data[MT_TXD_SIZE]; + hdr = (struct ieee80211_hdr *)&skb->data[MT_TXD_SIZE]; tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; ieee80211_sta_set_buffered(sta, tid, true); @@ -135,14 +135,6 @@ mt7603_init_rx_queue(struct mt7603_dev *dev, struct mt76_queue *q, return 0; } -static void -mt7603_tx_tasklet(unsigned long data) -{ - struct mt7603_dev *dev = (struct mt7603_dev *)data; - - mt76_txq_schedule_all(&dev->mt76); -} - static int mt7603_poll_tx(struct napi_struct *napi, int budget) { struct mt7603_dev *dev; @@ -178,11 +170,6 @@ int mt7603_dma_init(struct mt7603_dev *dev) mt76_dma_attach(&dev->mt76); - init_waitqueue_head(&dev->mt76.mmio.mcu.wait); - skb_queue_head_init(&dev->mt76.mmio.mcu.res_q); - - tasklet_init(&dev->mt76.tx_tasklet, mt7603_tx_tasklet, (unsigned long)dev); - mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_EN | MT_WPDMA_GLO_CFG_RX_DMA_EN | diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c index 8c120e4461b0..2b6a4d8a8dc7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ISC */ +// SPDX-License-Identifier: ISC #include "mt7603.h" #include "eeprom.h" diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c index 38834c7d0891..ad2ccdbe7258 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ISC */ +// SPDX-License-Identifier: ISC #include <linux/etherdevice.h> #include "mt7603.h" @@ -248,8 +248,7 @@ mt7603_mac_init(struct mt7603_dev *dev) FIELD_PREP(MT_AGG_ARxCR_LIMIT(7), MT7603_RATE_RETRY - 1)); mt76_wr(dev, MT_AGG_ARCR, - 
(MT_AGG_ARCR_INIT_RATE1 | - FIELD_PREP(MT_AGG_ARCR_RTS_RATE_THR, 2) | + (FIELD_PREP(MT_AGG_ARCR_RTS_RATE_THR, 2) | MT_AGG_ARCR_RATE_DOWN_RATIO_EN | FIELD_PREP(MT_AGG_ARCR_RATE_DOWN_RATIO, 1) | FIELD_PREP(MT_AGG_ARCR_RATE_UP_EXTRA_TH, 4))); @@ -507,7 +506,6 @@ mt7603_init_txpower(struct mt7603_dev *dev, } } - int mt7603_register_device(struct mt7603_dev *dev) { struct mt76_bus_ops *bus_ops; diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c index 40db1cbc832d..c328192307c4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ISC */ +// SPDX-License-Identifier: ISC #include <linux/etherdevice.h> #include <linux/timekeeping.h> @@ -639,9 +639,11 @@ void mt7603_wtbl_set_rates(struct mt7603_dev *dev, struct mt7603_sta *sta, IEEE80211_TX_RC_40_MHZ_WIDTH) continue; + if (!rates[i].idx) + continue; + rates[i].idx--; } - } w9 &= MT_WTBL2_W9_SHORT_GI_20 | MT_WTBL2_W9_SHORT_GI_40 | @@ -1014,8 +1016,9 @@ mt7603_fill_txs(struct mt7603_dev *dev, struct mt7603_sta *sta, sta->rate_probe = false; } spin_unlock_bh(&dev->mt76.lock); - } else + } else { info->status.rates[0] = rs->rates[first_idx / 2]; + } info->status.rates[0].count = 0; for (i = 0, idx = first_idx; count && idx <= last_idx; idx++) { @@ -1470,8 +1473,9 @@ void mt7603_update_channel(struct mt76_dev *mdev) spin_lock_bh(&dev->mt76.cc_lock); cur_time = ktime_get_boottime(); state->cc_busy += busy; - state->cc_active += ktime_to_us(ktime_sub(cur_time, dev->survey_time)); - dev->survey_time = cur_time; + state->cc_active += ktime_to_us(ktime_sub(cur_time, + dev->mt76.survey_time)); + dev->mt76.survey_time = cur_time; spin_unlock_bh(&dev->mt76.cc_lock); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c index e5d4cb6381a8..25d5b1608bc9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ISC */ +// SPDX-License-Identifier: ISC #include <linux/etherdevice.h> #include <linux/platform_device.h> @@ -14,7 +14,7 @@ mt7603_start(struct ieee80211_hw *hw) struct mt7603_dev *dev = hw->priv; mt7603_mac_start(dev); - dev->survey_time = ktime_get_boottime(); + dev->mt76.survey_time = ktime_get_boottime(); set_bit(MT76_STATE_RUNNING, &dev->mt76.state); mt7603_mac_work(&dev->mt76.mac_work.work); @@ -173,7 +173,7 @@ mt7603_set_channel(struct mt7603_dev *dev, struct cfg80211_chan_def *def) mt76_txq_schedule_all(&dev->mt76); ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work, - MT7603_WATCHDOG_TIME); + msecs_to_jiffies(MT7603_WATCHDOG_TIME)); /* reset channel stats */ mt76_clear(dev, MT_MIB_CTL, MT_MIB_CTL_READ_CLR_DIS); @@ -182,7 +182,7 @@ mt7603_set_channel(struct mt7603_dev *dev, struct cfg80211_chan_def *def) mt76_rr(dev, MT_MIB_STAT_PSCCA); mt7603_cca_stats_reset(dev); - dev->survey_time = ktime_get_boottime(); + dev->mt76.survey_time = ktime_get_boottime(); mt7603_init_edcca(dev); @@ -399,7 +399,7 @@ mt7603_ps_set_more_data(struct sk_buff *skb) { struct ieee80211_hdr *hdr; - hdr = (struct ieee80211_hdr *) &skb->data[MT_TXD_SIZE]; + hdr = (struct ieee80211_hdr *)&skb->data[MT_TXD_SIZE]; hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); } @@ -537,23 +537,6 @@ mt7603_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue, } static void -mt7603_sw_scan(struct ieee80211_hw *hw, struct 
ieee80211_vif *vif, - const u8 *mac) -{ - struct mt7603_dev *dev = hw->priv; - - set_bit(MT76_SCANNING, &dev->mt76.state); -} - -static void -mt7603_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif) -{ - struct mt7603_dev *dev = hw->priv; - - clear_bit(MT76_SCANNING, &dev->mt76.state); -} - -static void mt7603_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, bool drop) { @@ -569,7 +552,7 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_txq *txq = sta->txq[params->tid]; struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv; u16 tid = params->tid; - u16 *ssn = &params->ssn; + u16 ssn = params->ssn; u8 ba_size = params->buf_size; struct mt76_txq *mtxq; @@ -580,7 +563,7 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, switch (action) { case IEEE80211_AMPDU_RX_START: - mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, *ssn, + mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn, params->buf_size); mt7603_mac_rx_ba_reset(dev, sta->addr, tid); break; @@ -595,11 +578,10 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, case IEEE80211_AMPDU_TX_STOP_FLUSH: case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: mtxq->aggr = false; - ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn); mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, -1); break; case IEEE80211_AMPDU_TX_START: - mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(*ssn); + mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(ssn); ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; case IEEE80211_AMPDU_TX_STOP_CONT: @@ -647,7 +629,8 @@ mt7603_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class) mt7603_mac_set_timing(dev); } -static void mt7603_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, +static void mt7603_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, struct sk_buff *skb) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); @@ -682,8 +665,8 @@ const struct ieee80211_ops mt7603_ops = { .sta_state = mt76_sta_state, .set_key = mt7603_set_key, .conf_tx = mt7603_conf_tx, - .sw_scan_start = mt7603_sw_scan, - .sw_scan_complete = mt7603_sw_scan_complete, + .sw_scan_start = mt76_sw_scan, + .sw_scan_complete = mt76_sw_scan_complete, .flush = mt7603_flush, .ampdu_action = mt7603_ampdu_action, .get_txpower = mt76_get_txpower, diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c index 343ddc5543c2..02b2bd60d04d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ISC */ +// SPDX-License-Identifier: ISC #include <linux/firmware.h> #include "mt7603.h" diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h index 2c6f7b4cf0e9..257300fec4f8 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h @@ -116,7 +116,6 @@ struct mt7603_dev { s8 tx_power_limit; - ktime_t survey_time; ktime_t ed_time; struct mt76_queue q_rx; diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/pci.c b/drivers/net/wireless/mediatek/mt76/mt7603/pci.c index 4acdbf5d8968..2f2f337e2201 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/pci.c @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ISC */ +// SPDX-License-Identifier: ISC #include <linux/kernel.h> #include <linux/module.h> diff
--git a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c index b920be1f5718..68efb300c0d8 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ISC */ +// SPDX-License-Identifier: ISC #include <linux/kernel.h> #include <linux/module.h> @@ -9,7 +9,6 @@ static int mt76_wmac_probe(struct platform_device *pdev) { - struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); struct mt7603_dev *dev; void __iomem *mem_base; struct mt76_dev *mdev; @@ -17,12 +16,10 @@ mt76_wmac_probe(struct platform_device *pdev) int ret; irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(&pdev->dev, "Failed to get device IRQ\n"); + if (irq < 0) return irq; - } - mem_base = devm_ioremap_resource(&pdev->dev, res); + mem_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(mem_base)) { dev_err(&pdev->dev, "Failed to get memory resource\n"); return PTR_ERR(mem_base); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig index 2ed47b309b6e..4cabba9aa2ea 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig +++ b/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig @@ -5,4 +5,9 @@ config MT7615E depends on MAC80211 depends on PCI help - This adds support for MT7615-based wireless PCIe devices. + This adds support for MT7615-based wireless PCIe devices, + which support concurrent dual-band operation at both 5GHz + and 2.4GHz, IEEE 802.11ac 4x4:4SS 1733Mbps PHY rate, wave2 + MU-MIMO up to 4 users/group and 160MHz channels. + + To compile this driver as a module, choose M here. diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/Makefile b/drivers/net/wireless/mediatek/mt76/mt7615/Makefile index 6397552f6ee3..5aaac69849d6 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/Makefile +++ b/drivers/net/wireless/mediatek/mt76/mt7615/Makefile @@ -2,4 +2,5 @@ obj-$(CONFIG_MT7615E) += mt7615e.o -mt7615e-y := pci.o init.o dma.o eeprom.o main.o mcu.o mac.o +mt7615e-y := pci.o init.o dma.o eeprom.o main.o mcu.o mac.o \ + debugfs.o diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c new file mode 100644 index 000000000000..2428a4659a1c --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c @@ -0,0 +1,91 @@ +// SPDX-License-Identifier: ISC + +#include "mt7615.h" + +static int +mt7615_radar_pattern_set(void *data, u64 val) +{ + struct mt7615_dev *dev = data; + + return mt7615_mcu_rdd_send_pattern(dev); +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_radar_pattern, NULL, + mt7615_radar_pattern_set, "%lld\n"); + +static int +mt7615_scs_set(void *data, u64 val) +{ + struct mt7615_dev *dev = data; + + mt7615_mac_set_scs(dev, val); + + return 0; +} + +static int +mt7615_scs_get(void *data, u64 *val) +{ + struct mt7615_dev *dev = data; + + *val = dev->scs_en; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_scs, mt7615_scs_get, + mt7615_scs_set, "%lld\n"); + +static int +mt7615_radio_read(struct seq_file *s, void *data) +{ + struct mt7615_dev *dev = dev_get_drvdata(s->private); + + seq_printf(s, "Sensitivity: ofdm=%d cck=%d\n", + dev->ofdm_sensitivity, dev->cck_sensitivity); + seq_printf(s, "False CCA: ofdm=%d cck=%d\n", + dev->false_cca_ofdm, dev->false_cca_cck); + + return 0; +} + +static int mt7615_read_temperature(struct seq_file *s, void *data) +{ + struct mt7615_dev *dev = dev_get_drvdata(s->private); + int temp; + 
+ /* cpu */ + temp = mt7615_mcu_get_temperature(dev, 0); + seq_printf(s, "Temperature: %d\n", temp); + + return 0; +} + +int mt7615_init_debugfs(struct mt7615_dev *dev) +{ + struct dentry *dir; + + dir = mt76_register_debugfs(&dev->mt76); + if (!dir) + return -ENOMEM; + + debugfs_create_file("scs", 0600, dir, dev, &fops_scs); + debugfs_create_devm_seqfile(dev->mt76.dev, "radio", dir, + mt7615_radio_read); + debugfs_create_u32("dfs_hw_pattern", 0400, dir, &dev->hw_pattern); + /* test pattern knobs */ + debugfs_create_u8("pattern_len", 0600, dir, + &dev->radar_pattern.n_pulses); + debugfs_create_u32("pulse_period", 0600, dir, + &dev->radar_pattern.period); + debugfs_create_u16("pulse_width", 0600, dir, + &dev->radar_pattern.width); + debugfs_create_u16("pulse_power", 0600, dir, + &dev->radar_pattern.power); + debugfs_create_file("radar_trigger", 0200, dir, dev, + &fops_radar_pattern); + debugfs_create_devm_seqfile(dev->mt76.dev, "temperature", dir, + mt7615_read_temperature); + + return 0; +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c index 6a70273d4a69..fe532cecbbdd 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c @@ -76,7 +76,7 @@ void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, mt7615_mac_tx_free(dev, skb); break; case PKT_TYPE_RX_EVENT: - mt76_mcu_rx_event(&dev->mt76, skb); + mt7615_mcu_rx_event(dev, skb); break; case PKT_TYPE_NORMAL: if (!mt7615_mac_fill_rx(dev, skb)) { @@ -90,13 +90,6 @@ void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, } } -static void mt7615_tx_tasklet(unsigned long data) -{ - struct mt7615_dev *dev = (struct mt7615_dev *)data; - - mt76_txq_schedule_all(&dev->mt76); -} - static int mt7615_poll_tx(struct napi_struct *napi, int budget) { static const u8 queue_map[] = { @@ -128,9 +121,6 @@ int mt7615_dma_init(struct mt7615_dev *dev) mt76_dma_attach(&dev->mt76); - tasklet_init(&dev->mt76.tx_tasklet, mt7615_tx_tasklet, - (unsigned long)dev); - mt76_wr(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE | MT_WPDMA_GLO_CFG_FIFO_LITTLE_ENDIAN | diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c index dc94f52e6e8b..515bb58e19fd 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c @@ -154,6 +154,42 @@ int mt7615_eeprom_get_power_index(struct mt7615_dev *dev, return index; } +static void mt7615_apply_cal_free_data(struct mt7615_dev *dev) +{ + static const u16 ical[] = { + 0x53, 0x54, 0x55, 0x56, 0x57, 0x5c, 0x5d, 0x62, 0x63, 0x68, + 0x69, 0x6e, 0x6f, 0x73, 0x74, 0x78, 0x79, 0x82, 0x83, 0x87, + 0x88, 0x8c, 0x8d, 0x91, 0x92, 0x96, 0x97, 0x9b, 0x9c, 0xa0, + 0xa1, 0xaa, 0xab, 0xaf, 0xb0, 0xb4, 0xb5, 0xb9, 0xba, 0xf4, + 0xf7, 0xff, + 0x140, 0x141, 0x145, 0x146, 0x14a, 0x14b, 0x154, 0x155, 0x159, + 0x15a, 0x15e, 0x15f, 0x163, 0x164, 0x168, 0x169, 0x16d, 0x16e, + 0x172, 0x173, 0x17c, 0x17d, 0x181, 0x182, 0x186, 0x187, 0x18b, + 0x18c + }; + static const u16 ical_nocheck[] = { + 0x110, 0x111, 0x112, 0x113, 0x114, 0x115, 0x116, 0x117, 0x118, + 0x1b5, 0x1b6, 0x1b7, 0x3ac, 0x3ad, 0x3ae, 0x3af, 0x3b0, 0x3b1, + 0x3b2 + }; + u8 *eeprom = dev->mt76.eeprom.data; + u8 *otp = dev->mt76.otp.data; + int i; + + if (!otp) + return; + + for (i = 0; i < ARRAY_SIZE(ical); i++) + if (!otp[ical[i]]) + return; + + for (i = 0; i < ARRAY_SIZE(ical); i++) + eeprom[ical[i]] = otp[ical[i]]; + + for (i = 0; 
i < ARRAY_SIZE(ical_nocheck); i++) + eeprom[ical_nocheck[i]] = otp[ical_nocheck[i]]; +} + int mt7615_eeprom_init(struct mt7615_dev *dev) { int ret; @@ -166,6 +202,8 @@ int mt7615_eeprom_init(struct mt7615_dev *dev) if (ret && dev->mt76.otp.data) memcpy(dev->mt76.eeprom.data, dev->mt76.otp.data, MT7615_EEPROM_SIZE); + else + mt7615_apply_cal_free_data(dev); mt7615_eeprom_parse_hw_cap(dev); memcpy(dev->mt76.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR, diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c index 859de2454ec6..1104e4c8aaa6 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c @@ -20,10 +20,24 @@ static void mt7615_phy_init(struct mt7615_dev *dev) static void mt7615_mac_init(struct mt7615_dev *dev) { - /* enable band 0 clk */ - mt76_rmw(dev, MT_CFG_CCR, - MT_CFG_CCR_MAC_D0_1X_GC_EN | MT_CFG_CCR_MAC_D0_2X_GC_EN, - MT_CFG_CCR_MAC_D0_1X_GC_EN | MT_CFG_CCR_MAC_D0_2X_GC_EN); + u32 val; + + /* enable band 0/1 clk */ + mt76_set(dev, MT_CFG_CCR, + MT_CFG_CCR_MAC_D0_1X_GC_EN | MT_CFG_CCR_MAC_D0_2X_GC_EN | + MT_CFG_CCR_MAC_D1_1X_GC_EN | MT_CFG_CCR_MAC_D1_2X_GC_EN); + + val = mt76_rmw(dev, MT_TMAC_TRCR0, + MT_TMAC_TRCR_CCA_SEL | MT_TMAC_TRCR_SEC_CCA_SEL, + FIELD_PREP(MT_TMAC_TRCR_CCA_SEL, 2) | + FIELD_PREP(MT_TMAC_TRCR_SEC_CCA_SEL, 0)); + mt76_wr(dev, MT_TMAC_TRCR1, val); + + val = MT_AGG_ACR_PKT_TIME_EN | MT_AGG_ACR_NO_BA_AR_RULE | + FIELD_PREP(MT_AGG_ACR_CFEND_RATE, 0x49) | /* 24M */ + FIELD_PREP(MT_AGG_ACR_BAR_RATE, 0x4b); /* 6M */ + mt76_wr(dev, MT_AGG_ACR0, val); + mt76_wr(dev, MT_AGG_ACR1, val); mt76_rmw_field(dev, MT_TMAC_CTCR0, MT_TMAC_CTCR0_INS_DDLMT_REFTIME, 0x3f); @@ -36,6 +50,7 @@ static void mt7615_mac_init(struct mt7615_dev *dev) MT_TMAC_CTCR0_INS_DDLMT_EN); mt7615_mcu_set_rts_thresh(dev, 0x92b); + mt7615_mac_set_scs(dev, false); mt76_rmw(dev, MT_AGG_SCR, MT_AGG_SCR_NLNAV_MID_PTEC_DIS, MT_AGG_SCR_NLNAV_MID_PTEC_DIS); @@ -45,11 +60,19 @@ static void mt7615_mac_init(struct mt7615_dev *dev) mt76_wr(dev, MT_DMA_DCR0, MT_DMA_DCR0_RX_VEC_DROP | FIELD_PREP(MT_DMA_DCR0_MAX_RX_LEN, 3072)); - mt76_wr(dev, MT_AGG_ARUCR, FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), 7)); + mt76_wr(dev, MT_AGG_ARUCR, + FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), 7) | + FIELD_PREP(MT_AGG_ARxCR_LIMIT(1), 2) | + FIELD_PREP(MT_AGG_ARxCR_LIMIT(2), 2) | + FIELD_PREP(MT_AGG_ARxCR_LIMIT(3), 2) | + FIELD_PREP(MT_AGG_ARxCR_LIMIT(4), 1) | + FIELD_PREP(MT_AGG_ARxCR_LIMIT(5), 1) | + FIELD_PREP(MT_AGG_ARxCR_LIMIT(6), 1) | + FIELD_PREP(MT_AGG_ARxCR_LIMIT(7), 1)); + mt76_wr(dev, MT_AGG_ARDCR, - FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), 0) | - FIELD_PREP(MT_AGG_ARxCR_LIMIT(1), - max_t(int, 0, MT7615_RATE_RETRY - 2)) | + FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), MT7615_RATE_RETRY - 1) | + FIELD_PREP(MT_AGG_ARxCR_LIMIT(1), MT7615_RATE_RETRY - 1) | FIELD_PREP(MT_AGG_ARxCR_LIMIT(2), MT7615_RATE_RETRY - 1) | FIELD_PREP(MT_AGG_ARxCR_LIMIT(3), MT7615_RATE_RETRY - 1) | FIELD_PREP(MT_AGG_ARxCR_LIMIT(4), MT7615_RATE_RETRY - 1) | @@ -58,8 +81,7 @@ static void mt7615_mac_init(struct mt7615_dev *dev) FIELD_PREP(MT_AGG_ARxCR_LIMIT(7), MT7615_RATE_RETRY - 1)); mt76_wr(dev, MT_AGG_ARCR, - (MT_AGG_ARCR_INIT_RATE1 | - FIELD_PREP(MT_AGG_ARCR_RTS_RATE_THR, 2) | + (FIELD_PREP(MT_AGG_ARCR_RTS_RATE_THR, 2) | MT_AGG_ARCR_RATE_DOWN_RATIO_EN | FIELD_PREP(MT_AGG_ARCR_RATE_DOWN_RATIO, 1) | FIELD_PREP(MT_AGG_ARCR_RATE_UP_EXTRA_TH, 4))); @@ -155,17 +177,6 @@ static const struct ieee80211_iface_combination if_comb[] = { } }; -static int mt7615_init_debugfs(struct 
mt7615_dev *dev) -{ - struct dentry *dir; - - dir = mt76_register_debugfs(&dev->mt76); - if (!dir) - return -ENOMEM; - - return 0; -} - static void mt7615_init_txpower(struct mt7615_dev *dev, struct ieee80211_supported_band *sband) @@ -208,6 +219,30 @@ mt7615_init_txpower(struct mt7615_dev *dev, } } +static void +mt7615_regd_notifier(struct wiphy *wiphy, + struct regulatory_request *request) +{ + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); + struct mt7615_dev *dev = hw->priv; + struct cfg80211_chan_def *chandef = &dev->mt76.chandef; + + if (request->dfs_region == dev->mt76.region) + return; + + dev->mt76.region = request->dfs_region; + + if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR)) + return; + + mt7615_dfs_stop_radar_detector(dev); + if (request->dfs_region == NL80211_DFS_UNSET) + mt7615_mcu_rdd_cmd(dev, RDD_CAC_END, MT_HW_RDD0, + MT_RX_SEL0, 0); + else + mt7615_dfs_start_radar_detector(dev); +} + int mt7615_register_device(struct mt7615_dev *dev) { struct ieee80211_hw *hw = mt76_hw(dev); @@ -230,6 +265,8 @@ int mt7615_register_device(struct mt7615_dev *dev) wiphy->iface_combinations = if_comb; wiphy->n_iface_combinations = ARRAY_SIZE(if_comb); + wiphy->reg_notifier = mt7615_regd_notifier; + wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER); ieee80211_hw_set(hw, TX_STATUS_NO_AMPDU_LEN); @@ -243,6 +280,7 @@ int mt7615_register_device(struct mt7615_dev *dev) IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ; dev->mt76.chainmask = 0x404; dev->mt76.antenna_mask = 0xf; + dev->dfs_state = -1; wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | #ifdef CONFIG_MAC80211_MESH diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c index 1eb0e9c9970c..e07ce2c10013 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -232,11 +232,9 @@ void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid, struct mt76_txwi_cache *t; struct mt7615_dev *dev; struct mt7615_txp *txp; - u8 *txwi_ptr; - txwi_ptr = mt76_get_txwi_ptr(mdev, e->txwi); - txp = (struct mt7615_txp *)(txwi_ptr + MT_TXD_SIZE); dev = container_of(mdev, struct mt7615_dev, mt76); + txp = mt7615_txwi_to_txp(mdev, e->txwi); spin_lock_bh(&dev->token_lock); t = idr_remove(&dev->token, le16_to_cpu(txp->token)); @@ -248,12 +246,13 @@ void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid, mt76_tx_complete_skb(mdev, e->skb); } -u16 mt7615_mac_tx_rate_val(struct mt7615_dev *dev, - const struct ieee80211_tx_rate *rate, - bool stbc, u8 *bw) +static u16 +mt7615_mac_tx_rate_val(struct mt7615_dev *dev, + const struct ieee80211_tx_rate *rate, + bool stbc, u8 *bw) { u8 phy, nss, rate_idx; - u16 rateval; + u16 rateval = 0; *bw = 0; @@ -291,12 +290,14 @@ u16 mt7615_mac_tx_rate_val(struct mt7615_dev *dev, rate_idx = val & 0xff; } - rateval = (FIELD_PREP(MT_TX_RATE_IDX, rate_idx) | - FIELD_PREP(MT_TX_RATE_MODE, phy) | - FIELD_PREP(MT_TX_RATE_NSS, nss - 1)); - - if (stbc && nss == 1) + if (stbc && nss == 1) { + nss++; rateval |= MT_TX_RATE_STBC; + } + + rateval |= (FIELD_PREP(MT_TX_RATE_IDX, rate_idx) | + FIELD_PREP(MT_TX_RATE_MODE, phy) | + FIELD_PREP(MT_TX_RATE_NSS, nss - 1)); return rateval; } @@ -309,9 +310,10 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi, struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_tx_rate *rate = &info->control.rates[0]; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + 
bool multicast = is_multicast_ether_addr(hdr->addr1); struct ieee80211_vif *vif = info->control.vif; int tx_count = 8; - u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0; + u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0, wmm_idx = 0; __le16 fc = hdr->frame_control; u16 seqno = 0; u32 val; @@ -320,6 +322,7 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi, struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; omac_idx = mvif->omac_idx; + wmm_idx = mvif->wmm_idx; } if (sta) { @@ -331,8 +334,9 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi, fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2; fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4; - if (ieee80211_is_data(fc)) { - q_idx = skb_get_queue_mapping(skb); + if (ieee80211_is_data(fc) || ieee80211_is_bufferable_mmpdu(fc)) { + q_idx = wmm_idx * MT7615_MAX_WMM_SETS + + skb_get_queue_mapping(skb); p_fmt = MT_TX_TYPE_CT; } else if (ieee80211_is_beacon(fc)) { q_idx = MT_LMAC_BCN0; @@ -360,8 +364,18 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi, val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) | FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) | - FIELD_PREP(MT_TXD2_MULTICAST, - is_multicast_ether_addr(hdr->addr1)); + FIELD_PREP(MT_TXD2_MULTICAST, multicast); + if (key) { + if (multicast && ieee80211_is_robust_mgmt_frame(skb) && + key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) { + val |= MT_TXD2_BIP; + txwi[3] = 0; + } else { + txwi[3] = cpu_to_le32(MT_TXD3_PROTECT_FRAME); + } + } else { + txwi[3] = 0; + } txwi[2] = cpu_to_le32(val); if (!(info->flags & IEEE80211_TX_CTL_AMPDU)) @@ -418,14 +432,11 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi, } val |= FIELD_PREP(MT_TXD3_SEQ, seqno); - txwi[3] = cpu_to_le32(val); + txwi[3] |= cpu_to_le32(val); if (info->flags & IEEE80211_TX_CTL_NO_ACK) txwi[3] |= cpu_to_le32(MT_TXD3_NO_ACK); - if (key) - txwi[3] |= cpu_to_le32(MT_TXD3_PROTECT_FRAME); - txwi[7] = FIELD_PREP(MT_TXD7_TYPE, fc_type) | FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype); @@ -436,16 +447,318 @@ void mt7615_txp_skb_unmap(struct mt76_dev *dev, struct mt76_txwi_cache *t) { struct mt7615_txp *txp; - u8 *txwi; int i; - txwi = mt76_get_txwi_ptr(dev, t); - txp = (struct mt7615_txp *)(txwi + MT_TXD_SIZE); + txp = mt7615_txwi_to_txp(dev, t); for (i = 1; i < txp->nbuf; i++) dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]), le16_to_cpu(txp->len[i]), DMA_TO_DEVICE); } +static u32 mt7615_mac_wtbl_addr(int wcid) +{ + return MT_WTBL_BASE + wcid * MT_WTBL_ENTRY_SIZE; +} + +void mt7615_mac_set_rates(struct mt7615_dev *dev, struct mt7615_sta *sta, + struct ieee80211_tx_rate *probe_rate, + struct ieee80211_tx_rate *rates) +{ + struct ieee80211_tx_rate *ref; + int wcid = sta->wcid.idx; + u32 addr = mt7615_mac_wtbl_addr(wcid); + bool stbc = false; + int n_rates = sta->n_rates; + u8 bw, bw_prev, bw_idx = 0; + u16 val[4]; + u16 probe_val; + u32 w5, w27; + bool rateset; + int i, k; + + if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000)) + return; + + for (i = n_rates; i < 4; i++) + rates[i] = rates[n_rates - 1]; + + rateset = !(sta->rate_set_tsf & BIT(0)); + memcpy(sta->rateset[rateset].rates, rates, + sizeof(sta->rateset[rateset].rates)); + if (probe_rate) { + sta->rateset[rateset].probe_rate = *probe_rate; + ref = &sta->rateset[rateset].probe_rate; + } else { + sta->rateset[rateset].probe_rate.idx = -1; + ref = &sta->rateset[rateset].rates[0]; + } + + rates = sta->rateset[rateset].rates; + for (i = 0; i < ARRAY_SIZE(sta->rateset[rateset].rates); i++) { + /* + * We don't 
support switching between short and long GI + * within the rate set. For accurate tx status reporting, we + * need to make sure that flags match. + * For improved performance, avoid duplicate entries by + * decrementing the MCS index if necessary + */ + if ((ref->flags ^ rates[i].flags) & IEEE80211_TX_RC_SHORT_GI) + rates[i].flags ^= IEEE80211_TX_RC_SHORT_GI; + + for (k = 0; k < i; k++) { + if (rates[i].idx != rates[k].idx) + continue; + if ((rates[i].flags ^ rates[k].flags) & + (IEEE80211_TX_RC_40_MHZ_WIDTH | + IEEE80211_TX_RC_80_MHZ_WIDTH | + IEEE80211_TX_RC_160_MHZ_WIDTH)) + continue; + + if (!rates[i].idx) + continue; + + rates[i].idx--; + } + } + + val[0] = mt7615_mac_tx_rate_val(dev, &rates[0], stbc, &bw); + bw_prev = bw; + + if (probe_rate) { + probe_val = mt7615_mac_tx_rate_val(dev, probe_rate, stbc, &bw); + if (bw) + bw_idx = 1; + else + bw_prev = 0; + } else { + probe_val = val[0]; + } + + val[1] = mt7615_mac_tx_rate_val(dev, &rates[1], stbc, &bw); + if (bw_prev) { + bw_idx = 3; + bw_prev = bw; + } + + val[2] = mt7615_mac_tx_rate_val(dev, &rates[2], stbc, &bw); + if (bw_prev) { + bw_idx = 5; + bw_prev = bw; + } + + val[3] = mt7615_mac_tx_rate_val(dev, &rates[3], stbc, &bw); + if (bw_prev) + bw_idx = 7; + + w27 = mt76_rr(dev, addr + 27 * 4); + w27 &= ~MT_WTBL_W27_CC_BW_SEL; + w27 |= FIELD_PREP(MT_WTBL_W27_CC_BW_SEL, bw); + + w5 = mt76_rr(dev, addr + 5 * 4); + w5 &= ~(MT_WTBL_W5_BW_CAP | MT_WTBL_W5_CHANGE_BW_RATE | + MT_WTBL_W5_MPDU_OK_COUNT | + MT_WTBL_W5_MPDU_FAIL_COUNT | + MT_WTBL_W5_RATE_IDX); + w5 |= FIELD_PREP(MT_WTBL_W5_BW_CAP, bw) | + FIELD_PREP(MT_WTBL_W5_CHANGE_BW_RATE, bw_idx ? bw_idx - 1 : 7); + + mt76_wr(dev, MT_WTBL_RIUCR0, w5); + + mt76_wr(dev, MT_WTBL_RIUCR1, + FIELD_PREP(MT_WTBL_RIUCR1_RATE0, probe_val) | + FIELD_PREP(MT_WTBL_RIUCR1_RATE1, val[0]) | + FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, val[1])); + + mt76_wr(dev, MT_WTBL_RIUCR2, + FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, val[1] >> 8) | + FIELD_PREP(MT_WTBL_RIUCR2_RATE3, val[1]) | + FIELD_PREP(MT_WTBL_RIUCR2_RATE4, val[2]) | + FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, val[2])); + + mt76_wr(dev, MT_WTBL_RIUCR3, + FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, val[2] >> 4) | + FIELD_PREP(MT_WTBL_RIUCR3_RATE6, val[3]) | + FIELD_PREP(MT_WTBL_RIUCR3_RATE7, val[3])); + + mt76_wr(dev, MT_WTBL_UPDATE, + FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wcid) | + MT_WTBL_UPDATE_RATE_UPDATE | + MT_WTBL_UPDATE_TX_COUNT_CLEAR); + + mt76_wr(dev, addr + 27 * 4, w27); + + mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */ + sta->rate_set_tsf = (mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0)) | rateset; + + if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET)) + mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000); + + sta->rate_count = 2 * MT7615_RATE_RETRY * n_rates; + sta->wcid.tx_info |= MT_WCID_TX_INFO_SET; +} + +static enum mt7615_cipher_type +mt7615_mac_get_cipher(int cipher) +{ + switch (cipher) { + case WLAN_CIPHER_SUITE_WEP40: + return MT_CIPHER_WEP40; + case WLAN_CIPHER_SUITE_WEP104: + return MT_CIPHER_WEP104; + case WLAN_CIPHER_SUITE_TKIP: + return MT_CIPHER_TKIP; + case WLAN_CIPHER_SUITE_AES_CMAC: + return MT_CIPHER_BIP_CMAC_128; + case WLAN_CIPHER_SUITE_CCMP: + return MT_CIPHER_AES_CCMP; + case WLAN_CIPHER_SUITE_CCMP_256: + return MT_CIPHER_CCMP_256; + case WLAN_CIPHER_SUITE_GCMP: + return MT_CIPHER_GCMP; + case WLAN_CIPHER_SUITE_GCMP_256: + return MT_CIPHER_GCMP_256; + case WLAN_CIPHER_SUITE_SMS4: + return MT_CIPHER_WAPI; + default: + return MT_CIPHER_NONE; + } +} + +static int +mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid 
*wcid, + struct ieee80211_key_conf *key, + enum mt7615_cipher_type cipher, + enum set_key_cmd cmd) +{ + u32 addr = mt7615_mac_wtbl_addr(wcid->idx) + 30 * 4; + u8 data[32] = {}; + + if (key->keylen > sizeof(data)) + return -EINVAL; + + mt76_rr_copy(dev, addr, data, sizeof(data)); + if (cmd == SET_KEY) { + if (cipher == MT_CIPHER_TKIP) { + /* Rx/Tx MIC keys are swapped */ + memcpy(data + 16, key->key + 24, 8); + memcpy(data + 24, key->key + 16, 8); + } + if (cipher != MT_CIPHER_BIP_CMAC_128 && wcid->cipher) + memmove(data + 16, data, 16); + if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher) + memcpy(data, key->key, key->keylen); + else if (cipher == MT_CIPHER_BIP_CMAC_128) + memcpy(data + 16, key->key, 16); + } else { + if (wcid->cipher & ~BIT(cipher)) { + if (cipher != MT_CIPHER_BIP_CMAC_128) + memmove(data, data + 16, 16); + memset(data + 16, 0, 16); + } else { + memset(data, 0, sizeof(data)); + } + } + mt76_wr_copy(dev, addr, data, sizeof(data)); + + return 0; +} + +static int +mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid, + enum mt7615_cipher_type cipher, int keyidx, + enum set_key_cmd cmd) +{ + u32 addr = mt7615_mac_wtbl_addr(wcid->idx), w0, w1; + + if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000)) + return -ETIMEDOUT; + + w0 = mt76_rr(dev, addr); + w1 = mt76_rr(dev, addr + 4); + if (cmd == SET_KEY) { + w0 |= MT_WTBL_W0_RX_KEY_VALID | + FIELD_PREP(MT_WTBL_W0_RX_IK_VALID, + cipher == MT_CIPHER_BIP_CMAC_128); + if (cipher != MT_CIPHER_BIP_CMAC_128 || + !wcid->cipher) + w0 |= FIELD_PREP(MT_WTBL_W0_KEY_IDX, keyidx); + } else { + if (!(wcid->cipher & ~BIT(cipher))) + w0 &= ~(MT_WTBL_W0_RX_KEY_VALID | + MT_WTBL_W0_KEY_IDX); + if (cipher == MT_CIPHER_BIP_CMAC_128) + w0 &= ~MT_WTBL_W0_RX_IK_VALID; + } + mt76_wr(dev, MT_WTBL_RICR0, w0); + mt76_wr(dev, MT_WTBL_RICR1, w1); + + mt76_wr(dev, MT_WTBL_UPDATE, + FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wcid->idx) | + MT_WTBL_UPDATE_RXINFO_UPDATE); + + if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000)) + return -ETIMEDOUT; + + return 0; +} + +static void +mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid, + enum mt7615_cipher_type cipher, + enum set_key_cmd cmd) +{ + u32 addr = mt7615_mac_wtbl_addr(wcid->idx); + + if (cmd == SET_KEY) { + if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher) + mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE, + FIELD_PREP(MT_WTBL_W2_KEY_TYPE, cipher)); + } else { + if (cipher != MT_CIPHER_BIP_CMAC_128 && + wcid->cipher & BIT(MT_CIPHER_BIP_CMAC_128)) + mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE, + FIELD_PREP(MT_WTBL_W2_KEY_TYPE, + MT_CIPHER_BIP_CMAC_128)); + else if (!(wcid->cipher & ~BIT(cipher))) + mt76_clear(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE); + } +} + +int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev, + struct mt76_wcid *wcid, + struct ieee80211_key_conf *key, + enum set_key_cmd cmd) +{ + enum mt7615_cipher_type cipher; + int err; + + cipher = mt7615_mac_get_cipher(key->cipher); + if (cipher == MT_CIPHER_NONE) + return -EOPNOTSUPP; + + spin_lock_bh(&dev->mt76.lock); + + mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cmd); + err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cmd); + if (err < 0) + goto out; + + err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, key->keyidx, + cmd); + if (err < 0) + goto out; + + if (cmd == SET_KEY) + wcid->cipher |= BIT(cipher); + else + wcid->cipher &= ~BIT(cipher); + +out: + spin_unlock_bh(&dev->mt76.lock); + + return err; +} + int mt7615_tx_prepare_skb(struct mt76_dev *mdev, 
void *txwi_ptr, enum mt76_txq_id qid, struct mt76_wcid *wcid, struct ieee80211_sta *sta, @@ -469,9 +782,9 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) { spin_lock_bh(&dev->mt76.lock); - msta->rate_probe = true; - mt7615_mcu_set_rates(dev, msta, &info->control.rates[0], + mt7615_mac_set_rates(dev, msta, &info->control.rates[0], msta->rates); + msta->rate_probe = true; spin_unlock_bh(&dev->mt76.lock); } @@ -523,9 +836,13 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta, struct ieee80211_tx_info *info, __le32 *txs_data) { struct ieee80211_supported_band *sband; - int i, idx, count, final_idx = 0; + struct mt7615_rate_set *rs; + int first_idx = 0, last_idx; + int i, idx, count; bool fixed_rate, ack_timeout; bool probe, ampdu, cck = false; + bool rs_idx; + u32 rate_set_tsf; u32 final_rate, final_rate_flags, final_nss, txs; fixed_rate = info->status.rates[0].count; @@ -536,6 +853,7 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta, txs = le32_to_cpu(txs_data[3]); count = FIELD_GET(MT_TXS3_TX_COUNT, txs); + last_idx = FIELD_GET(MT_TXS3_LAST_TX_RATE, txs); txs = le32_to_cpu(txs_data[0]); final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs); @@ -557,38 +875,57 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta, if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU)) info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU; + first_idx = max_t(int, 0, last_idx - (count + 1) / MT7615_RATE_RETRY); + if (fixed_rate && !probe) { info->status.rates[0].count = count; + i = 0; goto out; } - for (i = 0, idx = 0; i < ARRAY_SIZE(info->status.rates); i++) { - int cur_count = min_t(int, count, 2 * MT7615_RATE_RETRY); + rate_set_tsf = READ_ONCE(sta->rate_set_tsf); + rs_idx = !((u32)(FIELD_GET(MT_TXS4_F0_TIMESTAMP, le32_to_cpu(txs_data[4])) - + rate_set_tsf) < 1000000); + rs_idx ^= rate_set_tsf & BIT(0); + rs = &sta->rateset[rs_idx]; - if (!i && probe) { - cur_count = 1; - } else { - info->status.rates[i] = sta->rates[idx]; - idx++; - } + if (!first_idx && rs->probe_rate.idx >= 0) { + info->status.rates[0] = rs->probe_rate; - if (i && info->status.rates[i].idx < 0) { - info->status.rates[i - 1].count += count; - break; + spin_lock_bh(&dev->mt76.lock); + if (sta->rate_probe) { + mt7615_mac_set_rates(dev, sta, NULL, sta->rates); + sta->rate_probe = false; } + spin_unlock_bh(&dev->mt76.lock); + } else { + info->status.rates[0] = rs->rates[first_idx / 2]; + } + info->status.rates[0].count = 0; - if (!count) { - info->status.rates[i].idx = -1; - break; - } + for (i = 0, idx = first_idx; count && idx <= last_idx; idx++) { + struct ieee80211_tx_rate *cur_rate; + int cur_count; - info->status.rates[i].count = cur_count; - final_idx = i; + cur_rate = &rs->rates[idx / 2]; + cur_count = min_t(int, MT7615_RATE_RETRY, count); count -= cur_count; + + if (idx && (cur_rate->idx != info->status.rates[i].idx || + cur_rate->flags != info->status.rates[i].flags)) { + i++; + if (i == ARRAY_SIZE(info->status.rates)) + break; + + info->status.rates[i] = *cur_rate; + info->status.rates[i].count = 0; + } + + info->status.rates[i].count += cur_count; } out: - final_rate_flags = info->status.rates[final_idx].flags; + final_rate_flags = info->status.rates[i].flags; switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) { case MT_PHY_TYPE_CCK: @@ -613,6 +950,10 @@ out: break; case MT_PHY_TYPE_VHT: final_nss = FIELD_GET(MT_TX_RATE_NSS, final_rate); + + if ((final_rate & MT_TX_RATE_STBC) && 
final_nss) + final_nss--; + final_rate_flags |= IEEE80211_TX_RC_VHT_MCS; final_rate = (final_rate & MT_TX_RATE_IDX) | (final_nss << 4); break; @@ -620,8 +961,8 @@ out: return false; } - info->status.rates[final_idx].idx = final_rate; - info->status.rates[final_idx].flags = final_rate_flags; + info->status.rates[i].idx = final_rate; + info->status.rates[i].flags = final_rate_flags; return true; } @@ -642,16 +983,6 @@ static bool mt7615_mac_add_txs_skb(struct mt7615_dev *dev, if (skb) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) { - spin_lock_bh(&dev->mt76.lock); - if (sta->rate_probe) { - mt7615_mcu_set_rates(dev, sta, NULL, - sta->rates); - sta->rate_probe = false; - } - spin_unlock_bh(&dev->mt76.lock); - } - if (!mt7615_fill_txs(dev, sta, info, txs_data)) { ieee80211_tx_info_clear_status(info); info->status.rates[0].idx = -1; @@ -735,6 +1066,198 @@ void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb) dev_kfree_skb(skb); } +static void +mt7615_mac_set_default_sensitivity(struct mt7615_dev *dev) +{ + mt76_rmw(dev, MT_WF_PHY_B0_MIN_PRI_PWR, + MT_WF_PHY_B0_PD_OFDM_MASK, + MT_WF_PHY_B0_PD_OFDM(0x13c)); + mt76_rmw(dev, MT_WF_PHY_B1_MIN_PRI_PWR, + MT_WF_PHY_B1_PD_OFDM_MASK, + MT_WF_PHY_B1_PD_OFDM(0x13c)); + + mt76_rmw(dev, MT_WF_PHY_B0_RXTD_CCK_PD, + MT_WF_PHY_B0_PD_CCK_MASK, + MT_WF_PHY_B0_PD_CCK(0x92)); + mt76_rmw(dev, MT_WF_PHY_B1_RXTD_CCK_PD, + MT_WF_PHY_B1_PD_CCK_MASK, + MT_WF_PHY_B1_PD_CCK(0x92)); + + dev->ofdm_sensitivity = -98; + dev->cck_sensitivity = -110; + dev->last_cca_adj = jiffies; +} + +void mt7615_mac_set_scs(struct mt7615_dev *dev, bool enable) +{ + mutex_lock(&dev->mt76.mutex); + + if (dev->scs_en == enable) + goto out; + + if (enable) { + /* DBDC not supported */ + mt76_set(dev, MT_WF_PHY_B0_MIN_PRI_PWR, + MT_WF_PHY_B0_PD_BLK); + if (is_mt7622(&dev->mt76)) { + mt76_set(dev, MT_MIB_M0_MISC_CR, 0x7 << 8); + mt76_set(dev, MT_MIB_M0_MISC_CR, 0x7); + } + } else { + mt76_clear(dev, MT_WF_PHY_B0_MIN_PRI_PWR, + MT_WF_PHY_B0_PD_BLK); + mt76_clear(dev, MT_WF_PHY_B1_MIN_PRI_PWR, + MT_WF_PHY_B1_PD_BLK); + } + + mt7615_mac_set_default_sensitivity(dev); + dev->scs_en = enable; + +out: + mutex_unlock(&dev->mt76.mutex); +} + +void mt7615_mac_cca_stats_reset(struct mt7615_dev *dev) +{ + mt76_clear(dev, MT_WF_PHY_R0_B0_PHYMUX_5, GENMASK(22, 20)); + mt76_set(dev, MT_WF_PHY_R0_B0_PHYMUX_5, BIT(22) | BIT(20)); +} + +static void +mt7615_mac_adjust_sensitivity(struct mt7615_dev *dev, + u32 rts_err_rate, bool ofdm) +{ + int false_cca = ofdm ? dev->false_cca_ofdm : dev->false_cca_cck; + u16 def_th = ofdm ? -98 : -110; + bool update = false; + s8 *sensitivity; + int signal; + + sensitivity = ofdm ? 
&dev->ofdm_sensitivity : &dev->cck_sensitivity; + signal = mt76_get_min_avg_rssi(&dev->mt76); + if (!signal) { + mt7615_mac_set_default_sensitivity(dev); + return; + } + + signal = min(signal, -72); + if (false_cca > 500) { + if (rts_err_rate > MT_FRAC(40, 100)) + return; + + /* decrease coverage */ + if (*sensitivity == def_th && signal > -90) { + *sensitivity = -90; + update = true; + } else if (*sensitivity + 2 < signal) { + *sensitivity += 2; + update = true; + } + } else if ((false_cca > 0 && false_cca < 50) || + rts_err_rate > MT_FRAC(60, 100)) { + /* increase coverage */ + if (*sensitivity - 2 >= def_th) { + *sensitivity -= 2; + update = true; + } + } + + if (*sensitivity > signal) { + *sensitivity = signal; + update = true; + } + + if (update) { + u16 val; + + if (ofdm) { + /* DBDC not supported */ + val = *sensitivity * 2 + 512; + mt76_rmw(dev, MT_WF_PHY_B0_MIN_PRI_PWR, + MT_WF_PHY_B0_PD_OFDM_MASK, + MT_WF_PHY_B0_PD_OFDM(val)); + } else { + val = *sensitivity + 256; + mt76_rmw(dev, MT_WF_PHY_B0_RXTD_CCK_PD, + MT_WF_PHY_B0_PD_CCK_MASK, + MT_WF_PHY_B0_PD_CCK(val)); + mt76_rmw(dev, MT_WF_PHY_B1_RXTD_CCK_PD, + MT_WF_PHY_B1_PD_CCK_MASK, + MT_WF_PHY_B1_PD_CCK(val)); + } + dev->last_cca_adj = jiffies; + } +} + +static void +mt7615_mac_scs_check(struct mt7615_dev *dev) +{ + u32 val, rts_cnt = 0, rts_retries_cnt = 0, rts_err_rate = 0; + u32 mdrdy_cck, mdrdy_ofdm, pd_cck, pd_ofdm; + int i; + + if (!dev->scs_en) + return; + + for (i = 0; i < 4; i++) { + u32 data; + + val = mt76_rr(dev, MT_MIB_MB_SDR0(i)); + data = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val); + if (data > rts_retries_cnt) { + rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val); + rts_retries_cnt = data; + } + } + + val = mt76_rr(dev, MT_WF_PHY_R0_B0_PHYCTRL_STS0); + pd_cck = FIELD_GET(MT_WF_PHYCTRL_STAT_PD_CCK, val); + pd_ofdm = FIELD_GET(MT_WF_PHYCTRL_STAT_PD_OFDM, val); + + val = mt76_rr(dev, MT_WF_PHY_R0_B0_PHYCTRL_STS5); + mdrdy_cck = FIELD_GET(MT_WF_PHYCTRL_STAT_MDRDY_CCK, val); + mdrdy_ofdm = FIELD_GET(MT_WF_PHYCTRL_STAT_MDRDY_OFDM, val); + + dev->false_cca_ofdm = pd_ofdm - mdrdy_ofdm; + dev->false_cca_cck = pd_cck - mdrdy_cck; + mt7615_mac_cca_stats_reset(dev); + + if (rts_cnt + rts_retries_cnt) + rts_err_rate = MT_FRAC(rts_retries_cnt, + rts_cnt + rts_retries_cnt); + + /* cck */ + mt7615_mac_adjust_sensitivity(dev, rts_err_rate, false); + /* ofdm */ + mt7615_mac_adjust_sensitivity(dev, rts_err_rate, true); + + if (time_after(jiffies, dev->last_cca_adj + 10 * HZ)) + mt7615_mac_set_default_sensitivity(dev); +} + +void mt7615_update_channel(struct mt76_dev *mdev) +{ + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); + struct mt76_channel_state *state; + ktime_t cur_time; + u32 busy; + + if (!test_bit(MT76_STATE_RUNNING, &mdev->state)) + return; + + state = mt76_channel_state(mdev, mdev->chandef.chan); + /* TODO: add DBDC support */ + busy = mt76_get_field(dev, MT_MIB_SDR16(0), MT_MIB_BUSY_MASK); + + spin_lock_bh(&mdev->cc_lock); + cur_time = ktime_get_boottime(); + state->cc_busy += busy; + state->cc_active += ktime_to_us(ktime_sub(cur_time, + mdev->survey_time)); + mdev->survey_time = cur_time; + spin_unlock_bh(&mdev->cc_lock); +} + void mt7615_mac_work(struct work_struct *work) { struct mt7615_dev *dev; @@ -742,7 +1265,103 @@ void mt7615_mac_work(struct work_struct *work) dev = (struct mt7615_dev *)container_of(work, struct mt76_dev, mac_work.work); + mutex_lock(&dev->mt76.mutex); + mt7615_update_channel(&dev->mt76); + if (++dev->mac_work_count == 5) { + mt7615_mac_scs_check(dev); + 
dev->mac_work_count = 0; + } + mutex_unlock(&dev->mt76.mutex); + mt76_tx_status_check(&dev->mt76, NULL, false); ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work, MT7615_WATCHDOG_TIME); } + +int mt7615_dfs_stop_radar_detector(struct mt7615_dev *dev) +{ + struct cfg80211_chan_def *chandef = &dev->mt76.chandef; + int err; + + err = mt7615_mcu_rdd_cmd(dev, RDD_STOP, MT_HW_RDD0, + MT_RX_SEL0, 0); + if (err < 0) + return err; + + if (chandef->width == NL80211_CHAN_WIDTH_160 || + chandef->width == NL80211_CHAN_WIDTH_80P80) + err = mt7615_mcu_rdd_cmd(dev, RDD_STOP, MT_HW_RDD1, + MT_RX_SEL0, 0); + return err; +} + +static int mt7615_dfs_start_rdd(struct mt7615_dev *dev, int chain) +{ + int err; + + err = mt7615_mcu_rdd_cmd(dev, RDD_START, chain, MT_RX_SEL0, 0); + if (err < 0) + return err; + + return mt7615_mcu_rdd_cmd(dev, RDD_DET_MODE, chain, + MT_RX_SEL0, 1); +} + +int mt7615_dfs_start_radar_detector(struct mt7615_dev *dev) +{ + struct cfg80211_chan_def *chandef = &dev->mt76.chandef; + int err; + + /* start CAC */ + err = mt7615_mcu_rdd_cmd(dev, RDD_CAC_START, MT_HW_RDD0, + MT_RX_SEL0, 0); + if (err < 0) + return err; + + /* TODO: DBDC support */ + + err = mt7615_dfs_start_rdd(dev, MT_HW_RDD0); + if (err < 0) + return err; + + if (chandef->width == NL80211_CHAN_WIDTH_160 || + chandef->width == NL80211_CHAN_WIDTH_80P80) { + err = mt7615_dfs_start_rdd(dev, MT_HW_RDD1); + if (err < 0) + return err; + } + + return 0; +} + +int mt7615_dfs_init_radar_detector(struct mt7615_dev *dev) +{ + struct cfg80211_chan_def *chandef = &dev->mt76.chandef; + int err; + + if (dev->mt76.region == NL80211_DFS_UNSET) + return 0; + + if (test_bit(MT76_SCANNING, &dev->mt76.state)) + return 0; + + if (dev->dfs_state == chandef->chan->dfs_state) + return 0; + + dev->dfs_state = chandef->chan->dfs_state; + + if (chandef->chan->flags & IEEE80211_CHAN_RADAR) { + if (chandef->chan->dfs_state != NL80211_DFS_AVAILABLE) + return mt7615_dfs_start_radar_detector(dev); + else + return mt7615_mcu_rdd_cmd(dev, RDD_CAC_END, MT_HW_RDD0, + MT_RX_SEL0, 0); + } else { + err = mt7615_mcu_rdd_cmd(dev, RDD_NORMAL_START, + MT_HW_RDD0, MT_RX_SEL0, 0); + if (err < 0) + return err; + + return mt7615_dfs_stop_radar_detector(dev); + } +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h index b00ce8db58e9..38695d4f92e2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h @@ -302,4 +302,32 @@ struct mt7615_tx_free { #define MT_TXS6_F1_RCPI_1 GENMASK(15, 8) #define MT_TXS6_F1_RCPI_0 GENMASK(7, 0) +enum mt7615_cipher_type { + MT_CIPHER_NONE, + MT_CIPHER_WEP40, + MT_CIPHER_TKIP, + MT_CIPHER_TKIP_NO_MIC, + MT_CIPHER_AES_CCMP, + MT_CIPHER_WEP104, + MT_CIPHER_BIP_CMAC_128, + MT_CIPHER_WEP128, + MT_CIPHER_WAPI, + MT_CIPHER_CCMP_256 = 10, + MT_CIPHER_GCMP, + MT_CIPHER_GCMP_256, +}; + +static inline struct mt7615_txp * +mt7615_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t) +{ + u8 *txwi; + + if (!t) + return NULL; + + txwi = mt76_get_txwi_ptr(dev, t); + + return (struct mt7615_txp *)(txwi + MT_TXD_SIZE); +} + #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index b4d6af812c54..87c748715b5d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -16,6 +16,7 @@ static int mt7615_start(struct ieee80211_hw *hw) { struct mt7615_dev *dev = hw->priv; + dev->mt76.survey_time = 
ktime_get_boottime(); set_bit(MT76_STATE_RUNNING, &dev->mt76.state); ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work, MT7615_WATCHDOG_TIME); @@ -85,9 +86,9 @@ static int mt7615_add_interface(struct ieee80211_hw *hw, } mvif->omac_idx = idx; - /* TODO: DBDC support. Use band 0 and wmm 0 for now */ + /* TODO: DBDC support. Use band 0 for now */ mvif->band_idx = 0; - mvif->wmm_idx = 0; + mvif->wmm_idx = mvif->idx % MT7615_MAX_WMM_SETS; ret = mt7615_mcu_set_dev_info(dev, vif, 1); if (ret) @@ -135,20 +136,32 @@ static int mt7615_set_channel(struct mt7615_dev *dev) int ret; cancel_delayed_work_sync(&dev->mt76.mac_work); + + mutex_lock(&dev->mt76.mutex); set_bit(MT76_RESET, &dev->mt76.state); + mt7615_dfs_check_channel(dev); + mt76_set_channel(&dev->mt76); ret = mt7615_mcu_set_channel(dev); if (ret) - return ret; + goto out; + + ret = mt7615_dfs_init_radar_detector(dev); + mt7615_mac_cca_stats_reset(dev); + dev->mt76.survey_time = ktime_get_boottime(); + /* TODO: add DBDC support */ + mt76_rr(dev, MT_MIB_SDR16(0)); +out: clear_bit(MT76_RESET, &dev->mt76.state); + mutex_unlock(&dev->mt76.mutex); mt76_txq_schedule_all(&dev->mt76); ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work, MT7615_WATCHDOG_TIME); - return 0; + return ret; } static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, @@ -172,18 +185,34 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) return -EOPNOTSUPP; + /* fall back to sw encryption for unsupported ciphers */ + switch (key->cipher) { + case WLAN_CIPHER_SUITE_AES_CMAC: + key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE; + break; + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + case WLAN_CIPHER_SUITE_TKIP: + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_CCMP_256: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + case WLAN_CIPHER_SUITE_SMS4: + break; + default: + return -EOPNOTSUPP; + } + if (cmd == SET_KEY) { key->hw_key_idx = wcid->idx; wcid->hw_key_idx = idx; - } else { - if (idx == wcid->hw_key_idx) - wcid->hw_key_idx = -1; - - key = NULL; + } else if (idx == wcid->hw_key_idx) { + wcid->hw_key_idx = -1; } - mt76_wcid_key_setup(&dev->mt76, wcid, key); + mt76_wcid_key_setup(&dev->mt76, wcid, + cmd == SET_KEY ? 
key : NULL); - return mt7615_mcu_set_wtbl_key(dev, wcid->idx, key, cmd); + return mt7615_mac_wtbl_set_key(dev, wcid, key, cmd); } static int mt7615_config(struct ieee80211_hw *hw, u32 changed) @@ -191,14 +220,14 @@ struct mt7615_dev *dev = hw->priv; int ret = 0; - mutex_lock(&dev->mt76.mutex); - if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { ieee80211_stop_queues(hw); ret = mt7615_set_channel(dev); ieee80211_wake_queues(hw); } + mutex_lock(&dev->mt76.mutex); + if (changed & IEEE80211_CONF_CHANGE_POWER) ret = mt7615_mcu_set_tx_power(dev); @@ -220,16 +249,12 @@ static int mt7615_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue, const struct ieee80211_tx_queue_params *params) { + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; struct mt7615_dev *dev = hw->priv; - static const u8 wmm_queue_map[] = { - [IEEE80211_AC_BK] = 0, - [IEEE80211_AC_BE] = 1, - [IEEE80211_AC_VI] = 2, - [IEEE80211_AC_VO] = 3, - }; - /* TODO: hw wmm_set 1~3 */ - return mt7615_mcu_set_wmm(dev, wmm_queue_map[queue], params); + queue += mvif->wmm_idx * MT7615_MAX_WMM_SETS; + + return mt7615_mcu_set_wmm(dev, queue, params); } static void mt7615_configure_filter(struct ieee80211_hw *hw, @@ -299,6 +324,18 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw, mutex_unlock(&dev->mt76.mutex); } +static void +mt7615_channel_switch_beacon(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_chan_def *chandef) +{ + struct mt7615_dev *dev = hw->priv; + + mutex_lock(&dev->mt76.mutex); + mt7615_mcu_set_bcn(dev, vif, true); + mutex_unlock(&dev->mt76.mutex); +} + int mt7615_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { @@ -358,7 +395,7 @@ static void mt7615_sta_rate_tbl_update(struct ieee80211_hw *hw, break; } msta->n_rates = i; - mt7615_mcu_set_rates(dev, msta, NULL, msta->rates); + mt7615_mac_set_rates(dev, msta, NULL, msta->rates); msta->rate_probe = false; spin_unlock_bh(&dev->mt76.lock); } @@ -410,7 +447,7 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_txq *txq = sta->txq[params->tid]; struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv; u16 tid = params->tid; - u16 *ssn = &params->ssn; + u16 ssn = params->ssn; struct mt76_txq *mtxq; if (!txq) @@ -420,7 +457,7 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, switch (action) { case IEEE80211_AMPDU_RX_START: - mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, *ssn, + mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn, params->buf_size); mt7615_mcu_set_rx_ba(dev, params, 1); break; @@ -436,11 +473,10 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, case IEEE80211_AMPDU_TX_STOP_FLUSH: case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: mtxq->aggr = false; - ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn); mt7615_mcu_set_tx_ba(dev, params, 0); break; case IEEE80211_AMPDU_TX_START: - mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(*ssn); + mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(ssn); ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; case IEEE80211_AMPDU_TX_STOP_CONT: @@ -453,23 +489,6 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, return 0; } -static void -mt7615_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - const u8 *mac) -{ - struct mt7615_dev *dev = hw->priv; - - set_bit(MT76_SCANNING, &dev->mt76.state); -} - -static void -mt7615_sw_scan_complete(struct ieee80211_hw *hw, struct 
ieee80211_vif *vif) -{ - struct mt7615_dev *dev = hw->priv; - - clear_bit(MT76_SCANNING, &dev->mt76.state); -} - const struct ieee80211_ops mt7615_ops = { .tx = mt7615_tx, .start = mt7615_start, @@ -486,8 +505,10 @@ const struct ieee80211_ops mt7615_ops = { .set_rts_threshold = mt7615_set_rts_threshold, .wake_tx_queue = mt76_wake_tx_queue, .sta_rate_tbl_update = mt7615_sta_rate_tbl_update, - .sw_scan_start = mt7615_sw_scan, - .sw_scan_complete = mt7615_sw_scan_complete, + .sw_scan_start = mt76_sw_scan, + .sw_scan_complete = mt76_sw_scan_complete, .release_buffered_frames = mt76_release_buffered_frames, .get_txpower = mt76_get_txpower, + .channel_switch_beacon = mt7615_channel_switch_beacon, + .get_survey = mt76_get_survey, }; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c index cdad2c8dc297..275d5eaed3b7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c @@ -75,7 +75,7 @@ static int __mt7615_mcu_msg_send(struct mt7615_dev *dev, struct sk_buff *skb, txd = mcu_txd->txd; - val = FIELD_PREP(MT_TXD0_TX_BYTES, cpu_to_le16(skb->len)) | + val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len) | FIELD_PREP(MT_TXD0_P_IDX, MT_TX_PORT_IDX_MCU) | FIELD_PREP(MT_TXD0_Q_IDX, q_idx); txd[0] = cpu_to_le32(val); @@ -113,12 +113,38 @@ static int __mt7615_mcu_msg_send(struct mt7615_dev *dev, struct sk_buff *skb, } static int +mt7615_mcu_parse_response(struct mt7615_dev *dev, int cmd, + struct sk_buff *skb, int seq) +{ + struct mt7615_mcu_rxd *rxd = (struct mt7615_mcu_rxd *)skb->data; + int ret = 0; + + if (seq != rxd->seq) + return -EAGAIN; + + switch (cmd) { + case -MCU_CMD_PATCH_SEM_CONTROL: + skb_pull(skb, sizeof(*rxd) - 4); + ret = *skb->data; + break; + case MCU_EXT_CMD_GET_TEMP: + skb_pull(skb, sizeof(*rxd)); + ret = le32_to_cpu(*(__le32 *)skb->data); + break; + default: + break; + } + dev_kfree_skb(skb); + + return ret; +} + +static int mt7615_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data, int len, bool wait_resp) { struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); unsigned long expires = jiffies + 10 * HZ; - struct mt7615_mcu_rxd *rxd; struct sk_buff *skb; int ret, seq; @@ -141,16 +167,9 @@ mt7615_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data, break; } - rxd = (struct mt7615_mcu_rxd *)skb->data; - if (seq != rxd->seq) - continue; - - if (cmd == -MCU_CMD_PATCH_SEM_CONTROL) { - skb_pull(skb, sizeof(*rxd) - 4); - ret = *skb->data; - } - dev_kfree_skb(skb); - break; + ret = mt7615_mcu_parse_response(dev, cmd, skb, seq); + if (ret != -EAGAIN) + break; } out: @@ -159,6 +178,62 @@ out: return ret; } +static void +mt7615_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif) +{ + if (vif->csa_active) + ieee80211_csa_finish(vif); +} + +static void +mt7615_mcu_rx_ext_event(struct mt7615_dev *dev, struct sk_buff *skb) +{ + struct mt7615_mcu_rxd *rxd = (struct mt7615_mcu_rxd *)skb->data; + + switch (rxd->ext_eid) { + case MCU_EXT_EVENT_RDD_REPORT: + ieee80211_radar_detected(dev->mt76.hw); + dev->hw_pattern++; + break; + case MCU_EXT_EVENT_CSA_NOTIFY: + ieee80211_iterate_active_interfaces_atomic(dev->mt76.hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7615_mcu_csa_finish, dev); + break; + default: + break; + } +} + +static void +mt7615_mcu_rx_unsolicited_event(struct mt7615_dev *dev, struct sk_buff *skb) +{ + struct mt7615_mcu_rxd *rxd = (struct mt7615_mcu_rxd *)skb->data; + + switch (rxd->eid) { + case MCU_EVENT_EXT: + 
mt7615_mcu_rx_ext_event(dev, skb); + break; + default: + break; + } + dev_kfree_skb(skb); +} + +void mt7615_mcu_rx_event(struct mt7615_dev *dev, struct sk_buff *skb) +{ + struct mt7615_mcu_rxd *rxd = (struct mt7615_mcu_rxd *)skb->data; + + if (rxd->ext_eid == MCU_EXT_EVENT_THERMAL_PROTECT || + rxd->ext_eid == MCU_EXT_EVENT_FW_LOG_2_HOST || + rxd->ext_eid == MCU_EXT_EVENT_ASSERT_DUMP || + rxd->ext_eid == MCU_EXT_EVENT_PS_SYNC || + !rxd->seq) + mt7615_mcu_rx_unsolicited_event(dev, skb); + else + mt76_mcu_rx_event(&dev->mt76, skb); +} + static int mt7615_mcu_init_download(struct mt7615_dev *dev, u32 addr, u32 len, u32 mode) { @@ -192,6 +267,7 @@ static int mt7615_mcu_send_firmware(struct mt7615_dev *dev, const void *data, data += cur_len; len -= cur_len; + mt76_queue_tx_cleanup(dev, MT_TXQ_FWDL, false); } return ret; @@ -257,9 +333,9 @@ static int mt7615_driver_own(struct mt7615_dev *dev) static int mt7615_load_patch(struct mt7615_dev *dev) { - const struct firmware *fw; - const struct mt7615_patch_hdr *hdr; const char *firmware = MT7615_ROM_PATCH; + const struct mt7615_patch_hdr *hdr; + const struct firmware *fw = NULL; int len, ret, sem; sem = mt7615_mcu_patch_sem_ctrl(dev, 1); @@ -275,7 +351,7 @@ static int mt7615_load_patch(struct mt7615_dev *dev) ret = request_firmware(&fw, firmware, dev->mt76.dev); if (ret) - return ret; + goto out; if (!fw || !fw->data || fw->size < sizeof(*hdr)) { dev_err(dev->mt76.dev, "Invalid firmware\n"); @@ -323,7 +399,7 @@ out: return ret; } -static u32 gen_dl_mode(u8 feature_set, bool is_cr4) +static u32 mt7615_mcu_gen_dl_mode(u8 feature_set, bool is_cr4) { u32 ret = 0; @@ -337,14 +413,45 @@ static u32 gen_dl_mode(u8 feature_set, bool is_cr4) return ret; } +static int +mt7615_mcu_send_ram_firmware(struct mt7615_dev *dev, + const struct mt7615_fw_trailer *hdr, + const u8 *data, bool is_cr4) +{ + int n_region = is_cr4 ? 
CR4_REGION_NUM : N9_REGION_NUM; + int err, i, offset = 0; + u32 len, addr, mode; + + for (i = 0; i < n_region; i++) { + mode = mt7615_mcu_gen_dl_mode(hdr[i].feature_set, is_cr4); + len = le32_to_cpu(hdr[i].len) + IMG_CRC_LEN; + addr = le32_to_cpu(hdr[i].addr); + + err = mt7615_mcu_init_download(dev, addr, len, mode); + if (err) { + dev_err(dev->mt76.dev, "Download request failed\n"); + return err; + } + + err = mt7615_mcu_send_firmware(dev, data + offset, len); + if (err) { + dev_err(dev->mt76.dev, "Failed to send firmware to device\n"); + return err; + } + + offset += len; + } + + return 0; +} + static int mt7615_load_ram(struct mt7615_dev *dev) { const struct firmware *fw; const struct mt7615_fw_trailer *hdr; const char *n9_firmware = MT7615_FIRMWARE_N9; const char *cr4_firmware = MT7615_FIRMWARE_CR4; - u32 n9_ilm_addr, offset; - int i, ret; + int ret; ret = request_firmware(&fw, n9_firmware, dev->mt76.dev); if (ret) @@ -362,31 +469,12 @@ static int mt7615_load_ram(struct mt7615_dev *dev) dev_info(dev->mt76.dev, "N9 Firmware Version: %.10s, Build Time: %.15s\n", hdr->fw_ver, hdr->build_date); - n9_ilm_addr = le32_to_cpu(hdr->addr); - - for (offset = 0, i = 0; i < N9_REGION_NUM; i++) { - u32 len, addr, mode; - - len = le32_to_cpu(hdr[i].len) + IMG_CRC_LEN; - addr = le32_to_cpu(hdr[i].addr); - mode = gen_dl_mode(hdr[i].feature_set, false); - - ret = mt7615_mcu_init_download(dev, addr, len, mode); - if (ret) { - dev_err(dev->mt76.dev, "Download request failed\n"); - goto out; - } - - ret = mt7615_mcu_send_firmware(dev, fw->data + offset, len); - if (ret) { - dev_err(dev->mt76.dev, "Failed to send firmware to device\n"); - goto out; - } - - offset += len; - } + ret = mt7615_mcu_send_ram_firmware(dev, hdr, fw->data, false); + if (ret) + goto out; - ret = mt7615_mcu_start_firmware(dev, n9_ilm_addr, FW_START_OVERRIDE); + ret = mt7615_mcu_start_firmware(dev, le32_to_cpu(hdr->addr), + FW_START_OVERRIDE); if (ret) { dev_err(dev->mt76.dev, "Failed to start N9 firmware\n"); goto out; @@ -410,27 +498,9 @@ static int mt7615_load_ram(struct mt7615_dev *dev) dev_info(dev->mt76.dev, "CR4 Firmware Version: %.10s, Build Time: %.15s\n", hdr->fw_ver, hdr->build_date); - for (offset = 0, i = 0; i < CR4_REGION_NUM; i++) { - u32 len, addr, mode; - - len = le32_to_cpu(hdr[i].len) + IMG_CRC_LEN; - addr = le32_to_cpu(hdr[i].addr); - mode = gen_dl_mode(hdr[i].feature_set, true); - - ret = mt7615_mcu_init_download(dev, addr, len, mode); - if (ret) { - dev_err(dev->mt76.dev, "Download request failed\n"); - goto out; - } - - ret = mt7615_mcu_send_firmware(dev, fw->data + offset, len); - if (ret) { - dev_err(dev->mt76.dev, "Failed to send firmware to device\n"); - goto out; - } - - offset += len; - } + ret = mt7615_mcu_send_ram_firmware(dev, hdr, fw->data, true); + if (ret) + goto out; ret = mt7615_mcu_start_firmware(dev, 0, FW_START_WORKING_PDA_CR4); if (ret) @@ -469,6 +539,8 @@ static int mt7615_load_firmware(struct mt7615_dev *dev) return -EIO; } + mt76_queue_tx_cleanup(dev, MT_TXQ_FWDL, false); + dev_dbg(dev->mt76.dev, "Firmware init done\n"); return 0; @@ -573,6 +645,8 @@ int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue, #define WMM_CW_MIN_SET BIT(1) #define WMM_CW_MAX_SET BIT(2) #define WMM_TXOP_SET BIT(3) +#define WMM_PARAM_SET (WMM_AIFS_SET | WMM_CW_MIN_SET | \ + WMM_CW_MAX_SET | WMM_TXOP_SET) struct req_data { u8 number; u8 rsv[3]; @@ -585,19 +659,17 @@ int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue, } __packed req = { .number = 1, .queue = queue, - .valid = WMM_AIFS_SET | WMM_TXOP_SET, + 
.valid = WMM_PARAM_SET, .aifs = params->aifs, + .cw_min = 5, + .cw_max = cpu_to_le16(10), .txop = cpu_to_le16(params->txop), }; - if (params->cw_min) { - req.valid |= WMM_CW_MIN_SET; - req.cw_min = params->cw_min; - } - if (params->cw_max) { - req.valid |= WMM_CW_MAX_SET; - req.cw_max = cpu_to_le16(params->cw_max); - } + if (params->cw_min) + req.cw_min = fls(params->cw_min); + if (params->cw_max) + req.cw_max = cpu_to_le16(fls(params->cw_max)); return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EDCA_UPDATE, &req, sizeof(req), true); @@ -824,78 +896,6 @@ int mt7615_mcu_set_bss_info(struct mt7615_dev *dev, return ret; } -static enum mt7615_cipher_type -mt7615_get_key_info(struct ieee80211_key_conf *key, u8 *key_data) -{ - if (!key || key->keylen > 32) - return MT_CIPHER_NONE; - - memcpy(key_data, key->key, key->keylen); - - switch (key->cipher) { - case WLAN_CIPHER_SUITE_WEP40: - return MT_CIPHER_WEP40; - case WLAN_CIPHER_SUITE_WEP104: - return MT_CIPHER_WEP104; - case WLAN_CIPHER_SUITE_TKIP: - /* Rx/Tx MIC keys are swapped */ - memcpy(key_data + 16, key->key + 24, 8); - memcpy(key_data + 24, key->key + 16, 8); - return MT_CIPHER_TKIP; - case WLAN_CIPHER_SUITE_CCMP: - return MT_CIPHER_AES_CCMP; - case WLAN_CIPHER_SUITE_CCMP_256: - return MT_CIPHER_CCMP_256; - case WLAN_CIPHER_SUITE_GCMP: - return MT_CIPHER_GCMP; - case WLAN_CIPHER_SUITE_GCMP_256: - return MT_CIPHER_GCMP_256; - case WLAN_CIPHER_SUITE_SMS4: - return MT_CIPHER_WAPI; - default: - return MT_CIPHER_NONE; - } -} - -int mt7615_mcu_set_wtbl_key(struct mt7615_dev *dev, int wcid, - struct ieee80211_key_conf *key, - enum set_key_cmd cmd) -{ - struct { - struct wtbl_req_hdr hdr; - struct wtbl_sec_key key; - } req = { - .hdr = { - .wlan_idx = wcid, - .operation = WTBL_SET, - .tlv_num = cpu_to_le16(1), - }, - .key = { - .tag = cpu_to_le16(WTBL_SEC_KEY), - .len = cpu_to_le16(sizeof(struct wtbl_sec_key)), - .add = cmd, - }, - }; - - if (cmd == SET_KEY) { - u8 cipher; - - cipher = mt7615_get_key_info(key, req.key.key_material); - if (cipher == MT_CIPHER_NONE) - return -EOPNOTSUPP; - - req.key.rkv = 1; - req.key.cipher_id = cipher; - req.key.key_id = key->keyidx; - req.key.key_len = key->keylen; - } else { - req.key.key_len = sizeof(req.key.key_material); - } - - return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE, - &req, sizeof(req), true); -} - static int mt7615_mcu_add_wtbl_bmc(struct mt7615_dev *dev, struct mt7615_vif *mvif) @@ -1099,6 +1099,7 @@ int mt7615_mcu_set_bcn(struct mt7615_dev *dev, struct ieee80211_vif *vif, { struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; struct mt76_wcid *wcid = &dev->mt76.global_wcid; + struct ieee80211_mutable_offsets offs; struct req { u8 omac_idx; u8 enable; @@ -1119,13 +1120,10 @@ int mt7615_mcu_set_bcn(struct mt7615_dev *dev, struct ieee80211_vif *vif, .enable = en, .wlan_idx = wcid->idx, .band_idx = mvif->band_idx, - /* pky_type: 0 for bcn, 1 for tim */ - .pkt_type = 0, }; struct sk_buff *skb; - u16 tim_off; - skb = ieee80211_beacon_get_tim(mt76_hw(dev), vif, &tim_off, NULL); + skb = ieee80211_beacon_get_template(mt76_hw(dev), vif, &offs); if (!skb) return -EINVAL; @@ -1139,8 +1137,14 @@ int mt7615_mcu_set_bcn(struct mt7615_dev *dev, struct ieee80211_vif *vif, 0, NULL); memcpy(req.pkt + MT_TXD_SIZE, skb->data, skb->len); req.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len); - req.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + tim_off); + req.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + offs.tim_offset); + if (offs.csa_counter_offs[0]) { + u16 csa_offs; + csa_offs = MT_TXD_SIZE + 
offs.csa_counter_offs[0] - 4; + req.csa_ie_pos = cpu_to_le16(csa_offs); + req.csa_cnt = skb->data[offs.csa_counter_offs[0]]; + } dev_kfree_skb(skb); return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_BCN_OFFLOAD, @@ -1213,9 +1217,62 @@ out: return ret; } +int mt7615_mcu_rdd_cmd(struct mt7615_dev *dev, + enum mt7615_rdd_cmd cmd, u8 index, + u8 rx_sel, u8 val) +{ + struct { + u8 ctrl; + u8 rdd_idx; + u8 rdd_rx_sel; + u8 val; + u8 rsv[4]; + } req = { + .ctrl = cmd, + .rdd_idx = index, + .rdd_rx_sel = rx_sel, + .val = val, + }; + + return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_CTRL, + &req, sizeof(req), true); +} + +int mt7615_mcu_rdd_send_pattern(struct mt7615_dev *dev) +{ + struct { + u8 pulse_num; + u8 rsv[3]; + struct { + u32 start_time; + u16 width; + s16 power; + } pattern[32]; + } req = { + .pulse_num = dev->radar_pattern.n_pulses, + }; + u32 start_time = ktime_to_ms(ktime_get_boottime()); + int i; + + if (dev->radar_pattern.n_pulses > ARRAY_SIZE(req.pattern)) + return -EINVAL; + + /* TODO: add some noise here */ + for (i = 0; i < dev->radar_pattern.n_pulses; i++) { + req.pattern[i].width = dev->radar_pattern.width; + req.pattern[i].power = dev->radar_pattern.power; + req.pattern[i].start_time = start_time + + i * dev->radar_pattern.period; + } + + return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_PATTERN, + &req, sizeof(req), false); +} + int mt7615_mcu_set_channel(struct mt7615_dev *dev) { - struct cfg80211_chan_def *chdef = &dev->mt76.chandef; + struct cfg80211_chan_def *chandef = &dev->mt76.chandef; + int freq1 = chandef->center_freq1, freq2 = chandef->center_freq2; struct { u8 control_chan; u8 center_chan; @@ -1234,17 +1291,20 @@ int mt7615_mcu_set_channel(struct mt7615_dev *dev) u8 rsv1[3]; u8 txpower_sku[53]; u8 rsv2[3]; - } req = {0}; + } req = { + .control_chan = chandef->chan->hw_value, + .center_chan = ieee80211_frequency_to_channel(freq1), + .tx_streams = (dev->mt76.chainmask >> 8) & 0xf, + .rx_streams_mask = dev->mt76.antenna_mask, + .center_chan2 = ieee80211_frequency_to_channel(freq2), + }; int ret; - req.control_chan = chdef->chan->hw_value; - req.center_chan = ieee80211_frequency_to_channel(chdef->center_freq1); - req.tx_streams = (dev->mt76.chainmask >> 8) & 0xf; - req.rx_streams_mask = dev->mt76.antenna_mask; - req.switch_reason = CH_SWITCH_NORMAL; - req.band_idx = 0; - req.center_chan2 = ieee80211_frequency_to_channel(chdef->center_freq2); - req.txpower_drop = 0; + if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) && + chandef->chan->dfs_state != NL80211_DFS_AVAILABLE) + req.switch_reason = CH_SWITCH_DFS; + else + req.switch_reason = CH_SWITCH_NORMAL; switch (dev->mt76.chandef.width) { case NL80211_CHAN_WIDTH_40: @@ -1269,6 +1329,7 @@ int mt7615_mcu_set_channel(struct mt7615_dev *dev) case NL80211_CHAN_WIDTH_20: default: req.bw = CMD_CBW_20MHZ; + break; } memset(req.txpower_sku, 0x3f, 49); @@ -1533,92 +1594,15 @@ int mt7615_mcu_set_rx_ba(struct mt7615_dev *dev, &wtbl_req, sizeof(wtbl_req), true); } -void mt7615_mcu_set_rates(struct mt7615_dev *dev, struct mt7615_sta *sta, - struct ieee80211_tx_rate *probe_rate, - struct ieee80211_tx_rate *rates) +int mt7615_mcu_get_temperature(struct mt7615_dev *dev, int index) { - int wcid = sta->wcid.idx; - u32 addr = MT_WTBL_BASE + wcid * MT_WTBL_ENTRY_SIZE; - bool stbc = false; - int n_rates = sta->n_rates; - u8 bw, bw_prev, bw_idx = 0; - u16 val[4]; - u16 probe_val; - u32 w5, w27; - int i; - - if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000)) - return; - - for (i = n_rates; i < 4; i++) - 
rates[i] = rates[n_rates - 1]; - - val[0] = mt7615_mac_tx_rate_val(dev, &rates[0], stbc, &bw); - bw_prev = bw; - - if (probe_rate) { - probe_val = mt7615_mac_tx_rate_val(dev, probe_rate, stbc, &bw); - if (bw) - bw_idx = 1; - else - bw_prev = 0; - } else { - probe_val = val[0]; - } - - val[1] = mt7615_mac_tx_rate_val(dev, &rates[1], stbc, &bw); - if (bw_prev) { - bw_idx = 3; - bw_prev = bw; - } - - val[2] = mt7615_mac_tx_rate_val(dev, &rates[2], stbc, &bw); - if (bw_prev) { - bw_idx = 5; - bw_prev = bw; - } - - val[3] = mt7615_mac_tx_rate_val(dev, &rates[3], stbc, &bw); - if (bw_prev) - bw_idx = 7; - - w27 = mt76_rr(dev, addr + 27 * 4); - w27 &= ~MT_WTBL_W27_CC_BW_SEL; - w27 |= FIELD_PREP(MT_WTBL_W27_CC_BW_SEL, bw); - - w5 = mt76_rr(dev, addr + 5 * 4); - w5 &= ~(MT_WTBL_W5_BW_CAP | MT_WTBL_W5_CHANGE_BW_RATE); - w5 |= FIELD_PREP(MT_WTBL_W5_BW_CAP, bw) | - FIELD_PREP(MT_WTBL_W5_CHANGE_BW_RATE, bw_idx ? bw_idx - 1 : 7); - - mt76_wr(dev, MT_WTBL_RIUCR0, w5); - - mt76_wr(dev, MT_WTBL_RIUCR1, - FIELD_PREP(MT_WTBL_RIUCR1_RATE0, probe_val) | - FIELD_PREP(MT_WTBL_RIUCR1_RATE1, val[0]) | - FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, val[0])); - - mt76_wr(dev, MT_WTBL_RIUCR2, - FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, val[0] >> 8) | - FIELD_PREP(MT_WTBL_RIUCR2_RATE3, val[1]) | - FIELD_PREP(MT_WTBL_RIUCR2_RATE4, val[1]) | - FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, val[2])); - - mt76_wr(dev, MT_WTBL_RIUCR3, - FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, val[2] >> 4) | - FIELD_PREP(MT_WTBL_RIUCR3_RATE6, val[2]) | - FIELD_PREP(MT_WTBL_RIUCR3_RATE7, val[3])); - - mt76_wr(dev, MT_WTBL_UPDATE, - FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wcid) | - MT_WTBL_UPDATE_RATE_UPDATE | - MT_WTBL_UPDATE_TX_COUNT_CLEAR); - - mt76_wr(dev, addr + 27 * 4, w27); - - if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET)) - mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000); + struct { + u8 action; + u8 rsv[3]; + } req = { + .action = index, + }; - sta->rate_count = 2 * MT7615_RATE_RETRY * n_rates; - sta->wcid.tx_info |= MT_WCID_TX_INFO_SET; + return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_GET_TEMP, &req, + sizeof(req), true); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h index f8b51ad25220..1fd7dffa6eef 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h @@ -23,6 +23,28 @@ struct mt7615_mcu_txd { u32 reserved[5]; } __packed __aligned(4); +/* event table */ +enum { + MCU_EVENT_TARGET_ADDRESS_LEN = 0x01, + MCU_EVENT_FW_START = 0x01, + MCU_EVENT_GENERIC = 0x01, + MCU_EVENT_ACCESS_REG = 0x02, + MCU_EVENT_MT_PATCH_SEM = 0x04, + MCU_EVENT_CH_PRIVILEGE = 0x18, + MCU_EVENT_EXT = 0xed, + MCU_EVENT_RESTART_DL = 0xef, +}; + +/* ext event table */ +enum { + MCU_EXT_EVENT_PS_SYNC = 0x5, + MCU_EXT_EVENT_FW_LOG_2_HOST = 0x13, + MCU_EXT_EVENT_THERMAL_PROTECT = 0x22, + MCU_EXT_EVENT_ASSERT_DUMP = 0x23, + MCU_EXT_EVENT_RDD_REPORT = 0x3a, + MCU_EXT_EVENT_CSA_NOTIFY = 0x4f, +}; + struct mt7615_mcu_rxd { __le32 rxd[4]; @@ -76,11 +98,14 @@ enum { MCU_EXT_CMD_BSS_INFO_UPDATE = 0x26, MCU_EXT_CMD_EDCA_UPDATE = 0x27, MCU_EXT_CMD_DEV_INFO_UPDATE = 0x2A, + MCU_EXT_CMD_GET_TEMP = 0x2c, MCU_EXT_CMD_WTBL_UPDATE = 0x32, + MCU_EXT_CMD_SET_RDD_CTRL = 0x3a, MCU_EXT_CMD_PROTECT_CTRL = 0x3e, MCU_EXT_CMD_MAC_INIT_CTRL = 0x46, MCU_EXT_CMD_BCN_OFFLOAD = 0x49, MCU_EXT_CMD_SET_RX_PATH = 0x4e, + MCU_EXT_CMD_SET_RDD_PATTERN = 0x7d, }; enum { @@ -264,34 +289,6 @@ struct wtbl_hdr_trans { u8 rsv; } __packed; -enum mt7615_cipher_type { - MT_CIPHER_NONE, - 
MT_CIPHER_WEP40, - MT_CIPHER_TKIP, - MT_CIPHER_TKIP_NO_MIC, - MT_CIPHER_AES_CCMP, - MT_CIPHER_WEP104, - MT_CIPHER_BIP_CMAC_128, - MT_CIPHER_WEP128, - MT_CIPHER_WAPI, - MT_CIPHER_CCMP_256 = 10, - MT_CIPHER_GCMP, - MT_CIPHER_GCMP_256, -}; - -struct wtbl_sec_key { - __le16 tag; - __le16 len; - u8 add; /* 0: add, 1: remove */ - u8 rkv; - u8 ikv; - u8 cipher_id; - u8 key_id; - u8 key_len; - u8 rsv[2]; - u8 key_material[32]; -} __packed; - enum { MT_BA_TYPE_INVALID, MT_BA_TYPE_ORIGINATOR, @@ -375,7 +372,6 @@ struct wtbl_raw { sizeof(struct wtbl_vht) + \ sizeof(struct wtbl_tx_ps) + \ sizeof(struct wtbl_hdr_trans) + \ - sizeof(struct wtbl_sec_key) + \ sizeof(struct wtbl_ba) + \ sizeof(struct wtbl_bf) + \ sizeof(struct wtbl_smps) + \ diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h index f02ffcffe637..cef3fd43cb00 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h @@ -10,12 +10,13 @@ #include "regs.h" #define MT7615_MAX_INTERFACES 4 +#define MT7615_MAX_WMM_SETS 4 #define MT7615_WTBL_SIZE 128 #define MT7615_WTBL_RESERVED (MT7615_WTBL_SIZE - 1) #define MT7615_WTBL_STA (MT7615_WTBL_RESERVED - \ MT7615_MAX_INTERFACES) -#define MT7615_WATCHDOG_TIME 100 /* ms */ +#define MT7615_WATCHDOG_TIME (HZ / 10) #define MT7615_RATE_RETRY 2 #define MT7615_TX_RING_SIZE 1024 @@ -32,6 +33,9 @@ #define MT7615_EEPROM_SIZE 1024 #define MT7615_TOKEN_SIZE 4096 +#define MT_FRAC_SCALE 12 +#define MT_FRAC(val, div) (((val) << MT_FRAC_SCALE) / (div)) + struct mt7615_vif; struct mt7615_sta; @@ -42,12 +46,21 @@ enum mt7615_hw_txq_id { MT7615_TXQ_FWDL, }; +struct mt7615_rate_set { + struct ieee80211_tx_rate probe_rate; + struct ieee80211_tx_rate rates[4]; +}; + struct mt7615_sta { struct mt76_wcid wcid; /* must be first */ struct mt7615_vif *vif; - struct ieee80211_tx_rate rates[8]; + struct ieee80211_tx_rate rates[4]; + + struct mt7615_rate_set rateset[2]; + u32 rate_set_tsf; + u8 rate_count; u8 n_rates; @@ -68,6 +81,22 @@ struct mt7615_dev { u32 vif_mask; u32 omac_mask; + struct { + u8 n_pulses; + u32 period; + u16 width; + s16 power; + } radar_pattern; + u32 hw_pattern; + int dfs_state; + + int false_cca_ofdm, false_cca_cck; + unsigned long last_cca_adj; + u8 mac_work_count; + s8 ofdm_sensitivity; + s8 cck_sensitivity; + bool scs_en; + spinlock_t token_lock; struct idr token; }; @@ -97,6 +126,30 @@ enum { EXT_BSSID_END }; +enum { + MT_HW_RDD0, + MT_HW_RDD1, +}; + +enum { + MT_RX_SEL0, + MT_RX_SEL1, +}; + +enum mt7615_rdd_cmd { + RDD_STOP, + RDD_START, + RDD_DET_MODE, + RDD_DET_STOP, + RDD_CAC_START, + RDD_CAC_END, + RDD_NORMAL_START, + RDD_DISABLE_DFS_CAL, + RDD_PULSE_DBG, + RDD_READ_PULSE, + RDD_RESUME_BF, +}; + extern const struct ieee80211_ops mt7615_ops; extern struct pci_driver mt7615_pci_driver; @@ -115,10 +168,7 @@ int mt7615_mcu_set_dev_info(struct mt7615_dev *dev, struct ieee80211_vif *vif, bool enable); int mt7615_mcu_set_bss_info(struct mt7615_dev *dev, struct ieee80211_vif *vif, int en); -int mt7615_mcu_set_wtbl_key(struct mt7615_dev *dev, int wcid, - struct ieee80211_key_conf *key, - enum set_key_cmd cmd); -void mt7615_mcu_set_rates(struct mt7615_dev *dev, struct mt7615_sta *sta, +void mt7615_mac_set_rates(struct mt7615_dev *dev, struct mt7615_sta *sta, struct ieee80211_tx_rate *probe_rate, struct ieee80211_tx_rate *rates); int mt7615_mcu_wtbl_bmc(struct mt7615_dev *dev, struct ieee80211_vif *vif, @@ -144,6 +194,29 @@ int mt7615_mcu_set_rx_ba(struct mt7615_dev *dev, bool add); 
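
Note: the MT_FRAC() helper added to mt7615.h above encodes a ratio in Q12 fixed point (1 << MT_FRAC_SCALE, i.e. 4096, represents 1.0), so fractional quantities such as channel-busy ratios can be tracked with integer arithmetic only. A minimal userspace sketch of the arithmetic; the busy/total counters are made-up values, not driver state:

#include <stdio.h>

#define MT_FRAC_SCALE	12
#define MT_FRAC(val, div)	(((val) << MT_FRAC_SCALE) / (div))

int main(void)
{
	/* Q12 fixed point: 1.0 is represented as 1 << 12 == 4096 */
	unsigned int busy = 3150, total = 10000;	/* hypothetical counters */
	unsigned int frac = MT_FRAC(busy, total);	/* 0.315 -> 1290 in Q12 */

	/* shift the scale back out for display */
	printf("busy fraction: %u/4096 (~%u%%)\n",
	       frac, (frac * 100) >> MT_FRAC_SCALE);
	return 0;
}

Because the numerator is shifted left by 12 bits before the divide, the macro stays within 32-bit arithmetic only for values below 1 << 20.
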
int mt7615_mcu_set_ht_cap(struct mt7615_dev *dev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); +void mt7615_mcu_rx_event(struct mt7615_dev *dev, struct sk_buff *skb); +int mt7615_mcu_rdd_cmd(struct mt7615_dev *dev, + enum mt7615_rdd_cmd cmd, u8 index, + u8 rx_sel, u8 val); +int mt7615_dfs_start_radar_detector(struct mt7615_dev *dev); +int mt7615_dfs_stop_radar_detector(struct mt7615_dev *dev); +int mt7615_mcu_rdd_send_pattern(struct mt7615_dev *dev); + +static inline bool is_mt7622(struct mt76_dev *dev) +{ + return mt76_chip(dev) == 0x7622; +} + +static inline void mt7615_dfs_check_channel(struct mt7615_dev *dev) +{ + enum nl80211_chan_width width = dev->mt76.chandef.width; + u32 freq = dev->mt76.chandef.chan->center_freq; + struct ieee80211_hw *hw = mt76_hw(dev); + + if (hw->conf.chandef.chan->center_freq != freq || + hw->conf.chandef.width != width) + dev->dfs_state = -1; +} static inline void mt7615_irq_enable(struct mt7615_dev *dev, u32 mask) { @@ -155,9 +228,9 @@ static inline void mt7615_irq_disable(struct mt7615_dev *dev, u32 mask) mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0); } -u16 mt7615_mac_tx_rate_val(struct mt7615_dev *dev, - const struct ieee80211_tx_rate *rate, - bool stbc, u8 *bw); +void mt7615_update_channel(struct mt76_dev *mdev); +void mt7615_mac_cca_stats_reset(struct mt7615_dev *dev); +void mt7615_mac_set_scs(struct mt7615_dev *dev, bool enable); int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi, struct sk_buff *skb, struct mt76_wcid *wcid, struct ieee80211_sta *sta, int pid, @@ -165,11 +238,15 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi, int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb); void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data); void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb); +int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev, struct mt76_wcid *wcid, + struct ieee80211_key_conf *key, + enum set_key_cmd cmd); int mt7615_mcu_set_eeprom(struct mt7615_dev *dev); int mt7615_mcu_init_mac(struct mt7615_dev *dev); int mt7615_mcu_set_rts_thresh(struct mt7615_dev *dev, u32 val); int mt7615_mcu_ctrl_pm_state(struct mt7615_dev *dev, int enter); +int mt7615_mcu_get_temperature(struct mt7615_dev *dev, int index); int mt7615_mcu_set_tx_power(struct mt7615_dev *dev); void mt7615_mcu_exit(struct mt7615_dev *dev); @@ -193,5 +270,9 @@ void mt7615_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, void mt7615_mac_work(struct work_struct *work); void mt7615_txp_skb_unmap(struct mt76_dev *dev, struct mt76_txwi_cache *txwi); +int mt76_dfs_start_rdd(struct mt7615_dev *dev, bool force); +int mt7615_dfs_init_radar_detector(struct mt7615_dev *dev); + +int mt7615_init_debugfs(struct mt7615_dev *dev); #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c index 9e82cb53fd60..e250607e0a80 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c @@ -81,6 +81,7 @@ static int mt7615_pci_probe(struct pci_dev *pdev, .sta_add = mt7615_sta_add, .sta_assoc = mt7615_sta_assoc, .sta_remove = mt7615_sta_remove, + .update_survey = mt7615_update_channel, }; struct mt7615_dev *dev; struct mt76_dev *mdev; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h index 70e5ace33cc3..b193814d5cf8 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h @@ 
-71,6 +71,34 @@ #define MT_WF_PHY_WF2_RFCTRL0 MT_WF_PHY(0x1900) #define MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN BIT(9) +#define MT_WF_PHY_R0_B0_PHYMUX_5 MT_WF_PHY(0x0614) + +#define MT_WF_PHY_R0_B0_PHYCTRL_STS0 MT_WF_PHY(0x020c) +#define MT_WF_PHYCTRL_STAT_PD_OFDM GENMASK(31, 16) +#define MT_WF_PHYCTRL_STAT_PD_CCK GENMASK(15, 0) + +#define MT_WF_PHY_R0_B0_PHYCTRL_STS5 MT_WF_PHY(0x0220) +#define MT_WF_PHYCTRL_STAT_MDRDY_OFDM GENMASK(31, 16) +#define MT_WF_PHYCTRL_STAT_MDRDY_CCK GENMASK(15, 0) + +#define MT_WF_PHY_B0_MIN_PRI_PWR MT_WF_PHY(0x229c) +#define MT_WF_PHY_B0_PD_OFDM_MASK GENMASK(28, 20) +#define MT_WF_PHY_B0_PD_OFDM(v) ((v) << 20) +#define MT_WF_PHY_B0_PD_BLK BIT(19) + +#define MT_WF_PHY_B1_MIN_PRI_PWR MT_WF_PHY(0x084) +#define MT_WF_PHY_B1_PD_OFDM_MASK GENMASK(24, 16) +#define MT_WF_PHY_B1_PD_OFDM(v) ((v) << 16) +#define MT_WF_PHY_B1_PD_BLK BIT(25) + +#define MT_WF_PHY_B0_RXTD_CCK_PD MT_WF_PHY(0x2310) +#define MT_WF_PHY_B0_PD_CCK_MASK GENMASK(8, 1) +#define MT_WF_PHY_B0_PD_CCK(v) ((v) << 1) + +#define MT_WF_PHY_B1_RXTD_CCK_PD MT_WF_PHY(0x2314) +#define MT_WF_PHY_B1_PD_CCK_MASK GENMASK(31, 24) +#define MT_WF_PHY_B1_PD_CCK(v) ((v) << 24) + #define MT_WF_CFG_BASE 0x20200 #define MT_WF_CFG(ofs) (MT_WF_CFG_BASE + (ofs)) @@ -97,12 +125,25 @@ MT_AGG_ARxCR_LIMIT_SHIFT(_n), \ MT_AGG_ARxCR_LIMIT_SHIFT(_n)) +#define MT_AGG_ACR0 MT_WF_AGG(0x070) +#define MT_AGG_ACR1 MT_WF_AGG(0x170) +#define MT_AGG_ACR_NO_BA_RULE BIT(0) +#define MT_AGG_ACR_NO_BA_AR_RULE BIT(1) +#define MT_AGG_ACR_PKT_TIME_EN BIT(2) +#define MT_AGG_ACR_CFEND_RATE GENMASK(15, 4) +#define MT_AGG_ACR_BAR_RATE GENMASK(31, 20) + #define MT_AGG_SCR MT_WF_AGG(0x0fc) #define MT_AGG_SCR_NLNAV_MID_PTEC_DIS BIT(3) #define MT_WF_TMAC_BASE 0x21000 #define MT_WF_TMAC(ofs) (MT_WF_TMAC_BASE + (ofs)) +#define MT_TMAC_TRCR0 MT_WF_TMAC(0x09c) +#define MT_TMAC_TRCR1 MT_WF_TMAC(0x070) +#define MT_TMAC_TRCR_CCA_SEL GENMASK(31, 30) +#define MT_TMAC_TRCR_SEC_CCA_SEL GENMASK(29, 28) + #define MT_TMAC_CTCR0 MT_WF_TMAC(0x0f4) #define MT_TMAC_CTCR0_INS_DDLMT_REFTIME GENMASK(5, 0) #define MT_TMAC_CTCR0_INS_DDLMT_DENSITY GENMASK(15, 12) @@ -148,8 +189,15 @@ #define MT_WTBL_OFF_BASE 0x23400 #define MT_WTBL_OFF(n) (MT_WTBL_OFF_BASE + (n)) +#define MT_WTBL_W0_KEY_IDX GENMASK(24, 23) +#define MT_WTBL_W0_RX_KEY_VALID BIT(26) +#define MT_WTBL_W0_RX_IK_VALID BIT(27) + +#define MT_WTBL_W2_KEY_TYPE GENMASK(7, 4) + #define MT_WTBL_UPDATE MT_WTBL_OFF(0x030) #define MT_WTBL_UPDATE_WLAN_IDX GENMASK(7, 0) +#define MT_WTBL_UPDATE_RXINFO_UPDATE BIT(11) #define MT_WTBL_UPDATE_RATE_UPDATE BIT(13) #define MT_WTBL_UPDATE_TX_COUNT_CLEAR BIT(14) #define MT_WTBL_UPDATE_BUSY BIT(31) @@ -157,6 +205,9 @@ #define MT_WTBL_ON_BASE 0x23000 #define MT_WTBL_ON(_n) (MT_WTBL_ON_BASE + (_n)) +#define MT_WTBL_RICR0 MT_WTBL_ON(0x010) +#define MT_WTBL_RICR1 MT_WTBL_ON(0x014) + #define MT_WTBL_RIUCR0 MT_WTBL_ON(0x020) #define MT_WTBL_RIUCR1 MT_WTBL_ON(0x024) @@ -181,8 +232,32 @@ #define MT_WTBL_W5_SHORT_GI_80 BIT(10) #define MT_WTBL_W5_SHORT_GI_160 BIT(11) #define MT_WTBL_W5_BW_CAP GENMASK(13, 12) +#define MT_WTBL_W5_MPDU_FAIL_COUNT GENMASK(25, 23) +#define MT_WTBL_W5_MPDU_OK_COUNT GENMASK(28, 26) +#define MT_WTBL_W5_RATE_IDX GENMASK(31, 29) + #define MT_WTBL_W27_CC_BW_SEL GENMASK(6, 5) +#define MT_LPON_BASE 0x24200 +#define MT_LPON(_n) (MT_LPON_BASE + (_n)) + +#define MT_LPON_T0CR MT_LPON(0x010) +#define MT_LPON_T0CR_MODE GENMASK(1, 0) + +#define MT_LPON_UTTR0 MT_LPON(0x018) +#define MT_LPON_UTTR1 MT_LPON(0x01c) + +#define MT_WF_MIB_BASE 0x24800 +#define MT_WF_MIB(ofs) (MT_WF_MIB_BASE + (ofs)) + 
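
Note: the PHY status fields added in this header follow the kernel's usual GENMASK()/FIELD_PREP()/FIELD_GET() register-layout pattern: one 32-bit word packs several counters, and each field is carved out by its mask, e.g. the OFDM and CCK packet-detect counters sharing MT_WF_PHY_R0_B0_PHYCTRL_STS0 above. A self-contained sketch of the decode step; the sts0 value is invented, and the GENMASK/FIELD_GET stand-ins only mimic <linux/bits.h> and <linux/bitfield.h> for 32-bit masks (in the driver the word would come from mt76_rr()):

#include <stdint.h>
#include <stdio.h>

/* userspace stand-ins, valid for 32-bit masks only */
#define GENMASK(h, l)	((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_GET(mask, reg) \
	(((reg) & (mask)) / ((mask) & ~((mask) << 1)))

#define MT_WF_PHYCTRL_STAT_PD_OFDM	GENMASK(31, 16)
#define MT_WF_PHYCTRL_STAT_PD_CCK	GENMASK(15, 0)

int main(void)
{
	/* hypothetical MT_WF_PHY_R0_B0_PHYCTRL_STS0 readout */
	uint32_t sts0 = 0x012300aa;

	printf("ofdm pd: %u, cck pd: %u\n",
	       (unsigned int)FIELD_GET(MT_WF_PHYCTRL_STAT_PD_OFDM, sts0),
	       (unsigned int)FIELD_GET(MT_WF_PHYCTRL_STAT_PD_CCK, sts0));
	return 0;
}

The '(mask) & ~((mask) << 1)' term isolates the lowest set bit of the mask, so dividing by it shifts the field down without hard-coding the bit position; this is presumably how the new false-CCA accounting (false_cca_ofdm/false_cca_cck in struct mt7615_dev) consumes both counters from a single register read.
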
+#define MT_MIB_M0_MISC_CR MT_WF_MIB(0x00c) +#define MT_MIB_MB_SDR0(n) MT_WF_MIB(0x100 + ((n) << 4)) +#define MT_MIB_RTS_RETRIES_COUNT_MASK GENMASK(31, 16) +#define MT_MIB_RTS_COUNT_MASK GENMASK(15, 0) + +#define MT_MIB_SDR16(n) MT_WF_MIB(0x48 + ((n) << 9)) +#define MT_MIB_BUSY_MASK GENMASK(23, 0) + #define MT_EFUSE_BASE 0x81070000 #define MT_EFUSE_BASE_CTRL 0x000 #define MT_EFUSE_BASE_CTRL_EMPTY BIT(30) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig b/drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig index 209d8abc49d5..7c88ed8b8f1e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig @@ -10,7 +10,11 @@ config MT76x0U depends on MAC80211 depends on USB help - This adds support for MT7610U-based wireless USB dongles. + This adds support for MT7610U-based wireless USB 2.0 dongles, + which comply with IEEE 802.11ac standards and support 1x1 + 433Mbps PHY rate. + + To compile this driver as a module, choose M here. config MT76x0E tristate "MediaTek MT76x0E (PCIe) support" @@ -18,4 +22,8 @@ config MT76x0E depends on MAC80211 depends on PCI help - This adds support for MT7610/MT7630-based wireless PCIe devices. + This adds support for MT7610/MT7630-based wireless PCIe devices, + which comply with IEEE 802.11ac standards and support 1x1 + 433Mbps PHY rate. + + To compile this driver as a module, choose M here. diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c index d7bf7bc15e52..efb7ca93863d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c @@ -8,18 +8,16 @@ #include <linux/etherdevice.h> #include "mt76x0.h" -static int +static void mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef) { - int ret; - cancel_delayed_work_sync(&dev->cal_work); - dev->beacon_ops->pre_tbtt_enable(dev, false); + mt76x02_pre_tbtt_enable(dev, false); if (mt76_is_mmio(dev)) tasklet_disable(&dev->dfs_pd.dfs_tasklet); mt76_set_channel(&dev->mt76); - ret = mt76x0_phy_set_channel(dev, chandef); + mt76x0_phy_set_channel(dev, chandef); /* channel cycle counters read-and-clear */ mt76_rr(dev, MT_CH_IDLE); @@ -31,23 +29,20 @@ mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef) mt76x02_dfs_init_params(dev); tasklet_enable(&dev->dfs_pd.dfs_tasklet); } - dev->beacon_ops->pre_tbtt_enable(dev, true); + mt76x02_pre_tbtt_enable(dev, true); mt76_txq_schedule_all(&dev->mt76); - - return ret; } int mt76x0_config(struct ieee80211_hw *hw, u32 changed) { struct mt76x02_dev *dev = hw->priv; - int ret = 0; mutex_lock(&dev->mt76.mutex); if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { ieee80211_stop_queues(hw); - ret = mt76x0_set_channel(dev, &hw->conf.chandef); + mt76x0_set_channel(dev, &hw->conf.chandef); ieee80211_wake_queues(hw); } @@ -69,6 +64,6 @@ int mt76x0_config(struct ieee80211_hw *hw, u32 changed) mutex_unlock(&dev->mt76.mutex); - return ret; + return 0; } EXPORT_SYMBOL_GPL(mt76x0_config); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h index 97e47cd2d744..26517e062bdb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h @@ -54,7 +54,7 @@ int mt76x0_config(struct ieee80211_hw *hw, u32 changed); /* PHY */ void mt76x0_phy_init(struct mt76x02_dev *dev); int mt76x0_phy_wait_bbp_ready(struct mt76x02_dev *dev); -int mt76x0_phy_set_channel(struct 
mt76x02_dev *dev, +void mt76x0_phy_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef); void mt76x0_phy_set_txpower(struct mt76x02_dev *dev); void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c index 6117e6ca08cb..7705e55aa3d1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/kernel.h> @@ -93,7 +82,7 @@ static const struct ieee80211_ops mt76x0e_ops = { .sta_state = mt76_sta_state, .set_key = mt76x0e_set_key, .conf_tx = mt76x02_conf_tx, - .sw_scan_start = mt76x02_sw_scan, + .sw_scan_start = mt76_sw_scan, .sw_scan_complete = mt76x02_sw_scan_complete, .ampdu_action = mt76x02_ampdu_action, .sta_rate_tbl_update = mt76x02_sta_rate_tbl_update, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c index 490c1869f2c4..038187b390ce 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include <linux/kernel.h> #include <linux/firmware.h> diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c index 1ecfc334ae79..711a352dfd5c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c @@ -109,7 +109,7 @@ mt76x0_rf_wr(struct mt76x02_dev *dev, u32 offset, u8 val) }; WARN_ON_ONCE(!test_bit(MT76_STATE_MCU_RUNNING, - &dev->mt76.state)); + &dev->mt76.state)); return mt76_wr_rp(dev, MT_MCU_MEMMAP_RF, &pair, 1); } else { return mt76x0_rf_csr_wr(dev, offset, val); @@ -127,7 +127,7 @@ static int mt76x0_rf_rr(struct mt76x02_dev *dev, u32 offset) }; WARN_ON_ONCE(!test_bit(MT76_STATE_MCU_RUNNING, - &dev->mt76.state)); + &dev->mt76.state)); ret = mt76_rd_rp(dev, MT_MCU_MEMMAP_RF, &pair, 1); val = pair.value; } else { @@ -230,7 +230,8 @@ mt76x0_phy_set_band(struct mt76x02_dev *dev, enum nl80211_band band) } static void -mt76x0_phy_set_chan_rf_params(struct mt76x02_dev *dev, u8 channel, u16 rf_bw_band) +mt76x0_phy_set_chan_rf_params(struct mt76x02_dev *dev, u8 channel, + u16 rf_bw_band) { const struct mt76x0_freq_item *freq_item; u16 rf_band = rf_bw_band & 0xff00; @@ -252,9 +253,9 @@ mt76x0_phy_set_chan_rf_params(struct mt76x02_dev *dev, u8 channel, u16 rf_bw_ban rf_band = mt76x0_frequency_plan[i].band; if (b_sdm) - freq_item = &(mt76x0_sdm_frequency_plan[i]); + freq_item = &mt76x0_sdm_frequency_plan[i]; else - freq_item = &(mt76x0_frequency_plan[i]); + freq_item = &mt76x0_frequency_plan[i]; mt76x0_rf_wr(dev, MT_RF(0, 37), freq_item->pllR37); mt76x0_rf_wr(dev, MT_RF(0, 36), freq_item->pllR36); @@ -359,11 +360,12 @@ mt76x0_phy_set_chan_rf_params(struct mt76x02_dev *dev, u8 channel, u16 rf_bw_ban band = (rf_band & RF_G_BAND) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; if (mt76x02_ext_pa_enabled(dev, band)) { - /* - MT_RF_MISC (offset: 0x0518) - [2]1'b1: enable external A band PA, 1'b0: disable external A band PA - [3]1'b1: enable external G band PA, 1'b0: disable external G band PA - */ + /* MT_RF_MISC (offset: 0x0518) + * [2]1'b1: enable external A band PA + * 1'b0: disable external A band PA + * [3]1'b1: enable external G band PA + * 1'b0: disable external G band PA + */ if (rf_band & RF_A_BAND) mt76_set(dev, MT_RF_MISC, BIT(2)); else @@ -385,7 +387,9 @@ mt76x0_phy_set_chan_rf_params(struct mt76x02_dev *dev, u8 channel, u16 rf_bw_ban mt76_wr(dev, MT_TX_ALC_CFG_1, mac_reg); } else { mt76_wr(dev, MT_TX0_RF_GAIN_ATTEN, 0x686A7800); - /* Set Atten mode = 0 For Ext A band, Disable Tx Inc dcoc Cal. */ + /* Set Atten mode = 0 + * For Ext A band, Disable Tx Inc dcoc Cal. + */ mac_reg = mt76_rr(dev, MT_TX_ALC_CFG_1); mac_reg &= 0x890400FF; mt76_wr(dev, MT_TX_ALC_CFG_1, mac_reg); @@ -490,7 +494,7 @@ mt76x0_phy_bbp_set_bw(struct mt76x02_dev *dev, enum nl80211_chan_width width) case NL80211_CHAN_WIDTH_160: case NL80211_CHAN_WIDTH_5: /* TODO error */ - return ; + return; } mt76x02_mcu_function_select(dev, BW_SETTING, bw); @@ -905,8 +909,8 @@ void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on) } EXPORT_SYMBOL_GPL(mt76x0_phy_calibrate); -int mt76x0_phy_set_channel(struct mt76x02_dev *dev, - struct cfg80211_chan_def *chandef) +void mt76x0_phy_set_channel(struct mt76x02_dev *dev, + struct cfg80211_chan_def *chandef) { u32 ext_cca_chan[4] = { [0] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 0) | @@ -940,7 +944,6 @@ int mt76x0_phy_set_channel(struct mt76x02_dev *dev, freq1 = chandef->center_freq1; channel = chandef->chan->hw_value; rf_bw_band = (channel <= 14) ? 
RF_G_BAND : RF_A_BAND; - dev->mt76.chandef = *chandef; switch (chandef->width) { case NL80211_CHAN_WIDTH_40: @@ -1001,7 +1004,7 @@ int mt76x0_phy_set_channel(struct mt76x02_dev *dev, /* enable vco */ mt76x0_rf_set(dev, MT_RF(0, 4), BIT(7)); if (scan) - return 0; + return; mt76x02_init_agc_gain(dev); mt76x0_phy_calibrate(dev, false); @@ -1009,8 +1012,6 @@ int mt76x0_phy_set_channel(struct mt76x02_dev *dev, ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work, MT_CALIBRATE_INTERVAL); - - return 0; } static void mt76x0_phy_temp_sensor(struct mt76x02_dev *dev) @@ -1074,7 +1075,7 @@ mt76x0_phy_update_channel_gain(struct mt76x02_dev *dev) dev->cal.avg_rssi_all = -75; low_gain = (dev->cal.avg_rssi_all > mt76x02_get_rssi_gain_thresh(dev)) + - (dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev)); + (dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev)); gain_change = dev->cal.low_gain < 0 || (dev->cal.low_gain & 2) ^ (low_gain & 2); @@ -1169,7 +1170,8 @@ static void mt76x0_phy_rf_init(struct mt76x02_dev *dev) if (item->bw_band == RF_BW_20) mt76x0_rf_wr(dev, item->rf_bank_reg, item->value); - else if (((RF_G_BAND | RF_BW_20) & item->bw_band) == (RF_G_BAND | RF_BW_20)) + else if (((RF_G_BAND | RF_BW_20) & item->bw_band) == + (RF_G_BAND | RF_BW_20)) mt76x0_rf_wr(dev, item->rf_bank_reg, item->value); } @@ -1181,10 +1183,9 @@ static void mt76x0_phy_rf_init(struct mt76x02_dev *dev) } } - /* - Frequency calibration - E1: B0.R22<6:0>: xo_cxo<6:0> - E2: B0.R21<0>: xo_cxo<0>, B0.R22<7:0>: xo_cxo<8:1> + /* Frequency calibration + * E1: B0.R22<6:0>: xo_cxo<6:0> + * E2: B0.R21<0>: xo_cxo<0>, B0.R22<7:0>: xo_cxo<8:1> */ mt76x0_rf_wr(dev, MT_RF(0, 22), min_t(u8, dev->cal.rx.freq_offset, 0xbf)); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.h b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.h index b4b2ca747699..441d6559d4fd 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.h @@ -6,8 +6,8 @@ #ifndef _MT76X0_PHY_H_ #define _MT76X0_PHY_H_ -#define RF_G_BAND 0x0100 -#define RF_A_BAND 0x0200 +#define RF_G_BAND 0x0100 +#define RF_A_BAND 0x0200 #define RF_A_BAND_LB 0x0400 #define RF_A_BAND_MB 0x0800 #define RF_A_BAND_HB 0x1000 @@ -18,9 +18,9 @@ #define RF_BW_10 4 #define RF_BW_80 8 -#define MT_RF(bank, reg) ((bank) << 16 | (reg)) -#define MT_RF_BANK(offset) (offset >> 16) -#define MT_RF_REG(offset) (offset & 0xff) +#define MT_RF(bank, reg) ((bank) << 16 | (reg)) +#define MT_RF_BANK(offset) ((offset) >> 16) +#define MT_RF_REG(offset) ((offset) & 0xff) #define MT_RF_VCO_BP_CLOSE_LOOP BIT(3) #define MT_RF_VCO_BP_CLOSE_LOOP_MASK GENMASK(3, 0) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c index 645f4d15fb61..00a445d27599 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c @@ -32,10 +32,13 @@ static struct usb_device_id mt76x0_device_table[] = { { USB_DEVICE(0x20f4, 0x806b) }, /* TRENDnet TEW-806UBH */ { USB_DEVICE(0x7392, 0xc711) }, /* Devolo Wifi ac Stick */ { USB_DEVICE(0x0df6, 0x0079) }, /* Sitecom Europe B.V. 
ac Stick */ - { USB_DEVICE(0x2357, 0x0105), - .driver_info = 1, }, /* TP-LINK Archer T1U */ - { USB_DEVICE_AND_INTERFACE_INFO(0x0E8D, 0x7630, 0xff, 0x2, 0xff)}, /* MT7630U */ - { USB_DEVICE_AND_INTERFACE_INFO(0x0E8D, 0x7650, 0xff, 0x2, 0xff)}, /* MT7650U */ + { USB_DEVICE(0x2357, 0x0123) }, /* TP-LINK T2UHP */ + /* TP-LINK Archer T1U */ + { USB_DEVICE(0x2357, 0x0105), .driver_info = 1, }, + /* MT7630U */ + { USB_DEVICE_AND_INTERFACE_INFO(0x0E8D, 0x7630, 0xff, 0x2, 0xff)}, + /* MT7650U */ + { USB_DEVICE_AND_INTERFACE_INFO(0x0E8D, 0x7650, 0xff, 0x2, 0xff)}, { 0, } }; @@ -125,13 +128,14 @@ static const struct ieee80211_ops mt76x0u_ops = { .sta_state = mt76_sta_state, .set_key = mt76x02_set_key, .conf_tx = mt76x02_conf_tx, - .sw_scan_start = mt76x02_sw_scan, + .sw_scan_start = mt76_sw_scan, .sw_scan_complete = mt76x02_sw_scan_complete, .ampdu_action = mt76x02_ampdu_action, .sta_rate_tbl_update = mt76x02_sta_rate_tbl_update, .set_rts_threshold = mt76x02_set_rts_threshold, .wake_tx_queue = mt76_wake_tx_queue, .get_txpower = mt76_get_txpower, + .get_survey = mt76_get_survey, .set_tim = mt76_set_tim, .release_buffered_frames = mt76_release_buffered_frames, }; @@ -161,6 +165,13 @@ static int mt76x0u_init_hardware(struct mt76x02_dev *dev, bool reset) FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) | FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58)); + mt76_wr(dev, MT_CH_TIME_CFG, + MT_CH_TIME_CFG_TIMER_EN | + MT_CH_TIME_CFG_TX_AS_BUSY | + MT_CH_TIME_CFG_RX_AS_BUSY | + MT_CH_TIME_CFG_NAV_AS_BUSY | + MT_CH_TIME_CFG_EIFS_AS_BUSY); + return 0; } @@ -200,6 +211,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf, const struct usb_device_id *id) { static const struct mt76_driver_ops drv_ops = { + .update_survey = mt76x02_update_channel, .tx_prepare_skb = mt76x02u_tx_prepare_skb, .tx_complete_skb = mt76x02u_tx_complete_skb, .tx_status_data = mt76x02_tx_status_data, @@ -236,7 +248,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf, if (ret) goto err; - /* Disable the HW, otherwise MCU fail to initalize on hot reboot */ + /* Disable the HW, otherwise MCU fail to initialize on hot reboot */ mt76x0_chip_onoff(dev, false, false); if (!mt76x02_wait_for_mac(mdev)) { @@ -274,9 +286,9 @@ err: static void mt76x0_disconnect(struct usb_interface *usb_intf) { struct mt76x02_dev *dev = usb_get_intfdata(usb_intf); - bool initalized = test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state); + bool initialized = test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state); - if (!initalized) + if (!initialized) return; ieee80211_unregister_hw(dev->mt76.hw); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c index 4a282761ca58..888a930a5e08 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/kernel.h> #include <linux/firmware.h> diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h index f7fd53a1738a..e858bba8c8ff 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h @@ -1,18 +1,7 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef __MT76x02_H @@ -71,10 +60,15 @@ struct mt76x02_calibration { struct mt76x02_beacon_ops { unsigned int nslots; unsigned int slot_size; - void (*pre_tbtt_enable) (struct mt76x02_dev *, bool); - void (*beacon_enable) (struct mt76x02_dev *, bool); + void (*pre_tbtt_enable)(struct mt76x02_dev *dev, bool en); + void (*beacon_enable)(struct mt76x02_dev *dev, bool en); }; +#define mt76x02_beacon_enable(dev, enable) \ + (dev)->beacon_ops->beacon_enable(dev, enable) +#define mt76x02_pre_tbtt_enable(dev, enable) \ + (dev)->beacon_ops->pre_tbtt_enable(dev, enable) + struct mt76x02_dev { struct mt76_dev mt76; /* must be first */ @@ -137,8 +131,8 @@ extern struct ieee80211_rate mt76x02_rates[12]; void mt76x02_init_device(struct mt76x02_dev *dev); void mt76x02_configure_filter(struct ieee80211_hw *hw, - unsigned int changed_flags, - unsigned int *total_flags, u64 multicast); + unsigned int changed_flags, + unsigned int *total_flags, u64 multicast); int mt76x02_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); void mt76x02_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, @@ -147,20 +141,20 @@ void mt76x02_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, void mt76x02_config_mac_addr_list(struct mt76x02_dev *dev); int mt76x02_add_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif); + struct ieee80211_vif *vif); void mt76x02_remove_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif); + struct ieee80211_vif *vif); int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_ampdu_params *params); + struct ieee80211_ampdu_params *params); int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, - struct ieee80211_vif *vif, struct ieee80211_sta *sta, - struct ieee80211_key_conf *key); + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key); int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - u16 queue, 
const struct ieee80211_tx_queue_params *params); + u16 queue, const struct ieee80211_tx_queue_params *params); void mt76x02_sta_rate_tbl_update(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta); + struct ieee80211_vif *vif, + struct ieee80211_sta *sta); s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev, const struct ieee80211_tx_rate *rate); s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr, @@ -183,8 +177,6 @@ int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi, enum mt76_txq_id qid, struct mt76_wcid *wcid, struct ieee80211_sta *sta, struct mt76_tx_info *tx_info); -void mt76x02_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - const u8 *mac); void mt76x02_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif); void mt76x02_sta_ps(struct mt76_dev *dev, struct ieee80211_sta *sta, bool ps); @@ -197,6 +189,7 @@ struct beacon_bc_data { struct sk_buff_head q; struct sk_buff *tail[8]; }; + void mt76x02_init_beacon_config(struct mt76x02_dev *dev); void mt76x02e_init_beacon_config(struct mt76x02_dev *dev); void mt76x02_resync_beacon_timer(struct mt76x02_dev *dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c b/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c index d61c686e08de..92305bd31aa1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c @@ -1,19 +1,8 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include "mt76x02.h" @@ -115,53 +104,41 @@ int mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx, } EXPORT_SYMBOL_GPL(mt76x02_mac_set_beacon); -static void -__mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev, u8 vif_idx, - bool val, struct sk_buff *skb) -{ - u8 old_mask = dev->mt76.beacon_mask; - bool en; - u32 reg; - - if (val) { - dev->mt76.beacon_mask |= BIT(vif_idx); - if (skb) - mt76x02_mac_set_beacon(dev, vif_idx, skb); - } else { - dev->mt76.beacon_mask &= ~BIT(vif_idx); - mt76x02_mac_set_beacon(dev, vif_idx, NULL); - } - - if (!!old_mask == !!dev->mt76.beacon_mask) - return; - - en = dev->mt76.beacon_mask; - - reg = MT_BEACON_TIME_CFG_BEACON_TX | - MT_BEACON_TIME_CFG_TBTT_EN | - MT_BEACON_TIME_CFG_TIMER_EN; - mt76_rmw(dev, MT_BEACON_TIME_CFG, reg, reg * en); - - dev->beacon_ops->beacon_enable(dev, en); -} - void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev, - struct ieee80211_vif *vif, bool val) + struct ieee80211_vif *vif, bool enable) { - u8 vif_idx = ((struct mt76x02_vif *)vif->drv_priv)->idx; - struct sk_buff *skb = NULL; - - dev->beacon_ops->pre_tbtt_enable(dev, false); + struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv; + u8 old_mask = dev->mt76.beacon_mask; - if (mt76_is_usb(dev)) - skb = ieee80211_beacon_get(mt76_hw(dev), vif); + mt76x02_pre_tbtt_enable(dev, false); if (!dev->mt76.beacon_mask) dev->tbtt_count = 0; - __mt76x02_mac_set_beacon_enable(dev, vif_idx, val, skb); + if (enable) { + dev->mt76.beacon_mask |= BIT(mvif->idx); + } else { + dev->mt76.beacon_mask &= ~BIT(mvif->idx); + mt76x02_mac_set_beacon(dev, mvif->idx, NULL); + } - dev->beacon_ops->pre_tbtt_enable(dev, true); + if (!!old_mask == !!dev->mt76.beacon_mask) + goto out; + + if (dev->mt76.beacon_mask) + mt76_set(dev, MT_BEACON_TIME_CFG, + MT_BEACON_TIME_CFG_BEACON_TX | + MT_BEACON_TIME_CFG_TBTT_EN | + MT_BEACON_TIME_CFG_TIMER_EN); + else + mt76_clear(dev, MT_BEACON_TIME_CFG, + MT_BEACON_TIME_CFG_BEACON_TX | + MT_BEACON_TIME_CFG_TBTT_EN | + MT_BEACON_TIME_CFG_TIMER_EN); + mt76x02_beacon_enable(dev, !!dev->mt76.beacon_mask); + +out: + mt76x02_pre_tbtt_enable(dev, true); } void @@ -237,7 +214,8 @@ mt76x02_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif) } void -mt76x02_enqueue_buffered_bc(struct mt76x02_dev *dev, struct beacon_bc_data *data, +mt76x02_enqueue_buffered_bc(struct mt76x02_dev *dev, + struct beacon_bc_data *data, int max_nframes) { int i, nframes; @@ -281,4 +259,3 @@ void mt76x02_init_beacon_config(struct mt76x02_dev *dev) } EXPORT_SYMBOL_GPL(mt76x02_init_beacon_config); - diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c index 1b1e424ccbb2..0cb2a7b35fe5 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/debugfs.h> diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c index 50e9b310e496..5dec33ed8527 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2016 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "mt76x02.h" diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h index 0408613b45a4..491010a32247 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h @@ -1,17 +1,6 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (C) 2016 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef __MT76x02_DFS_H diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dma.h b/drivers/net/wireless/mediatek/mt76/mt76x02_dma.h index 6394010a565f..4aff4f8e87b6 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_dma.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dma.h @@ -1,17 +1,6 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef __MT76x02_DMA_H diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c index 07f0496d828a..c54c50fd639a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c @@ -1,18 +1,7 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <asm/unaligned.h> diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h index 0ba536de3d6e..99941a4700f3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h @@ -1,18 +1,7 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef __MT76x02_EEPROM_H diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c index 82bafb5ac326..abacb4ea7179 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c @@ -1,18 +1,7 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. 
- * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "mt76x02.h" @@ -92,7 +81,6 @@ void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx, atomic64_set(&key->tx_pn, pn); } - int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx, struct ieee80211_key_conf *key) { @@ -267,7 +255,7 @@ bool mt76x02_mac_load_tx_status(struct mt76x02_dev *dev, static int mt76x02_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate, - enum nl80211_band band) + enum nl80211_band band) { u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate); @@ -343,7 +331,7 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi, ieee80211_has_protected(hdr->frame_control)) { wcid = NULL; ieee80211_get_tx_rates(info->control.vif, sta, skb, - info->control.rates, 1); + info->control.rates, 1); } if (wcid) @@ -353,6 +341,7 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi, if (wcid && wcid->sw_iv && key) { u64 pn = atomic64_inc_return(&key->tx_pn); + ccmp_pn[0] = pn; ccmp_pn[1] = pn >> 8; ccmp_pn[2] = 0; @@ -445,8 +434,8 @@ mt76x02_tx_rate_fallback(struct ieee80211_tx_rate *rates, int idx, int phy) case MT_PHY_TYPE_HT: /* MCS 8 falls back to MCS 0 */ if (rates[0].idx == 8) { - rates[1].idx = 0; - break; + rates[1].idx = 0; + break; } /* fall through */ default: @@ -568,9 +557,9 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev, u32 stat_val, stat_cache; stat_val = stat->rate; - stat_val |= ((u32) stat->retry) << 16; + stat_val |= ((u32)stat->retry) << 16; stat_cache = msta->status.rate; - stat_cache |= ((u32) msta->status.retry) << 16; + stat_cache |= ((u32)msta->status.retry) << 16; if (*update == 0 && stat_val == stat_cache && stat->wcid == msta->status.wcid && msta->n_frames < 32) { @@ -718,7 +707,7 @@ mt76x02_mac_get_rssi(struct mt76x02_dev *dev, s8 rssi, int chain) int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb, void *rxi) { - struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb; + struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; struct mt76x02_rxwi *rxwi = rxi; struct mt76x02_sta *sta; u32 rxinfo = le32_to_cpu(rxwi->rxinfo); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h index cb39da79527a..efa4ef945e35 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h @@ -1,18 +1,7 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
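The ccmp_pn[] hunk above serializes the 48-bit packet number kept in key->tx_pn into the frame's IV space. For orientation, the CCMP header is 8 bytes; the sketch below follows the standard IEEE 802.11 layout (only bytes 0-2 appear in the hunk, so the remaining bytes reflect the spec, not necessarily the driver's exact code):

#include <assert.h>
#include <stdint.h>

/* Illustrative CCMP header packing for a 48-bit PN (IEEE 802.11 layout). */
static void ccmp_hdr(uint8_t out[8], uint64_t pn, unsigned int keyidx)
{
	out[0] = (uint8_t)pn;		/* PN0 (LSB) */
	out[1] = (uint8_t)(pn >> 8);	/* PN1 */
	out[2] = 0;			/* reserved */
	out[3] = (uint8_t)(0x20 | (keyidx << 6));	/* ExtIV=1, key id */
	out[4] = (uint8_t)(pn >> 16);	/* PN2 */
	out[5] = (uint8_t)(pn >> 24);	/* PN3 */
	out[6] = (uint8_t)(pn >> 32);	/* PN4 */
	out[7] = (uint8_t)(pn >> 40);	/* PN5 */
}

int main(void)
{
	uint8_t hdr[8];

	ccmp_hdr(hdr, 0x010203040506ULL, 1);
	assert(hdr[0] == 0x06 && hdr[7] == 0x01 && hdr[3] == 0x60);
	return 0;
}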
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef __MT76X02_MAC_H @@ -207,7 +196,7 @@ void mt76x02_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr); int mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx, struct sk_buff *skb); void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev, - struct ieee80211_vif *vif, bool val); + struct ieee80211_vif *vif, bool enable); void mt76x02_edcca_init(struct mt76x02_dev *dev); #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c index 6501b853b65c..4be7a24097cc 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c @@ -1,18 +1,7 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/kernel.h> @@ -65,7 +54,7 @@ int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data, break; } - rxfce = (u32 *) skb->cb; + rxfce = (u32 *)skb->cb; if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, *rxfce)) check_seq = true; @@ -86,11 +75,11 @@ int mt76x02_mcu_function_select(struct mt76x02_dev *dev, enum mcu_function func, u32 val) { struct { - __le32 id; - __le32 value; + __le32 id; + __le32 value; } __packed __aligned(4) msg = { - .id = cpu_to_le32(func), - .value = cpu_to_le32(val), + .id = cpu_to_le32(func), + .value = cpu_to_le32(val), }; bool wait = false; @@ -111,7 +100,8 @@ int mt76x02_mcu_set_radio_state(struct mt76x02_dev *dev, bool on) .level = cpu_to_le32(0), }; - return mt76_mcu_send_msg(dev, CMD_POWER_SAVING_OP, &msg, sizeof(msg), false); + return mt76_mcu_send_msg(dev, CMD_POWER_SAVING_OP, &msg, sizeof(msg), + false); } EXPORT_SYMBOL_GPL(mt76x02_mcu_set_radio_state); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h index a7b0d3e5df1d..c81a9655c4c9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h @@ -1,17 +1,6 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. 
- * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef __MT76x02_MCU_H diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c index 467b28379870..dc773070481d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c @@ -1,18 +1,7 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/kernel.h> @@ -97,7 +86,8 @@ void mt76x02e_init_beacon_config(struct mt76x02_dev *dev) dev->beacon_ops = &beacon_ops; /* Fire a pre-TBTT interrupt 8 ms before TBTT */ - mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT, 8 << 4); + mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT, + 8 << 4); mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_GP_TIMER, MT_DFS_GP_INTERVAL); mt76_wr(dev, MT_INT_TIMER_EN, 0); @@ -201,7 +191,7 @@ int mt76x02_dma_init(struct mt76x02_dev *dev) return -ENOMEM; tasklet_init(&dev->mt76.tx_tasklet, mt76x02_tx_tasklet, - (unsigned long) dev); + (unsigned long)dev); tasklet_init(&dev->mt76.pre_tbtt_tasklet, mt76x02_pre_tbtt_tasklet, (unsigned long)dev); @@ -395,12 +385,12 @@ static void mt76x02_key_sync(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct mt76_wcid *wcid; if (!sta) - return; + return; - wcid = (struct mt76_wcid *) sta->drv_priv; + wcid = (struct mt76_wcid *)sta->drv_priv; if (wcid->hw_key_idx != key->keyidx || wcid->sw_iv) - return; + return; mt76x02_mac_wcid_sync_pn(dev, wcid->idx, key); } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c index a54b63a96eae..d7334267b530 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c @@ -1,18 +1,7 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. 
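The re-indented message structs in mt76x02_mcu.c show the MCU command pattern this series tidies up: a fixed, packed, little-endian payload built with cpu_to_le32(), so the same bytes hit the wire on big-endian hosts too. A self-contained model of the idea (kernel types and helpers are only mimicked here, and the function id is hypothetical):

#include <stdint.h>
#include <string.h>

typedef uint32_t le32;	/* stand-in for the kernel's __le32 */

static le32 cpu_to_le32(uint32_t v)	/* fixed byte order on any host */
{
	uint8_t b[4] = { (uint8_t)v, (uint8_t)(v >> 8),
			 (uint8_t)(v >> 16), (uint8_t)(v >> 24) };
	le32 out;

	memcpy(&out, b, 4);
	return out;
}

struct func_select_msg {	/* mirrors the on-wire layout in the hunk */
	le32 id;
	le32 value;
} __attribute__((packed, aligned(4)));

int main(void)
{
	struct func_select_msg msg = {
		.id = cpu_to_le32(0x2a),	/* hypothetical function id */
		.value = cpu_to_le32(1),
	};

	return ((uint8_t *)&msg)[0] == 0x2a ? 0 : 1;	/* LSB travels first */
}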
- * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/kernel.h> @@ -183,7 +172,8 @@ bool mt76x02_phy_adjust_vga_gain(struct mt76x02_dev *dev) bool ret = false; u32 false_cca; - false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS, mt76_rr(dev, MT_RX_STAT_1)); + false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS, + mt76_rr(dev, MT_RX_STAT_1)); dev->cal.false_cca = false_cca; if (false_cca > 800 && dev->cal.agc_gain_adjust < limit) { dev->cal.agc_gain_adjust += 2; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h index d2971db06f13..fc2e41006a0d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h @@ -1,17 +1,6 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef __MT76x02_PHY_H diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h index ea7833964ec0..21c0f351fa09 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h @@ -1,17 +1,6 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
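FIELD_GET(MT_RX_STAT_1_CCA_ERRORS, mt76_rr(dev, MT_RX_STAT_1)) in the phy.c hunk is the standard <linux/bitfield.h> pattern: a single GENMASK-defined constant both selects the field and fixes its position, so no separate shift macro is needed. A self-contained model of the semantics (the field position below is hypothetical, not the real register layout):

#include <assert.h>
#include <stdint.h>

/* Userspace stand-ins for the <linux/bitfield.h> semantics. */
#define GENMASK(h, l)	((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_GET(mask, reg) \
	(((reg) & (mask)) / ((mask) & -(mask)))	/* /lowest-set-bit == shift down */

#define CCA_ERRORS	GENMASK(15, 0)	/* hypothetical field position */

int main(void)
{
	uint32_t stat = 0xabcd1234;

	assert(FIELD_GET(CCA_ERRORS, stat) == 0x1234);
	return 0;
}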
*/ #ifndef __MT76X02_REGS_H @@ -19,8 +8,8 @@ #define MT_ASIC_VERSION 0x0000 -#define MT76XX_REV_E3 0x22 -#define MT76XX_REV_E4 0x33 +#define MT76XX_REV_E3 0x22 +#define MT76XX_REV_E4 0x33 #define MT_CMB_CTRL 0x0020 #define MT_CMB_CTRL_XTAL_RDY BIT(22) @@ -120,7 +109,7 @@ #define MT_INT_RX_DONE(_n) BIT(_n) #define MT_INT_RX_DONE_ALL GENMASK(1, 0) #define MT_INT_TX_DONE_ALL GENMASK(13, 4) -#define MT_INT_TX_DONE(_n) BIT(_n + 4) +#define MT_INT_TX_DONE(_n) BIT((_n) + 4) #define MT_INT_RX_COHERENT BIT(16) #define MT_INT_TX_COHERENT BIT(17) #define MT_INT_ANY_COHERENT BIT(18) @@ -149,21 +138,21 @@ #define MT_WPDMA_DELAY_INT_CFG 0x0210 -#define MT_WMM_AIFSN 0x0214 +#define MT_WMM_AIFSN 0x0214 #define MT_WMM_AIFSN_MASK GENMASK(3, 0) #define MT_WMM_AIFSN_SHIFT(_n) ((_n) * 4) -#define MT_WMM_CWMIN 0x0218 +#define MT_WMM_CWMIN 0x0218 #define MT_WMM_CWMIN_MASK GENMASK(3, 0) #define MT_WMM_CWMIN_SHIFT(_n) ((_n) * 4) -#define MT_WMM_CWMAX 0x021c +#define MT_WMM_CWMAX 0x021c #define MT_WMM_CWMAX_MASK GENMASK(3, 0) #define MT_WMM_CWMAX_SHIFT(_n) ((_n) * 4) #define MT_WMM_TXOP_BASE 0x0220 #define MT_WMM_TXOP(_n) (MT_WMM_TXOP_BASE + (((_n) / 2) << 2)) -#define MT_WMM_TXOP_SHIFT(_n) ((_n & 1) * 16) +#define MT_WMM_TXOP_SHIFT(_n) (((_n) & 1) * 16) #define MT_WMM_TXOP_MASK GENMASK(15, 0) #define MT_WMM_CTRL 0x0230 /* MT76x0 */ @@ -607,7 +596,7 @@ #define MT_TX_AGG_CNT(_id) ((_id) < 8 ? \ MT_TX_AGG_CNT_BASE0 + ((_id) << 2) : \ - MT_TX_AGG_CNT_BASE1 + ((_id - 8) << 2)) + MT_TX_AGG_CNT_BASE1 + (((_id) - 8) << 2)) #define MT_TX_STAT_FIFO_EXT 0x1798 #define MT_TX_STAT_FIFO_EXT_RETRY GENMASK(7, 0) @@ -680,17 +669,17 @@ #define MT_SKEY_BASE_0 0xac00 #define MT_SKEY_BASE_1 0xb400 -#define MT_SKEY_0(_bss, _idx) (MT_SKEY_BASE_0 + (4 * (_bss) + _idx) * 32) -#define MT_SKEY_1(_bss, _idx) (MT_SKEY_BASE_1 + (4 * ((_bss) & 7) + _idx) * 32) -#define MT_SKEY(_bss, _idx) ((_bss & 8) ? MT_SKEY_1(_bss, _idx) : MT_SKEY_0(_bss, _idx)) +#define MT_SKEY_0(_bss, _idx) (MT_SKEY_BASE_0 + (4 * (_bss) + (_idx)) * 32) +#define MT_SKEY_1(_bss, _idx) (MT_SKEY_BASE_1 + (4 * ((_bss) & 7) + (_idx)) * 32) +#define MT_SKEY(_bss, _idx) (((_bss) & 8) ? MT_SKEY_1(_bss, _idx) : MT_SKEY_0(_bss, _idx)) #define MT_SKEY_MODE_BASE_0 0xb000 #define MT_SKEY_MODE_BASE_1 0xb3f0 -#define MT_SKEY_MODE_0(_bss) (MT_SKEY_MODE_BASE_0 + ((_bss / 2) << 2)) +#define MT_SKEY_MODE_0(_bss) (MT_SKEY_MODE_BASE_0 + (((_bss) / 2) << 2)) #define MT_SKEY_MODE_1(_bss) (MT_SKEY_MODE_BASE_1 + ((((_bss) & 7) / 2) << 2)) -#define MT_SKEY_MODE(_bss) ((_bss & 8) ? MT_SKEY_MODE_1(_bss) : MT_SKEY_MODE_0(_bss)) +#define MT_SKEY_MODE(_bss) (((_bss) & 8) ? MT_SKEY_MODE_1(_bss) : MT_SKEY_MODE_0(_bss)) #define MT_SKEY_MODE_MASK GENMASK(3, 0) -#define MT_SKEY_MODE_SHIFT(_bss, _idx) (4 * ((_idx) + 4 * (_bss & 1))) +#define MT_SKEY_MODE_SHIFT(_bss, _idx) (4 * ((_idx) + 4 * ((_bss) & 1))) #define MT_BEACON_BASE 0xc000 diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_trace.c b/drivers/net/wireless/mediatek/mt76/mt76x02_trace.c index 5b42d2c87937..a812c3a1e258 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_trace.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_trace.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. 
- * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/module.h> diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_trace.h b/drivers/net/wireless/mediatek/mt76/mt76x02_trace.h index 713f12d3c8de..61ecaf0fe065 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_trace.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_trace.h @@ -1,17 +1,6 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #if !defined(__MT76x02_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) @@ -25,7 +14,8 @@ #define MAXNAME 32 #define DEV_ENTRY __array(char, wiphy_name, 32) -#define DEV_ASSIGN strlcpy(__entry->wiphy_name, wiphy_name(mt76_hw(dev)->wiphy), MAXNAME) +#define DEV_ASSIGN strlcpy(__entry->wiphy_name, \ + wiphy_name(mt76_hw(dev)->wiphy), MAXNAME) #define DEV_PR_FMT "%s" #define DEV_PR_ARG __entry->wiphy_name diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c index 04118f08debc..f27aade34c1e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c @@ -1,18 +1,7 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
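The mt76x02_regs.h hunk a little further up is a classic macro-hygiene fix: without parentheses around the argument, an expression such as q + 1 binds to the operators inside the expansion. A compilable demonstration of the MT_WMM_TXOP_SHIFT case (bodies copied from the old and new hunks, names shortened):

#include <assert.h>

#define TXOP_SHIFT_OLD(_n)	((_n & 1) * 16)		/* buggy: '+' binds before '&' */
#define TXOP_SHIFT_NEW(_n)	(((_n) & 1) * 16)	/* fixed, as in the hunk */

int main(void)
{
	int q = 2;

	/* old: (q + 1 & 1) parses as (q + (1 & 1)) == 3 -> shift 48 */
	assert(TXOP_SHIFT_OLD(q + 1) == 48);
	/* new: ((q + 1) & 1) == 1 -> shift 16, as intended */
	assert(TXOP_SHIFT_NEW(q + 1) == 16);
	return 0;
}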
*/ #include <linux/kernel.h> diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h b/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h index 7b53f9e57f29..98329debc033 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h @@ -1,17 +1,6 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef __MT76x02_USB_H diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c index e4332d5a5757..78dfc1e7f27b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "mt76x02_usb.h" diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c index 0cb8751321a1..a993cd7e9948 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include <linux/module.h> diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c index ad5323447ed4..aec73a0295e8 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c @@ -1,18 +1,7 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl> * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/module.h> @@ -21,14 +10,14 @@ #define CCK_RATE(_idx, _rate) { \ .bitrate = _rate, \ .flags = IEEE80211_RATE_SHORT_PREAMBLE, \ - .hw_value = (MT_PHY_TYPE_CCK << 8) | _idx, \ - .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx), \ + .hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx), \ + .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + (_idx)), \ } #define OFDM_RATE(_idx, _rate) { \ .bitrate = _rate, \ - .hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx, \ - .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx, \ + .hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx), \ + .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx), \ } struct ieee80211_rate mt76x02_rates[] = { @@ -61,6 +50,20 @@ static const struct ieee80211_iface_limit mt76x02_if_limits[] = { }, }; +static const struct ieee80211_iface_limit mt76x02u_if_limits[] = { + { + .max = 1, + .types = BIT(NL80211_IFTYPE_ADHOC) + }, { + .max = 2, + .types = BIT(NL80211_IFTYPE_STATION) | +#ifdef CONFIG_MAC80211_MESH + BIT(NL80211_IFTYPE_MESH_POINT) | +#endif + BIT(NL80211_IFTYPE_AP) + }, +}; + static const struct ieee80211_iface_combination mt76x02_if_comb[] = { { .limits = mt76x02_if_limits, @@ -75,6 +78,16 @@ static const struct ieee80211_iface_combination mt76x02_if_comb[] = { } }; +static const struct ieee80211_iface_combination mt76x02u_if_comb[] = { + { + .limits = mt76x02u_if_limits, + .n_limits = ARRAY_SIZE(mt76x02u_if_limits), + .max_interfaces = 2, + .num_different_channels = 1, + .beacon_int_infra_match = true, + } +}; + static void mt76x02_led_set_config(struct mt76_dev *mdev, u8 delay_on, u8 delay_off) @@ -151,6 +164,8 @@ void mt76x02_init_device(struct mt76x02_dev *dev) if (mt76_is_usb(dev)) { hw->extra_tx_headroom += sizeof(struct mt76x02_txwi) + MT_DMA_HDR_LEN; + wiphy->iface_combinations = mt76x02u_if_comb; + wiphy->n_iface_combinations = ARRAY_SIZE(mt76x02u_if_comb); } else { INIT_DELAYED_WORK(&dev->wdt_work, mt76x02_wdt_work); @@ -281,7 +296,7 @@ mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif, mvif->idx = idx; mvif->group_wcid.idx = MT_VIF_WCID(idx); mvif->group_wcid.hw_key_idx = -1; - mtxq = (struct mt76_txq *) vif->txq->drv_priv; + mtxq = (struct mt76_txq *)vif->txq->drv_priv; mtxq->wcid = &mvif->group_wcid; mt76_txq_init(&dev->mt76, vif->txq); @@ -345,10 +360,10 @@ int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct 
ieee80211_vif *vif, enum ieee80211_ampdu_mlme_action action = params->action; struct ieee80211_sta *sta = params->sta; struct mt76x02_dev *dev = hw->priv; - struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv; + struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv; struct ieee80211_txq *txq = sta->txq[params->tid]; u16 tid = params->tid; - u16 *ssn = ¶ms->ssn; + u16 ssn = params->ssn; struct mt76_txq *mtxq; if (!txq) @@ -359,7 +374,7 @@ int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, switch (action) { case IEEE80211_AMPDU_RX_START: mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, - *ssn, params->buf_size); + ssn, params->buf_size); mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid)); break; case IEEE80211_AMPDU_RX_STOP: @@ -375,10 +390,9 @@ int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, case IEEE80211_AMPDU_TX_STOP_FLUSH: case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: mtxq->aggr = false; - ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn); break; case IEEE80211_AMPDU_TX_START: - mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(*ssn); + mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(ssn); ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; case IEEE80211_AMPDU_TX_STOP_CONT: @@ -434,7 +448,7 @@ int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) return -EOPNOTSUPP; - msta = sta ? (struct mt76x02_sta *) sta->drv_priv : NULL; + msta = sta ? (struct mt76x02_sta *)sta->drv_priv : NULL; wcid = msta ? &msta->wcid : &mvif->group_wcid; if (cmd == SET_KEY) { @@ -558,11 +572,11 @@ int mt76x02_set_rts_threshold(struct ieee80211_hw *hw, u32 val) EXPORT_SYMBOL_GPL(mt76x02_set_rts_threshold); void mt76x02_sta_rate_tbl_update(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) { struct mt76x02_dev *dev = hw->priv; - struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv; + struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv; struct ieee80211_sta_rates *rates = rcu_dereference(sta->rates); struct ieee80211_tx_rate rate = {}; @@ -588,15 +602,6 @@ void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len) } EXPORT_SYMBOL_GPL(mt76x02_remove_hdr_pad); -void mt76x02_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - const u8 *mac) -{ - struct mt76x02_dev *dev = hw->priv; - - set_bit(MT76_SCANNING, &dev->mt76.state); -} -EXPORT_SYMBOL_GPL(mt76x02_sw_scan); - void mt76x02_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig b/drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig index 1f69908f8373..5fd4973e32df 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig @@ -8,8 +8,12 @@ config MT76x2E select MT76x2_COMMON depends on MAC80211 depends on PCI - ---help--- - This adds support for MT7612/MT7602/MT7662-based wireless PCIe devices. + help + This adds support for MT7612/MT7602/MT7662-based wireless PCIe + devices, which comply with IEEE 802.11ac standards and support + 2SS to 866Mbit/s PHY rate. + + To compile this driver as a module, choose M here. config MT76x2U tristate "MediaTek MT76x2U (USB) support" @@ -18,4 +22,8 @@ config MT76x2U depends on MAC80211 depends on USB help - This adds support for MT7612U-based wireless USB dongles. 
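In the mt76x02_util.c ampdu hunk above, params->ssn is now read once by value instead of being carried around through a pointer, and the IEEE80211_AMPDU_TX_START path converts it with IEEE80211_SN_TO_SEQ. That macro, from <linux/ieee80211.h>, just shifts the 12-bit sequence number into the sequence-control field, whose low 4 bits hold the fragment number; a minimal check:

#include <assert.h>
#include <stdint.h>

#define IEEE80211_SCTL_SEQ	0xFFF0
/* Mirrors <linux/ieee80211.h>: the SN occupies seq_ctrl bits 4..15. */
#define IEEE80211_SN_TO_SEQ(ssn)	(((ssn) << 4) & IEEE80211_SCTL_SEQ)

int main(void)
{
	uint16_t ssn = 0x123;

	assert(IEEE80211_SN_TO_SEQ(ssn) == 0x1230);
	return 0;
}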
+ This adds support for MT7612U-based wireless USB 3.0 dongles, + which comply with IEEE 802.11ac standards and support 2SS to + 866Mbit/s PHY rate. + + To compile this driver as a module, choose M here. diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c index 6f6998561d9d..9f91556c7f38 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/module.h> @@ -33,7 +22,7 @@ mt76x2_eeprom_get_macaddr(struct mt76x02_dev *dev) static bool mt76x2_has_cal_free_data(struct mt76x02_dev *dev, u8 *efuse) { - u16 *efuse_w = (u16 *) efuse; + u16 *efuse_w = (u16 *)efuse; if (efuse_w[MT_EE_NIC_CONF_0] != 0) return false; @@ -372,7 +361,8 @@ mt76x2_get_power_info_2g(struct mt76x02_dev *dev, t->chain[chain].tssi_slope = data[0]; t->chain[chain].tssi_offset = data[1]; t->chain[chain].target_power = data[2]; - t->chain[chain].delta = mt76x02_sign_extend_optional(data[delta_idx], 7); + t->chain[chain].delta = + mt76x02_sign_extend_optional(data[delta_idx], 7); val = mt76x02_eeprom_get(dev, MT_EE_RF_2G_TSSI_OFF_TXPOWER); t->target_power = val >> 8; @@ -381,7 +371,7 @@ mt76x2_get_power_info_2g(struct mt76x02_dev *dev, static void mt76x2_get_power_info_5g(struct mt76x02_dev *dev, struct mt76x2_tx_power_info *t, - struct ieee80211_channel *chan, + struct ieee80211_channel *chan, int chain, int offset) { int channel = chan->hw_value; @@ -423,7 +413,8 @@ mt76x2_get_power_info_5g(struct mt76x02_dev *dev, t->chain[chain].tssi_slope = data[0]; t->chain[chain].tssi_offset = data[1]; t->chain[chain].target_power = data[2]; - t->chain[chain].delta = mt76x02_sign_extend_optional(data[delta_idx], 7); + t->chain[chain].delta = + mt76x02_sign_extend_optional(data[delta_idx], 7); val = mt76x02_eeprom_get(dev, MT_EE_RF_2G_RX_HIGH_GAIN); t->target_power = val & 0xff; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h index 9e735524d367..4dcf6518cb0d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h @@ -1,17 +1,6 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
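The eeprom.c re-wrapping above centers on mt76x02_sign_extend_optional(data[delta_idx], 7), which decodes the per-chain TSSI power delta from EEPROM. A sketch of the apparent encoding, assuming bit 7 gates the field and the low bits are sign-magnitude; the authoritative helper lives in mt76x02_eeprom.h and may differ in its sign convention:

#include <stdbool.h>
#include <stdint.h>

/* Enable-gated sign-magnitude decode, inferred from the helper's name
 * and the 7-bit width used above -- an assumption, not the driver's code. */
static int sign_extend_optional(uint32_t val, unsigned int size)
{
	bool enable = val & (1u << size);	/* bit <size> gates the field */
	bool sign = val & (1u << (size - 1));
	int mag = (int)(val & ((1u << (size - 1)) - 1));

	if (!enable)
		return 0;
	/* mt76 seems to treat a set sign bit as positive -- verify! */
	return sign ? mag : -mag;
}

int main(void)
{
	return sign_extend_optional(0, 7);	/* enable bit clear -> 0 */
}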
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef __MT76x2_EEPROM_H diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c index 97c3543eed8a..79e583eb066b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c @@ -1,18 +1,7 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "mt76x2.h" diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2/mac.c index e99d4c9bd428..e08740ca3d0c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mac.c @@ -1,18 +1,7 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "mt76x2.h" diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h index 42ff221d7706..a1583021e1e9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h @@ -1,17 +1,6 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef __MT76x2_MAC_H diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c index cd3e082f486c..76d8cd37d4de 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c @@ -1,18 +1,7 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/kernel.h> diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h index 40ef43926c06..41fd66563e82 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h @@ -1,17 +1,6 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #ifndef __MT76x2_MCU_H @@ -71,7 +60,8 @@ struct mt76x2_tssi_comp { u8 offset1; } __packed __aligned(4); -int mt76x2_mcu_tssi_comp(struct mt76x02_dev *dev, struct mt76x2_tssi_comp *tssi_data); +int mt76x2_mcu_tssi_comp(struct mt76x02_dev *dev, + struct mt76x2_tssi_comp *tssi_data); int mt76x2_mcu_init_gain(struct mt76x02_dev *dev, u8 channel, u32 gain, bool force); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h index d7abe3d73bad..41680c420cda 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h @@ -1,17 +1,6 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef __MT76x2_H diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h index 76cb1f84eff5..c876bac43751 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h @@ -1,17 +1,6 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef __MT76x2U_H diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c index e84d5c5911ea..73c3104f8858 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/kernel.h> diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c index 71aea2832644..343127f2d621 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/delay.h> @@ -336,4 +325,3 @@ fail: return ret; } - diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c index 3a1467326f4d..4971685aafe8 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include "mt76x2.h" @@ -176,7 +165,7 @@ const struct ieee80211_ops mt76x2_ops = { .sta_state = mt76_sta_state, .set_key = mt76x02_set_key, .conf_tx = mt76x02_conf_tx, - .sw_scan_start = mt76x02_sw_scan, + .sw_scan_start = mt76_sw_scan, .sw_scan_complete = mt76x02_sw_scan_complete, .flush = mt76x2_flush, .ampdu_action = mt76x02_ampdu_action, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c index 605dc66ae83b..ca6f968411ac 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/kernel.h> @@ -66,7 +55,7 @@ mt76pci_load_rom_patch(struct mt76x02_dev *dev) mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_ROM_PATCH_OFFSET); - cur = (__le32 *) (fw->data + sizeof(*hdr)); + cur = (__le32 *)(fw->data + sizeof(*hdr)); len = fw->size - sizeof(*hdr); mt76_wr_copy(dev, MT_MCU_ROM_PATCH_ADDR, cur, len); @@ -121,7 +110,7 @@ mt76pci_load_firmware(struct mt76x02_dev *dev) dev_info(dev->mt76.dev, "Build: %x\n", val); dev_info(dev->mt76.dev, "Build Time: %.16s\n", hdr->build_time); - cur = (__le32 *) (fw->data + sizeof(*hdr)); + cur = (__le32 *)(fw->data + sizeof(*hdr)); len = le32_to_cpu(hdr->ilm_len); mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_ILM_OFFSET); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c index 2edf1bd0c18c..23f35bf8d47b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include <linux/delay.h> diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c index cdedf95ca4f5..edbab4fa7f6e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c @@ -1,18 +1,7 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "mt76x2.h" @@ -25,7 +14,8 @@ mt76x2_adjust_high_lna_gain(struct mt76x02_dev *dev, int reg, s8 offset) { s8 gain; - gain = FIELD_GET(MT_BBP_AGC_LNA_HIGH_GAIN, mt76_rr(dev, MT_BBP(AGC, reg))); + gain = FIELD_GET(MT_BBP_AGC_LNA_HIGH_GAIN, + mt76_rr(dev, MT_BBP(AGC, reg))); gain -= offset / 2; mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_LNA_HIGH_GAIN, gain); } @@ -295,7 +285,7 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev) dev->cal.avg_rssi_all = -75; low_gain = (dev->cal.avg_rssi_all > mt76x02_get_rssi_gain_thresh(dev)) + - (dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev)); + (dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev)); gain_change = dev->cal.low_gain < 0 || (dev->cal.low_gain & 2) ^ (low_gain & 2); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c index 7a994a783510..da5e0f9a8bae 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
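The mt76x2_phy_update_channel_gain() hunk above leans on a compact idiom: summing two boolean comparisons yields a three-level gain selector without branches. With illustrative thresholds (the real ones come from mt76x02_get_rssi_gain_thresh() and its low-RSSI counterpart):

	/*
	 * Each true comparison contributes 1:
	 *   rssi <= -65        -> 0  (max LNA gain)
	 *   -65 < rssi <= -50  -> 1  (mid)
	 *   rssi > -50         -> 2  (low gain)
	 */
	int low_gain = (avg_rssi > -65) + (avg_rssi > -50);

The later `(dev->cal.low_gain & 2) ^ (low_gain & 2)` test then only reacts when the selector crosses the top level, which keeps the AGC from flapping on small RSSI changes.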
*/ #include <linux/kernel.h> @@ -36,6 +25,7 @@ static int mt76x2u_probe(struct usb_interface *intf, const struct usb_device_id *id) { static const struct mt76_driver_ops drv_ops = { + .update_survey = mt76x02_update_channel, .tx_prepare_skb = mt76x02u_tx_prepare_skb, .tx_complete_skb = mt76x02u_tx_complete_skb, .tx_status_data = mt76x02_tx_status_data, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c index 94f52f98019b..e305b374c904 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/delay.h> @@ -195,6 +184,13 @@ int mt76x2u_init_hardware(struct mt76x02_dev *dev) mt76x02_phy_set_rxpath(dev); mt76x02_phy_set_txdac(dev); + mt76_wr(dev, MT_CH_TIME_CFG, + MT_CH_TIME_CFG_TIMER_EN | + MT_CH_TIME_CFG_TX_AS_BUSY | + MT_CH_TIME_CFG_RX_AS_BUSY | + MT_CH_TIME_CFG_NAV_AS_BUSY | + MT_CH_TIME_CFG_EIFS_AS_BUSY); + return mt76x2u_mac_stop(dev); } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c index 3b82345756ea..e7fea3a6f1fd 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
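The usb_init.c hunk above programs MT_CH_TIME_CFG so the MAC counts TX, RX, NAV and EIFS time as "channel busy", which is what makes the newly wired .update_survey hook meaningful. A hedged sketch of how a survey update might consume such read-on-clear counters; the accumulator struct here is invented for illustration (the real accounting lives in mt76x02_update_channel()):

	struct demo_survey {		/* hypothetical accumulator */
		u64 time_busy;
		u64 time_active;
	};

	static void demo_update_channel(struct mt76x02_dev *dev,
					struct demo_survey *s)
	{
		u32 busy = mt76_rr(dev, MT_CH_BUSY);	/* clears on read */
		u32 idle = mt76_rr(dev, MT_CH_IDLE);	/* clears on read */

		/* each call accounts exactly one interval, since the
		 * hardware counters reset as a side effect of the read */
		s->time_busy += busy;
		s->time_active += busy + idle;
	}

Read-on-clear is also why the usb_main.c hunk below dumps MT_CH_IDLE/MT_CH_BUSY once during a channel switch: it discards time accumulated against the old channel.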
*/ #include "mt76x2u.h" diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c index e4dfc3bea3c5..eb73cb856c81 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "mt76x2u.h" @@ -48,7 +37,7 @@ mt76x2u_set_channel(struct mt76x02_dev *dev, int err; cancel_delayed_work_sync(&dev->cal_work); - dev->beacon_ops->pre_tbtt_enable(dev, false); + mt76x02_pre_tbtt_enable(dev, false); mutex_lock(&dev->mt76.mutex); set_bit(MT76_RESET, &dev->mt76.state); @@ -59,12 +48,16 @@ mt76x2u_set_channel(struct mt76x02_dev *dev, err = mt76x2u_phy_set_channel(dev, chandef); + /* channel cycle counters read-and-clear */ + mt76_rr(dev, MT_CH_IDLE); + mt76_rr(dev, MT_CH_BUSY); + mt76x2_mac_resume(dev); clear_bit(MT76_RESET, &dev->mt76.state); mutex_unlock(&dev->mt76.mutex); - dev->beacon_ops->pre_tbtt_enable(dev, true); + mt76x02_pre_tbtt_enable(dev, true); mt76_txq_schedule_all(&dev->mt76); return err; @@ -121,10 +114,11 @@ const struct ieee80211_ops mt76x2u_ops = { .bss_info_changed = mt76x02_bss_info_changed, .configure_filter = mt76x02_configure_filter, .conf_tx = mt76x02_conf_tx, - .sw_scan_start = mt76x02_sw_scan, + .sw_scan_start = mt76_sw_scan, .sw_scan_complete = mt76x02_sw_scan_complete, .sta_rate_tbl_update = mt76x02_sta_rate_tbl_update, .get_txpower = mt76_get_txpower, + .get_survey = mt76_get_survey, .set_tim = mt76_set_tim, .release_buffered_frames = mt76_release_buffered_frames, }; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c index 152d41fe9ff5..dd22d8af0901 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include <linux/firmware.h> diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c index dfd54f9b0e97..b1381f9df992 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "mt76x2u.h" diff --git a/drivers/net/wireless/mediatek/mt76/trace.c b/drivers/net/wireless/mediatek/mt76/trace.c index ea4ab8729ae4..ed3df3c8b4b3 100644 --- a/drivers/net/wireless/mediatek/mt76/trace.c +++ b/drivers/net/wireless/mediatek/mt76/trace.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/module.h> diff --git a/drivers/net/wireless/mediatek/mt76/trace.h b/drivers/net/wireless/mediatek/mt76/trace.h index ea30895933c5..0b3e635da868 100644 --- a/drivers/net/wireless/mediatek/mt76/trace.h +++ b/drivers/net/wireless/mediatek/mt76/trace.h @@ -1,17 +1,6 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #if !defined(__MT76_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) @@ -24,10 +13,11 @@ #define TRACE_SYSTEM mt76 #define MAXNAME 32 -#define DEV_ENTRY __array(char, wiphy_name, 32) -#define DEV_ASSIGN strlcpy(__entry->wiphy_name, wiphy_name(dev->hw->wiphy), MAXNAME) -#define DEV_PR_FMT "%s" -#define DEV_PR_ARG __entry->wiphy_name +#define DEV_ENTRY __array(char, wiphy_name, 32) +#define DEV_ASSIGN strlcpy(__entry->wiphy_name, \ + wiphy_name(dev->hw->wiphy), MAXNAME) +#define DEV_PR_FMT "%s" +#define DEV_PR_ARG __entry->wiphy_name #define REG_ENTRY __field(u32, reg) __field(u32, val) #define REG_ASSIGN __entry->reg = reg; __entry->val = val diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index 5397827668b9..c22a05f06fd0 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "mt76.h" @@ -97,7 +86,7 @@ mt76_txq_get_qid(struct ieee80211_txq *txq) static void mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb) { - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; if (!ieee80211_is_data_qos(hdr->frame_control) || !ieee80211_is_data_present(hdr->frame_control)) @@ -217,8 +206,8 @@ mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid, if (cb->pktid == pktid) return skb; - if (pktid >= 0 && - !time_after(jiffies, cb->jiffies + MT_TX_STATUS_SKB_TIMEOUT)) + if (pktid >= 0 && !time_after(jiffies, cb->jiffies + + MT_TX_STATUS_SKB_TIMEOUT)) continue; __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED | @@ -260,7 +249,7 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta, struct mt76_wcid *wcid, struct sk_buff *skb) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct mt76_queue *q; int qid = skb_get_queue_mapping(skb); @@ -280,7 +269,7 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta, tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; txq = sta->txq[tid]; - mtxq = (struct mt76_txq *) txq->drv_priv; + mtxq = (struct mt76_txq *)txq->drv_priv; if (mtxq->aggr) mt76_check_agg_ssn(mtxq, skb); @@ -328,7 +317,7 @@ static void mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta, struct sk_buff *skb, bool last) { - struct mt76_wcid *wcid = (struct mt76_wcid *) sta->drv_priv; + struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE; @@ -354,7 +343,7 @@ 
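The trace.h hunk above only re-wraps the DEV_ENTRY/DEV_ASSIGN helpers, but it shows the convention: every mt76 tracepoint embeds the wiphy name so events from multiple radios can be told apart in one trace. A hypothetical event built from those helpers (the event name is made up; REG_ENTRY/REG_ASSIGN are the macros quoted above):

	TRACE_EVENT(demo_reg_write,
		TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
		TP_ARGS(dev, reg, val),
		TP_STRUCT__entry(
			DEV_ENTRY
			REG_ENTRY
		),
		TP_fast_assign(
			DEV_ASSIGN;
			REG_ASSIGN;
		),
		TP_printk(DEV_PR_FMT " %04x=%08x",
			  DEV_PR_ARG, __entry->reg, __entry->val)
	);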
mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta, spin_lock_bh(&hwq->lock); for (i = 0; tids && nframes; i++, tids >>= 1) { struct ieee80211_txq *txq = sta->txq[i]; - struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv; + struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv; struct sk_buff *skb; if (!(tids & 1)) @@ -438,8 +427,7 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_sw_queue *sq, if (probe) break; - if (test_bit(MT76_OFFCHANNEL, &dev->state) || - test_bit(MT76_RESET, &dev->state)) + if (test_bit(MT76_RESET, &dev->state)) return -EBUSY; skb = mt76_txq_dequeue(dev, mtxq, false); @@ -498,8 +486,7 @@ mt76_txq_schedule_list(struct mt76_dev *dev, enum mt76_txq_id qid) if (sq->swq_queued >= 4) break; - if (test_bit(MT76_OFFCHANNEL, &dev->state) || - test_bit(MT76_RESET, &dev->state)) { + if (test_bit(MT76_RESET, &dev->state)) { ret = -EBUSY; break; } @@ -568,6 +555,13 @@ void mt76_txq_schedule_all(struct mt76_dev *dev) } EXPORT_SYMBOL_GPL(mt76_txq_schedule_all); +void mt76_tx_tasklet(unsigned long data) +{ + struct mt76_dev *dev = (struct mt76_dev *)data; + + mt76_txq_schedule_all(dev); +} + void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta, bool send_bar) { @@ -610,7 +604,7 @@ void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq) if (!txq) return; - mtxq = (struct mt76_txq *) txq->drv_priv; + mtxq = (struct mt76_txq *)txq->drv_priv; while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL) ieee80211_free_txskb(dev->hw, skb); @@ -619,7 +613,7 @@ EXPORT_SYMBOL_GPL(mt76_txq_remove); void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq) { - struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv; + struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv; skb_queue_head_init(&mtxq->retry_q); diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c index fb87ce7fbdf6..20c6fe510e9d 100644 --- a/drivers/net/wireless/mediatek/mt76/usb.c +++ b/drivers/net/wireless/mediatek/mt76/usb.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
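mt76_tx_tasklet() added above is a thin tasklet body around mt76_txq_schedule_all(); the old-style tasklet API passes its context as an unsigned long, hence the cast back to struct mt76_dev *. How a bus backend would presumably wire it up (where the tasklet_struct actually lives is up to the glue code, so a static is used here purely for illustration):

	#include <linux/interrupt.h>

	static struct tasklet_struct demo_tx_tasklet;

	static void demo_init(struct mt76_dev *dev)
	{
		tasklet_init(&demo_tx_tasklet, mt76_tx_tasklet,
			     (unsigned long)dev);
	}

	/* from the TX-done interrupt path: defer scheduling out of
	 * hard-IRQ context */
	static void demo_tx_done_irq(void)
	{
		tasklet_schedule(&demo_tx_tasklet);
	}

	/* at teardown, once no further interrupts can fire */
	static void demo_exit(void)
	{
		tasklet_kill(&demo_tx_tasklet);
	}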
*/ #include <linux/module.h> @@ -95,9 +84,9 @@ static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr) ret = __mt76u_vendor_request(dev, req, USB_DIR_IN | USB_TYPE_VENDOR, - 0, offset, usb->data, sizeof(__le32)); + 0, offset, &usb->reg_val, sizeof(__le32)); if (ret == sizeof(__le32)) - data = get_unaligned_le32(usb->data); + data = le32_to_cpu(usb->reg_val); trace_usb_reg_rr(dev, addr, data); return data; @@ -131,10 +120,10 @@ static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val) } offset = addr & ~MT_VEND_TYPE_MASK; - put_unaligned_le32(val, usb->data); + usb->reg_val = cpu_to_le32(val); __mt76u_vendor_request(dev, req, USB_DIR_OUT | USB_TYPE_VENDOR, 0, - offset, usb->data, sizeof(__le32)); + offset, &usb->reg_val, sizeof(__le32)); trace_usb_reg_wr(dev, addr, val); } @@ -164,12 +153,12 @@ static void mt76u_copy(struct mt76_dev *dev, u32 offset, int i, ret; mutex_lock(&usb->usb_ctrl_mtx); - for (i = 0; i < (len / 4); i++) { - put_unaligned_le32(val[i], usb->data); + for (i = 0; i < DIV_ROUND_UP(len, 4); i++) { + put_unaligned(val[i], (u32 *)usb->data); ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE, USB_DIR_OUT | USB_TYPE_VENDOR, 0, offset + i * 4, usb->data, - sizeof(__le32)); + sizeof(u32)); if (ret < 0) break; } @@ -309,7 +298,7 @@ mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb, } urb->num_sgs = max_t(int, i, urb->num_sgs); - urb->transfer_buffer_length = urb->num_sgs * q->buf_size, + urb->transfer_buffer_length = urb->num_sgs * q->buf_size; sg_init_marker(urb->sg, urb->num_sgs); return i ? : -ENOMEM; @@ -320,14 +309,13 @@ mt76u_refill_rx(struct mt76_dev *dev, struct urb *urb, int nsgs, gfp_t gfp) { struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN]; - if (dev->usb.sg_en) { + if (dev->usb.sg_en) return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp); - } else { - urb->transfer_buffer_length = q->buf_size; - urb->transfer_buffer = page_frag_alloc(&q->rx_page, - q->buf_size, gfp); - return urb->transfer_buffer ? 0 : -ENOMEM; - } + + urb->transfer_buffer_length = q->buf_size; + urb->transfer_buffer = page_frag_alloc(&q->rx_page, q->buf_size, gfp); + + return urb->transfer_buffer ? 
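The usb.c hunks above stop round-tripping register values through the shared usb->data scratch buffer and use a dedicated __le32 field, turning the put_unaligned/get_unaligned dance into a plain cpu_to_le32()/le32_to_cpu() pair. The underlying constraint is that buffers handed to USB control transfers must be DMA-safe, i.e. heap-allocated rather than on the stack. A hedged sketch of the read side using only core USB APIs (the vendor request number is made up):

	#include <linux/usb.h>

	static u32 demo_reg_read(struct usb_device *udev, u16 offset,
				 __le32 *reg_val)
	{
		int ret;

		/* reg_val must point into kmalloc'd memory, never the stack */
		ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				      0x47,	/* hypothetical vendor request */
				      USB_DIR_IN | USB_TYPE_VENDOR,
				      0, offset, reg_val, sizeof(*reg_val),
				      USB_CTRL_GET_TIMEOUT);

		return ret == sizeof(*reg_val) ? le32_to_cpu(*reg_val) : ~0;
	}

The same hunk also quietly fixes a real bug: `urb->transfer_buffer_length = urb->num_sgs * q->buf_size,` ended in a comma operator rather than a semicolon, which happened to be harmless only because the next statement's value was discarded anyway.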
0 : -ENOMEM; } static int @@ -763,13 +751,14 @@ mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb, if (!dev->usb.sg_en) { urb->transfer_buffer = skb->data; return 0; - } else { - sg_init_table(urb->sg, MT_TX_SG_MAX_SIZE); - urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len); - if (urb->num_sgs == 0) - return -ENOMEM; - return urb->num_sgs; } + + sg_init_table(urb->sg, MT_TX_SG_MAX_SIZE); + urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len); + if (!urb->num_sgs) + return -ENOMEM; + + return urb->num_sgs; } static int @@ -885,7 +874,8 @@ void mt76u_stop_tx(struct mt76_dev *dev) struct mt76_queue *q; int i, j, ret; - ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(dev), HZ/5); + ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(dev), + HZ / 5); if (!ret) { dev_err(dev->dev, "timed out waiting for pending tx\n"); @@ -957,7 +947,7 @@ int mt76u_init(struct mt76_dev *dev, .rr = mt76u_rr, .wr = mt76u_wr, .rmw = mt76u_rmw, - .copy = mt76u_copy, + .write_copy = mt76u_copy, .wr_rp = mt76u_wr_rp, .rd_rp = mt76u_rd_rp, .type = MT76_BUS_USB, diff --git a/drivers/net/wireless/mediatek/mt76/usb_trace.c b/drivers/net/wireless/mediatek/mt76/usb_trace.c index 7e1f540f0b7a..9942bdd6177b 100644 --- a/drivers/net/wireless/mediatek/mt76/usb_trace.c +++ b/drivers/net/wireless/mediatek/mt76/usb_trace.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/module.h> diff --git a/drivers/net/wireless/mediatek/mt76/usb_trace.h b/drivers/net/wireless/mediatek/mt76/usb_trace.h index b56c32343eb1..f5ab3215af80 100644 --- a/drivers/net/wireless/mediatek/mt76/usb_trace.h +++ b/drivers/net/wireless/mediatek/mt76/usb_trace.h @@ -1,17 +1,6 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #if !defined(__MT76_USB_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) @@ -24,10 +13,11 @@ #define TRACE_SYSTEM mt76_usb #define MAXNAME 32 -#define DEV_ENTRY __array(char, wiphy_name, 32) -#define DEV_ASSIGN strlcpy(__entry->wiphy_name, wiphy_name(dev->hw->wiphy), MAXNAME) -#define DEV_PR_FMT "%s " -#define DEV_PR_ARG __entry->wiphy_name +#define DEV_ENTRY __array(char, wiphy_name, 32) +#define DEV_ASSIGN strlcpy(__entry->wiphy_name, \ + wiphy_name(dev->hw->wiphy), MAXNAME) +#define DEV_PR_FMT "%s " +#define DEV_PR_ARG __entry->wiphy_name #define REG_ENTRY __field(u32, reg) __field(u32, val) #define REG_ASSIGN __entry->reg = reg; __entry->val = val @@ -65,7 +55,7 @@ DECLARE_EVENT_CLASS(urb_transfer, TP_PROTO(struct mt76_dev *dev, struct urb *u), TP_ARGS(dev, u), TP_STRUCT__entry( - DEV_ENTRY __field(unsigned, pipe) __field(u32, len) + DEV_ENTRY __field(unsigned int, pipe) __field(u32, len) ), TP_fast_assign( DEV_ASSIGN; diff --git a/drivers/net/wireless/mediatek/mt76/util.c b/drivers/net/wireless/mediatek/mt76/util.c index 69270c1a9091..23d1e1da78b2 100644 --- a/drivers/net/wireless/mediatek/mt76/util.c +++ b/drivers/net/wireless/mediatek/mt76/util.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: ISC /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include <linux/module.h> diff --git a/drivers/net/wireless/mediatek/mt76/util.h b/drivers/net/wireless/mediatek/mt76/util.h index 9f8d59269a9e..fe3479c8e561 100644 --- a/drivers/net/wireless/mediatek/mt76/util.h +++ b/drivers/net/wireless/mediatek/mt76/util.h @@ -12,7 +12,7 @@ #include <linux/bitfield.h> #define MT76_INCR(_var, _size) \ - _var = (((_var) + 1) % _size) + (_var = (((_var) + 1) % (_size))) int mt76_wcid_alloc(unsigned long *mask, int size); @@ -25,7 +25,7 @@ mt76_wcid_free(unsigned long *mask, int idx) static inline void mt76_skb_set_moredata(struct sk_buff *skb, bool enable) { - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; if (enable) hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); diff --git a/drivers/net/wireless/mediatek/mt7601u/init.c b/drivers/net/wireless/mediatek/mt7601u/init.c index 9bfac9f1d47f..cada48800928 100644 --- a/drivers/net/wireless/mediatek/mt7601u/init.c +++ b/drivers/net/wireless/mediatek/mt7601u/init.c @@ -557,6 +557,9 @@ mt76_init_sband_2g(struct mt7601u_dev *dev) { dev->sband_2g = devm_kzalloc(dev->dev, sizeof(*dev->sband_2g), GFP_KERNEL); + if (!dev->sband_2g) + return -ENOMEM; + dev->hw->wiphy->bands[NL80211_BAND_2GHZ] = dev->sband_2g; WARN_ON(dev->ee->reg.start - 1 + dev->ee->reg.num > diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c index 89a7b1234ffb..72e608cc53af 100644 --- a/drivers/net/wireless/mediatek/mt7601u/main.c +++ b/drivers/net/wireless/mediatek/mt7601u/main.c @@ -351,7 +351,7 @@ mt76_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta = params->sta; enum ieee80211_ampdu_mlme_action action = params->action; u16 tid = params->tid; - u16 *ssn = ¶ms->ssn; + u16 ssn = params->ssn; struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv; WARN_ON(msta->wcid.idx > GROUP_WCID(0)); @@ -371,7 +371,7 @@ mt76_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: break; case IEEE80211_AMPDU_TX_START: - msta->agg_ssn[tid] = *ssn << 4; + msta->agg_ssn[tid] = ssn << 4; ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; case IEEE80211_AMPDU_TX_STOP_CONT: diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c index e4e9344b6982..8ae318b5fe54 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c @@ -430,7 +430,7 @@ static int qtnf_pcie_suspend(struct device *dev) struct qtnf_pcie_bus_priv *priv; struct qtnf_bus *bus; - bus = pci_get_drvdata(to_pci_dev(dev)); + bus = dev_get_drvdata(dev); if (!bus) return -EFAULT; @@ -443,7 +443,7 @@ static int qtnf_pcie_resume(struct device *dev) struct qtnf_pcie_bus_priv *priv; struct qtnf_bus *bus; - bus = pci_get_drvdata(to_pci_dev(dev)); + bus = dev_get_drvdata(dev); if (!bus) return -EFAULT; diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c index fdf0504b5f1d..0dfb55c69b73 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c @@ -1086,6 +1086,7 @@ static const struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x0846, 0x9013) }, { USB_DEVICE(0x0846, 0x9019) }, /* Planex */ + { USB_DEVICE(0x2019, 0xed14) }, { USB_DEVICE(0x2019, 0xed19) }, /* Ralink */ { USB_DEVICE(0x148f, 0x3573) }, diff --git 
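The util.h change to MT76_INCR above is a classic macro-hygiene fix packed into one line: `_size` gains its own parentheses so precedence cannot leak in from the argument, and the whole assignment is wrapped so the macro behaves as a single expression. The difference, with a hypothetical caller:

	#define BAD_INCR(_var, _size)	_var = (((_var) + 1) % _size)
	#define MT76_INCR(_var, _size)	(_var = (((_var) + 1) % (_size)))

	/*
	 * BAD_INCR(i, n + 1) expands to  i = (((i) + 1) % n + 1);
	 * '%' binds tighter than '+', so it computes ((i + 1) % n) + 1
	 * instead of (i + 1) % (n + 1).  MT76_INCR expands correctly,
	 * and its outer parentheses also let the macro sit inside a
	 * larger expression without surprises.
	 */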
a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c index ef5f51512212..4d4e3888ef20 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c @@ -65,26 +65,6 @@ struct rt2x00debug_intf { * - crypto stats file */ struct dentry *driver_folder; - struct dentry *driver_entry; - struct dentry *chipset_entry; - struct dentry *dev_flags; - struct dentry *cap_flags; - struct dentry *restart_hw; - struct dentry *register_folder; - struct dentry *csr_off_entry; - struct dentry *csr_val_entry; - struct dentry *eeprom_off_entry; - struct dentry *eeprom_val_entry; - struct dentry *bbp_off_entry; - struct dentry *bbp_val_entry; - struct dentry *rf_off_entry; - struct dentry *rf_val_entry; - struct dentry *rfcsr_off_entry; - struct dentry *rfcsr_val_entry; - struct dentry *queue_folder; - struct dentry *queue_frame_dump_entry; - struct dentry *queue_stats_entry; - struct dentry *crypto_stats_entry; /* * The frame dump file only allows a single reader, @@ -596,39 +576,34 @@ static const struct file_operations rt2x00debug_restart_hw = { .llseek = generic_file_llseek, }; -static struct dentry *rt2x00debug_create_file_driver(const char *name, - struct rt2x00debug_intf - *intf, - struct debugfs_blob_wrapper - *blob) +static void rt2x00debug_create_file_driver(const char *name, + struct rt2x00debug_intf *intf, + struct debugfs_blob_wrapper *blob) { char *data; data = kzalloc(3 * MAX_LINE_LENGTH, GFP_KERNEL); if (!data) - return NULL; + return; blob->data = data; data += sprintf(data, "driver:\t%s\n", intf->rt2x00dev->ops->name); data += sprintf(data, "version:\t%s\n", DRV_VERSION); blob->size = strlen(blob->data); - return debugfs_create_blob(name, 0400, intf->driver_folder, blob); + debugfs_create_blob(name, 0400, intf->driver_folder, blob); } -static struct dentry *rt2x00debug_create_file_chipset(const char *name, - struct rt2x00debug_intf - *intf, - struct - debugfs_blob_wrapper - *blob) +static void rt2x00debug_create_file_chipset(const char *name, + struct rt2x00debug_intf *intf, + struct debugfs_blob_wrapper *blob) { const struct rt2x00debug *debug = intf->debug; char *data; data = kzalloc(9 * MAX_LINE_LENGTH, GFP_KERNEL); if (!data) - return NULL; + return; blob->data = data; data += sprintf(data, "rt chip:\t%04x\n", intf->rt2x00dev->chip.rt); @@ -654,13 +629,15 @@ static struct dentry *rt2x00debug_create_file_chipset(const char *name, blob->size = strlen(blob->data); - return debugfs_create_blob(name, 0400, intf->driver_folder, blob); + debugfs_create_blob(name, 0400, intf->driver_folder, blob); } void rt2x00debug_register(struct rt2x00_dev *rt2x00dev) { const struct rt2x00debug *debug = rt2x00dev->ops->debugfs; struct rt2x00debug_intf *intf; + struct dentry *queue_folder; + struct dentry *register_folder; intf = kzalloc(sizeof(struct rt2x00debug_intf), GFP_KERNEL); if (!intf) { @@ -676,43 +653,27 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev) debugfs_create_dir(intf->rt2x00dev->ops->name, rt2x00dev->hw->wiphy->debugfsdir); - intf->driver_entry = - rt2x00debug_create_file_driver("driver", intf, &intf->driver_blob); + rt2x00debug_create_file_driver("driver", intf, &intf->driver_blob); + rt2x00debug_create_file_chipset("chipset", intf, &intf->chipset_blob); + debugfs_create_file("dev_flags", 0400, intf->driver_folder, intf, + &rt2x00debug_fop_dev_flags); + debugfs_create_file("cap_flags", 0400, intf->driver_folder, intf, + &rt2x00debug_fop_cap_flags); + 
debugfs_create_file("restart_hw", 0200, intf->driver_folder, intf, + &rt2x00debug_restart_hw); - intf->chipset_entry = - rt2x00debug_create_file_chipset("chipset", - intf, &intf->chipset_blob); - - intf->dev_flags = debugfs_create_file("dev_flags", 0400, - intf->driver_folder, intf, - &rt2x00debug_fop_dev_flags); - - intf->cap_flags = debugfs_create_file("cap_flags", 0400, - intf->driver_folder, intf, - &rt2x00debug_fop_cap_flags); - - intf->restart_hw = debugfs_create_file("restart_hw", 0200, - intf->driver_folder, intf, - &rt2x00debug_restart_hw); - - intf->register_folder = - debugfs_create_dir("register", intf->driver_folder); + register_folder = debugfs_create_dir("register", intf->driver_folder); #define RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(__intf, __name) \ ({ \ if (debug->__name.read) { \ - (__intf)->__name##_off_entry = \ - debugfs_create_u32(__stringify(__name) "_offset", \ - 0600, \ - (__intf)->register_folder, \ - &(__intf)->offset_##__name); \ + debugfs_create_u32(__stringify(__name) "_offset", 0600, \ + register_folder, \ + &(__intf)->offset_##__name); \ \ - (__intf)->__name##_val_entry = \ - debugfs_create_file(__stringify(__name) "_value", \ - 0600, \ - (__intf)->register_folder, \ - (__intf), \ - &rt2x00debug_fop_##__name); \ + debugfs_create_file(__stringify(__name) "_value", 0600, \ + register_folder, (__intf), \ + &rt2x00debug_fop_##__name); \ } \ }) @@ -724,26 +685,21 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev) #undef RT2X00DEBUGFS_CREATE_REGISTER_ENTRY - intf->queue_folder = - debugfs_create_dir("queue", intf->driver_folder); + queue_folder = debugfs_create_dir("queue", intf->driver_folder); - intf->queue_frame_dump_entry = - debugfs_create_file("dump", 0400, intf->queue_folder, - intf, &rt2x00debug_fop_queue_dump); + debugfs_create_file("dump", 0400, queue_folder, intf, + &rt2x00debug_fop_queue_dump); skb_queue_head_init(&intf->frame_dump_skbqueue); init_waitqueue_head(&intf->frame_dump_waitqueue); - intf->queue_stats_entry = - debugfs_create_file("queue", 0400, intf->queue_folder, - intf, &rt2x00debug_fop_queue_stats); + debugfs_create_file("queue", 0400, queue_folder, intf, + &rt2x00debug_fop_queue_stats); #ifdef CONFIG_RT2X00_LIB_CRYPTO if (rt2x00_has_cap_hw_crypto(rt2x00dev)) - intf->crypto_stats_entry = - debugfs_create_file("crypto", 0444, intf->queue_folder, - intf, - &rt2x00debug_fop_crypto_stats); + debugfs_create_file("crypto", 0444, queue_folder, intf, + &rt2x00debug_fop_crypto_stats); #endif return; @@ -758,29 +714,7 @@ void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev) skb_queue_purge(&intf->frame_dump_skbqueue); -#ifdef CONFIG_RT2X00_LIB_CRYPTO - debugfs_remove(intf->crypto_stats_entry); -#endif - debugfs_remove(intf->queue_stats_entry); - debugfs_remove(intf->queue_frame_dump_entry); - debugfs_remove(intf->queue_folder); - debugfs_remove(intf->rfcsr_val_entry); - debugfs_remove(intf->rfcsr_off_entry); - debugfs_remove(intf->rf_val_entry); - debugfs_remove(intf->rf_off_entry); - debugfs_remove(intf->bbp_val_entry); - debugfs_remove(intf->bbp_off_entry); - debugfs_remove(intf->eeprom_val_entry); - debugfs_remove(intf->eeprom_off_entry); - debugfs_remove(intf->csr_val_entry); - debugfs_remove(intf->csr_off_entry); - debugfs_remove(intf->register_folder); - debugfs_remove(intf->dev_flags); - debugfs_remove(intf->restart_hw); - debugfs_remove(intf->cap_flags); - debugfs_remove(intf->chipset_entry); - debugfs_remove(intf->driver_entry); - debugfs_remove(intf->driver_folder); + debugfs_remove_recursive(intf->driver_folder); 
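The rt2x00debug conversion above is the standard modern debugfs pattern: debugfs failures are never actionable, so none of the per-file dentry pointers need to be stored; only the root directory matters, and a single debugfs_remove_recursive() call tears the whole tree down. In miniature (names hypothetical):

	#include <linux/debugfs.h>

	static struct dentry *demo_root;
	static u32 demo_offset;

	static void demo_debugfs_init(void *priv,
				      const struct file_operations *fops)
	{
		demo_root = debugfs_create_dir("demo", NULL);

		/* no error checks, no saved dentries: a failed debugfs
		 * file is harmless and removal goes through the root */
		debugfs_create_u32("offset", 0600, demo_root, &demo_offset);
		debugfs_create_file("stats", 0400, demo_root, priv, fops);
	}

	static void demo_debugfs_exit(void)
	{
		debugfs_remove_recursive(demo_root);
	}

This is why the diff can delete some twenty dentry members from struct rt2x00debug_intf and the matching ladder of debugfs_remove() calls in one go.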
kfree(intf->chipset_blob.data); kfree(intf->driver_blob.data); kfree(intf); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c index 9d158237ac67..c3eab767bc21 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c @@ -371,9 +371,6 @@ static void rt2x00lib_fill_tx_status(struct rt2x00_dev *rt2x00dev, IEEE80211_TX_CTL_AMPDU; tx_info->status.ampdu_len = 1; tx_info->status.ampdu_ack_len = success ? 1 : 0; - - if (!success) - tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; } if (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS) { diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c index 7e3a621b9c0d..bc2dfef0de22 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c @@ -349,8 +349,7 @@ static void rt2x00usb_work_rxdone(struct work_struct *work) while (!rt2x00queue_empty(rt2x00dev->rx)) { entry = rt2x00queue_get_entry(rt2x00dev->rx, Q_INDEX_DONE); - if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) || - !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) + if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) break; /* @@ -389,8 +388,7 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb) rt2x00lib_dmadone(entry); /* - * Schedule the delayed work for reading the RX status - * from the device. + * Schedule the delayed work for processing RX data */ queue_work(rt2x00dev->workqueue, &rt2x00dev->rxdone_work); } @@ -402,8 +400,7 @@ static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void *data) struct queue_entry_priv_usb *entry_priv = entry->priv_data; int status; - if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) || - test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) + if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) return false; rt2x00lib_dmastart(entry); diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c index 3adb1d3d47ac..ceffe05bd65b 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c @@ -1525,7 +1525,7 @@ static void rtl8723b_enable_rf(struct rtl8xxxu_priv *priv) /* * WLAN action by PTA */ - rtl8xxxu_write8(priv, REG_WLAN_ACT_CONTROL_8723B, 0x04); + rtl8xxxu_write8(priv, REG_WLAN_ACT_CONTROL_8723B, 0x0c); /* * BT select S0/S1 controlled by WiFi @@ -1568,9 +1568,14 @@ static void rtl8723b_enable_rf(struct rtl8xxxu_priv *priv) rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.ant_sel_rsv)); /* - * 0x280, 0x00, 0x200, 0x80 - not clear + * Different settings per different antenna position. 
+ * Antenna Position: | Normal Inverse + * -------------------------------------------------- + * Antenna switch to BT: | 0x280, 0x00 + * Antenna switch to WiFi: | 0x0, 0x280 + * Antenna switch to PTA: | 0x200, 0x80 */ - rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00); + rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x80); /* * Software control, antenna at WiFi side diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index 8136e268b4e6..c6c41fb962ff 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -3891,12 +3891,13 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) /* Check if MAC is already powered on */ val8 = rtl8xxxu_read8(priv, REG_CR); + val16 = rtl8xxxu_read16(priv, REG_SYS_CLKR); /* * Fix 92DU-VC S3 hang with the reason is that secondary mac is not * initialized. First MAC returns 0xea, second MAC returns 0x00 */ - if (val8 == 0xea) + if (val8 == 0xea || !(val16 & SYS_CLK_MAC_CLK_ENABLE)) macpower = false; else macpower = true; diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h index 99565ad09cdc..e4a7e074ae3f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.h +++ b/drivers/net/wireless/realtek/rtlwifi/base.h @@ -46,15 +46,6 @@ enum ap_peer { #define MAX_LISTEN_INTERVAL 10 #define MAX_RATE_TRIES 4 -#define SET_80211_HDR_FRAME_CONTROL(_hdr, _val) \ - WRITEEF2BYTE(_hdr, _val) -#define SET_80211_HDR_TYPE_AND_SUBTYPE(_hdr, _val) \ - WRITEEF1BYTE(_hdr, _val) -#define SET_80211_HDR_PWR_MGNT(_hdr, _val) \ - SET_BITS_TO_LE_2BYTE(_hdr, 12, 1, _val) -#define SET_80211_HDR_TO_DS(_hdr, _val) \ - SET_BITS_TO_LE_2BYTE(_hdr, 8, 1, _val) - #define SET_80211_PS_POLL_AID(_hdr, _val) \ (*(u16 *)((u8 *)(_hdr) + 2) = _val) #define SET_80211_PS_POLL_BSSID(_hdr, _val) \ @@ -62,30 +53,12 @@ enum ap_peer { #define SET_80211_PS_POLL_TA(_hdr, _val) \ ether_addr_copy(((u8 *)(_hdr))+10, (u8 *)(_val)) -#define SET_80211_HDR_DURATION(_hdr, _val) \ - (*(u16 *)((u8 *)(_hdr) + FRAME_OFFSET_DURATION) = le16_to_cpu(_val)) #define SET_80211_HDR_ADDRESS1(_hdr, _val) \ CP_MACADDR((u8 *)(_hdr)+FRAME_OFFSET_ADDRESS1, (u8 *)(_val)) #define SET_80211_HDR_ADDRESS2(_hdr, _val) \ CP_MACADDR((u8 *)(_hdr)+FRAME_OFFSET_ADDRESS2, (u8 *)(_val)) #define SET_80211_HDR_ADDRESS3(_hdr, _val) \ CP_MACADDR((u8 *)(_hdr)+FRAME_OFFSET_ADDRESS3, (u8 *)(_val)) -#define SET_80211_HDR_FRAGMENT_SEQUENCE(_hdr, _val) \ - WRITEEF2BYTE((u8 *)(_hdr)+FRAME_OFFSET_SEQUENCE, _val) - -#define SET_BEACON_PROBE_RSP_TIME_STAMP_LOW(__phdr, __val) \ - WRITEEF4BYTE(((u8 *)(__phdr)) + 24, __val) -#define SET_BEACON_PROBE_RSP_TIME_STAMP_HIGH(__phdr, __val) \ - WRITEEF4BYTE(((u8 *)(__phdr)) + 28, __val) -#define SET_BEACON_PROBE_RSP_BEACON_INTERVAL(__phdr, __val) \ - WRITEEF2BYTE(((u8 *)(__phdr)) + 32, __val) -#define GET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr) \ - READEF2BYTE(((u8 *)(__phdr)) + 34) -#define SET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, __val) \ - WRITEEF2BYTE(((u8 *)(__phdr)) + 34, __val) -#define MASK_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, __val) \ - SET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, \ - (GET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr) & (~(__val)))) #define SET_TX_DESC_SPE_RPT(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE((__pdesc) + 8, 19, 1, __val) diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c index 152242ac0aa5..191dafd03189 
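The rtl8xxxu comment rewritten above replaces an opaque "not clear" note with a concrete table of REG_S0S1_PATH_SWITCH values. Encoded as data, the table would presumably look like this; the enum and helper are illustrative, not driver API (rtl8xxxu_write32() and REG_S0S1_PATH_SWITCH are the real names from the hunk):

	enum demo_ant_owner { DEMO_ANT_BT, DEMO_ANT_WIFI, DEMO_ANT_PTA };

	/* values from the table above, indexed by [owner][inverse] */
	static const u32 demo_s0s1_path[][2] = {
		[DEMO_ANT_BT]	= { 0x280, 0x00 },
		[DEMO_ANT_WIFI]	= { 0x00,  0x280 },
		[DEMO_ANT_PTA]	= { 0x200, 0x80 },
	};

	static void demo_switch_antenna(struct rtl8xxxu_priv *priv,
					enum demo_ant_owner owner,
					bool inverse)
	{
		rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH,
				 demo_s0s1_path[owner][inverse]);
	}

Read against that table, the functional change writes the "PTA, inverse position" value (0x80) instead of parking the switch at BT.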
100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c @@ -509,13 +509,7 @@ static u32 halbtc_get_wifi_link_status(struct btc_coexist *btcoexist) static s32 halbtc_get_wifi_rssi(struct rtl_priv *rtlpriv) { - int undec_sm_pwdb = 0; - - if (rtlpriv->mac80211.link_state >= MAC80211_LINKED) - undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb; - else /* associated entry pwdb */ - undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb; - return undec_sm_pwdb; + return rtlpriv->dm.undec_sm_pwdb; } static bool halbtc_get(void *void_btcoexist, u8 get_type, void *out_buf) diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.c b/drivers/net/wireless/realtek/rtlwifi/debug.c index a051de16284d..55db71c766fe 100644 --- a/drivers/net/wireless/realtek/rtlwifi/debug.c +++ b/drivers/net/wireless/realtek/rtlwifi/debug.c @@ -88,7 +88,7 @@ static const struct file_operations file_ops_common = { .open = dl_debug_open_common, .read = seq_read, .llseek = seq_lseek, - .release = seq_release, + .release = single_release, }; static int rtl_debug_get_mac_page(struct seq_file *m, void *v) diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.c b/drivers/net/wireless/realtek/rtlwifi/efuse.c index ea4fc53764de..264667203f6f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/efuse.c +++ b/drivers/net/wireless/realtek/rtlwifi/efuse.c @@ -6,29 +6,12 @@ #include "pci.h" #include <linux/export.h> -static const u8 MAX_PGPKT_SIZE = 9; static const u8 PGPKT_DATA_SIZE = 8; static const int EFUSE_MAX_SIZE = 512; #define START_ADDRESS 0x1000 #define REG_MCUFWDL 0x0080 -static const struct efuse_map RTL8712_SDIO_EFUSE_TABLE[] = { - {0, 0, 0, 2}, - {0, 1, 0, 2}, - {0, 2, 0, 2}, - {1, 0, 0, 1}, - {1, 0, 1, 1}, - {1, 1, 0, 1}, - {1, 1, 1, 3}, - {1, 3, 0, 17}, - {3, 3, 1, 48}, - {10, 0, 0, 6}, - {10, 3, 0, 1}, - {10, 3, 1, 1}, - {11, 0, 0, 28} -}; - static const struct rtl_efuse_ops efuse_ops = { .efuse_onebyte_read = efuse_one_byte_read, .efuse_logical_map_read = efuse_shadow_read, diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index 4055e0ab75ba..6087ec7a90a6 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.c +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c @@ -1793,6 +1793,8 @@ static int rtl_pci_start(struct ieee80211_hw *hw) if (err) { RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Failed to config hardware!\n"); + kfree(rtlpriv->btcoexist.btc_context); + kfree(rtlpriv->btcoexist.wifi_only_context); return err; } rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT, @@ -2409,8 +2411,7 @@ EXPORT_SYMBOL(rtl_pci_disconnect); ****************************************/ int rtl_pci_suspend(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct ieee80211_hw *hw = pci_get_drvdata(pdev); + struct ieee80211_hw *hw = dev_get_drvdata(dev); struct rtl_priv *rtlpriv = rtl_priv(hw); rtlpriv->cfg->ops->hw_suspend(hw); @@ -2422,8 +2423,7 @@ EXPORT_SYMBOL(rtl_pci_suspend); int rtl_pci_resume(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct ieee80211_hw *hw = pci_get_drvdata(pdev); + struct ieee80211_hw *hw = dev_get_drvdata(dev); struct rtl_priv *rtlpriv = rtl_priv(hw); rtlpriv->cfg->ops->hw_resume(hw); diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c index 6ccb5b93a595..c10432cd703e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/regd.c +++ b/drivers/net/wireless/realtek/rtlwifi/regd.c @@ -276,22 +276,6 @@ static void 
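The one-line rtlwifi/debug.c change above is a genuine leak fix rather than churn: a file opened with single_open() must be released with single_release(), because single_open() kmallocs a struct seq_operations that plain seq_release() never frees. The canonical pairing looks like this (names hypothetical):

	#include <linux/seq_file.h>

	static int demo_show(struct seq_file *m, void *v)
	{
		seq_puts(m, "demo\n");
		return 0;
	}

	static int demo_open(struct inode *inode, struct file *file)
	{
		return single_open(file, demo_show, inode->i_private);
	}

	static const struct file_operations demo_fops = {
		.owner		= THIS_MODULE,
		.open		= demo_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		/* must pair with single_open(); seq_release() would leak
		 * the seq_operations that single_open() allocated */
		.release	= single_release,
	};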
_rtl_reg_apply_world_flags(struct wiphy *wiphy, return; } -static void _rtl_dump_channel_map(struct wiphy *wiphy) -{ - enum nl80211_band band; - struct ieee80211_supported_band *sband; - struct ieee80211_channel *ch; - unsigned int i; - - for (band = 0; band < NUM_NL80211_BANDS; band++) { - if (!wiphy->bands[band]) - continue; - sband = wiphy->bands[band]; - for (i = 0; i < sband->n_channels; i++) - ch = &sband->channels[i]; - } -} - static int _rtl_reg_notifier_apply(struct wiphy *wiphy, struct regulatory_request *request, struct rtl_regulatory *reg) @@ -309,8 +293,6 @@ static int _rtl_reg_notifier_apply(struct wiphy *wiphy, break; } - _rtl_dump_channel_map(wiphy); - return 0; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/def.h index fa2e1b063f68..edcca42c7464 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/def.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/def.h @@ -12,35 +12,6 @@ #define RX_CMD_QUEUE 1 #define C2H_RX_CMD_HDR_LEN 8 -#define GET_C2H_CMD_CMD_LEN(__prxhdr) \ - LE_BITS_TO_4BYTE((__prxhdr), 0, 16) -#define GET_C2H_CMD_ELEMENT_ID(__prxhdr) \ - LE_BITS_TO_4BYTE((__prxhdr), 16, 8) -#define GET_C2H_CMD_CMD_SEQ(__prxhdr) \ - LE_BITS_TO_4BYTE((__prxhdr), 24, 7) -#define GET_C2H_CMD_CONTINUE(__prxhdr) \ - LE_BITS_TO_4BYTE((__prxhdr), 31, 1) -#define GET_C2H_CMD_CONTENT(__prxhdr) \ - ((u8 *)(__prxhdr) + C2H_RX_CMD_HDR_LEN) - -#define GET_C2H_CMD_FEEDBACK_ELEMENT_ID(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE((__pcmdfbhdr), 0, 8) -#define GET_C2H_CMD_FEEDBACK_CCX_LEN(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE((__pcmdfbhdr), 8, 8) -#define GET_C2H_CMD_FEEDBACK_CCX_CMD_CNT(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE((__pcmdfbhdr), 16, 16) -#define GET_C2H_CMD_FEEDBACK_CCX_MAC_ID(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 0, 5) -#define GET_C2H_CMD_FEEDBACK_CCX_VALID(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 7, 1) -#define GET_C2H_CMD_FEEDBACK_CCX_RETRY_CNT(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 8, 5) -#define GET_C2H_CMD_FEEDBACK_CCX_TOK(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 15, 1) -#define GET_C2H_CMD_FEEDBACK_CCX_QSEL(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 16, 4) -#define GET_C2H_CMD_FEEDBACK_CCX_SEQ(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 20, 12) #define CHIP_BONDING_IDENTIFIER(_value) (((_value)>>22)&0x3) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c index 85360353f557..333e355c9281 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c @@ -1411,12 +1411,13 @@ void rtl88e_dm_set_tx_ant_by_tx_info(struct ieee80211_hw *hw, struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); struct fast_ant_training *pfat_table = &rtldm->fat_table; + __le32 *pdesc32 = (__le32 *)pdesc; if ((rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) || (rtlefuse->antenna_div_type == CG_TRX_SMART_ANTDIV)) { - SET_TX_DESC_ANTSEL_A(pdesc, pfat_table->antsel_a[mac_id]); - SET_TX_DESC_ANTSEL_B(pdesc, pfat_table->antsel_b[mac_id]); - SET_TX_DESC_ANTSEL_C(pdesc, pfat_table->antsel_c[mac_id]); + set_tx_desc_antsel_a(pdesc32, pfat_table->antsel_a[mac_id]); + set_tx_desc_antsel_b(pdesc32, pfat_table->antsel_b[mac_id]); + set_tx_desc_antsel_c(pdesc32, pfat_table->antsel_c[mac_id]); } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c 
b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c index eab48fed61ed..a0eda51e833c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c @@ -115,10 +115,6 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps; rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; - rtlpriv->cfg->mod_params->sw_crypto = - rtlpriv->cfg->mod_params->sw_crypto; - rtlpriv->cfg->mod_params->disable_watchdog = - rtlpriv->cfg->mod_params->disable_watchdog; if (rtlpriv->cfg->mod_params->disable_watchdog) pr_info("watchdog disabled\n"); if (!rtlpriv->psc.inactiveps) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c index 483dc8bdc555..aa2e9e88be53 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c @@ -25,7 +25,7 @@ static u8 _rtl88ee_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue) } static void _rtl88ee_query_rxphystatus(struct ieee80211_hw *hw, - struct rtl_stats *pstatus, u8 *pdesc, + struct rtl_stats *pstatus, __le32 *pdesc, struct rx_fwinfo_88e *p_drvinfo, bool bpacket_match_bssid, bool bpacket_toself, bool packet_beacon) @@ -271,7 +271,7 @@ static void _rtl88ee_smart_antenna(struct ieee80211_hw *hw, static void _rtl88ee_translate_rx_signal_stuff(struct ieee80211_hw *hw, struct sk_buff *skb, struct rtl_stats *pstatus, - u8 *pdesc, + __le32 *pdesc, struct rx_fwinfo_88e *p_drvinfo) { struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); @@ -313,13 +313,13 @@ static void _rtl88ee_translate_rx_signal_stuff(struct ieee80211_hw *hw, rtl_process_phyinfo(hw, tmp_buf, pstatus); } -static void _rtl88ee_insert_emcontent(struct rtl_tcb_desc *ptcb_desc, - u8 *virtualaddress) +static void rtl88ee_insert_emcontent(struct rtl_tcb_desc *ptcb_desc, + __le32 *virtualaddress) { u32 dwtmp = 0; memset(virtualaddress, 0, 8); - SET_EARLYMODE_PKTNUM(virtualaddress, ptcb_desc->empkt_num); + set_earlymode_pktnum(virtualaddress, ptcb_desc->empkt_num); if (ptcb_desc->empkt_num == 1) { dwtmp = ptcb_desc->empkt_len[0]; } else { @@ -327,7 +327,7 @@ static void _rtl88ee_insert_emcontent(struct rtl_tcb_desc *ptcb_desc, dwtmp += ((dwtmp%4) ? (4-dwtmp%4) : 0)+4; dwtmp += ptcb_desc->empkt_len[1]; } - SET_EARLYMODE_LEN0(virtualaddress, dwtmp); + set_earlymode_len0(virtualaddress, dwtmp); if (ptcb_desc->empkt_num <= 3) { dwtmp = ptcb_desc->empkt_len[2]; @@ -336,7 +336,7 @@ static void _rtl88ee_insert_emcontent(struct rtl_tcb_desc *ptcb_desc, dwtmp += ((dwtmp%4) ? (4-dwtmp%4) : 0)+4; dwtmp += ptcb_desc->empkt_len[3]; } - SET_EARLYMODE_LEN1(virtualaddress, dwtmp); + set_earlymode_len1(virtualaddress, dwtmp); if (ptcb_desc->empkt_num <= 5) { dwtmp = ptcb_desc->empkt_len[4]; } else { @@ -344,8 +344,8 @@ static void _rtl88ee_insert_emcontent(struct rtl_tcb_desc *ptcb_desc, dwtmp += ((dwtmp%4) ? (4-dwtmp%4) : 0)+4; dwtmp += ptcb_desc->empkt_len[5]; } - SET_EARLYMODE_LEN2_1(virtualaddress, dwtmp & 0xF); - SET_EARLYMODE_LEN2_2(virtualaddress, dwtmp >> 4); + set_earlymode_len2_1(virtualaddress, dwtmp & 0xF); + set_earlymode_len2_2(virtualaddress, dwtmp >> 4); if (ptcb_desc->empkt_num <= 7) { dwtmp = ptcb_desc->empkt_len[6]; } else { @@ -353,7 +353,7 @@ static void _rtl88ee_insert_emcontent(struct rtl_tcb_desc *ptcb_desc, dwtmp += ((dwtmp%4) ? 
(4-dwtmp%4) : 0)+4; dwtmp += ptcb_desc->empkt_len[7]; } - SET_EARLYMODE_LEN3(virtualaddress, dwtmp); + set_earlymode_len3(virtualaddress, dwtmp); if (ptcb_desc->empkt_num <= 9) { dwtmp = ptcb_desc->empkt_len[8]; } else { @@ -361,50 +361,51 @@ static void _rtl88ee_insert_emcontent(struct rtl_tcb_desc *ptcb_desc, dwtmp += ((dwtmp%4) ? (4-dwtmp%4) : 0)+4; dwtmp += ptcb_desc->empkt_len[9]; } - SET_EARLYMODE_LEN4(virtualaddress, dwtmp); + set_earlymode_len4(virtualaddress, dwtmp); } bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *status, struct ieee80211_rx_status *rx_status, - u8 *pdesc, struct sk_buff *skb) + u8 *pdesc8, struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rx_fwinfo_88e *p_drvinfo; struct ieee80211_hdr *hdr; u8 wake_match; - u32 phystatus = GET_RX_DESC_PHYST(pdesc); + __le32 *pdesc = (__le32 *)pdesc8; + u32 phystatus = get_rx_desc_physt(pdesc); - status->packet_report_type = (u8)GET_RX_STATUS_DESC_RPT_SEL(pdesc); + status->packet_report_type = (u8)get_rx_status_desc_rpt_sel(pdesc); if (status->packet_report_type == TX_REPORT2) - status->length = (u16)GET_RX_RPT2_DESC_PKT_LEN(pdesc); + status->length = (u16)get_rx_rpt2_desc_pkt_len(pdesc); else - status->length = (u16)GET_RX_DESC_PKT_LEN(pdesc); - status->rx_drvinfo_size = (u8)GET_RX_DESC_DRV_INFO_SIZE(pdesc) * + status->length = (u16)get_rx_desc_pkt_len(pdesc); + status->rx_drvinfo_size = (u8)get_rx_desc_drv_info_size(pdesc) * RX_DRV_INFO_SIZE_UNIT; - status->rx_bufshift = (u8)(GET_RX_DESC_SHIFT(pdesc) & 0x03); - status->icv = (u16)GET_RX_DESC_ICV(pdesc); - status->crc = (u16)GET_RX_DESC_CRC32(pdesc); + status->rx_bufshift = (u8)(get_rx_desc_shift(pdesc) & 0x03); + status->icv = (u16)get_rx_desc_icv(pdesc); + status->crc = (u16)get_rx_desc_crc32(pdesc); status->hwerror = (status->crc | status->icv); - status->decrypted = !GET_RX_DESC_SWDEC(pdesc); - status->rate = (u8)GET_RX_DESC_RXMCS(pdesc); - status->shortpreamble = (u16)GET_RX_DESC_SPLCP(pdesc); - status->isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1); - status->isfirst_ampdu = (bool)((GET_RX_DESC_PAGGR(pdesc) == 1) && - (GET_RX_DESC_FAGGR(pdesc) == 1)); + status->decrypted = !get_rx_desc_swdec(pdesc); + status->rate = (u8)get_rx_desc_rxmcs(pdesc); + status->shortpreamble = (u16)get_rx_desc_splcp(pdesc); + status->isampdu = (bool) (get_rx_desc_paggr(pdesc) == 1); + status->isfirst_ampdu = (bool)((get_rx_desc_paggr(pdesc) == 1) && + (get_rx_desc_faggr(pdesc) == 1)); if (status->packet_report_type == NORMAL_RX) - status->timestamp_low = GET_RX_DESC_TSFL(pdesc); - status->rx_is40mhzpacket = (bool)GET_RX_DESC_BW(pdesc); - status->is_ht = (bool)GET_RX_DESC_RXHT(pdesc); + status->timestamp_low = get_rx_desc_tsfl(pdesc); + status->rx_is40mhzpacket = (bool)get_rx_desc_bw(pdesc); + status->is_ht = (bool)get_rx_desc_rxht(pdesc); status->is_cck = RTL8188_RX_HAL_IS_CCK_RATE(status->rate); - status->macid = GET_RX_DESC_MACID(pdesc); - if (GET_RX_STATUS_DESC_PATTERN_MATCH(pdesc)) + status->macid = get_rx_desc_macid(pdesc); + if (get_rx_status_desc_pattern_match(pdesc)) wake_match = BIT(2); - else if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc)) + else if (get_rx_status_desc_magic_match(pdesc)) wake_match = BIT(1); - else if (GET_RX_STATUS_DESC_UNICAST_MATCH(pdesc)) + else if (get_rx_status_desc_unicast_match(pdesc)) wake_match = BIT(0); else wake_match = 0; @@ -465,15 +466,15 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw, rx_status->signal = status->recvsignalpower + 10; if (status->packet_report_type == TX_REPORT2) { 
status->macid_valid_entry[0] = - GET_RX_RPT2_DESC_MACID_VALID_1(pdesc); + get_rx_rpt2_desc_macid_valid_1(pdesc); status->macid_valid_entry[1] = - GET_RX_RPT2_DESC_MACID_VALID_2(pdesc); + get_rx_rpt2_desc_macid_valid_2(pdesc); } return true; } void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw, - struct ieee80211_hdr *hdr, u8 *pdesc_tx, + struct ieee80211_hdr *hdr, u8 *pdesc8, u8 *txbd, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, struct sk_buff *skb, @@ -484,7 +485,6 @@ void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw, struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtlpriv); - u8 *pdesc = (u8 *)pdesc_tx; u16 seq_number; __le16 fc = hdr->frame_control; unsigned int buf_len = 0; @@ -497,6 +497,7 @@ void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw, dma_addr_t mapping; u8 bw_40 = 0; u8 short_gi = 0; + __le32 *pdesc = (__le32 *)pdesc8; if (mac->opmode == NL80211_IFTYPE_STATION) { bw_40 = mac->bw_40; @@ -521,77 +522,77 @@ void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw, "DMA mapping error\n"); return; } - CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_88e)); + clear_pci_tx_desc_content(pdesc, sizeof(struct tx_desc_88e)); if (ieee80211_is_nullfunc(fc) || ieee80211_is_ctl(fc)) { firstseg = true; lastseg = true; } if (firstseg) { if (rtlhal->earlymode_enable) { - SET_TX_DESC_PKT_OFFSET(pdesc, 1); - SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN + + set_tx_desc_pkt_offset(pdesc, 1); + set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN + EM_HDR_LEN); if (ptcb_desc->empkt_num) { RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "Insert 8 byte.pTcb->EMPktNum:%d\n", ptcb_desc->empkt_num); - _rtl88ee_insert_emcontent(ptcb_desc, - (u8 *)(skb->data)); + rtl88ee_insert_emcontent(ptcb_desc, + (__le32 *)(skb->data)); } } else { - SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN); + set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN); } ptcb_desc->use_driver_rate = true; - SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate); + set_tx_desc_tx_rate(pdesc, ptcb_desc->hw_rate); if (ptcb_desc->hw_rate > DESC92C_RATEMCS0) short_gi = (ptcb_desc->use_shortgi) ? 1 : 0; else short_gi = (ptcb_desc->use_shortpreamble) ? 1 : 0; - SET_TX_DESC_DATA_SHORTGI(pdesc, short_gi); + set_tx_desc_data_shortgi(pdesc, short_gi); if (info->flags & IEEE80211_TX_CTL_AMPDU) { - SET_TX_DESC_AGG_ENABLE(pdesc, 1); - SET_TX_DESC_MAX_AGG_NUM(pdesc, 0x14); + set_tx_desc_agg_enable(pdesc, 1); + set_tx_desc_max_agg_num(pdesc, 0x14); } - SET_TX_DESC_SEQ(pdesc, seq_number); - SET_TX_DESC_RTS_ENABLE(pdesc, ((ptcb_desc->rts_enable && + set_tx_desc_seq(pdesc, seq_number); + set_tx_desc_rts_enable(pdesc, ((ptcb_desc->rts_enable && !ptcb_desc->cts_enable) ? 1 : 0)); - SET_TX_DESC_HW_RTS_ENABLE(pdesc, 0); - SET_TX_DESC_CTS2SELF(pdesc, ((ptcb_desc->cts_enable) ? 1 : 0)); - SET_TX_DESC_RTS_STBC(pdesc, ((ptcb_desc->rts_stbc) ? 1 : 0)); - - SET_TX_DESC_RTS_RATE(pdesc, ptcb_desc->rts_rate); - SET_TX_DESC_RTS_BW(pdesc, 0); - SET_TX_DESC_RTS_SC(pdesc, ptcb_desc->rts_sc); - SET_TX_DESC_RTS_SHORT(pdesc, + set_tx_desc_hw_rts_enable(pdesc, 0); + set_tx_desc_cts2self(pdesc, ((ptcb_desc->cts_enable) ? 1 : 0)); + set_tx_desc_rts_stbc(pdesc, ((ptcb_desc->rts_stbc) ? 1 : 0)); + + set_tx_desc_rts_rate(pdesc, ptcb_desc->rts_rate); + set_tx_desc_rts_bw(pdesc, 0); + set_tx_desc_rts_sc(pdesc, ptcb_desc->rts_sc); + set_tx_desc_rts_short(pdesc, ((ptcb_desc->rts_rate <= DESC92C_RATE54M) ? (ptcb_desc->rts_use_shortpreamble ? 1 : 0) : (ptcb_desc->rts_use_shortgi ?
1 : 0))); if (ptcb_desc->tx_enable_sw_calc_duration) - SET_TX_DESC_NAV_USE_HDR(pdesc, 1); + set_tx_desc_nav_use_hdr(pdesc, 1); if (bw_40) { if (ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_20_40) { - SET_TX_DESC_DATA_BW(pdesc, 1); - SET_TX_DESC_TX_SUB_CARRIER(pdesc, 3); + set_tx_desc_data_bw(pdesc, 1); + set_tx_desc_tx_sub_carrier(pdesc, 3); } else { - SET_TX_DESC_DATA_BW(pdesc, 0); - SET_TX_DESC_TX_SUB_CARRIER(pdesc, + set_tx_desc_data_bw(pdesc, 0); + set_tx_desc_tx_sub_carrier(pdesc, mac->cur_40_prime_sc); } } else { - SET_TX_DESC_DATA_BW(pdesc, 0); - SET_TX_DESC_TX_SUB_CARRIER(pdesc, 0); + set_tx_desc_data_bw(pdesc, 0); + set_tx_desc_tx_sub_carrier(pdesc, 0); } - SET_TX_DESC_LINIP(pdesc, 0); - SET_TX_DESC_PKT_SIZE(pdesc, (u16)skb_len); + set_tx_desc_linip(pdesc, 0); + set_tx_desc_pkt_size(pdesc, (u16)skb_len); if (sta) { u8 ampdu_density = sta->ht_cap.ampdu_density; - SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density); + set_tx_desc_ampdu_density(pdesc, ampdu_density); } if (info->control.hw_key) { struct ieee80211_key_conf *keyconf; @@ -601,76 +602,77 @@ void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw, case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: case WLAN_CIPHER_SUITE_TKIP: - SET_TX_DESC_SEC_TYPE(pdesc, 0x1); + set_tx_desc_sec_type(pdesc, 0x1); break; case WLAN_CIPHER_SUITE_CCMP: - SET_TX_DESC_SEC_TYPE(pdesc, 0x3); + set_tx_desc_sec_type(pdesc, 0x3); break; default: - SET_TX_DESC_SEC_TYPE(pdesc, 0x0); + set_tx_desc_sec_type(pdesc, 0x0); break; } } - SET_TX_DESC_QUEUE_SEL(pdesc, fw_qsel); - SET_TX_DESC_DATA_RATE_FB_LIMIT(pdesc, 0x1F); - SET_TX_DESC_RTS_RATE_FB_LIMIT(pdesc, 0xF); - SET_TX_DESC_DISABLE_FB(pdesc, ptcb_desc->disable_ratefallback ? + set_tx_desc_queue_sel(pdesc, fw_qsel); + set_tx_desc_data_rate_fb_limit(pdesc, 0x1F); + set_tx_desc_rts_rate_fb_limit(pdesc, 0xF); + set_tx_desc_disable_fb(pdesc, ptcb_desc->disable_ratefallback ? 1 : 0); - SET_TX_DESC_USE_RATE(pdesc, ptcb_desc->use_driver_rate ? 1 : 0); + set_tx_desc_use_rate(pdesc, ptcb_desc->use_driver_rate ? 1 : 0); - /*SET_TX_DESC_PWR_STATUS(pdesc, pwr_status);*/ + /*set_tx_desc_pwr_status(pdesc, pwr_status);*/ /* Set TxRate and RTSRate in TxDesc */ /* This prevent Tx initial rate of new-coming packets */ /* from being overwritten by retried packet rate.*/ if (!ptcb_desc->use_driver_rate) { - /*SET_TX_DESC_RTS_RATE(pdesc, 0x08); */ - /* SET_TX_DESC_TX_RATE(pdesc, 0x0b); */ + /*set_tx_desc_rts_rate(pdesc, 0x08); */ + /* set_tx_desc_tx_rate(pdesc, 0x0b); */ } if (ieee80211_is_data_qos(fc)) { if (mac->rdg_en) { RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "Enable RDG function.\n"); - SET_TX_DESC_RDG_ENABLE(pdesc, 1); - SET_TX_DESC_HTC(pdesc, 1); + set_tx_desc_rdg_enable(pdesc, 1); + set_tx_desc_htc(pdesc, 1); } } } - SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0)); - SET_TX_DESC_LAST_SEG(pdesc, (lastseg ? 1 : 0)); - SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)buf_len); - SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping); + set_tx_desc_first_seg(pdesc, (firstseg ? 1 : 0)); + set_tx_desc_last_seg(pdesc, (lastseg ? 
1 : 0)); + set_tx_desc_tx_buffer_size(pdesc, (u16)buf_len); + set_tx_desc_tx_buffer_address(pdesc, mapping); if (rtlpriv->dm.useramask) { - SET_TX_DESC_RATE_ID(pdesc, ptcb_desc->ratr_index); - SET_TX_DESC_MACID(pdesc, ptcb_desc->mac_id); + set_tx_desc_rate_id(pdesc, ptcb_desc->ratr_index); + set_tx_desc_macid(pdesc, ptcb_desc->mac_id); } else { - SET_TX_DESC_RATE_ID(pdesc, 0xC + ptcb_desc->ratr_index); - SET_TX_DESC_MACID(pdesc, ptcb_desc->ratr_index); + set_tx_desc_rate_id(pdesc, 0xC + ptcb_desc->ratr_index); + set_tx_desc_macid(pdesc, ptcb_desc->ratr_index); } if (ieee80211_is_data_qos(fc)) - SET_TX_DESC_QOS(pdesc, 1); + set_tx_desc_qos(pdesc, 1); if (!ieee80211_is_data_qos(fc)) - SET_TX_DESC_HWSEQ_EN(pdesc, 1); - SET_TX_DESC_MORE_FRAG(pdesc, (lastseg ? 0 : 1)); + set_tx_desc_hwseq_en(pdesc, 1); + set_tx_desc_more_frag(pdesc, (lastseg ? 0 : 1)); if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) || is_broadcast_ether_addr(ieee80211_get_DA(hdr))) { - SET_TX_DESC_BMC(pdesc, 1); + set_tx_desc_bmc(pdesc, 1); } - rtl88e_dm_set_tx_ant_by_tx_info(hw, pdesc, ptcb_desc->mac_id); + rtl88e_dm_set_tx_ant_by_tx_info(hw, pdesc8, ptcb_desc->mac_id); RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n"); } void rtl88ee_tx_fill_cmddesc(struct ieee80211_hw *hw, - u8 *pdesc, bool firstseg, + u8 *pdesc8, bool firstseg, bool lastseg, struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); u8 fw_queue = QSLT_BEACON; + __le32 *pdesc = (__le32 *)pdesc8; dma_addr_t mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len, @@ -684,58 +686,60 @@ void rtl88ee_tx_fill_cmddesc(struct ieee80211_hw *hw, "DMA mapping error\n"); return; } - CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE); + clear_pci_tx_desc_content(pdesc, TX_DESC_SIZE); if (firstseg) - SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN); + set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN); - SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE1M); + set_tx_desc_tx_rate(pdesc, DESC92C_RATE1M); - SET_TX_DESC_SEQ(pdesc, 0); + set_tx_desc_seq(pdesc, 0); - SET_TX_DESC_LINIP(pdesc, 0); + set_tx_desc_linip(pdesc, 0); - SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue); + set_tx_desc_queue_sel(pdesc, fw_queue); - SET_TX_DESC_FIRST_SEG(pdesc, 1); - SET_TX_DESC_LAST_SEG(pdesc, 1); + set_tx_desc_first_seg(pdesc, 1); + set_tx_desc_last_seg(pdesc, 1); - SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len)); + set_tx_desc_tx_buffer_size(pdesc, (u16)(skb->len)); - SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping); + set_tx_desc_tx_buffer_address(pdesc, mapping); - SET_TX_DESC_RATE_ID(pdesc, 7); - SET_TX_DESC_MACID(pdesc, 0); + set_tx_desc_rate_id(pdesc, 7); + set_tx_desc_macid(pdesc, 0); - SET_TX_DESC_OWN(pdesc, 1); + set_tx_desc_own(pdesc, 1); - SET_TX_DESC_PKT_SIZE(pdesc, (u16)(skb->len)); + set_tx_desc_pkt_size(pdesc, (u16)(skb->len)); - SET_TX_DESC_FIRST_SEG(pdesc, 1); - SET_TX_DESC_LAST_SEG(pdesc, 1); + set_tx_desc_first_seg(pdesc, 1); + set_tx_desc_last_seg(pdesc, 1); - SET_TX_DESC_OFFSET(pdesc, 0x20); + set_tx_desc_offset(pdesc, 0x20); - SET_TX_DESC_USE_RATE(pdesc, 1); + set_tx_desc_use_rate(pdesc, 1); if (!ieee80211_is_data_qos(fc)) - SET_TX_DESC_HWSEQ_EN(pdesc, 1); + set_tx_desc_hwseq_en(pdesc, 1); RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, "H2C Tx Cmd Content\n", pdesc, TX_DESC_SIZE); } -void rtl88ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, +void rtl88ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc8, bool istx, u8 desc_name, u8 *val) { + __le32 *pdesc = (__le32 *)pdesc8; + if (istx == true) { switch (desc_name) { case HW_DESC_OWN: - 
SET_TX_DESC_OWN(pdesc, 1); + set_tx_desc_own(pdesc, 1); break; case HW_DESC_TX_NEXTDESC_ADDR: - SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *)val); + set_tx_desc_next_desc_address(pdesc, *(u32 *)val); break; default: WARN_ONCE(true, "rtl8188ee: ERR txdesc :%d not processed\n", @@ -745,16 +749,16 @@ void rtl88ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, } else { switch (desc_name) { case HW_DESC_RXOWN: - SET_RX_DESC_OWN(pdesc, 1); + set_rx_desc_own(pdesc, 1); break; case HW_DESC_RXBUFF_ADDR: - SET_RX_DESC_BUFF_ADDR(pdesc, *(u32 *)val); + set_rx_desc_buff_addr(pdesc, *(u32 *)val); break; case HW_DESC_RXPKT_LEN: - SET_RX_DESC_PKT_LEN(pdesc, *(u32 *)val); + set_rx_desc_pkt_len(pdesc, *(u32 *)val); break; case HW_DESC_RXERO: - SET_RX_DESC_EOR(pdesc, 1); + set_rx_desc_eor(pdesc, 1); break; default: WARN_ONCE(true, "rtl8188ee: ERR rxdesc :%d not processed\n", @@ -765,17 +769,18 @@ void rtl88ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, } u64 rtl88ee_get_desc(struct ieee80211_hw *hw, - u8 *pdesc, bool istx, u8 desc_name) + u8 *pdesc8, bool istx, u8 desc_name) { u32 ret = 0; + __le32 *pdesc = (__le32 *)pdesc8; if (istx == true) { switch (desc_name) { case HW_DESC_OWN: - ret = GET_TX_DESC_OWN(pdesc); + ret = get_tx_desc_own(pdesc); break; case HW_DESC_TXBUFF_ADDR: - ret = GET_TX_DESC_TX_BUFFER_ADDRESS(pdesc); + ret = get_tx_desc_tx_buffer_address(pdesc); break; default: WARN_ONCE(true, "rtl8188ee: ERR txdesc :%d not processed\n", @@ -785,13 +790,13 @@ u64 rtl88ee_get_desc(struct ieee80211_hw *hw, } else { switch (desc_name) { case HW_DESC_OWN: - ret = GET_RX_DESC_OWN(pdesc); + ret = get_rx_desc_own(pdesc); break; case HW_DESC_RXPKT_LEN: - ret = GET_RX_DESC_PKT_LEN(pdesc); + ret = get_rx_desc_pkt_len(pdesc); break; case HW_DESC_RXBUFF_ADDR: - ret = GET_RX_DESC_BUFF_ADDR(pdesc); + ret = get_rx_desc_buff_addr(pdesc); break; default: WARN_ONCE(true, "rtl8188ee: ERR rxdesc :%d not processed\n", diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h index c29d9bfa5bd4..917729807514 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h @@ -14,505 +14,545 @@ #define USB_HWDESC_HEADER_LEN 32 #define CRCLENGTH 4 -#define SET_TX_DESC_PKT_SIZE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 0, 16, __val) -#define SET_TX_DESC_OFFSET(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 16, 8, __val) -#define SET_TX_DESC_BMC(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 24, 1, __val) -#define SET_TX_DESC_HTC(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 25, 1, __val) -#define SET_TX_DESC_LAST_SEG(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 26, 1, __val) -#define SET_TX_DESC_FIRST_SEG(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 27, 1, __val) -#define SET_TX_DESC_LINIP(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 28, 1, __val) -#define SET_TX_DESC_NO_ACM(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 29, 1, __val) -#define SET_TX_DESC_GF(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val) -#define SET_TX_DESC_OWN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val) - -#define GET_TX_DESC_PKT_SIZE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 0, 16) -#define GET_TX_DESC_OFFSET(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 16, 8) -#define GET_TX_DESC_BMC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 24, 1) -#define GET_TX_DESC_HTC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 25, 1) -#define GET_TX_DESC_LAST_SEG(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 26, 1) 
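Every getter in this deleted block reduces to one pattern: LE_BITS_TO_4BYTE(__pdesc + 4*n, start, len) is replaced further down by le32_get_bits(*(__pdesc + n), GENMASK(start + len - 1, start)). A minimal host-side sketch of that equivalence, assuming a little-endian host with the word already loaded in host order, and using stand-in helpers rather than the kernel's <linux/bitfield.h>:

#include <assert.h>
#include <stdint.h>

#define GENMASK(h, l) ((~0u << (l)) & (~0u >> (31 - (h))))

/* Old form: (descriptor word, start bit, field width in bits). */
static uint32_t le_bits_to_4byte(uint32_t w, int start, int len)
{
        return (w >> start) & ((1u << len) - 1);
}

/* New form: mask-based extraction, modelling le32_get_bits(). */
static uint32_t get_bits(uint32_t w, uint32_t mask)
{
        return (w & mask) >> __builtin_ctz(mask);
}

int main(void)
{
        uint32_t w = 0x04abcdef; /* arbitrary descriptor word */

        /* GET_TX_DESC_LAST_SEG: (start 26, len 1) == GENMASK(26, 26) */
        assert(le_bits_to_4byte(w, 26, 1) == get_bits(w, GENMASK(26, 26)));
        /* GET_TX_DESC_OFFSET: (start 16, len 8) == GENMASK(23, 16) */
        assert(le_bits_to_4byte(w, 16, 8) == get_bits(w, GENMASK(23, 16)));
        return 0;
}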
-#define GET_TX_DESC_FIRST_SEG(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 27, 1) -#define GET_TX_DESC_LINIP(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 28, 1) -#define GET_TX_DESC_NO_ACM(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 29, 1) -#define GET_TX_DESC_GF(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 30, 1) -#define GET_TX_DESC_OWN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 31, 1) - -#define SET_TX_DESC_MACID(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 0, 6, __val) -#define SET_TX_DESC_QUEUE_SEL(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 8, 5, __val) -#define SET_TX_DESC_RDG_NAV_EXT(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 13, 1, __val) -#define SET_TX_DESC_LSIG_TXOP_EN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 14, 1, __val) -#define SET_TX_DESC_PIFS(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 15, 1, __val) -#define SET_TX_DESC_RATE_ID(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 16, 4, __val) -#define SET_TX_DESC_NAV_USE_HDR(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 20, 1, __val) -#define SET_TX_DESC_EN_DESC_ID(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 21, 1, __val) -#define SET_TX_DESC_SEC_TYPE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 22, 2, __val) -#define SET_TX_DESC_PKT_OFFSET(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 26, 5, __val) -#define SET_TX_DESC_PADDING_LEN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 24, 8, __val) - -#define GET_TX_DESC_MACID(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 0, 5) -#define GET_TX_DESC_AGG_ENABLE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 5, 1) -#define GET_TX_DESC_AGG_BREAK(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 6, 1) -#define GET_TX_DESC_RDG_ENABLE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 7, 1) -#define GET_TX_DESC_QUEUE_SEL(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 8, 5) -#define GET_TX_DESC_RDG_NAV_EXT(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 13, 1) -#define GET_TX_DESC_LSIG_TXOP_EN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 14, 1) -#define GET_TX_DESC_PIFS(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 15, 1) -#define GET_TX_DESC_RATE_ID(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 16, 4) -#define GET_TX_DESC_NAV_USE_HDR(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 20, 1) -#define GET_TX_DESC_EN_DESC_ID(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 21, 1) -#define GET_TX_DESC_SEC_TYPE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 22, 2) -#define GET_TX_DESC_PKT_OFFSET(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 24, 8) - -#define SET_TX_DESC_RTS_RC(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 0, 6, __val) -#define SET_TX_DESC_DATA_RC(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 6, 6, __val) -#define SET_TX_DESC_AGG_ENABLE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 12, 1, __val) -#define SET_TX_DESC_RDG_ENABLE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 13, 1, __val) -#define SET_TX_DESC_BAR_RTY_TH(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 14, 2, __val) -#define SET_TX_DESC_AGG_BREAK(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 16, 1, __val) -#define SET_TX_DESC_MORE_FRAG(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 17, 1, __val) -#define SET_TX_DESC_RAW(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 18, 1, __val) -#define SET_TX_DESC_CCX(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 19, 1, __val) -#define SET_TX_DESC_AMPDU_DENSITY(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 20, 3, __val) -#define SET_TX_DESC_BT_INT(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 23, 1, __val) -#define SET_TX_DESC_ANTSEL_A(__pdesc, __val) 
\ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 24, 1, __val) -#define SET_TX_DESC_ANTSEL_B(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 25, 1, __val) -#define SET_TX_DESC_TX_ANT_CCK(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 26, 2, __val) -#define SET_TX_DESC_TX_ANTL(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 28, 2, __val) -#define SET_TX_DESC_TX_ANT_HT(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 30, 2, __val) - -#define GET_TX_DESC_RTS_RC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 0, 6) -#define GET_TX_DESC_DATA_RC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 6, 6) -#define GET_TX_DESC_BAR_RTY_TH(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 14, 2) -#define GET_TX_DESC_MORE_FRAG(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 17, 1) -#define GET_TX_DESC_RAW(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 18, 1) -#define GET_TX_DESC_CCX(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 19, 1) -#define GET_TX_DESC_AMPDU_DENSITY(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 20, 3) -#define GET_TX_DESC_ANTSEL_A(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 24, 1) -#define GET_TX_DESC_ANTSEL_B(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 25, 1) -#define GET_TX_DESC_TX_ANT_CCK(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 26, 2) -#define GET_TX_DESC_TX_ANTL(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 28, 2) -#define GET_TX_DESC_TX_ANT_HT(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 30, 2) - -#define SET_TX_DESC_NEXT_HEAP_PAGE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+12, 0, 8, __val) -#define SET_TX_DESC_TAIL_PAGE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+12, 8, 8, __val) -#define SET_TX_DESC_SEQ(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+12, 16, 12, __val) -#define SET_TX_DESC_CPU_HANDLE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+12, 28, 1, __val) -#define SET_TX_DESC_TAG1(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+12, 29, 1, __val) -#define SET_TX_DESC_TRIGGER_INT(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+12, 30, 1, __val) -#define SET_TX_DESC_HWSEQ_EN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+12, 31, 1, __val) - -#define GET_TX_DESC_NEXT_HEAP_PAGE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 0, 8) -#define GET_TX_DESC_TAIL_PAGE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 8, 8) -#define GET_TX_DESC_SEQ(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 16, 12) - -#define SET_TX_DESC_RTS_RATE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 0, 5, __val) -#define SET_TX_DESC_AP_DCFE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 5, 1, __val) -#define SET_TX_DESC_QOS(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 6, 1, __val) -#define SET_TX_DESC_HWSEQ_SSN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 7, 1, __val) -#define SET_TX_DESC_USE_RATE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 8, 1, __val) -#define SET_TX_DESC_DISABLE_RTS_FB(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 9, 1, __val) -#define SET_TX_DESC_DISABLE_FB(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 10, 1, __val) -#define SET_TX_DESC_CTS2SELF(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 11, 1, __val) -#define SET_TX_DESC_RTS_ENABLE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 12, 1, __val) -#define SET_TX_DESC_HW_RTS_ENABLE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 13, 1, __val) -#define SET_TX_DESC_PORT_ID(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 14, 1, __val) -#define SET_TX_DESC_PWR_STATUS(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 15, 3, __val) -#define SET_TX_DESC_WAIT_DCTS(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 18, 1, 
__val) -#define SET_TX_DESC_CTS2AP_EN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 19, 1, __val) -#define SET_TX_DESC_TX_SUB_CARRIER(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 20, 2, __val) -#define SET_TX_DESC_TX_STBC(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 22, 2, __val) -#define SET_TX_DESC_DATA_SHORT(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 24, 1, __val) -#define SET_TX_DESC_DATA_BW(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 25, 1, __val) -#define SET_TX_DESC_RTS_SHORT(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 26, 1, __val) -#define SET_TX_DESC_RTS_BW(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 27, 1, __val) -#define SET_TX_DESC_RTS_SC(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 28, 2, __val) -#define SET_TX_DESC_RTS_STBC(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 30, 2, __val) - -#define GET_TX_DESC_RTS_RATE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 0, 5) -#define GET_TX_DESC_AP_DCFE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 5, 1) -#define GET_TX_DESC_QOS(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 6, 1) -#define GET_TX_DESC_HWSEQ_EN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 7, 1) -#define GET_TX_DESC_USE_RATE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 8, 1) -#define GET_TX_DESC_DISABLE_RTS_FB(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 9, 1) -#define GET_TX_DESC_DISABLE_FB(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 10, 1) -#define GET_TX_DESC_CTS2SELF(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 11, 1) -#define GET_TX_DESC_RTS_ENABLE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 12, 1) -#define GET_TX_DESC_HW_RTS_ENABLE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 13, 1) -#define GET_TX_DESC_PORT_ID(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 14, 1) -#define GET_TX_DESC_WAIT_DCTS(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 18, 1) -#define GET_TX_DESC_CTS2AP_EN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 19, 1) -#define GET_TX_DESC_TX_SUB_CARRIER(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 20, 2) -#define GET_TX_DESC_TX_STBC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 22, 2) -#define GET_TX_DESC_DATA_SHORT(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 24, 1) -#define GET_TX_DESC_DATA_BW(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 25, 1) -#define GET_TX_DESC_RTS_SHORT(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 26, 1) -#define GET_TX_DESC_RTS_BW(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 27, 1) -#define GET_TX_DESC_RTS_SC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 28, 2) -#define GET_TX_DESC_RTS_STBC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 30, 2) - -#define SET_TX_DESC_TX_RATE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+20, 0, 6, __val) -#define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+20, 6, 1, __val) -#define SET_TX_DESC_CCX_TAG(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+20, 7, 1, __val) -#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+20, 8, 5, __val) -#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+20, 13, 4, __val) -#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+20, 17, 1, __val) -#define SET_TX_DESC_DATA_RETRY_LIMIT(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+20, 18, 6, __val) -#define SET_TX_DESC_USB_TXAGG_NUM(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+20, 24, 8, __val) - -#define GET_TX_DESC_TX_RATE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+20, 0, 6) -#define GET_TX_DESC_DATA_SHORTGI(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+20, 6, 1) -#define 
GET_TX_DESC_CCX_TAG(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+20, 7, 1) -#define GET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+20, 8, 5) -#define GET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+20, 13, 4) -#define GET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+20, 17, 1) -#define GET_TX_DESC_DATA_RETRY_LIMIT(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+20, 18, 6) -#define GET_TX_DESC_USB_TXAGG_NUM(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+20, 24, 8) - -#define SET_TX_DESC_TXAGC_A(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 5, __val) -#define SET_TX_DESC_TXAGC_B(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+24, 5, 5, __val) -#define SET_TX_DESC_USE_MAX_LEN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+24, 10, 1, __val) -#define SET_TX_DESC_MAX_AGG_NUM(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+24, 11, 5, __val) -#define SET_TX_DESC_MCSG1_MAX_LEN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+24, 16, 4, __val) -#define SET_TX_DESC_MCSG2_MAX_LEN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+24, 20, 4, __val) -#define SET_TX_DESC_MCSG3_MAX_LEN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+24, 24, 4, __val) -#define SET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+24, 28, 4, __val) - -#define GET_TX_DESC_TXAGC_A(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+24, 0, 5) -#define GET_TX_DESC_TXAGC_B(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+24, 5, 5) -#define GET_TX_DESC_USE_MAX_LEN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+24, 10, 1) -#define GET_TX_DESC_MAX_AGG_NUM(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+24, 11, 5) -#define GET_TX_DESC_MCSG1_MAX_LEN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+24, 16, 4) -#define GET_TX_DESC_MCSG2_MAX_LEN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+24, 20, 4) -#define GET_TX_DESC_MCSG3_MAX_LEN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+24, 24, 4) -#define GET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+24, 28, 4) - -#define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 16, __val) -#define SET_TX_DESC_SW_OFFSET30(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+28, 16, 8, __val) -#define SET_TX_DESC_SW_OFFSET31(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+28, 24, 4, __val) -#define SET_TX_DESC_ANTSEL_C(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+28, 29, 1, __val) -#define SET_TX_DESC_NULL_0(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+28, 30, 1, __val) -#define SET_TX_DESC_NULL_1(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+28, 30, 1, __val) - -#define GET_TX_DESC_TX_BUFFER_SIZE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+28, 0, 16) - -#define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+32, 0, 32, __val) -#define SET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+36, 0, 32, __val) - -#define GET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+32, 0, 32) -#define GET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+36, 0, 32) - -#define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+40, 0, 32, __val) -#define SET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+44, 0, 32, __val) - -#define GET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+40, 0, 32) -#define GET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+44, 0, 32) - -#define GET_RX_DESC_PKT_LEN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 0, 14) -#define GET_RX_DESC_CRC32(__pdesc) \ - 
LE_BITS_TO_4BYTE(__pdesc, 14, 1) -#define GET_RX_DESC_ICV(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 15, 1) -#define GET_RX_DESC_DRV_INFO_SIZE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 16, 4) -#define GET_RX_DESC_SECURITY(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 20, 3) -#define GET_RX_DESC_QOS(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 23, 1) -#define GET_RX_DESC_SHIFT(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 24, 2) -#define GET_RX_DESC_PHYST(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 26, 1) -#define GET_RX_DESC_SWDEC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 27, 1) -#define GET_RX_DESC_LS(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 28, 1) -#define GET_RX_DESC_FS(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 29, 1) -#define GET_RX_DESC_EOR(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 30, 1) -#define GET_RX_DESC_OWN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 31, 1) - -#define SET_RX_DESC_PKT_LEN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 0, 14, __val) -#define SET_RX_DESC_EOR(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val) -#define SET_RX_DESC_OWN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val) - -#define GET_RX_DESC_MACID(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 0, 6) -#define GET_RX_DESC_PAGGR(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 14, 1) -#define GET_RX_DESC_FAGGR(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 15, 1) -#define GET_RX_DESC_A1_FIT(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 16, 4) -#define GET_RX_DESC_A2_FIT(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 20, 4) -#define GET_RX_DESC_PAM(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 24, 1) -#define GET_RX_DESC_PWR(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 25, 1) -#define GET_RX_DESC_MD(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 26, 1) -#define GET_RX_DESC_MF(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 27, 1) -#define GET_RX_DESC_TYPE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 28, 2) -#define GET_RX_DESC_MC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 30, 1) -#define GET_RX_DESC_BC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 31, 1) -#define GET_RX_DESC_SEQ(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 0, 12) -#define GET_RX_DESC_FRAG(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 12, 4) - -#define GET_RX_DESC_RXMCS(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 0, 6) -#define GET_RX_DESC_RXHT(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 6, 1) -#define GET_RX_STATUS_DESC_RX_GF(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 7, 1) -#define GET_RX_DESC_SPLCP(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 8, 1) -#define GET_RX_DESC_BW(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 9, 1) -#define GET_RX_DESC_HTC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 10, 1) -#define GET_RX_STATUS_DESC_EOSP(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 11, 1) -#define GET_RX_STATUS_DESC_BSSID_FIT(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 12, 2) -#define GET_RX_STATUS_DESC_RPT_SEL(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 14, 2) - -#define GET_RX_STATUS_DESC_PATTERN_MATCH(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 29, 1) -#define GET_RX_STATUS_DESC_UNICAST_MATCH(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 30, 1) -#define GET_RX_STATUS_DESC_MAGIC_MATCH(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 31, 1) - -#define GET_RX_DESC_IV1(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 0, 32) -#define GET_RX_DESC_TSFL(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+20, 0, 32) - -#define GET_RX_DESC_BUFF_ADDR(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+24, 0, 32) -#define GET_RX_DESC_BUFF_ADDR64(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+28, 0, 32) - -#define SET_RX_DESC_BUFF_ADDR(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 32, __val) -#define 
SET_RX_DESC_BUFF_ADDR64(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 32, __val) +static inline void set_tx_desc_pkt_size(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, GENMASK(15, 0)); +} + +static inline void set_tx_desc_offset(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, GENMASK(23, 16)); +} + +static inline void set_tx_desc_bmc(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, BIT(24)); +} + +static inline void set_tx_desc_htc(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, BIT(25)); +} + +static inline void set_tx_desc_last_seg(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, BIT(26)); +} + +static inline void set_tx_desc_first_seg(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, BIT(27)); +} + +static inline void set_tx_desc_linip(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, BIT(28)); +} + +static inline void set_tx_desc_own(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, BIT(31)); +} + +static inline int get_tx_desc_own(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc), BIT(31)); +} + +static inline void set_tx_desc_macid(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc + 1, __val, GENMASK(5, 0)); +} + +static inline void set_tx_desc_queue_sel(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc + 1, __val, GENMASK(12, 8)); +} + +static inline void set_tx_desc_rate_id(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc + 1, __val, GENMASK(19, 16)); +} + +static inline void set_tx_desc_nav_use_hdr(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc + 1, __val, BIT(20)); +} + +static inline void set_tx_desc_sec_type(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc + 1, __val, GENMASK(23, 22)); +} + +static inline void set_tx_desc_pkt_offset(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc + 1, __val, GENMASK(30, 26)); +} + +static inline void set_tx_desc_agg_enable(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc + 2, __val, BIT(12)); +} + +static inline void set_tx_desc_rdg_enable(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc + 2, __val, BIT(13)); +} + +static inline void set_tx_desc_more_frag(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc + 2, __val, BIT(17)); +} + +static inline void set_tx_desc_ampdu_density(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc + 2, __val, GENMASK(22, 20)); +} + +static inline void set_tx_desc_antsel_a(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc + 2, __val, BIT(24)); +} + +static inline void set_tx_desc_antsel_b(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc + 2, __val, BIT(25)); +} + +static inline void set_tx_desc_seq(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc + 3, __val, GENMASK(27, 16)); +} + +static inline void set_tx_desc_hwseq_en(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc + 3, __val, BIT(31)); +} + +static inline void set_tx_desc_rts_rate(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc + 4, __val, GENMASK(4, 0)); +} + +static inline void set_tx_desc_qos(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc + 4, __val, BIT(6)); +} + +static inline void set_tx_desc_use_rate(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc + 4, __val, BIT(8)); +} + +static inline void set_tx_desc_disable_fb(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc + 4, __val, BIT(10)); +} + 
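The setters above follow the mirror-image rule: SET_BITS_TO_LE_4BYTE(__pdesc + 4*n, start, len, __val) becomes le32p_replace_bits(__pdesc + n, __val, GENMASK(start + len - 1, start)), i.e. byte offsets turn into __le32 word offsets and (start, width) pairs turn into masks. A sketch that checks one conversion from this hunk (set_tx_desc_queue_sel: byte +4, bits 8..12 -> word +1, GENMASK(12, 8)), again on a little-endian host with stand-in helpers instead of <linux/bitfield.h>:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define GENMASK(h, l) ((~0u << (l)) & (~0u >> (31 - (h))))

/* Old form: byte-addressed descriptor, (start bit, width, value). */
static void set_bits_to_le_4byte(void *p, int start, int len, uint32_t val)
{
        uint32_t *w = p; /* le32 == host order on this host */
        uint32_t mask = GENMASK(start + len - 1, start);

        *w = (*w & ~mask) | ((val << start) & mask);
}

/* New form: word-addressed, mask-based, modelling le32p_replace_bits(). */
static void replace_bits(uint32_t *w, uint32_t val, uint32_t mask)
{
        int shift = __builtin_ctz(mask);

        *w = (*w & ~mask) | ((val << shift) & mask);
}

int main(void)
{
        uint32_t a[8] = { 0 }, b[8] = { 0 };

        set_bits_to_le_4byte((uint8_t *)a + 4, 8, 5, 0x13); /* old: byte +4 */
        replace_bits(b + 1, 0x13, GENMASK(12, 8));          /* new: word +1 */

        puts(memcmp(a, b, sizeof(a)) ? "mappings differ" : "mappings agree");
        return 0;
}

Either way the bytes stored in the descriptor stay little-endian on every host; only the host-order intermediate value differs.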
+static inline void set_tx_desc_cts2self(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc + 4, __val, BIT(11)); +} + +static inline void set_tx_desc_rts_enable(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc + 4, __val, BIT(12)); +} + +static inline void set_tx_desc_hw_rts_enable(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc + 4, __val, BIT(13)); +} + +static inline void set_tx_desc_tx_sub_carrier(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc + 4, __val, GENMASK(21, 20)); +} + +static inline void set_tx_desc_tx_stbc(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc + 4, __val, GENMASK(23, 22)); +} + +static inline void set_tx_desc_data_bw(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc + 4, __val, BIT(25)); +} + +static inline void set_tx_desc_rts_short(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc + 4, __val, BIT(26)); +} + +static inline void set_tx_desc_rts_bw(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc + 4, __val, BIT(27)); +} + +static inline void set_tx_desc_rts_sc(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc + 4, __val, GENMASK(29, 28)); +} + +static inline void set_tx_desc_rts_stbc(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc + 4, __val, GENMASK(31, 30)); +} + +static inline void set_tx_desc_tx_rate(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc + 5, __val, GENMASK(5, 0)); +} + +static inline void set_tx_desc_data_shortgi(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc + 5, __val, BIT(6)); +} + +static inline void set_tx_desc_data_rate_fb_limit(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc + 5, __val, GENMASK(12, 8)); +} + +static inline void set_tx_desc_rts_rate_fb_limit(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc + 5, __val, GENMASK(16, 13)); +} + +static inline void set_tx_desc_max_agg_num(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc + 6, __val, GENMASK(15, 11)); +} + +static inline void set_tx_desc_antsel_c(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc + 7, __val, BIT(29)); +} + +static inline void set_tx_desc_tx_buffer_size(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc + 7, __val, GENMASK(15, 0)); +} + +static inline int get_tx_desc_tx_buffer_size(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 7), GENMASK(15, 0)); +} + +static inline void set_tx_desc_tx_buffer_address(__le32 *__pdesc, u32 __val) +{ + *(__pdesc + 8) = cpu_to_le32(__val); +} + +static inline u32 get_tx_desc_tx_buffer_address(__le32 *__pdesc) +{ + return le32_to_cpu(*(__pdesc + 8)); +} + +static inline void set_tx_desc_next_desc_address(__le32 *__pdesc, u32 __val) +{ + *(__pdesc + 10) = cpu_to_le32(__val); +} + +static inline int get_rx_desc_pkt_len(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc), GENMASK(13, 0)); +} + +static inline int get_rx_desc_crc32(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc), BIT(14)); +} + +static inline int get_rx_desc_icv(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc), BIT(15)); +} + +static inline int get_rx_desc_drv_info_size(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc), GENMASK(19, 16)); +} + +static inline int get_rx_desc_security(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc), GENMASK(22, 20)); +} + +static inline int get_rx_desc_qos(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc), BIT(23)); +} + +static inline int get_rx_desc_shift(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc), GENMASK(25, 
24)); +} + +static inline int get_rx_desc_physt(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc), BIT(26)); +} + +static inline int get_rx_desc_swdec(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc), BIT(27)); +} + +static inline int get_rx_desc_ls(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc), BIT(28)); +} + +static inline int get_rx_desc_fs(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc), BIT(29)); +} + +static inline int get_rx_desc_eor(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc), BIT(30)); +} + +static inline int get_rx_desc_own(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc), BIT(31)); +} + +static inline void set_rx_desc_pkt_len(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, GENMASK(13, 0)); +} + +static inline void set_rx_desc_eor(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, BIT(30)); +} + +static inline void set_rx_desc_own(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, BIT(31)); +} + +static inline int get_rx_desc_macid(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 1), GENMASK(5, 0)); +} + +static inline int get_rx_desc_paggr(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 1), BIT(14)); +} + +static inline int get_rx_desc_faggr(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 1), BIT(15)); +} + +static inline int get_rx_desc_a1_fit(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 1), GENMASK(19, 16)); +} + +static inline int get_rx_desc_a2_fit(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 1), GENMASK(23, 20)); +} + +static inline int get_rx_desc_pam(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 1), BIT(24)); +} + +static inline int get_rx_desc_pwr(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 1), BIT(25)); +} + +static inline int get_rx_desc_md(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 1), BIT(26)); +} + +static inline int get_rx_desc_mf(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 1), BIT(27)); +} + +static inline int get_rx_desc_type(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 1), GENMASK(29, 28)); +} + +static inline int get_rx_desc_mc(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 1), BIT(30)); +} + +static inline int get_rx_desc_bc(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 1), BIT(31)); +} + +static inline int get_rx_desc_seq(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 2), GENMASK(11, 0)); +} + +static inline int get_rx_desc_frag(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 2), GENMASK(15, 12)); +} + +static inline int get_rx_desc_rxmcs(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 3), GENMASK(5, 0)); +} + +static inline int get_rx_desc_rxht(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 3), BIT(6)); +} + +static inline int get_rx_status_desc_rx_gf(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 3), BIT(7)); +} + +static inline int get_rx_desc_splcp(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 3), BIT(8)); +} + +static inline int get_rx_desc_bw(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 3), BIT(9)); +} + +static inline int get_rx_desc_htc(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 3), BIT(10)); +} + +static inline int get_rx_status_desc_eosp(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 3), BIT(11)); +} + +static inline int get_rx_status_desc_bssid_fit(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 3), GENMASK(13, 12)); +} + +static inline int 
get_rx_status_desc_rpt_sel(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 3), GENMASK(15, 14)); +} + +static inline int get_rx_status_desc_pattern_match(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 3), BIT(29)); +} + +static inline int get_rx_status_desc_unicast_match(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 3), BIT(30)); +} + +static inline int get_rx_status_desc_magic_match(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 3), BIT(31)); +} + +static inline u32 get_rx_desc_iv1(__le32 *__pdesc) +{ + return le32_to_cpu(*(__pdesc + 4)); +} + +static inline u32 get_rx_desc_tsfl(__le32 *__pdesc) +{ + return le32_to_cpu(*(__pdesc + 5)); +} + +static inline u32 get_rx_desc_buff_addr(__le32 *__pdesc) +{ + return le32_to_cpu(*(__pdesc + 6)); +} + +static inline u32 get_rx_desc_buff_addr64(__le32 *__pdesc) +{ + return le32_to_cpu(*(__pdesc + 7)); +} + +static inline void set_rx_desc_buff_addr(__le32 *__pdesc, u32 __val) +{ + *(__pdesc + 6) = cpu_to_le32(__val); +} + +static inline void set_rx_desc_buff_addr64(__le32 *__pdesc, u32 __val) +{ + *(__pdesc + 7) = cpu_to_le32(__val); +} /* TX report 2 format in Rx desc*/ -#define GET_RX_RPT2_DESC_PKT_LEN(__status) \ - LE_BITS_TO_4BYTE(__status, 0, 9) -#define GET_RX_RPT2_DESC_MACID_VALID_1(__status) \ - LE_BITS_TO_4BYTE(__status+16, 0, 32) -#define GET_RX_RPT2_DESC_MACID_VALID_2(__status) \ - LE_BITS_TO_4BYTE(__status+20, 0, 32) - -#define SET_EARLYMODE_PKTNUM(__paddr, __value) \ - SET_BITS_TO_LE_4BYTE(__paddr, 0, 4, __value) -#define SET_EARLYMODE_LEN0(__paddr, __value) \ - SET_BITS_TO_LE_4BYTE(__paddr, 4, 12, __value) -#define SET_EARLYMODE_LEN1(__paddr, __value) \ - SET_BITS_TO_LE_4BYTE(__paddr, 16, 12, __value) -#define SET_EARLYMODE_LEN2_1(__paddr, __value) \ - SET_BITS_TO_LE_4BYTE(__paddr, 28, 4, __value) -#define SET_EARLYMODE_LEN2_2(__paddr, __value) \ - SET_BITS_TO_LE_4BYTE(__paddr+4, 0, 8, __value) -#define SET_EARLYMODE_LEN3(__paddr, __value) \ - SET_BITS_TO_LE_4BYTE(__paddr+4, 8, 12, __value) -#define SET_EARLYMODE_LEN4(__paddr, __value) \ - SET_BITS_TO_LE_4BYTE(__paddr+4, 20, 12, __value) - -#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \ -do { \ - if (_size > TX_DESC_NEXT_DESC_OFFSET) \ - memset(__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET); \ - else \ - memset(__pdesc, 0, _size); \ -} while (0) +static inline int get_rx_rpt2_desc_pkt_len(__le32 *__status) +{ + return le32_get_bits(*(__status), GENMASK(8, 0)); +} + +static inline u32 get_rx_rpt2_desc_macid_valid_1(__le32 *__status) +{ + return le32_to_cpu(*(__status + 4)); +} + +static inline u32 get_rx_rpt2_desc_macid_valid_2(__le32 *__status) +{ + return le32_to_cpu(*(__status + 5)); +} + +static inline void set_earlymode_pktnum(__le32 *__paddr, u32 __value) +{ + le32p_replace_bits(__paddr, __value, GENMASK(3, 0)); +} + +static inline void set_earlymode_len0(__le32 *__paddr, u32 __value) +{ + le32p_replace_bits(__paddr, __value, GENMASK(15, 4)); +} + +static inline void set_earlymode_len1(__le32 *__paddr, u32 __value) +{ + le32p_replace_bits(__paddr, __value, GENMASK(27, 16)); +} + +static inline void set_earlymode_len2_1(__le32 *__paddr, u32 __value) +{ + le32p_replace_bits(__paddr, __value, GENMASK(31, 28)); +} + +static inline void set_earlymode_len2_2(__le32 *__paddr, u32 __value) +{ + le32p_replace_bits(__paddr + 1, __value, GENMASK(7, 0)); +} + +static inline void set_earlymode_len3(__le32 *__paddr, u32 __value) +{ + le32p_replace_bits(__paddr + 1, __value, GENMASK(19, 8)); +} + +static inline void set_earlymode_len4(__le32 *__paddr, u32 __value) +{ 
+ le32p_replace_bits(__paddr + 1, __value, GENMASK(31, 20)); +} + +static inline void clear_pci_tx_desc_content(__le32 *__pdesc, int _size) +{ + if (_size > TX_DESC_NEXT_DESC_OFFSET) + memset(__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET); + else + memset(__pdesc, 0, _size); +} #define RTL8188_RX_HAL_IS_CCK_RATE(rxmcs)\ (rxmcs == DESC92C_RATE1M ||\ @@ -520,17 +560,7 @@ do { \ rxmcs == DESC92C_RATE5_5M ||\ rxmcs == DESC92C_RATE11M) -#define IS_LITTLE_ENDIAN 1 - -struct phy_rx_agc_info_t { - #if IS_LITTLE_ENDIAN - u8 gain:7, trsw:1; - #else - u8 trsw:1, gain:7; - #endif -}; struct phy_status_rpt { - struct phy_rx_agc_info_t path_agc[2]; u8 ch_corr[2]; u8 cck_sig_qual_ofdm_pwdb_all; u8 cck_agc_rpt_ofdm_cfosho_a; @@ -547,7 +577,7 @@ struct phy_status_rpt { u8 stream_target_csi[2]; u8 sig_evm; u8 rsvd_3; -#if IS_LITTLE_ENDIAN +#if defined(__LITTLE_ENDIAN) u8 antsel_rx_keep_2:1; /*ex_intf_flg:1;*/ u8 sgi_en:1; u8 rxsc:2; @@ -555,7 +585,7 @@ struct phy_status_rpt { u8 r_ant_train_en:1; u8 ant_sel_b:1; u8 ant_sel:1; -#else /* _BIG_ENDIAN_ */ +#else /* __BIG_ENDIAN */ u8 ant_sel:1; u8 ant_sel_b:1; u8 r_ant_train_en:1; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/def.h index 147bf2407f8f..34486bd3e109 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/def.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/def.h @@ -17,39 +17,6 @@ #define RX_MPDU_QUEUE 0 #define RX_CMD_QUEUE 1 -#define C2H_RX_CMD_HDR_LEN 8 -#define GET_C2H_CMD_CMD_LEN(__prxhdr) \ - LE_BITS_TO_4BYTE((__prxhdr), 0, 16) -#define GET_C2H_CMD_ELEMENT_ID(__prxhdr) \ - LE_BITS_TO_4BYTE((__prxhdr), 16, 8) -#define GET_C2H_CMD_CMD_SEQ(__prxhdr) \ - LE_BITS_TO_4BYTE((__prxhdr), 24, 7) -#define GET_C2H_CMD_CONTINUE(__prxhdr) \ - LE_BITS_TO_4BYTE((__prxhdr), 31, 1) -#define GET_C2H_CMD_CONTENT(__prxhdr) \ - ((u8 *)(__prxhdr) + C2H_RX_CMD_HDR_LEN) - -#define GET_C2H_CMD_FEEDBACK_ELEMENT_ID(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE((__pcmdfbhdr), 0, 8) -#define GET_C2H_CMD_FEEDBACK_CCX_LEN(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE((__pcmdfbhdr), 8, 8) -#define GET_C2H_CMD_FEEDBACK_CCX_CMD_CNT(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE((__pcmdfbhdr), 16, 16) -#define GET_C2H_CMD_FEEDBACK_CCX_MAC_ID(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 0, 5) -#define GET_C2H_CMD_FEEDBACK_CCX_VALID(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 7, 1) -#define GET_C2H_CMD_FEEDBACK_CCX_RETRY_CNT(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 8, 5) -#define GET_C2H_CMD_FEEDBACK_CCX_TOK(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 15, 1) -#define GET_C2H_CMD_FEEDBACK_CCX_QSEL(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 16, 4) -#define GET_C2H_CMD_FEEDBACK_CCX_SEQ(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 20, 12) -#define GET_RX_STATUS_DESC_BUFF_ADDR(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc + 24, 0, 32) - #define CHIP_VER_B BIT(4) #define CHIP_BONDING_IDENTIFIER(_value) (((_value) >> 22) & 0x3) #define CHIP_BONDING_92C_1T2R 0x1 diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c index a9c0111444bc..900788e4018c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c @@ -113,8 +113,6 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps; - 
rtlpriv->cfg->mod_params->sw_crypto = - rtlpriv->cfg->mod_params->sw_crypto; if (!rtlpriv->psc.inactiveps) pr_info("rtl8192ce: Power Save off (module option)\n"); if (!rtlpriv->psc.fwctrl_lps) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c index 18a0ab59631a..fc9a3aae047f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c @@ -33,27 +33,6 @@ static u8 _rtl92c_query_rxpwrpercentage(s8 antpower) return 100 + antpower; } -static u8 _rtl92c_evm_db_to_percentage(s8 value) -{ - s8 ret_val; - - ret_val = value; - - if (ret_val >= 0) - ret_val = 0; - - if (ret_val <= -33) - ret_val = -33; - - ret_val = 0 - ret_val; - ret_val *= 3; - - if (ret_val == 99) - ret_val = 100; - - return ret_val; -} - static long _rtl92ce_signal_scale_mapping(struct ieee80211_hw *hw, long currsig) { @@ -243,7 +222,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw, max_spatial_stream = 1; for (i = 0; i < max_spatial_stream; i++) { - evm = _rtl92c_evm_db_to_percentage(p_drvinfo->rxevm[i]); + evm = rtl_evm_db_to_percentage(p_drvinfo->rxevm[i]); if (packet_match_bssid) { /* Fill value in RFD, Get the first @@ -251,8 +230,8 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw, */ if (i == 0) pstats->signalquality = - (u8) (evm & 0xff); - pstats->rx_mimo_sig_qual[i] = (u8) (evm & 0xff); + (u8)(evm & 0xff); + pstats->rx_mimo_sig_qual[i] = (u8)(evm & 0xff); } } } @@ -262,10 +241,10 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw, */ if (is_cck_rate) pstats->signalstrength = - (u8) (_rtl92ce_signal_scale_mapping(hw, pwdb_all)); + (u8)(_rtl92ce_signal_scale_mapping(hw, pwdb_all)); else if (rf_rx_num != 0) pstats->signalstrength = - (u8) (_rtl92ce_signal_scale_mapping + (u8)(_rtl92ce_signal_scale_mapping (hw, total_rssi /= rf_rx_num)); } @@ -317,29 +296,30 @@ static void _rtl92ce_translate_rx_signal_stuff(struct ieee80211_hw *hw, bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats, struct ieee80211_rx_status *rx_status, - u8 *p_desc, struct sk_buff *skb) + u8 *p_desc8, struct sk_buff *skb) { struct rx_fwinfo_92c *p_drvinfo; - struct rx_desc_92c *pdesc = (struct rx_desc_92c *)p_desc; + struct rx_desc_92c *pdesc = (struct rx_desc_92c *)p_desc8; struct ieee80211_hdr *hdr; - u32 phystatus = GET_RX_DESC_PHYST(pdesc); + __le32 *p_desc = (__le32 *)p_desc8; + u32 phystatus = get_rx_desc_physt(p_desc); - stats->length = (u16) GET_RX_DESC_PKT_LEN(pdesc); - stats->rx_drvinfo_size = (u8) GET_RX_DESC_DRV_INFO_SIZE(pdesc) * + stats->length = (u16)get_rx_desc_pkt_len(p_desc); + stats->rx_drvinfo_size = (u8)get_rx_desc_drv_info_size(p_desc) * RX_DRV_INFO_SIZE_UNIT; - stats->rx_bufshift = (u8) (GET_RX_DESC_SHIFT(pdesc) & 0x03); - stats->icv = (u16) GET_RX_DESC_ICV(pdesc); - stats->crc = (u16) GET_RX_DESC_CRC32(pdesc); + stats->rx_bufshift = (u8)(get_rx_desc_shift(p_desc) & 0x03); + stats->icv = (u16)get_rx_desc_icv(p_desc); + stats->crc = (u16)get_rx_desc_crc32(p_desc); stats->hwerror = (stats->crc | stats->icv); - stats->decrypted = !GET_RX_DESC_SWDEC(pdesc); - stats->rate = (u8) GET_RX_DESC_RXMCS(pdesc); - stats->shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc); - stats->isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1); - stats->isfirst_ampdu = (bool) ((GET_RX_DESC_PAGGR(pdesc) == 1) - && (GET_RX_DESC_FAGGR(pdesc) == 1)); - stats->timestamp_low = GET_RX_DESC_TSFL(pdesc); - stats->rx_is40mhzpacket = (bool)GET_RX_DESC_BW(pdesc); - 
stats->is_ht = (bool)GET_RX_DESC_RXHT(pdesc); + stats->decrypted = !get_rx_desc_swdec(p_desc); + stats->rate = (u8)get_rx_desc_rxmcs(p_desc); + stats->shortpreamble = (u16)get_rx_desc_splcp(p_desc); + stats->isampdu = (bool)(get_rx_desc_paggr(p_desc) == 1); + stats->isfirst_ampdu = (bool)((get_rx_desc_paggr(p_desc) == 1) && + (get_rx_desc_faggr(p_desc) == 1)); + stats->timestamp_low = get_rx_desc_tsfl(p_desc); + stats->rx_is40mhzpacket = (bool)get_rx_desc_bw(p_desc); + stats->is_ht = (bool)get_rx_desc_rxht(p_desc); stats->is_cck = RX_HAL_IS_CCK_RATE(pdesc->rxmcs); @@ -400,7 +380,7 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw, } void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw, - struct ieee80211_hdr *hdr, u8 *pdesc_tx, + struct ieee80211_hdr *hdr, u8 *pdesc8, u8 *pbd_desc_tx, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, struct sk_buff *skb, @@ -411,7 +391,7 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw, struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); bool defaultadapter = true; - u8 *pdesc = pdesc_tx; + __le32 *pdesc = (__le32 *)pdesc8; u16 seq_number; __le16 fc = hdr->frame_control; u8 fw_qsel = _rtl92ce_map_hwqueue_to_fwqueue(skb, hw_queue); @@ -447,64 +427,64 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw, rtl_get_tcb_desc(hw, info, sta, skb, tcb_desc); - CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_92c)); + clear_pci_tx_desc_content(pdesc, sizeof(struct tx_desc_92c)); if (ieee80211_is_nullfunc(fc) || ieee80211_is_ctl(fc)) { firstseg = true; lastseg = true; } if (firstseg) { - SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN); + set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN); - SET_TX_DESC_TX_RATE(pdesc, tcb_desc->hw_rate); + set_tx_desc_tx_rate(pdesc, tcb_desc->hw_rate); if (tcb_desc->use_shortgi || tcb_desc->use_shortpreamble) - SET_TX_DESC_DATA_SHORTGI(pdesc, 1); + set_tx_desc_data_shortgi(pdesc, 1); if (info->flags & IEEE80211_TX_CTL_AMPDU) { - SET_TX_DESC_AGG_BREAK(pdesc, 1); - SET_TX_DESC_MAX_AGG_NUM(pdesc, 0x14); + set_tx_desc_agg_break(pdesc, 1); + set_tx_desc_max_agg_num(pdesc, 0x14); } - SET_TX_DESC_SEQ(pdesc, seq_number); + set_tx_desc_seq(pdesc, seq_number); - SET_TX_DESC_RTS_ENABLE(pdesc, ((tcb_desc->rts_enable && + set_tx_desc_rts_enable(pdesc, ((tcb_desc->rts_enable && !tcb_desc-> cts_enable) ? 1 : 0)); - SET_TX_DESC_HW_RTS_ENABLE(pdesc, + set_tx_desc_hw_rts_enable(pdesc, ((tcb_desc->rts_enable || tcb_desc->cts_enable) ? 1 : 0)); - SET_TX_DESC_CTS2SELF(pdesc, ((tcb_desc->cts_enable) ? 1 : 0)); - SET_TX_DESC_RTS_STBC(pdesc, ((tcb_desc->rts_stbc) ? 1 : 0)); + set_tx_desc_cts2self(pdesc, ((tcb_desc->cts_enable) ? 1 : 0)); + set_tx_desc_rts_stbc(pdesc, ((tcb_desc->rts_stbc) ? 1 : 0)); - SET_TX_DESC_RTS_RATE(pdesc, tcb_desc->rts_rate); - SET_TX_DESC_RTS_BW(pdesc, 0); - SET_TX_DESC_RTS_SC(pdesc, tcb_desc->rts_sc); - SET_TX_DESC_RTS_SHORT(pdesc, + set_tx_desc_rts_rate(pdesc, tcb_desc->rts_rate); + set_tx_desc_rts_bw(pdesc, 0); + set_tx_desc_rts_sc(pdesc, tcb_desc->rts_sc); + set_tx_desc_rts_short(pdesc, ((tcb_desc->rts_rate <= DESC_RATE54M) ? (tcb_desc->rts_use_shortpreamble ? 1 : 0) : (tcb_desc->rts_use_shortgi ? 
1 : 0))); if (bw_40) { if (tcb_desc->packet_bw) { - SET_TX_DESC_DATA_BW(pdesc, 1); - SET_TX_DESC_TX_SUB_CARRIER(pdesc, 3); + set_tx_desc_data_bw(pdesc, 1); + set_tx_desc_tx_sub_carrier(pdesc, 3); } else { - SET_TX_DESC_DATA_BW(pdesc, 0); - SET_TX_DESC_TX_SUB_CARRIER(pdesc, + set_tx_desc_data_bw(pdesc, 0); + set_tx_desc_tx_sub_carrier(pdesc, mac->cur_40_prime_sc); } } else { - SET_TX_DESC_DATA_BW(pdesc, 0); - SET_TX_DESC_TX_SUB_CARRIER(pdesc, 0); + set_tx_desc_data_bw(pdesc, 0); + set_tx_desc_tx_sub_carrier(pdesc, 0); } - SET_TX_DESC_LINIP(pdesc, 0); - SET_TX_DESC_PKT_SIZE(pdesc, (u16) skb->len); + set_tx_desc_linip(pdesc, 0); + set_tx_desc_pkt_size(pdesc, (u16)skb->len); if (sta) { u8 ampdu_density = sta->ht_cap.ampdu_density; - SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density); + set_tx_desc_ampdu_density(pdesc, ampdu_density); } if (info->control.hw_key) { @@ -515,77 +495,78 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw, case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: case WLAN_CIPHER_SUITE_TKIP: - SET_TX_DESC_SEC_TYPE(pdesc, 0x1); + set_tx_desc_sec_type(pdesc, 0x1); break; case WLAN_CIPHER_SUITE_CCMP: - SET_TX_DESC_SEC_TYPE(pdesc, 0x3); + set_tx_desc_sec_type(pdesc, 0x3); break; default: - SET_TX_DESC_SEC_TYPE(pdesc, 0x0); + set_tx_desc_sec_type(pdesc, 0x0); break; } } - SET_TX_DESC_PKT_ID(pdesc, 0); - SET_TX_DESC_QUEUE_SEL(pdesc, fw_qsel); + set_tx_desc_pkt_id(pdesc, 0); + set_tx_desc_queue_sel(pdesc, fw_qsel); - SET_TX_DESC_DATA_RATE_FB_LIMIT(pdesc, 0x1F); - SET_TX_DESC_RTS_RATE_FB_LIMIT(pdesc, 0xF); - SET_TX_DESC_DISABLE_FB(pdesc, 0); - SET_TX_DESC_USE_RATE(pdesc, tcb_desc->use_driver_rate ? 1 : 0); + set_tx_desc_data_rate_fb_limit(pdesc, 0x1F); + set_tx_desc_rts_rate_fb_limit(pdesc, 0xF); + set_tx_desc_disable_fb(pdesc, 0); + set_tx_desc_use_rate(pdesc, tcb_desc->use_driver_rate ? 1 : 0); if (ieee80211_is_data_qos(fc)) { if (mac->rdg_en) { RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "Enable RDG function\n"); - SET_TX_DESC_RDG_ENABLE(pdesc, 1); - SET_TX_DESC_HTC(pdesc, 1); + set_tx_desc_rdg_enable(pdesc, 1); + set_tx_desc_htc(pdesc, 1); } } } rcu_read_unlock(); - SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0)); - SET_TX_DESC_LAST_SEG(pdesc, (lastseg ? 1 : 0)); + set_tx_desc_first_seg(pdesc, (firstseg ? 1 : 0)); + set_tx_desc_last_seg(pdesc, (lastseg ? 1 : 0)); - SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) skb->len); + set_tx_desc_tx_buffer_size(pdesc, (u16)skb->len); - SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping); + set_tx_desc_tx_buffer_address(pdesc, mapping); if (rtlpriv->dm.useramask) { - SET_TX_DESC_RATE_ID(pdesc, tcb_desc->ratr_index); - SET_TX_DESC_MACID(pdesc, tcb_desc->mac_id); + set_tx_desc_rate_id(pdesc, tcb_desc->ratr_index); + set_tx_desc_macid(pdesc, tcb_desc->mac_id); } else { - SET_TX_DESC_RATE_ID(pdesc, 0xC + tcb_desc->ratr_index); - SET_TX_DESC_MACID(pdesc, tcb_desc->ratr_index); + set_tx_desc_rate_id(pdesc, 0xC + tcb_desc->ratr_index); + set_tx_desc_macid(pdesc, tcb_desc->ratr_index); } if ((!ieee80211_is_data_qos(fc)) && ppsc->fwctrl_lps) { - SET_TX_DESC_HWSEQ_EN(pdesc, 1); - SET_TX_DESC_PKT_ID(pdesc, 8); + set_tx_desc_hwseq_en(pdesc, 1); + set_tx_desc_pkt_id(pdesc, 8); if (!defaultadapter) - SET_TX_DESC_QOS(pdesc, 1); + set_tx_desc_qos(pdesc, 1); } - SET_TX_DESC_MORE_FRAG(pdesc, (lastseg ? 0 : 1)); + set_tx_desc_more_frag(pdesc, (lastseg ? 
0 : 1)); if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) || is_broadcast_ether_addr(ieee80211_get_DA(hdr))) { - SET_TX_DESC_BMC(pdesc, 1); + set_tx_desc_bmc(pdesc, 1); } RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n"); } void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw, - u8 *pdesc, bool firstseg, + u8 *pdesc8, bool firstseg, bool lastseg, struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); u8 fw_queue = QSLT_BEACON; + __le32 *pdesc = (__le32 *)pdesc8; dma_addr_t mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len, @@ -599,60 +580,62 @@ void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw, "DMA mapping error\n"); return; } - CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE); + clear_pci_tx_desc_content(pdesc, TX_DESC_SIZE); if (firstseg) - SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN); + set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN); - SET_TX_DESC_TX_RATE(pdesc, DESC_RATE1M); + set_tx_desc_tx_rate(pdesc, DESC_RATE1M); - SET_TX_DESC_SEQ(pdesc, 0); + set_tx_desc_seq(pdesc, 0); - SET_TX_DESC_LINIP(pdesc, 0); + set_tx_desc_linip(pdesc, 0); - SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue); + set_tx_desc_queue_sel(pdesc, fw_queue); - SET_TX_DESC_FIRST_SEG(pdesc, 1); - SET_TX_DESC_LAST_SEG(pdesc, 1); + set_tx_desc_first_seg(pdesc, 1); + set_tx_desc_last_seg(pdesc, 1); - SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) (skb->len)); + set_tx_desc_tx_buffer_size(pdesc, (u16)(skb->len)); - SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping); + set_tx_desc_tx_buffer_address(pdesc, mapping); - SET_TX_DESC_RATE_ID(pdesc, 7); - SET_TX_DESC_MACID(pdesc, 0); + set_tx_desc_rate_id(pdesc, 7); + set_tx_desc_macid(pdesc, 0); - SET_TX_DESC_OWN(pdesc, 1); + set_tx_desc_own(pdesc, 1); - SET_TX_DESC_PKT_SIZE(pdesc, (u16) (skb->len)); + set_tx_desc_pkt_size(pdesc, (u16)(skb->len)); - SET_TX_DESC_FIRST_SEG(pdesc, 1); - SET_TX_DESC_LAST_SEG(pdesc, 1); + set_tx_desc_first_seg(pdesc, 1); + set_tx_desc_last_seg(pdesc, 1); - SET_TX_DESC_OFFSET(pdesc, 0x20); + set_tx_desc_offset(pdesc, 0x20); - SET_TX_DESC_USE_RATE(pdesc, 1); + set_tx_desc_use_rate(pdesc, 1); if (!ieee80211_is_data_qos(fc)) { - SET_TX_DESC_HWSEQ_EN(pdesc, 1); - SET_TX_DESC_PKT_ID(pdesc, 8); + set_tx_desc_hwseq_en(pdesc, 1); + set_tx_desc_pkt_id(pdesc, 8); } RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, "H2C Tx Cmd Content", pdesc, TX_DESC_SIZE); } -void rtl92ce_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, +void rtl92ce_set_desc(struct ieee80211_hw *hw, u8 *pdesc8, bool istx, u8 desc_name, u8 *val) { + __le32 *pdesc = (__le32 *)pdesc8; + if (istx) { switch (desc_name) { case HW_DESC_OWN: wmb(); - SET_TX_DESC_OWN(pdesc, 1); + set_tx_desc_own(pdesc, 1); break; case HW_DESC_TX_NEXTDESC_ADDR: - SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *) val); + set_tx_desc_next_desc_address(pdesc, *(u32 *)val); break; default: WARN_ONCE(true, "rtl8192ce: ERR txdesc :%d not processed\n", @@ -663,16 +646,16 @@ void rtl92ce_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, switch (desc_name) { case HW_DESC_RXOWN: wmb(); - SET_RX_DESC_OWN(pdesc, 1); + set_rx_desc_own(pdesc, 1); break; case HW_DESC_RXBUFF_ADDR: - SET_RX_DESC_BUFF_ADDR(pdesc, *(u32 *) val); + set_rx_desc_buff_addr(pdesc, *(u32 *)val); break; case HW_DESC_RXPKT_LEN: - SET_RX_DESC_PKT_LEN(pdesc, *(u32 *) val); + set_rx_desc_pkt_len(pdesc, *(u32 *)val); break; case HW_DESC_RXERO: - SET_RX_DESC_EOR(pdesc, 1); + set_rx_desc_eor(pdesc, 1); break; default: WARN_ONCE(true, "rtl8192ce: ERR rxdesc :%d not processed\n", @@ -682,18 +665,19 @@ void 
rtl92ce_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, } } -u64 rtl92ce_get_desc(struct ieee80211_hw *hw, u8 *p_desc, +u64 rtl92ce_get_desc(struct ieee80211_hw *hw, u8 *p_desc8, bool istx, u8 desc_name) { u32 ret = 0; + __le32 *p_desc = (__le32 *)p_desc8; if (istx) { switch (desc_name) { case HW_DESC_OWN: - ret = GET_TX_DESC_OWN(p_desc); + ret = get_tx_desc_own(p_desc); break; case HW_DESC_TXBUFF_ADDR: - ret = GET_TX_DESC_TX_BUFFER_ADDRESS(p_desc); + ret = get_tx_desc_tx_buffer_address(p_desc); break; default: WARN_ONCE(true, "rtl8192ce: ERR txdesc :%d not processed\n", @@ -703,13 +687,13 @@ u64 rtl92ce_get_desc(struct ieee80211_hw *hw, u8 *p_desc, } else { switch (desc_name) { case HW_DESC_OWN: - ret = GET_RX_DESC_OWN(p_desc); + ret = get_rx_desc_own(p_desc); break; case HW_DESC_RXPKT_LEN: - ret = GET_RX_DESC_PKT_LEN(p_desc); + ret = get_rx_desc_pkt_len(p_desc); break; case HW_DESC_RXBUFF_ADDR: - ret = GET_RX_DESC_BUFF_ADDR(p_desc); + ret = get_rx_desc_buff_addr(p_desc); break; default: WARN_ONCE(true, "rtl8192ce: ERR rxdesc :%d not processed\n", diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h index fb1d4444a52f..b45b05a6a523 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h @@ -14,497 +14,322 @@ #define USB_HWDESC_HEADER_LEN 32 #define CRCLENGTH 4 -/* Define a macro that takes a le32 word, converts it to host ordering, - * right shifts by a specified count, creates a mask of the specified - * bit count, and extracts that number of bits. - */ - -#define SHIFT_AND_MASK_LE(__pdesc, __shift, __mask) \ - ((le32_to_cpu(*(((__le32 *)(__pdesc)))) >> (__shift)) & \ - BIT_LEN_MASK_32(__mask)) - -/* Define a macro that clears a bit field in an le32 word and - * sets the specified value into that bit field. The resulting - * value remains in le32 ordering; however, it is properly converted - * to host ordering for the clear and set operations before conversion - * back to le32. 
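The replacement helpers that appear later in this hunk come from <linux/bitfield.h>: le32_get_bits() pulls a mask-described field out of a little-endian 32-bit word, and le32p_replace_bits() clears and refills that field in place, preserving the clear-set-convert behavior the comment above describes while keeping the storage __le32 end to end. A minimal userspace sketch of the same two operations; the field positions mirror the pkt_len (bits 13..0) and OWN (bit 31) layout used in this header, but le32_load()/le32_store()/get_field()/put_field() are local stand-ins for the kernel helpers, not kernel API:

#include <stdint.h>
#include <stdio.h>

/* what le32_to_cpu() does: assemble a host-order word from LE bytes */
static uint32_t le32_load(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

/* what cpu_to_le32() does: store a host-order word as LE bytes */
static void le32_store(uint8_t *p, uint32_t v)
{
	p[0] = (uint8_t)v;
	p[1] = (uint8_t)(v >> 8);
	p[2] = (uint8_t)(v >> 16);
	p[3] = (uint8_t)(v >> 24);
}

/* what le32_get_bits() does: shift the field down, mask off its width */
static uint32_t get_field(const uint8_t *desc, int shift, uint32_t width_mask)
{
	return (le32_load(desc) >> shift) & width_mask;
}

/* what le32p_replace_bits() does: clear the field, then OR in the value */
static void put_field(uint8_t *desc, int shift, uint32_t width_mask,
		      uint32_t val)
{
	uint32_t v = le32_load(desc);

	v &= ~(width_mask << shift);
	v |= (val & width_mask) << shift;
	le32_store(desc, v);
}

int main(void)
{
	uint8_t dword0[4] = { 0 };

	put_field(dword0, 0, 0x3fff, 1500);	/* 14-bit pkt_len at bit 0 */
	put_field(dword0, 31, 0x1, 1);		/* OWN flag at bit 31 */
	printf("pkt_len=%u own=%u\n",
	       (unsigned int)get_field(dword0, 0, 0x3fff),
	       (unsigned int)get_field(dword0, 31, 0x1));
	return 0;
}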
- */ - -#define SET_BITS_OFFSET_LE(__pdesc, __shift, __len, __val) \ - (*(__le32 *)(__pdesc) = \ - (cpu_to_le32((le32_to_cpu(*((__le32 *)(__pdesc))) & \ - (~(BIT_OFFSET_LEN_MASK_32((__shift), __len)))) | \ - (((u32)(__val) & BIT_LEN_MASK_32(__len)) << (__shift))))); - /* macros to read/write various fields in RX or TX descriptors */ -#define SET_TX_DESC_PKT_SIZE(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc, 0, 16, __val) -#define SET_TX_DESC_OFFSET(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc, 16, 8, __val) -#define SET_TX_DESC_BMC(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc, 24, 1, __val) -#define SET_TX_DESC_HTC(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc, 25, 1, __val) -#define SET_TX_DESC_LAST_SEG(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc, 26, 1, __val) -#define SET_TX_DESC_FIRST_SEG(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc, 27, 1, __val) -#define SET_TX_DESC_LINIP(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc, 28, 1, __val) -#define SET_TX_DESC_NO_ACM(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc, 29, 1, __val) -#define SET_TX_DESC_GF(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc, 30, 1, __val) -#define SET_TX_DESC_OWN(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc, 31, 1, __val) - -#define GET_TX_DESC_PKT_SIZE(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc, 0, 16) -#define GET_TX_DESC_OFFSET(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc, 16, 8) -#define GET_TX_DESC_BMC(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc, 24, 1) -#define GET_TX_DESC_HTC(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc, 25, 1) -#define GET_TX_DESC_LAST_SEG(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc, 26, 1) -#define GET_TX_DESC_FIRST_SEG(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc, 27, 1) -#define GET_TX_DESC_LINIP(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc, 28, 1) -#define GET_TX_DESC_NO_ACM(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc, 29, 1) -#define GET_TX_DESC_GF(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc, 30, 1) -#define GET_TX_DESC_OWN(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc, 31, 1) - -#define SET_TX_DESC_MACID(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+4, 0, 5, __val) -#define SET_TX_DESC_AGG_BREAK(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+4, 5, 1, __val) -#define SET_TX_DESC_BK(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+4, 6, 1, __val) -#define SET_TX_DESC_RDG_ENABLE(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+4, 7, 1, __val) -#define SET_TX_DESC_QUEUE_SEL(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+4, 8, 5, __val) -#define SET_TX_DESC_RDG_NAV_EXT(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+4, 13, 1, __val) -#define SET_TX_DESC_LSIG_TXOP_EN(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+4, 14, 1, __val) -#define SET_TX_DESC_PIFS(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+4, 15, 1, __val) -#define SET_TX_DESC_RATE_ID(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+4, 16, 4, __val) -#define SET_TX_DESC_NAV_USE_HDR(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+4, 20, 1, __val) -#define SET_TX_DESC_EN_DESC_ID(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+4, 21, 1, __val) -#define SET_TX_DESC_SEC_TYPE(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+4, 22, 2, __val) -#define SET_TX_DESC_PKT_OFFSET(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+4, 24, 8, __val) - -#define GET_TX_DESC_MACID(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+4, 0, 5) -#define GET_TX_DESC_AGG_ENABLE(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+4, 5, 1) -#define GET_TX_DESC_AGG_BREAK(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+4, 6, 1) -#define GET_TX_DESC_RDG_ENABLE(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+4, 7, 1) -#define GET_TX_DESC_QUEUE_SEL(__pdesc) \ 
- SHIFT_AND_MASK_LE(__pdesc+4, 8, 5) -#define GET_TX_DESC_RDG_NAV_EXT(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+4, 13, 1) -#define GET_TX_DESC_LSIG_TXOP_EN(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+4, 14, 1) -#define GET_TX_DESC_PIFS(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+4, 15, 1) -#define GET_TX_DESC_RATE_ID(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+4, 16, 4) -#define GET_TX_DESC_NAV_USE_HDR(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+4, 20, 1) -#define GET_TX_DESC_EN_DESC_ID(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+4, 21, 1) -#define GET_TX_DESC_SEC_TYPE(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+4, 22, 2) -#define GET_TX_DESC_PKT_OFFSET(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+4, 24, 8) - -#define SET_TX_DESC_RTS_RC(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+8, 0, 6, __val) -#define SET_TX_DESC_DATA_RC(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+8, 6, 6, __val) -#define SET_TX_DESC_BAR_RTY_TH(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+8, 14, 2, __val) -#define SET_TX_DESC_MORE_FRAG(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+8, 17, 1, __val) -#define SET_TX_DESC_RAW(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+8, 18, 1, __val) -#define SET_TX_DESC_CCX(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+8, 19, 1, __val) -#define SET_TX_DESC_AMPDU_DENSITY(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+8, 20, 3, __val) -#define SET_TX_DESC_ANTSEL_A(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+8, 24, 1, __val) -#define SET_TX_DESC_ANTSEL_B(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+8, 25, 1, __val) -#define SET_TX_DESC_TX_ANT_CCK(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+8, 26, 2, __val) -#define SET_TX_DESC_TX_ANTL(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+8, 28, 2, __val) -#define SET_TX_DESC_TX_ANT_HT(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+8, 30, 2, __val) - -#define GET_TX_DESC_RTS_RC(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+8, 0, 6) -#define GET_TX_DESC_DATA_RC(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+8, 6, 6) -#define GET_TX_DESC_BAR_RTY_TH(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+8, 14, 2) -#define GET_TX_DESC_MORE_FRAG(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+8, 17, 1) -#define GET_TX_DESC_RAW(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+8, 18, 1) -#define GET_TX_DESC_CCX(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+8, 19, 1) -#define GET_TX_DESC_AMPDU_DENSITY(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+8, 20, 3) -#define GET_TX_DESC_ANTSEL_A(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+8, 24, 1) -#define GET_TX_DESC_ANTSEL_B(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+8, 25, 1) -#define GET_TX_DESC_TX_ANT_CCK(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+8, 26, 2) -#define GET_TX_DESC_TX_ANTL(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+8, 28, 2) -#define GET_TX_DESC_TX_ANT_HT(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+8, 30, 2) - -#define SET_TX_DESC_NEXT_HEAP_PAGE(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+12, 0, 8, __val) -#define SET_TX_DESC_TAIL_PAGE(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+12, 8, 8, __val) -#define SET_TX_DESC_SEQ(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+12, 16, 12, __val) -#define SET_TX_DESC_PKT_ID(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+12, 28, 4, __val) - -#define GET_TX_DESC_NEXT_HEAP_PAGE(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+12, 0, 8) -#define GET_TX_DESC_TAIL_PAGE(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+12, 8, 8) -#define GET_TX_DESC_SEQ(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+12, 16, 12) -#define GET_TX_DESC_PKT_ID(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+12, 28, 4) - -#define SET_TX_DESC_RTS_RATE(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+16, 0, 5, __val) -#define 
SET_TX_DESC_AP_DCFE(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+16, 5, 1, __val) -#define SET_TX_DESC_QOS(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+16, 6, 1, __val) -#define SET_TX_DESC_HWSEQ_EN(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+16, 7, 1, __val) -#define SET_TX_DESC_USE_RATE(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+16, 8, 1, __val) -#define SET_TX_DESC_DISABLE_RTS_FB(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+16, 9, 1, __val) -#define SET_TX_DESC_DISABLE_FB(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+16, 10, 1, __val) -#define SET_TX_DESC_CTS2SELF(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+16, 11, 1, __val) -#define SET_TX_DESC_RTS_ENABLE(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+16, 12, 1, __val) -#define SET_TX_DESC_HW_RTS_ENABLE(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+16, 13, 1, __val) -#define SET_TX_DESC_PORT_ID(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+16, 14, 1, __val) -#define SET_TX_DESC_WAIT_DCTS(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+16, 18, 1, __val) -#define SET_TX_DESC_CTS2AP_EN(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+16, 19, 1, __val) -#define SET_TX_DESC_TX_SUB_CARRIER(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+16, 20, 2, __val) -#define SET_TX_DESC_TX_STBC(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+16, 22, 2, __val) -#define SET_TX_DESC_DATA_SHORT(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+16, 24, 1, __val) -#define SET_TX_DESC_DATA_BW(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+16, 25, 1, __val) -#define SET_TX_DESC_RTS_SHORT(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+16, 26, 1, __val) -#define SET_TX_DESC_RTS_BW(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+16, 27, 1, __val) -#define SET_TX_DESC_RTS_SC(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+16, 28, 2, __val) -#define SET_TX_DESC_RTS_STBC(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+16, 30, 2, __val) - -#define GET_TX_DESC_RTS_RATE(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+16, 0, 5) -#define GET_TX_DESC_AP_DCFE(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+16, 5, 1) -#define GET_TX_DESC_QOS(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+16, 6, 1) -#define GET_TX_DESC_HWSEQ_EN(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+16, 7, 1) -#define GET_TX_DESC_USE_RATE(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+16, 8, 1) -#define GET_TX_DESC_DISABLE_RTS_FB(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+16, 9, 1) -#define GET_TX_DESC_DISABLE_FB(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+16, 10, 1) -#define GET_TX_DESC_CTS2SELF(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+16, 11, 1) -#define GET_TX_DESC_RTS_ENABLE(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+16, 12, 1) -#define GET_TX_DESC_HW_RTS_ENABLE(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+16, 13, 1) -#define GET_TX_DESC_PORT_ID(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+16, 14, 1) -#define GET_TX_DESC_WAIT_DCTS(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+16, 18, 1) -#define GET_TX_DESC_CTS2AP_EN(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+16, 19, 1) -#define GET_TX_DESC_TX_SUB_CARRIER(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+16, 20, 2) -#define GET_TX_DESC_TX_STBC(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+16, 22, 2) -#define GET_TX_DESC_DATA_SHORT(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+16, 24, 1) -#define GET_TX_DESC_DATA_BW(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+16, 25, 1) -#define GET_TX_DESC_RTS_SHORT(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+16, 26, 1) -#define GET_TX_DESC_RTS_BW(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+16, 27, 1) -#define GET_TX_DESC_RTS_SC(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+16, 28, 2) -#define GET_TX_DESC_RTS_STBC(__pdesc) \ - 
SHIFT_AND_MASK_LE(__pdesc+16, 30, 2) - -#define SET_TX_DESC_TX_RATE(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+20, 0, 6, __val) -#define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+20, 6, 1, __val) -#define SET_TX_DESC_CCX_TAG(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+20, 7, 1, __val) -#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+20, 8, 5, __val) -#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+20, 13, 4, __val) -#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+20, 17, 1, __val) -#define SET_TX_DESC_DATA_RETRY_LIMIT(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+20, 18, 6, __val) -#define SET_TX_DESC_USB_TXAGG_NUM(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+20, 24, 8, __val) - -#define GET_TX_DESC_TX_RATE(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+20, 0, 6) -#define GET_TX_DESC_DATA_SHORTGI(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+20, 6, 1) -#define GET_TX_DESC_CCX_TAG(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+20, 7, 1) -#define GET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+20, 8, 5) -#define GET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+20, 13, 4) -#define GET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+20, 17, 1) -#define GET_TX_DESC_DATA_RETRY_LIMIT(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+20, 18, 6) -#define GET_TX_DESC_USB_TXAGG_NUM(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+20, 24, 8) - -#define SET_TX_DESC_TXAGC_A(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+24, 0, 5, __val) -#define SET_TX_DESC_TXAGC_B(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+24, 5, 5, __val) -#define SET_TX_DESC_USE_MAX_LEN(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+24, 10, 1, __val) -#define SET_TX_DESC_MAX_AGG_NUM(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+24, 11, 5, __val) -#define SET_TX_DESC_MCSG1_MAX_LEN(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+24, 16, 4, __val) -#define SET_TX_DESC_MCSG2_MAX_LEN(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+24, 20, 4, __val) -#define SET_TX_DESC_MCSG3_MAX_LEN(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+24, 24, 4, __val) -#define SET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+24, 28, 4, __val) - -#define GET_TX_DESC_TXAGC_A(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+24, 0, 5) -#define GET_TX_DESC_TXAGC_B(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+24, 5, 5) -#define GET_TX_DESC_USE_MAX_LEN(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+24, 10, 1) -#define GET_TX_DESC_MAX_AGG_NUM(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+24, 11, 5) -#define GET_TX_DESC_MCSG1_MAX_LEN(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+24, 16, 4) -#define GET_TX_DESC_MCSG2_MAX_LEN(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+24, 20, 4) -#define GET_TX_DESC_MCSG3_MAX_LEN(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+24, 24, 4) -#define GET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+24, 28, 4) - -#define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+28, 0, 16, __val) -#define SET_TX_DESC_MCSG4_MAX_LEN(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+28, 16, 4, __val) -#define SET_TX_DESC_MCSG5_MAX_LEN(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+28, 20, 4, __val) -#define SET_TX_DESC_MCSG6_MAX_LEN(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+28, 24, 4, __val) -#define SET_TX_DESC_MCS15_SGI_MAX_LEN(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+28, 28, 4, __val) - -#define GET_TX_DESC_TX_BUFFER_SIZE(__pdesc) \ - 
SHIFT_AND_MASK_LE(__pdesc+28, 0, 16) -#define GET_TX_DESC_MCSG4_MAX_LEN(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+28, 16, 4) -#define GET_TX_DESC_MCSG5_MAX_LEN(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+28, 20, 4) -#define GET_TX_DESC_MCSG6_MAX_LEN(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+28, 24, 4) -#define GET_TX_DESC_MCS15_SGI_MAX_LEN(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+28, 28, 4) - -#define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+32, 0, 32, __val) -#define SET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+36, 0, 32, __val) - -#define GET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+32, 0, 32) -#define GET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+36, 0, 32) - -#define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+40, 0, 32, __val) -#define SET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+44, 0, 32, __val) - -#define GET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+40, 0, 32) -#define GET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+44, 0, 32) - -#define GET_RX_DESC_PKT_LEN(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc, 0, 14) -#define GET_RX_DESC_CRC32(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc, 14, 1) -#define GET_RX_DESC_ICV(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc, 15, 1) -#define GET_RX_DESC_DRV_INFO_SIZE(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc, 16, 4) -#define GET_RX_DESC_SECURITY(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc, 20, 3) -#define GET_RX_DESC_QOS(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc, 23, 1) -#define GET_RX_DESC_SHIFT(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc, 24, 2) -#define GET_RX_DESC_PHYST(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc, 26, 1) -#define GET_RX_DESC_SWDEC(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc, 27, 1) -#define GET_RX_DESC_LS(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc, 28, 1) -#define GET_RX_DESC_FS(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc, 29, 1) -#define GET_RX_DESC_EOR(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc, 30, 1) -#define GET_RX_DESC_OWN(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc, 31, 1) - -#define SET_RX_DESC_PKT_LEN(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc, 0, 14, __val) -#define SET_RX_DESC_EOR(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc, 30, 1, __val) -#define SET_RX_DESC_OWN(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc, 31, 1, __val) - -#define GET_RX_DESC_MACID(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+4, 0, 5) -#define GET_RX_DESC_TID(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+4, 5, 4) -#define GET_RX_DESC_HWRSVD(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+4, 9, 5) -#define GET_RX_DESC_PAGGR(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+4, 14, 1) -#define GET_RX_DESC_FAGGR(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+4, 15, 1) -#define GET_RX_DESC_A1_FIT(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+4, 16, 4) -#define GET_RX_DESC_A2_FIT(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+4, 20, 4) -#define GET_RX_DESC_PAM(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+4, 24, 1) -#define GET_RX_DESC_PWR(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+4, 25, 1) -#define GET_RX_DESC_MD(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+4, 26, 1) -#define GET_RX_DESC_MF(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+4, 27, 1) -#define GET_RX_DESC_TYPE(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+4, 28, 2) -#define GET_RX_DESC_MC(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+4, 30, 1) -#define GET_RX_DESC_BC(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+4, 31, 1) -#define GET_RX_DESC_SEQ(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+8, 0, 12) -#define GET_RX_DESC_FRAG(__pdesc) 
\ - SHIFT_AND_MASK_LE(__pdesc+8, 12, 4) -#define GET_RX_DESC_NEXT_PKT_LEN(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+8, 16, 14) -#define GET_RX_DESC_NEXT_IND(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+8, 30, 1) -#define GET_RX_DESC_RSVD(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+8, 31, 1) - -#define GET_RX_DESC_RXMCS(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+12, 0, 6) -#define GET_RX_DESC_RXHT(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+12, 6, 1) -#define GET_RX_DESC_SPLCP(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+12, 8, 1) -#define GET_RX_DESC_BW(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+12, 9, 1) -#define GET_RX_DESC_HTC(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+12, 10, 1) -#define GET_RX_DESC_HWPC_ERR(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+12, 14, 1) -#define GET_RX_DESC_HWPC_IND(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+12, 15, 1) -#define GET_RX_DESC_IV0(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+12, 16, 16) - -#define GET_RX_DESC_IV1(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+16, 0, 32) -#define GET_RX_DESC_TSFL(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+20, 0, 32) - -#define GET_RX_DESC_BUFF_ADDR(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+24, 0, 32) -#define GET_RX_DESC_BUFF_ADDR64(__pdesc) \ - SHIFT_AND_MASK_LE(__pdesc+28, 0, 32) - -#define SET_RX_DESC_BUFF_ADDR(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+24, 0, 32, __val) -#define SET_RX_DESC_BUFF_ADDR64(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+28, 0, 32, __val) - -#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \ - memset(__pdesc, 0, min_t(size_t, _size, TX_DESC_NEXT_DESC_OFFSET)) +static inline void set_tx_desc_pkt_size(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, GENMASK(15, 0)); +} + +static inline void set_tx_desc_offset(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, GENMASK(23, 16)); +} + +static inline void set_tx_desc_bmc(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, BIT(24)); +} + +static inline void set_tx_desc_htc(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, BIT(25)); +} + +static inline void set_tx_desc_last_seg(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, BIT(26)); +} + +static inline void set_tx_desc_first_seg(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, BIT(27)); +} + +static inline void set_tx_desc_linip(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, BIT(28)); +} + +static inline void set_tx_desc_own(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, BIT(31)); +} + +static inline int get_tx_desc_own(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc), BIT(31)); +} + +static inline void set_tx_desc_macid(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 1), __val, GENMASK(4, 0)); +} + +static inline void set_tx_desc_agg_break(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 1), __val, BIT(5)); +} + +static inline void set_tx_desc_rdg_enable(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 1), __val, BIT(7)); +} + +static inline void set_tx_desc_queue_sel(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 1), __val, GENMASK(12, 8)); +} + +static inline void set_tx_desc_rate_id(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 1), __val, GENMASK(19, 16)); +} + +static inline void set_tx_desc_sec_type(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 1), __val, GENMASK(23, 22)); +} + +static inline void set_tx_desc_more_frag(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 2), __val, 
BIT(17)); +} + +static inline void set_tx_desc_ampdu_density(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 2), __val, GENMASK(22, 20)); +} + +static inline void set_tx_desc_seq(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 3), __val, GENMASK(27, 16)); +} + +static inline void set_tx_desc_pkt_id(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 3), __val, GENMASK(31, 28)); +} + +static inline void set_tx_desc_rts_rate(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, GENMASK(4, 0)); +} + +static inline void set_tx_desc_qos(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, BIT(6)); +} + +static inline void set_tx_desc_hwseq_en(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, BIT(7)); +} + +static inline void set_tx_desc_use_rate(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, BIT(8)); +} + +static inline void set_tx_desc_disable_fb(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, BIT(10)); +} + +static inline void set_tx_desc_cts2self(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, BIT(11)); +} + +static inline void set_tx_desc_rts_enable(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, BIT(12)); +} + +static inline void set_tx_desc_hw_rts_enable(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, BIT(13)); +} + +static inline void set_tx_desc_tx_sub_carrier(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, GENMASK(21, 20)); +} + +static inline void set_tx_desc_data_bw(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, BIT(25)); +} + +static inline void set_tx_desc_rts_short(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, BIT(26)); +} + +static inline void set_tx_desc_rts_bw(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, BIT(27)); +} + +static inline void set_tx_desc_rts_sc(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, GENMASK(29, 28)); +} + +static inline void set_tx_desc_rts_stbc(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, GENMASK(31, 30)); +} + +static inline void set_tx_desc_tx_rate(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 5), __val, GENMASK(5, 0)); +} + +static inline void set_tx_desc_data_shortgi(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 5), __val, BIT(6)); +} + +static inline void set_tx_desc_data_rate_fb_limit(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 5), __val, GENMASK(12, 8)); +} + +static inline void set_tx_desc_rts_rate_fb_limit(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 5), __val, GENMASK(16, 13)); +} + +static inline void set_tx_desc_max_agg_num(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 6), __val, GENMASK(15, 11)); +} + +static inline void set_tx_desc_tx_buffer_size(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 7), __val, GENMASK(15, 0)); +} + +static inline void set_tx_desc_tx_buffer_address(__le32 *__pdesc, u32 __val) +{ + *(__pdesc + 8) = cpu_to_le32(__val); +} + +static inline u32 get_tx_desc_tx_buffer_address(__le32 *__pdesc) +{ + return le32_to_cpu(*((__pdesc + 8))); +} + +static inline void set_tx_desc_next_desc_address(__le32 *__pdesc, u32 __val) +{ + *(__pdesc + 10) = cpu_to_le32(__val); +} + +static inline int get_rx_desc_pkt_len(__le32 *__pdesc) +{ + 
return le32_get_bits(*(__pdesc), GENMASK(13, 0)); +} + +static inline int get_rx_desc_crc32(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc), BIT(14)); +} + +static inline int get_rx_desc_icv(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc), BIT(15)); +} + +static inline int get_rx_desc_drv_info_size(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc), GENMASK(19, 16)); +} + +static inline int get_rx_desc_shift(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc), GENMASK(25, 24)); +} + +static inline int get_rx_desc_physt(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc), BIT(26)); +} + +static inline int get_rx_desc_swdec(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc), BIT(27)); +} + +static inline int get_rx_desc_own(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc), BIT(31)); +} + +static inline void set_rx_desc_pkt_len(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, GENMASK(13, 0)); +} + +static inline void set_rx_desc_eor(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, BIT(30)); +} + +static inline void set_rx_desc_own(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, BIT(31)); +} + +static inline int get_rx_desc_paggr(__le32 *__pdesc) +{ + return le32_get_bits(*((__pdesc + 1)), BIT(14)); +} + +static inline int get_rx_desc_faggr(__le32 *__pdesc) +{ + return le32_get_bits(*((__pdesc + 1)), BIT(15)); +} + +static inline int get_rx_desc_rxmcs(__le32 *__pdesc) +{ + return le32_get_bits(*((__pdesc + 3)), GENMASK(5, 0)); +} + +static inline int get_rx_desc_rxht(__le32 *__pdesc) +{ + return le32_get_bits(*((__pdesc + 3)), BIT(6)); +} + +static inline int get_rx_desc_splcp(__le32 *__pdesc) +{ + return le32_get_bits(*((__pdesc + 3)), BIT(8)); +} + +static inline int get_rx_desc_bw(__le32 *__pdesc) +{ + return le32_get_bits(*((__pdesc + 3)), BIT(9)); +} + +static inline u32 get_rx_desc_tsfl(__le32 *__pdesc) +{ + return le32_to_cpu(*((__pdesc + 5))); +} + +static inline u32 get_rx_desc_buff_addr(__le32 *__pdesc) +{ + return le32_to_cpu(*((__pdesc + 6))); +} + +static inline void set_rx_desc_buff_addr(__le32 *__pdesc, u32 __val) +{ + *(__pdesc + 6) = cpu_to_le32(__val); +} + +static inline void clear_pci_tx_desc_content(__le32 *__pdesc, int _size) +{ + memset(__pdesc, 0, min_t(size_t, _size, TX_DESC_NEXT_DESC_OFFSET)); +} struct rx_fwinfo_92c { u8 gain_trsw[4]; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c index b3ce8000d52d..cec19b32c7e2 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c @@ -577,22 +577,6 @@ static u8 _rtl92c_query_rxpwrpercentage(s8 antpower) return 100 + antpower; } -static u8 _rtl92c_evm_db_to_percentage(s8 value) -{ - s8 ret_val; - - ret_val = value; - if (ret_val >= 0) - ret_val = 0; - if (ret_val <= -33) - ret_val = -33; - ret_val = 0 - ret_val; - ret_val *= 3; - if (ret_val == 99) - ret_val = 100; - return ret_val; -} - static long _rtl92c_signal_scale_mapping(struct ieee80211_hw *hw, long currsig) { @@ -638,7 +622,7 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw, u32 rssi, total_rssi = 0; bool in_powersavemode = false; bool is_cck_rate; - u8 *pdesc = (u8 *)p_desc; + __le32 *pdesc = (__le32 *)p_desc; is_cck_rate = RX_HAL_IS_CCK_RATE(p_desc->rxmcs); pstats->packet_matchbssid = packet_match_bssid; @@ -736,14 +720,14 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw, pstats->rx_pwdb_all = pwdb_all; 
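The per-chip copy removed above and the shared rtl_evm_db_to_percentage() called just below implement the same mapping: clamp the EVM reading to the [-33, 0] dB range, scale by -3 into a percentage, and round the 99% top step up to 100%. A standalone sketch of that mapping, assuming the shared helper preserves the removed copy's behavior:

#include <stdint.h>
#include <stdio.h>

static uint8_t evm_db_to_percentage(int8_t value)
{
	int8_t ret = value;

	if (ret >= 0)
		ret = 0;
	if (ret <= -33)
		ret = -33;
	ret = -ret * 3;			/* 0 dB -> 0%, -33 dB -> 99% */
	return ret == 99 ? 100 : (uint8_t)ret;
}

int main(void)
{
	printf("%d\n", evm_db_to_percentage(-25));	/* 75 */
	printf("%d\n", evm_db_to_percentage(-40));	/* clamped: 100 */
	printf("%d\n", evm_db_to_percentage(3));	/* clamped: 0 */
	return 0;
}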
pstats->rxpower = rx_pwr_all; pstats->recvsignalpower = rx_pwr_all; - if (GET_RX_DESC_RX_MCS(pdesc) && - GET_RX_DESC_RX_MCS(pdesc) >= DESC_RATEMCS8 && - GET_RX_DESC_RX_MCS(pdesc) <= DESC_RATEMCS15) + if (get_rx_desc_rx_mcs(pdesc) && + get_rx_desc_rx_mcs(pdesc) >= DESC_RATEMCS8 && + get_rx_desc_rx_mcs(pdesc) <= DESC_RATEMCS15) max_spatial_stream = 2; else max_spatial_stream = 1; for (i = 0; i < max_spatial_stream; i++) { - evm = _rtl92c_evm_db_to_percentage(p_drvinfo->rxevm[i]); + evm = rtl_evm_db_to_percentage(p_drvinfo->rxevm[i]); if (packet_match_bssid) { if (i == 0) pstats->signalquality = diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c index c1c34dca39d2..ab3e4aebad39 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c @@ -39,8 +39,6 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->dm.dm_flag = 0; rtlpriv->dm.disable_framebursting = false; rtlpriv->dm.thermalvalue = 0; - rtlpriv->cfg->mod_params->sw_crypto = - rtlpriv->cfg->mod_params->sw_crypto; /* for firmware buf */ rtlpriv->rtlhal.pfirmware = vzalloc(0x4000); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c index 0020adc004a5..fc526477740f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c @@ -282,44 +282,45 @@ out: bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats, struct ieee80211_rx_status *rx_status, - u8 *pdesc, struct sk_buff *skb) + u8 *pdesc8, struct sk_buff *skb) { struct rx_fwinfo_92c *p_drvinfo; - struct rx_desc_92c *p_desc = (struct rx_desc_92c *)pdesc; - u32 phystatus = GET_RX_DESC_PHY_STATUS(pdesc); + struct rx_desc_92c *p_desc = (struct rx_desc_92c *)pdesc8; + __le32 *pdesc = (__le32 *)pdesc8; + u32 phystatus = get_rx_desc_phy_status(pdesc); - stats->length = (u16) GET_RX_DESC_PKT_LEN(pdesc); - stats->rx_drvinfo_size = (u8)GET_RX_DESC_DRVINFO_SIZE(pdesc) * + stats->length = (u16)get_rx_desc_pkt_len(pdesc); + stats->rx_drvinfo_size = (u8)get_rx_desc_drvinfo_size(pdesc) * RX_DRV_INFO_SIZE_UNIT; - stats->rx_bufshift = (u8) (GET_RX_DESC_SHIFT(pdesc) & 0x03); - stats->icv = (u16) GET_RX_DESC_ICV(pdesc); - stats->crc = (u16) GET_RX_DESC_CRC32(pdesc); + stats->rx_bufshift = (u8)(get_rx_desc_shift(pdesc) & 0x03); + stats->icv = (u16)get_rx_desc_icv(pdesc); + stats->crc = (u16)get_rx_desc_crc32(pdesc); stats->hwerror = (stats->crc | stats->icv); - stats->decrypted = !GET_RX_DESC_SWDEC(pdesc); - stats->rate = (u8) GET_RX_DESC_RX_MCS(pdesc); - stats->shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc); - stats->isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1); - stats->isfirst_ampdu = (bool)((GET_RX_DESC_PAGGR(pdesc) == 1) - && (GET_RX_DESC_FAGGR(pdesc) == 1)); - stats->timestamp_low = GET_RX_DESC_TSFL(pdesc); - stats->rx_is40mhzpacket = (bool)GET_RX_DESC_BW(pdesc); - stats->is_ht = (bool)GET_RX_DESC_RX_HT(pdesc); + stats->decrypted = !get_rx_desc_swdec(pdesc); + stats->rate = (u8)get_rx_desc_rx_mcs(pdesc); + stats->shortpreamble = (u16)get_rx_desc_splcp(pdesc); + stats->isampdu = (bool)(get_rx_desc_paggr(pdesc) == 1); + stats->isfirst_ampdu = (bool)((get_rx_desc_paggr(pdesc) == 1) && + (get_rx_desc_faggr(pdesc) == 1)); + stats->timestamp_low = get_rx_desc_tsfl(pdesc); + stats->rx_is40mhzpacket = (bool)get_rx_desc_bw(pdesc); + stats->is_ht = (bool)get_rx_desc_rx_ht(pdesc); rx_status->freq = 
hw->conf.chandef.chan->center_freq; rx_status->band = hw->conf.chandef.chan->band; - if (GET_RX_DESC_CRC32(pdesc)) + if (get_rx_desc_crc32(pdesc)) rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; - if (!GET_RX_DESC_SWDEC(pdesc)) + if (!get_rx_desc_swdec(pdesc)) rx_status->flag |= RX_FLAG_DECRYPTED; - if (GET_RX_DESC_BW(pdesc)) + if (get_rx_desc_bw(pdesc)) rx_status->bw = RATE_INFO_BW_40; - if (GET_RX_DESC_RX_HT(pdesc)) + if (get_rx_desc_rx_ht(pdesc)) rx_status->encoding = RX_ENC_HT; rx_status->flag |= RX_FLAG_MACTIME_START; if (stats->decrypted) rx_status->flag |= RX_FLAG_DECRYPTED; rx_status->rate_idx = rtlwifi_rate_mapping(hw, stats->is_ht, false, stats->rate); - rx_status->mactime = GET_RX_DESC_TSFL(pdesc); + rx_status->mactime = get_rx_desc_tsfl(pdesc); if (phystatus) { p_drvinfo = (struct rx_fwinfo_92c *)(skb->data + stats->rx_bufshift); @@ -339,7 +340,7 @@ static void _rtl_rx_process(struct ieee80211_hw *hw, struct sk_buff *skb) (struct ieee80211_rx_status *)IEEE80211_SKB_RXCB(skb); u32 skb_len, pkt_len, drvinfo_len; struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 *rxdesc; + __le32 *rxdesc; struct rtl_stats stats = { .signal = 0, .rate = 0, @@ -350,44 +351,44 @@ static void _rtl_rx_process(struct ieee80211_hw *hw, struct sk_buff *skb) struct ieee80211_hdr *hdr; memset(rx_status, 0, sizeof(*rx_status)); - rxdesc = skb->data; + rxdesc = (__le32 *)skb->data; skb_len = skb->len; - drvinfo_len = (GET_RX_DESC_DRVINFO_SIZE(rxdesc) * RTL_RX_DRV_INFO_UNIT); - pkt_len = GET_RX_DESC_PKT_LEN(rxdesc); + drvinfo_len = (get_rx_desc_drvinfo_size(rxdesc) * RTL_RX_DRV_INFO_UNIT); + pkt_len = get_rx_desc_pkt_len(rxdesc); /* TODO: Error recovery. drop this skb or something. */ WARN_ON(skb_len < (pkt_len + RTL_RX_DESC_SIZE + drvinfo_len)); - stats.length = (u16) GET_RX_DESC_PKT_LEN(rxdesc); - stats.rx_drvinfo_size = (u8)GET_RX_DESC_DRVINFO_SIZE(rxdesc) * + stats.length = (u16)get_rx_desc_pkt_len(rxdesc); + stats.rx_drvinfo_size = (u8)get_rx_desc_drvinfo_size(rxdesc) * RX_DRV_INFO_SIZE_UNIT; - stats.rx_bufshift = (u8) (GET_RX_DESC_SHIFT(rxdesc) & 0x03); - stats.icv = (u16) GET_RX_DESC_ICV(rxdesc); - stats.crc = (u16) GET_RX_DESC_CRC32(rxdesc); + stats.rx_bufshift = (u8)(get_rx_desc_shift(rxdesc) & 0x03); + stats.icv = (u16)get_rx_desc_icv(rxdesc); + stats.crc = (u16)get_rx_desc_crc32(rxdesc); stats.hwerror = (stats.crc | stats.icv); - stats.decrypted = !GET_RX_DESC_SWDEC(rxdesc); - stats.rate = (u8) GET_RX_DESC_RX_MCS(rxdesc); - stats.shortpreamble = (u16) GET_RX_DESC_SPLCP(rxdesc); - stats.isampdu = (bool) ((GET_RX_DESC_PAGGR(rxdesc) == 1) - && (GET_RX_DESC_FAGGR(rxdesc) == 1)); - stats.timestamp_low = GET_RX_DESC_TSFL(rxdesc); - stats.rx_is40mhzpacket = (bool)GET_RX_DESC_BW(rxdesc); - stats.is_ht = (bool)GET_RX_DESC_RX_HT(rxdesc); + stats.decrypted = !get_rx_desc_swdec(rxdesc); + stats.rate = (u8)get_rx_desc_rx_mcs(rxdesc); + stats.shortpreamble = (u16)get_rx_desc_splcp(rxdesc); + stats.isampdu = (bool)((get_rx_desc_paggr(rxdesc) == 1) && + (get_rx_desc_faggr(rxdesc) == 1)); + stats.timestamp_low = get_rx_desc_tsfl(rxdesc); + stats.rx_is40mhzpacket = (bool)get_rx_desc_bw(rxdesc); + stats.is_ht = (bool)get_rx_desc_rx_ht(rxdesc); /* TODO: is center_freq changed when doing scan? */ /* TODO: Shall we add protection or just skip those two step? 
*/ rx_status->freq = hw->conf.chandef.chan->center_freq; rx_status->band = hw->conf.chandef.chan->band; - if (GET_RX_DESC_CRC32(rxdesc)) + if (get_rx_desc_crc32(rxdesc)) rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; - if (!GET_RX_DESC_SWDEC(rxdesc)) + if (!get_rx_desc_swdec(rxdesc)) rx_status->flag |= RX_FLAG_DECRYPTED; - if (GET_RX_DESC_BW(rxdesc)) + if (get_rx_desc_bw(rxdesc)) rx_status->bw = RATE_INFO_BW_40; - if (GET_RX_DESC_RX_HT(rxdesc)) + if (get_rx_desc_rx_ht(rxdesc)) rx_status->encoding = RX_ENC_HT; /* Data rate */ rx_status->rate_idx = rtlwifi_rate_mapping(hw, stats.is_ht, false, stats.rate); /* There is a phy status after this rx descriptor. */ - if (GET_RX_DESC_PHY_STATUS(rxdesc)) { + if (get_rx_desc_phy_status(rxdesc)) { p_drvinfo = (struct rx_fwinfo_92c *)(rxdesc + RTL_RX_DESC_SIZE); rtl92c_translate_rx_signal_stuff(hw, skb, &stats, (struct rx_desc_92c *)rxdesc, p_drvinfo); @@ -440,27 +441,27 @@ struct sk_buff *rtl8192c_tx_aggregate_hdl(struct ieee80211_hw *hw, /*======================================== trx ===============================*/ -static void _rtl_fill_usb_tx_desc(u8 *txdesc) +static void _rtl_fill_usb_tx_desc(__le32 *txdesc) { - SET_TX_DESC_OWN(txdesc, 1); - SET_TX_DESC_LAST_SEG(txdesc, 1); - SET_TX_DESC_FIRST_SEG(txdesc, 1); + set_tx_desc_own(txdesc, 1); + set_tx_desc_last_seg(txdesc, 1); + set_tx_desc_first_seg(txdesc, 1); } /** * For HW recovery information */ -static void _rtl_tx_desc_checksum(u8 *txdesc) +static void _rtl_tx_desc_checksum(__le32 *txdesc) { __le16 *ptr = (__le16 *)txdesc; u16 checksum = 0; u32 index; /* Clear first */ - SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, 0); + set_tx_desc_tx_desc_checksum(txdesc, 0); for (index = 0; index < 16; index++) checksum = checksum ^ le16_to_cpu(*(ptr + index)); - SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, checksum); + set_tx_desc_tx_desc_checksum(txdesc, checksum); } void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw, @@ -483,61 +484,65 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw, u16 pktlen = skb->len; enum rtl_desc_qsel fw_qsel = _rtl8192cu_mq_to_descq(hw, fc, skb_get_queue_mapping(skb)); - u8 *txdesc; + u8 *txdesc8; + __le32 *txdesc; seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4; rtl_get_tcb_desc(hw, info, sta, skb, tcb_desc); - txdesc = skb_push(skb, RTL_TX_HEADER_SIZE); + txdesc8 = skb_push(skb, RTL_TX_HEADER_SIZE); + txdesc = (__le32 *)txdesc8; memset(txdesc, 0, RTL_TX_HEADER_SIZE); - SET_TX_DESC_PKT_SIZE(txdesc, pktlen); - SET_TX_DESC_LINIP(txdesc, 0); - SET_TX_DESC_PKT_OFFSET(txdesc, RTL_DUMMY_OFFSET); - SET_TX_DESC_OFFSET(txdesc, RTL_TX_HEADER_SIZE); - SET_TX_DESC_TX_RATE(txdesc, tcb_desc->hw_rate); + set_tx_desc_pkt_size(txdesc, pktlen); + set_tx_desc_linip(txdesc, 0); + set_tx_desc_pkt_offset(txdesc, RTL_DUMMY_OFFSET); + set_tx_desc_offset(txdesc, RTL_TX_HEADER_SIZE); + set_tx_desc_tx_rate(txdesc, tcb_desc->hw_rate); if (tcb_desc->use_shortgi || tcb_desc->use_shortpreamble) - SET_TX_DESC_DATA_SHORTGI(txdesc, 1); + set_tx_desc_data_shortgi(txdesc, 1); if (mac->tids[tid].agg.agg_state == RTL_AGG_ON && info->flags & IEEE80211_TX_CTL_AMPDU) { - SET_TX_DESC_AGG_ENABLE(txdesc, 1); - SET_TX_DESC_MAX_AGG_NUM(txdesc, 0x14); + set_tx_desc_agg_enable(txdesc, 1); + set_tx_desc_max_agg_num(txdesc, 0x14); } else { - SET_TX_DESC_AGG_BREAK(txdesc, 1); + set_tx_desc_agg_break(txdesc, 1); } - SET_TX_DESC_SEQ(txdesc, seq_number); - SET_TX_DESC_RTS_ENABLE(txdesc, ((tcb_desc->rts_enable && - !tcb_desc->cts_enable) ? 
1 : 0)); - SET_TX_DESC_HW_RTS_ENABLE(txdesc, ((tcb_desc->rts_enable || - tcb_desc->cts_enable) ? 1 : 0)); - SET_TX_DESC_CTS2SELF(txdesc, ((tcb_desc->cts_enable) ? 1 : 0)); - SET_TX_DESC_RTS_STBC(txdesc, ((tcb_desc->rts_stbc) ? 1 : 0)); - SET_TX_DESC_RTS_RATE(txdesc, tcb_desc->rts_rate); - SET_TX_DESC_RTS_BW(txdesc, 0); - SET_TX_DESC_RTS_SC(txdesc, tcb_desc->rts_sc); - SET_TX_DESC_RTS_SHORT(txdesc, + set_tx_desc_seq(txdesc, seq_number); + set_tx_desc_rts_enable(txdesc, + ((tcb_desc->rts_enable && + !tcb_desc->cts_enable) ? 1 : 0)); + set_tx_desc_hw_rts_enable(txdesc, + ((tcb_desc->rts_enable || + tcb_desc->cts_enable) ? 1 : 0)); + set_tx_desc_cts2self(txdesc, ((tcb_desc->cts_enable) ? 1 : 0)); + set_tx_desc_rts_stbc(txdesc, ((tcb_desc->rts_stbc) ? 1 : 0)); + set_tx_desc_rts_rate(txdesc, tcb_desc->rts_rate); + set_tx_desc_rts_bw(txdesc, 0); + set_tx_desc_rts_sc(txdesc, tcb_desc->rts_sc); + set_tx_desc_rts_short(txdesc, ((tcb_desc->rts_rate <= DESC_RATE54M) ? (tcb_desc->rts_use_shortpreamble ? 1 : 0) : (tcb_desc->rts_use_shortgi ? 1 : 0))); if (mac->bw_40) { if (rate_flag & IEEE80211_TX_RC_DUP_DATA) { - SET_TX_DESC_DATA_BW(txdesc, 1); - SET_TX_DESC_DATA_SC(txdesc, 3); + set_tx_desc_data_bw(txdesc, 1); + set_tx_desc_data_sc(txdesc, 3); } else if(rate_flag & IEEE80211_TX_RC_40_MHZ_WIDTH){ - SET_TX_DESC_DATA_BW(txdesc, 1); - SET_TX_DESC_DATA_SC(txdesc, mac->cur_40_prime_sc); + set_tx_desc_data_bw(txdesc, 1); + set_tx_desc_data_sc(txdesc, mac->cur_40_prime_sc); } else { - SET_TX_DESC_DATA_BW(txdesc, 0); - SET_TX_DESC_DATA_SC(txdesc, 0); + set_tx_desc_data_bw(txdesc, 0); + set_tx_desc_data_sc(txdesc, 0); } } else { - SET_TX_DESC_DATA_BW(txdesc, 0); - SET_TX_DESC_DATA_SC(txdesc, 0); + set_tx_desc_data_bw(txdesc, 0); + set_tx_desc_data_sc(txdesc, 0); } rcu_read_lock(); sta = ieee80211_find_sta(mac->vif, mac->bssid); if (sta) { u8 ampdu_density = sta->ht_cap.ampdu_density; - SET_TX_DESC_AMPDU_DENSITY(txdesc, ampdu_density); + set_tx_desc_ampdu_density(txdesc, ampdu_density); } rcu_read_unlock(); if (info->control.hw_key) { @@ -547,107 +552,110 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw, case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: case WLAN_CIPHER_SUITE_TKIP: - SET_TX_DESC_SEC_TYPE(txdesc, 0x1); + set_tx_desc_sec_type(txdesc, 0x1); break; case WLAN_CIPHER_SUITE_CCMP: - SET_TX_DESC_SEC_TYPE(txdesc, 0x3); + set_tx_desc_sec_type(txdesc, 0x3); break; default: - SET_TX_DESC_SEC_TYPE(txdesc, 0x0); + set_tx_desc_sec_type(txdesc, 0x0); break; } } - SET_TX_DESC_PKT_ID(txdesc, 0); - SET_TX_DESC_QUEUE_SEL(txdesc, fw_qsel); - SET_TX_DESC_DATA_RATE_FB_LIMIT(txdesc, 0x1F); - SET_TX_DESC_RTS_RATE_FB_LIMIT(txdesc, 0xF); - SET_TX_DESC_DISABLE_FB(txdesc, 0); - SET_TX_DESC_USE_RATE(txdesc, tcb_desc->use_driver_rate ? 1 : 0); + set_tx_desc_pkt_id(txdesc, 0); + set_tx_desc_queue_sel(txdesc, fw_qsel); + set_tx_desc_data_rate_fb_limit(txdesc, 0x1F); + set_tx_desc_rts_rate_fb_limit(txdesc, 0xF); + set_tx_desc_disable_fb(txdesc, 0); + set_tx_desc_use_rate(txdesc, tcb_desc->use_driver_rate ? 
1 : 0);
 	if (ieee80211_is_data_qos(fc)) {
 		if (mac->rdg_en) {
 			RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
 				 "Enable RDG function\n");
-			SET_TX_DESC_RDG_ENABLE(txdesc, 1);
-			SET_TX_DESC_HTC(txdesc, 1);
+			set_tx_desc_rdg_enable(txdesc, 1);
+			set_tx_desc_htc(txdesc, 1);
 		}
 	}
 	if (rtlpriv->dm.useramask) {
-		SET_TX_DESC_RATE_ID(txdesc, tcb_desc->ratr_index);
-		SET_TX_DESC_MACID(txdesc, tcb_desc->mac_id);
+		set_tx_desc_rate_id(txdesc, tcb_desc->ratr_index);
+		set_tx_desc_macid(txdesc, tcb_desc->mac_id);
 	} else {
-		SET_TX_DESC_RATE_ID(txdesc, 0xC + tcb_desc->ratr_index);
-		SET_TX_DESC_MACID(txdesc, tcb_desc->ratr_index);
+		set_tx_desc_rate_id(txdesc, 0xC + tcb_desc->ratr_index);
+		set_tx_desc_macid(txdesc, tcb_desc->ratr_index);
 	}
 	if ((!ieee80211_is_data_qos(fc)) && ppsc->leisure_ps &&
 	    ppsc->fwctrl_lps) {
-		SET_TX_DESC_HWSEQ_EN(txdesc, 1);
-		SET_TX_DESC_PKT_ID(txdesc, 8);
+		set_tx_desc_hwseq_en(txdesc, 1);
+		set_tx_desc_pkt_id(txdesc, 8);
 		if (!defaultadapter)
-			SET_TX_DESC_QOS(txdesc, 1);
+			set_tx_desc_qos(txdesc, 1);
 	}
 	if (ieee80211_has_morefrags(fc))
-		SET_TX_DESC_MORE_FRAG(txdesc, 1);
+		set_tx_desc_more_frag(txdesc, 1);
 	if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
 	    is_broadcast_ether_addr(ieee80211_get_DA(hdr)))
-		SET_TX_DESC_BMC(txdesc, 1);
+		set_tx_desc_bmc(txdesc, 1);
 	_rtl_fill_usb_tx_desc(txdesc);
 	_rtl_tx_desc_checksum(txdesc);
 	RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "==>\n");
 }
 
-void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 *pdesc,
+void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 *pdesc8,
 			      u32 buffer_len, bool is_pspoll)
 {
+	__le32 *pdesc = (__le32 *)pdesc8;
+
 	/* Clear all status */
 	memset(pdesc, 0, RTL_TX_HEADER_SIZE);
-	SET_TX_DESC_FIRST_SEG(pdesc, 1); /* bFirstSeg; */
-	SET_TX_DESC_LAST_SEG(pdesc, 1); /* bLastSeg; */
-	SET_TX_DESC_OFFSET(pdesc, RTL_TX_HEADER_SIZE); /* Offset = 32 */
-	SET_TX_DESC_PKT_SIZE(pdesc, buffer_len); /* Buffer size + command hdr */
-	SET_TX_DESC_QUEUE_SEL(pdesc, QSLT_MGNT); /* Fixed queue of Mgnt queue */
+	set_tx_desc_first_seg(pdesc, 1); /* bFirstSeg; */
+	set_tx_desc_last_seg(pdesc, 1); /* bLastSeg; */
+	set_tx_desc_offset(pdesc, RTL_TX_HEADER_SIZE); /* Offset = 32 */
+	set_tx_desc_pkt_size(pdesc, buffer_len); /* Buffer size + command hdr */
+	set_tx_desc_queue_sel(pdesc, QSLT_MGNT); /* Fixed queue of Mgnt queue */
 	/* Set NAVUSEHDR to prevent the Ps-poll AID field from being changed
 	 * to an erroneous value by Hw. */
 	if (is_pspoll) {
-		SET_TX_DESC_NAV_USE_HDR(pdesc, 1);
+		set_tx_desc_nav_use_hdr(pdesc, 1);
 	} else {
-		SET_TX_DESC_HWSEQ_EN(pdesc, 1); /* Hw set sequence number */
-		SET_TX_DESC_PKT_ID(pdesc, 0x100); /* set bit3 to 1. */
+		set_tx_desc_hwseq_en(pdesc, 1); /* Hw set sequence number */
+		set_tx_desc_pkt_id(pdesc, BIT(3)); /* set bit3 to 1.
*/ } - SET_TX_DESC_USE_RATE(pdesc, 1); /* use data rate which is set by Sw */ - SET_TX_DESC_OWN(pdesc, 1); - SET_TX_DESC_TX_RATE(pdesc, DESC_RATE1M); + set_tx_desc_use_rate(pdesc, 1); /* use data rate which is set by Sw */ + set_tx_desc_own(pdesc, 1); + set_tx_desc_tx_rate(pdesc, DESC_RATE1M); _rtl_tx_desc_checksum(pdesc); } void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw, - u8 *pdesc, bool firstseg, + u8 *pdesc8, bool firstseg, bool lastseg, struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 fw_queue = QSLT_BEACON; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); __le16 fc = hdr->frame_control; + __le32 *pdesc = (__le32 *)pdesc8; memset((void *)pdesc, 0, RTL_TX_HEADER_SIZE); if (firstseg) - SET_TX_DESC_OFFSET(pdesc, RTL_TX_HEADER_SIZE); - SET_TX_DESC_TX_RATE(pdesc, DESC_RATE1M); - SET_TX_DESC_SEQ(pdesc, 0); - SET_TX_DESC_LINIP(pdesc, 0); - SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue); - SET_TX_DESC_FIRST_SEG(pdesc, 1); - SET_TX_DESC_LAST_SEG(pdesc, 1); - SET_TX_DESC_RATE_ID(pdesc, 7); - SET_TX_DESC_MACID(pdesc, 0); - SET_TX_DESC_OWN(pdesc, 1); - SET_TX_DESC_PKT_SIZE(pdesc, (u16)skb->len); - SET_TX_DESC_FIRST_SEG(pdesc, 1); - SET_TX_DESC_LAST_SEG(pdesc, 1); - SET_TX_DESC_OFFSET(pdesc, 0x20); - SET_TX_DESC_USE_RATE(pdesc, 1); + set_tx_desc_offset(pdesc, RTL_TX_HEADER_SIZE); + set_tx_desc_tx_rate(pdesc, DESC_RATE1M); + set_tx_desc_seq(pdesc, 0); + set_tx_desc_linip(pdesc, 0); + set_tx_desc_queue_sel(pdesc, fw_queue); + set_tx_desc_first_seg(pdesc, 1); + set_tx_desc_last_seg(pdesc, 1); + set_tx_desc_rate_id(pdesc, 7); + set_tx_desc_macid(pdesc, 0); + set_tx_desc_own(pdesc, 1); + set_tx_desc_pkt_size(pdesc, (u16)skb->len); + set_tx_desc_first_seg(pdesc, 1); + set_tx_desc_last_seg(pdesc, 1); + set_tx_desc_offset(pdesc, 0x20); + set_tx_desc_use_rate(pdesc, 1); if (!ieee80211_is_data_qos(fc)) { - SET_TX_DESC_HWSEQ_EN(pdesc, 1); - SET_TX_DESC_PKT_ID(pdesc, 8); + set_tx_desc_hwseq_en(pdesc, 1); + set_tx_desc_pkt_id(pdesc, 8); } RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, "H2C Tx Cmd Content", pdesc, RTL_TX_DESC_SIZE); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h index ae2e8aa212de..171fe39dfb0c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h @@ -73,286 +73,307 @@ struct rx_drv_info_92c { /* macros to read various fields in RX descriptor */ /* DWORD 0 */ -#define GET_RX_DESC_PKT_LEN(__rxdesc) \ - LE_BITS_TO_4BYTE((__rxdesc), 0, 14) -#define GET_RX_DESC_CRC32(__rxdesc) \ - LE_BITS_TO_4BYTE(__rxdesc, 14, 1) -#define GET_RX_DESC_ICV(__rxdesc) \ - LE_BITS_TO_4BYTE(__rxdesc, 15, 1) -#define GET_RX_DESC_DRVINFO_SIZE(__rxdesc) \ - LE_BITS_TO_4BYTE(__rxdesc, 16, 4) -#define GET_RX_DESC_SECURITY(__rxdesc) \ - LE_BITS_TO_4BYTE(__rxdesc, 20, 3) -#define GET_RX_DESC_QOS(__rxdesc) \ - LE_BITS_TO_4BYTE(__rxdesc, 23, 1) -#define GET_RX_DESC_SHIFT(__rxdesc) \ - LE_BITS_TO_4BYTE(__rxdesc, 24, 2) -#define GET_RX_DESC_PHY_STATUS(__rxdesc) \ - LE_BITS_TO_4BYTE(__rxdesc, 26, 1) -#define GET_RX_DESC_SWDEC(__rxdesc) \ - LE_BITS_TO_4BYTE(__rxdesc, 27, 1) -#define GET_RX_DESC_LAST_SEG(__rxdesc) \ - LE_BITS_TO_4BYTE(__rxdesc, 28, 1) -#define GET_RX_DESC_FIRST_SEG(__rxdesc) \ - LE_BITS_TO_4BYTE(__rxdesc, 29, 1) -#define GET_RX_DESC_EOR(__rxdesc) \ - LE_BITS_TO_4BYTE(__rxdesc, 30, 1) -#define GET_RX_DESC_OWN(__rxdesc) \ - LE_BITS_TO_4BYTE(__rxdesc, 31, 1) +static inline u32 get_rx_desc_pkt_len(__le32 *__rxdesc) +{ + return le32_get_bits(*__rxdesc, 
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h
index ae2e8aa212de..171fe39dfb0c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h
@@ -73,286 +73,307 @@ struct rx_drv_info_92c {
 /* macros to read various fields in RX descriptor */
 
 /* DWORD 0 */
-#define GET_RX_DESC_PKT_LEN(__rxdesc)		\
-	LE_BITS_TO_4BYTE((__rxdesc), 0, 14)
-#define GET_RX_DESC_CRC32(__rxdesc)		\
-	LE_BITS_TO_4BYTE(__rxdesc, 14, 1)
-#define GET_RX_DESC_ICV(__rxdesc)		\
-	LE_BITS_TO_4BYTE(__rxdesc, 15, 1)
-#define GET_RX_DESC_DRVINFO_SIZE(__rxdesc)	\
-	LE_BITS_TO_4BYTE(__rxdesc, 16, 4)
-#define GET_RX_DESC_SECURITY(__rxdesc)		\
-	LE_BITS_TO_4BYTE(__rxdesc, 20, 3)
-#define GET_RX_DESC_QOS(__rxdesc)		\
-	LE_BITS_TO_4BYTE(__rxdesc, 23, 1)
-#define GET_RX_DESC_SHIFT(__rxdesc)		\
-	LE_BITS_TO_4BYTE(__rxdesc, 24, 2)
-#define GET_RX_DESC_PHY_STATUS(__rxdesc)	\
-	LE_BITS_TO_4BYTE(__rxdesc, 26, 1)
-#define GET_RX_DESC_SWDEC(__rxdesc)		\
-	LE_BITS_TO_4BYTE(__rxdesc, 27, 1)
-#define GET_RX_DESC_LAST_SEG(__rxdesc)		\
-	LE_BITS_TO_4BYTE(__rxdesc, 28, 1)
-#define GET_RX_DESC_FIRST_SEG(__rxdesc)		\
-	LE_BITS_TO_4BYTE(__rxdesc, 29, 1)
-#define GET_RX_DESC_EOR(__rxdesc)		\
-	LE_BITS_TO_4BYTE(__rxdesc, 30, 1)
-#define GET_RX_DESC_OWN(__rxdesc)		\
-	LE_BITS_TO_4BYTE(__rxdesc, 31, 1)
+static inline u32 get_rx_desc_pkt_len(__le32 *__rxdesc)
+{
+	return le32_get_bits(*__rxdesc, GENMASK(13, 0));
+}
+
+static inline u32 get_rx_desc_crc32(__le32 *__rxdesc)
+{
+	return le32_get_bits(*__rxdesc, BIT(14));
+}
+
+static inline u32 get_rx_desc_icv(__le32 *__rxdesc)
+{
+	return le32_get_bits(*__rxdesc, BIT(15));
+}
+
+static inline u32 get_rx_desc_drvinfo_size(__le32 *__rxdesc)
+{
+	return le32_get_bits(*__rxdesc, GENMASK(19, 16));
+}
+
+static inline u32 get_rx_desc_shift(__le32 *__rxdesc)
+{
+	return le32_get_bits(*__rxdesc, GENMASK(25, 24));
+}
+
+static inline u32 get_rx_desc_phy_status(__le32 *__rxdesc)
+{
+	return le32_get_bits(*__rxdesc, BIT(26));
+}
+
+static inline u32 get_rx_desc_swdec(__le32 *__rxdesc)
+{
+	return le32_get_bits(*__rxdesc, BIT(27));
+}
+
 /* DWORD 1 */
-#define GET_RX_DESC_MACID(__rxdesc)		\
-	LE_BITS_TO_4BYTE(__rxdesc + 4, 0, 5)
-#define GET_RX_DESC_TID(__rxdesc)		\
-	LE_BITS_TO_4BYTE(__rxdesc + 4, 5, 4)
-#define GET_RX_DESC_PAGGR(__rxdesc)		\
-	LE_BITS_TO_4BYTE(__rxdesc + 4, 14, 1)
-#define GET_RX_DESC_FAGGR(__rxdesc)		\
-	LE_BITS_TO_4BYTE(__rxdesc + 4, 15, 1)
-#define GET_RX_DESC_A1_FIT(__rxdesc)		\
-	LE_BITS_TO_4BYTE(__rxdesc + 4, 16, 4)
-#define GET_RX_DESC_A2_FIT(__rxdesc)		\
-	LE_BITS_TO_4BYTE(__rxdesc + 4, 20, 4)
-#define GET_RX_DESC_PAM(__rxdesc)		\
-	LE_BITS_TO_4BYTE(__rxdesc + 4, 24, 1)
-#define GET_RX_DESC_PWR(__rxdesc)		\
-	LE_BITS_TO_4BYTE(__rxdesc + 4, 25, 1)
-#define GET_RX_DESC_MORE_DATA(__rxdesc)		\
-	LE_BITS_TO_4BYTE(__rxdesc + 4, 26, 1)
-#define GET_RX_DESC_MORE_FRAG(__rxdesc)		\
-	LE_BITS_TO_4BYTE(__rxdesc + 4, 27, 1)
-#define GET_RX_DESC_TYPE(__rxdesc)		\
-	LE_BITS_TO_4BYTE(__rxdesc + 4, 28, 2)
-#define GET_RX_DESC_MC(__rxdesc)		\
-	LE_BITS_TO_4BYTE(__rxdesc + 4, 30, 1)
-#define GET_RX_DESC_BC(__rxdesc)		\
-	LE_BITS_TO_4BYTE(__rxdesc + 4, 31, 1)
-
-/* DWORD 2 */
-#define GET_RX_DESC_SEQ(__rxdesc)		\
-	LE_BITS_TO_4BYTE(__rxdesc + 8, 0, 12)
-#define GET_RX_DESC_FRAG(__rxdesc)		\
-	LE_BITS_TO_4BYTE(__rxdesc + 8, 12, 4)
-#define GET_RX_DESC_USB_AGG_PKTNUM(__rxdesc)	\
-	LE_BITS_TO_4BYTE(__rxdesc + 8, 16, 8)
-#define GET_RX_DESC_NEXT_IND(__rxdesc)		\
-	LE_BITS_TO_4BYTE(__rxdesc + 8, 30, 1)
+static inline u32 get_rx_desc_paggr(__le32 *__rxdesc)
+{
+	return le32_get_bits(*(__rxdesc + 1), BIT(14));
+}
+
+static inline u32 get_rx_desc_faggr(__le32 *__rxdesc)
+{
+	return le32_get_bits(*(__rxdesc + 1), BIT(15));
+}
+
 /* DWORD 3 */
-#define GET_RX_DESC_RX_MCS(__rxdesc)		\
-	LE_BITS_TO_4BYTE(__rxdesc + 12, 0, 6)
-#define GET_RX_DESC_RX_HT(__rxdesc)		\
-	LE_BITS_TO_4BYTE(__rxdesc + 12, 6, 1)
-#define GET_RX_DESC_AMSDU(__rxdesc)		\
-	LE_BITS_TO_4BYTE(__rxdesc + 12, 7, 1)
-#define GET_RX_DESC_SPLCP(__rxdesc)		\
-	LE_BITS_TO_4BYTE(__rxdesc + 12, 8, 1)
-#define GET_RX_DESC_BW(__rxdesc)		\
-	LE_BITS_TO_4BYTE(__rxdesc + 12, 9, 1)
-#define GET_RX_DESC_HTC(__rxdesc)		\
-	LE_BITS_TO_4BYTE(__rxdesc + 12, 10, 1)
-#define GET_RX_DESC_TCP_CHK_RPT(__rxdesc)	\
-	LE_BITS_TO_4BYTE(__rxdesc + 12, 11, 1)
-#define GET_RX_DESC_IP_CHK_RPT(__rxdesc)	\
-	LE_BITS_TO_4BYTE(__rxdesc + 12, 12, 1)
-#define GET_RX_DESC_TCP_CHK_VALID(__rxdesc)	\
-	LE_BITS_TO_4BYTE(__rxdesc + 12, 13, 1)
-#define GET_RX_DESC_HWPC_ERR(__rxdesc)		\
-	LE_BITS_TO_4BYTE(__rxdesc + 12, 14, 1)
-#define GET_RX_DESC_HWPC_IND(__rxdesc)		\
-	LE_BITS_TO_4BYTE(__rxdesc + 12, 15, 1)
-#define GET_RX_DESC_IV0(__rxdesc)		\
-	LE_BITS_TO_4BYTE(__rxdesc + 12, 16, 16)
-
-/* DWORD 4 */
-#define GET_RX_DESC_IV1(__rxdesc)		\
-	LE_BITS_TO_4BYTE(__rxdesc + 16, 0, 32)
+static inline u32 get_rx_desc_rx_mcs(__le32 *__rxdesc)
+{
+	return le32_get_bits(*(__rxdesc + 3), GENMASK(5, 0));
+}
+
+static inline u32 get_rx_desc_rx_ht(__le32 *__rxdesc)
+{
+	return le32_get_bits(*(__rxdesc + 3), BIT(6));
+}
+
+static inline u32 get_rx_desc_splcp(__le32 *__rxdesc)
+{
+	return le32_get_bits(*(__rxdesc + 3), BIT(8));
+}
+
+static inline u32 get_rx_desc_bw(__le32 *__rxdesc)
+{
+	return le32_get_bits(*(__rxdesc + 3), BIT(9));
+}
+
 /* DWORD 5 */
-#define GET_RX_DESC_TSFL(__rxdesc)		\
-	LE_BITS_TO_4BYTE(__rxdesc + 20, 0, 32)
+static inline u32 get_rx_desc_tsfl(__le32 *__rxdesc)
+{
+	return le32_to_cpu(*((__rxdesc + 5)));
+}
 
 /*======================= tx desc ============================================*/
 
 /* macros to set various fields in TX descriptor */
 
 /* Dword 0 */
-#define SET_TX_DESC_PKT_SIZE(__txdesc, __value)		\
-	SET_BITS_TO_LE_4BYTE(__txdesc, 0, 16, __value)
-#define SET_TX_DESC_OFFSET(__txdesc, __value)		\
-	SET_BITS_TO_LE_4BYTE(__txdesc, 16, 8, __value)
-#define SET_TX_DESC_BMC(__txdesc, __value)		\
-	SET_BITS_TO_LE_4BYTE(__txdesc, 24, 1, __value)
-#define SET_TX_DESC_HTC(__txdesc, __value)		\
-	SET_BITS_TO_LE_4BYTE(__txdesc, 25, 1, __value)
-#define SET_TX_DESC_LAST_SEG(__txdesc, __value)		\
-	SET_BITS_TO_LE_4BYTE(__txdesc, 26, 1, __value)
-#define SET_TX_DESC_FIRST_SEG(__txdesc, __value)	\
-	SET_BITS_TO_LE_4BYTE(__txdesc, 27, 1, __value)
-#define SET_TX_DESC_LINIP(__txdesc, __value)		\
-	SET_BITS_TO_LE_4BYTE(__txdesc, 28, 1, __value)
-#define SET_TX_DESC_NO_ACM(__txdesc, __value)		\
-	SET_BITS_TO_LE_4BYTE(__txdesc, 29, 1, __value)
-#define SET_TX_DESC_GF(__txdesc, __value)		\
-	SET_BITS_TO_LE_4BYTE(__txdesc, 30, 1, __value)
-#define SET_TX_DESC_OWN(__txdesc, __value)		\
-	SET_BITS_TO_LE_4BYTE(__txdesc, 31, 1, __value)
+static inline void set_tx_desc_pkt_size(__le32 *__txdesc, u32 __value)
+{
+	le32p_replace_bits(__txdesc, __value, GENMASK(15, 0));
+}
+
+static inline void set_tx_desc_offset(__le32 *__txdesc, u32 __value)
+{
+	le32p_replace_bits(__txdesc, __value, GENMASK(23, 16));
+}
+
+static inline void set_tx_desc_bmc(__le32 *__txdesc, u32 __value)
+{
+	le32p_replace_bits(__txdesc, __value, BIT(24));
+}
+
+static inline void set_tx_desc_htc(__le32 *__txdesc, u32 __value)
+{
+	le32p_replace_bits(__txdesc, __value, BIT(25));
+}
+
+static inline void set_tx_desc_last_seg(__le32 *__txdesc, u32 __value)
+{
+	le32p_replace_bits(__txdesc, __value, BIT(26));
+}
+
+static inline void set_tx_desc_first_seg(__le32 *__txdesc, u32 __value)
+{
+	le32p_replace_bits(__txdesc, __value, BIT(27));
+}
+
+static inline void set_tx_desc_linip(__le32 *__txdesc, u32 __value)
+{
+	le32p_replace_bits(__txdesc, __value, BIT(28));
+}
+
+static inline void set_tx_desc_own(__le32 *__txdesc, u32 __value)
+{
+	le32p_replace_bits(__txdesc, __value, BIT(31));
+}
+
 /* Dword 1 */
-#define SET_TX_DESC_MACID(__txdesc, __value)		\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 4, 0, 5, __value)
-#define SET_TX_DESC_AGG_ENABLE(__txdesc, __value)	\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 4, 5, 1, __value)
-#define SET_TX_DESC_AGG_BREAK(__txdesc, __value)	\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 4, 6, 1, __value)
-#define SET_TX_DESC_RDG_ENABLE(__txdesc, __value)	\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 4, 7, 1, __value)
-#define SET_TX_DESC_QUEUE_SEL(__txdesc, __value)	\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 4, 8, 5, __value)
-#define SET_TX_DESC_RDG_NAV_EXT(__txdesc, __value)	\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 4, 13, 1, __value)
-#define SET_TX_DESC_LSIG_TXOP_EN(__txdesc, __value)	\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 4, 14, 1, __value)
-#define SET_TX_DESC_PIFS(__txdesc, __value)		\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 4, 15, 1, __value)
-#define SET_TX_DESC_RATE_ID(__txdesc, __value)		\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 4, 16, 4, __value)
-#define SET_TX_DESC_RA_BRSR_ID(__txdesc, __value)	\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 4, 16, 4, __value)
-#define SET_TX_DESC_NAV_USE_HDR(__txdesc, __value)	\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 4, 20, 1, __value)
-#define SET_TX_DESC_EN_DESC_ID(__txdesc, __value)	\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 4, 21, 1, __value)
-#define SET_TX_DESC_SEC_TYPE(__txdesc, __value)		\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 4, 22, 2, __value)
-#define SET_TX_DESC_PKT_OFFSET(__txdesc, __value)	\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 4, 26, 5, __value)
+static inline void set_tx_desc_macid(__le32 *__txdesc, u32 __value)
+{
+	le32p_replace_bits((__txdesc + 1), __value, GENMASK(4, 0));
+}
+
+static inline void set_tx_desc_agg_enable(__le32 *__txdesc, u32 __value)
+{
+	le32p_replace_bits((__txdesc + 1), __value, BIT(5));
+}
+
+static inline void set_tx_desc_agg_break(__le32 *__txdesc, u32 __value)
+{
+	le32p_replace_bits((__txdesc + 1), __value, BIT(6));
+}
+
+static inline void set_tx_desc_rdg_enable(__le32 *__txdesc, u32 __value)
+{
+	le32p_replace_bits((__txdesc + 1), __value, BIT(7));
+}
+
+static inline void set_tx_desc_queue_sel(__le32 *__txdesc, u32 __value)
+{
+	le32p_replace_bits((__txdesc + 1), __value, GENMASK(12, 8));
+}
+
+static inline void set_tx_desc_rate_id(__le32 *__txdesc, u32 __value)
+{
+	le32p_replace_bits((__txdesc + 1), __value, GENMASK(19, 16));
+}
+
+static inline void set_tx_desc_nav_use_hdr(__le32 *__txdesc, u32 __value)
+{
+	le32p_replace_bits((__txdesc + 1), __value, BIT(20));
+}
+
+static inline void set_tx_desc_sec_type(__le32 *__txdesc, u32 __value)
+{
+	le32p_replace_bits((__txdesc + 1), __value, GENMASK(23, 22));
+}
+
+static inline void set_tx_desc_pkt_offset(__le32 *__txdesc, u32 __value)
+{
+	le32p_replace_bits((__txdesc + 1), __value, GENMASK(30, 26));
+}
+
 /* Dword 2 */
-#define SET_TX_DESC_RTS_RC(__txdesc, __value)		\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 8, 0, 6, __value)
-#define SET_TX_DESC_DATA_RC(__txdesc, __value)		\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 8, 6, 6, __value)
-#define SET_TX_DESC_BAR_RTY_TH(__txdesc, __value)	\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 8, 14, 2, __value)
-#define SET_TX_DESC_MORE_FRAG(__txdesc, __value)	\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 8, 17, 1, __value)
-#define SET_TX_DESC_RAW(__txdesc, __value)		\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 8, 18, 1, __value)
-#define SET_TX_DESC_CCX(__txdesc, __value)		\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 8, 19, 1, __value)
-#define SET_TX_DESC_AMPDU_DENSITY(__txdesc, __value)	\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 8, 20, 3, __value)
-#define SET_TX_DESC_ANTSEL_A(__txdesc, __value)		\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 8, 24, 1, __value)
-#define SET_TX_DESC_ANTSEL_B(__txdesc, __value)		\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 8, 25, 1, __value)
-#define SET_TX_DESC_TX_ANT_CCK(__txdesc, __value)	\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 8, 26, 2, __value)
-#define SET_TX_DESC_TX_ANTL(__txdesc, __value)		\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 8, 28, 2, __value)
-#define SET_TX_DESC_TX_ANT_HT(__txdesc, __value)	\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 8, 30, 2, __value)
+static inline void set_tx_desc_more_frag(__le32 *__txdesc, u32 __value)
+{
+	le32p_replace_bits((__txdesc + 2), __value, BIT(17));
+}
+
+static inline void set_tx_desc_ampdu_density(__le32 *__txdesc, u32 __value)
+{
+	le32p_replace_bits((__txdesc + 2), __value, GENMASK(22, 20));
+}
+
 /* Dword 3 */
-#define SET_TX_DESC_NEXT_HEAP_PAGE(__txdesc, __value)	\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 12, 0, 8, __value)
-#define SET_TX_DESC_TAIL_PAGE(__txdesc, __value)	\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 12, 8, 8, __value)
-#define SET_TX_DESC_SEQ(__txdesc, __value)		\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 12, 16, 12, __value)
-#define SET_TX_DESC_PKT_ID(__txdesc, __value)		\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 12, 28, 4, __value)
+static inline void set_tx_desc_seq(__le32 *__txdesc, u32 __value)
+{
+	le32p_replace_bits((__txdesc + 3), __value, GENMASK(27, 16));
+}
+
+static inline void set_tx_desc_pkt_id(__le32 *__txdesc, u32 __value)
+{
+	le32p_replace_bits((__txdesc + 3), __value, GENMASK(31, 28));
+}
+
 /* Dword 4 */
-#define SET_TX_DESC_RTS_RATE(__txdesc, __value)		\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 16, 0, 5, __value)
-#define SET_TX_DESC_AP_DCFE(__txdesc, __value)		\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 16, 5, 1, __value)
-#define SET_TX_DESC_QOS(__txdesc, __value)		\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 16, 6, 1, __value)
-#define SET_TX_DESC_HWSEQ_EN(__txdesc, __value)		\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 16, 7, 1, __value)
-#define SET_TX_DESC_USE_RATE(__txdesc, __value)		\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 16, 8, 1, __value)
-#define SET_TX_DESC_DISABLE_RTS_FB(__txdesc, __value)	\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 16, 9, 1, __value)
-#define SET_TX_DESC_DISABLE_FB(__txdesc, __value)	\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 16, 10, 1, __value)
-#define SET_TX_DESC_CTS2SELF(__txdesc, __value)		\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 16, 11, 1, __value)
-#define SET_TX_DESC_RTS_ENABLE(__txdesc, __value)	\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 16, 12, 1, __value)
-#define SET_TX_DESC_HW_RTS_ENABLE(__txdesc, __value)	\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 16, 13, 1, __value)
-#define SET_TX_DESC_WAIT_DCTS(__txdesc, __value)	\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 16, 18, 1, __value)
-#define SET_TX_DESC_CTS2AP_EN(__txdesc, __value)	\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 16, 19, 1, __value)
-#define SET_TX_DESC_DATA_SC(__txdesc, __value)		\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 16, 20, 2, __value)
-#define SET_TX_DESC_DATA_STBC(__txdesc, __value)	\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 16, 22, 2, __value)
-#define SET_TX_DESC_DATA_SHORT(__txdesc, __value)	\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 16, 24, 1, __value)
-#define SET_TX_DESC_DATA_BW(__txdesc, __value)		\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 16, 25, 1, __value)
-#define SET_TX_DESC_RTS_SHORT(__txdesc, __value)	\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 16, 26, 1, __value)
-#define SET_TX_DESC_RTS_BW(__txdesc, __value)		\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 16, 27, 1, __value)
-#define SET_TX_DESC_RTS_SC(__txdesc, __value)		\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 16, 28, 2, __value)
-#define SET_TX_DESC_RTS_STBC(__txdesc, __value)		\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 16, 30, 2, __value)
+static inline void set_tx_desc_rts_rate(__le32 *__txdesc, u32 __value)
+{
+	le32p_replace_bits((__txdesc + 4), __value, GENMASK(4, 0));
+}
+
+static inline void set_tx_desc_qos(__le32 *__txdesc, u32 __value)
+{
+	le32p_replace_bits((__txdesc + 4), __value, BIT(6));
+}
+
+static inline void set_tx_desc_hwseq_en(__le32 *__txdesc, u32 __value)
+{
+	le32p_replace_bits((__txdesc + 4), __value, BIT(7));
+}
+
+static inline void set_tx_desc_use_rate(__le32 *__txdesc, u32 __value)
+{
+	le32p_replace_bits((__txdesc + 4), __value, BIT(8));
+}
+
+static inline void set_tx_desc_disable_fb(__le32 *__txdesc, u32 __value)
+{
+	le32p_replace_bits((__txdesc + 4), __value, BIT(10));
+}
+
+static inline void set_tx_desc_cts2self(__le32 *__txdesc, u32 __value)
+{
+	le32p_replace_bits((__txdesc + 4), __value, BIT(11));
+}
+
+static inline void set_tx_desc_rts_enable(__le32 *__txdesc, u32 __value)
+{
+	le32p_replace_bits((__txdesc + 4), __value, BIT(12));
+}
+
+static inline void set_tx_desc_hw_rts_enable(__le32 *__txdesc, u32 __value)
+{
+	le32p_replace_bits((__txdesc + 4), __value, BIT(13));
+}
+
+static inline void set_tx_desc_data_sc(__le32 *__txdesc, u32 __value)
+{
+	le32p_replace_bits((__txdesc + 4), __value, GENMASK(21, 20));
+}
+
+static inline void set_tx_desc_data_bw(__le32 *__txdesc, u32 __value)
+{
+	le32p_replace_bits((__txdesc + 4), __value, BIT(25));
+}
+
+static inline void set_tx_desc_rts_short(__le32 *__txdesc, u32 __value)
+{
+	le32p_replace_bits((__txdesc + 4), __value, BIT(26));
+}
+
+static inline void set_tx_desc_rts_bw(__le32 *__txdesc, u32 __value)
+{
+	le32p_replace_bits((__txdesc + 4), __value, BIT(27));
+}
+
+static inline void set_tx_desc_rts_sc(__le32 *__txdesc, u32 __value)
+{
+	le32p_replace_bits((__txdesc + 4), __value, GENMASK(29, 28));
+}
+
+static inline void set_tx_desc_rts_stbc(__le32 *__txdesc, u32 __value)
+{
+	le32p_replace_bits((__txdesc + 4), __value, GENMASK(31, 30));
+}
+
 /* Dword 5 */
-#define SET_TX_DESC_TX_RATE(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc + 20, 0, 6, __val)
-#define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val)	\
-	SET_BITS_TO_LE_4BYTE(__pdesc + 20, 6, 1, __val)
-#define SET_TX_DESC_CCX_TAG(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc + 20, 7, 1, __val)
-#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__txdesc, __value) \
-	SET_BITS_TO_LE_4BYTE(__txdesc + 20, 8, 5, __value)
-#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__txdesc, __value) \
-	SET_BITS_TO_LE_4BYTE(__txdesc + 20, 13, 4, __value)
-#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__txdesc, __value) \
-	SET_BITS_TO_LE_4BYTE(__txdesc + 20, 17, 1, __value)
-#define SET_TX_DESC_DATA_RETRY_LIMIT(__txdesc, __value)	\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 20, 18, 6, __value)
-#define SET_TX_DESC_USB_TXAGG_NUM(__txdesc, __value)	\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 20, 24, 8, __value)
+static inline void set_tx_desc_tx_rate(__le32 *__pdesc, u32 __val)
+{
+	le32p_replace_bits((__pdesc + 5), __val, GENMASK(5, 0));
+}
+
+static inline void set_tx_desc_data_shortgi(__le32 *__pdesc, u32 __val)
+{
+	le32p_replace_bits((__pdesc + 5), __val, BIT(6));
+}
+
+static inline void set_tx_desc_data_rate_fb_limit(__le32 *__txdesc, u32 __value)
+{
+	le32p_replace_bits((__txdesc + 5), __value, GENMASK(12, 8));
+}
+
+static inline void set_tx_desc_rts_rate_fb_limit(__le32 *__txdesc, u32 __value)
+{
+	le32p_replace_bits((__txdesc + 5), __value, GENMASK(16, 13));
+}
+
 /* Dword 6 */
-#define SET_TX_DESC_TXAGC_A(__txdesc, __value)		\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 24, 0, 5, __value)
-#define SET_TX_DESC_TXAGC_B(__txdesc, __value)		\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 24, 5, 5, __value)
-#define SET_TX_DESC_USB_MAX_LEN(__txdesc, __value)	\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 24, 10, 1, __value)
-#define SET_TX_DESC_MAX_AGG_NUM(__txdesc, __value)	\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 24, 11, 5, __value)
-#define SET_TX_DESC_MCSG1_MAX_LEN(__txdesc, __value)	\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 24, 16, 4, __value)
-#define SET_TX_DESC_MCSG2_MAX_LEN(__txdesc, __value)	\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 24, 20, 4, __value)
-#define SET_TX_DESC_MCSG3_MAX_LEN(__txdesc, __value)	\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 24, 24, 4, __value)
-#define SET_TX_DESC_MCSG7_MAX_LEN(__txdesc, __value)	\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 24, 28, 4, __value)
+static inline void set_tx_desc_max_agg_num(__le32 *__txdesc, u32 __value)
+{
+	le32p_replace_bits((__txdesc + 6), __value, GENMASK(15, 11));
+}
+
 /* Dword 7 */
-#define SET_TX_DESC_TX_DESC_CHECKSUM(__txdesc, __value) \
-	SET_BITS_TO_LE_4BYTE(__txdesc + 28, 0, 16, __value)
-#define SET_TX_DESC_MCSG4_MAX_LEN(__txdesc, __value)	\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 28, 16, 4, __value)
-#define SET_TX_DESC_MCSG5_MAX_LEN(__txdesc, __value)	\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 28, 20, 4, __value)
-#define SET_TX_DESC_MCSG6_MAX_LEN(__txdesc, __value)	\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 28, 24, 4, __value)
-#define SET_TX_DESC_MCSG15_MAX_LEN(__txdesc, __value)	\
-	SET_BITS_TO_LE_4BYTE(__txdesc + 28, 28, 4, __value)
+static inline void set_tx_desc_tx_desc_checksum(__le32 *__txdesc, u32 __value)
+{
+	le32p_replace_bits((__txdesc + 7), __value, GENMASK(15, 0));
+}
 
 int rtl8192cu_endpoint_mapping(struct ieee80211_hw *hw);
 u16 rtl8192cu_mq_to_hwq(__le16 fc, u16 mac80211_queue_index);
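Each replacement above keeps the exact field geometry: a macro that read or wrote len bits starting at bit start of the dword at byte offset off becomes a GENMASK(start + len - 1, start) access on desc + off / 4. One way to convince yourself a mapping is right, here for dword 7's checksum field, is a hypothetical self-check (not part of the patch):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Old style: LE_BITS_TO_4BYTE(desc + 28, 0, 16), an open-coded
 * little-endian read of byte offset 28, bits 15:0.
 */
static u32 checksum_old_style(const u8 *desc)
{
	u32 dw = desc[28] | (u32)desc[29] << 8 |
		 (u32)desc[30] << 16 | (u32)desc[31] << 24;

	return dw & GENMASK(15, 0);
}

/* New style: the same field, but the types say what the buffer holds. */
static u32 checksum_new_style(const __le32 *desc)
{
	return le32_get_bits(*(desc + 7), GENMASK(15, 0));
}

Both functions return identical values for the same descriptor bytes on any host endianness; the typed version is simply checkable by sparse.
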
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h
index fa33b05db052..21726d9b4aef 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h
@@ -26,37 +26,6 @@
 #define	RX_MPDU_QUEUE				0
 #define	RX_CMD_QUEUE				1
 
-#define	C2H_RX_CMD_HDR_LEN			8
-#define	GET_C2H_CMD_CMD_LEN(__prxhdr)		\
-	LE_BITS_TO_4BYTE((__prxhdr), 0, 16)
-#define	GET_C2H_CMD_ELEMENT_ID(__prxhdr)	\
-	LE_BITS_TO_4BYTE((__prxhdr), 16, 8)
-#define	GET_C2H_CMD_CMD_SEQ(__prxhdr)		\
-	LE_BITS_TO_4BYTE((__prxhdr), 24, 7)
-#define	GET_C2H_CMD_CONTINUE(__prxhdr)		\
-	LE_BITS_TO_4BYTE((__prxhdr), 31, 1)
-#define	GET_C2H_CMD_CONTENT(__prxhdr)		\
-	((u8 *)(__prxhdr) + C2H_RX_CMD_HDR_LEN)
-
-#define	GET_C2H_CMD_FEEDBACK_ELEMENT_ID(__pcmdfbhdr)	\
-	LE_BITS_TO_4BYTE((__pcmdfbhdr), 0, 8)
-#define	GET_C2H_CMD_FEEDBACK_CCX_LEN(__pcmdfbhdr)	\
-	LE_BITS_TO_4BYTE((__pcmdfbhdr), 8, 8)
-#define	GET_C2H_CMD_FEEDBACK_CCX_CMD_CNT(__pcmdfbhdr)	\
-	LE_BITS_TO_4BYTE((__pcmdfbhdr), 16, 16)
-#define	GET_C2H_CMD_FEEDBACK_CCX_MAC_ID(__pcmdfbhdr)	\
-	LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 0, 5)
-#define	GET_C2H_CMD_FEEDBACK_CCX_VALID(__pcmdfbhdr)	\
-	LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 7, 1)
-#define	GET_C2H_CMD_FEEDBACK_CCX_RETRY_CNT(__pcmdfbhdr)	\
-	LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 8, 5)
-#define	GET_C2H_CMD_FEEDBACK_CCX_TOK(__pcmdfbhdr)	\
-	LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 15, 1)
-#define	GET_C2H_CMD_FEEDBACK_CCX_QSEL(__pcmdfbhdr)	\
-	LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 16, 4)
-#define	GET_C2H_CMD_FEEDBACK_CCX_SEQ(__pcmdfbhdr)	\
-	LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 20, 12)
-
 enum version_8192d {
 	VERSION_TEST_CHIP_88C = 0x0000,
 	VERSION_TEST_CHIP_92C = 0x0020,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
index d162884a9e00..2494e1f118f8 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
@@ -4,6 +4,7 @@
 #include "../wifi.h"
 #include "../pci.h"
 #include "../base.h"
+#include "../stats.h"
 #include "reg.h"
 #include "def.h"
 #include "phy.h"
@@ -32,21 +33,6 @@ static u8 _rtl92d_query_rxpwrpercentage(s8 antpower)
 		return 100 + antpower;
 }
 
-static u8 _rtl92d_evm_db_to_percentage(s8 value)
-{
-	s8 ret_val = value;
-
-	if (ret_val >= 0)
-		ret_val = 0;
-	if (ret_val <= -33)
-		ret_val = -33;
-	ret_val = 0 - ret_val;
-	ret_val *= 3;
-	if (ret_val == 99)
-		ret_val = 100;
-	return ret_val;
-}
-
 static long _rtl92de_translate_todbm(struct ieee80211_hw *hw,
 				     u8 signal_strength_index)
 {
@@ -215,7 +201,7 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
 		else
 			max_spatial_stream = 1;
 		for (i = 0; i < max_spatial_stream; i++) {
-			evm = _rtl92d_evm_db_to_percentage(p_drvinfo->rxevm[i]);
+			evm = rtl_evm_db_to_percentage(p_drvinfo->rxevm[i]);
 			if (packet_match_bssid) {
 				if (i == 0)
 					pstats->signalquality =
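The helper that replaces _rtl92d_evm_db_to_percentage() above is the shared one from rtlwifi's stats code, and the mapping it must preserve is fully visible in the deleted lines: clamp the EVM report to the range [-33, 0] dB, negate, scale by 3, and round 99 up to 100. A compact restatement of that mapping, as a sketch for illustration and assuming rtl_evm_db_to_percentage() keeps these semantics:

#include <linux/minmax.h>
#include <linux/types.h>

/* -8 dB -> 24%, -33 dB (or lower) -> 100%, >= 0 dB -> 0% */
static u8 evm_db_to_percentage_sketch(s8 value)
{
	int ret_val = clamp(-(int)value, 0, 33) * 3;

	return ret_val == 99 ? 100 : ret_val;
}
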
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
index d297cfc0fd2b..27f1a631b569 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
@@ -34,7 +34,7 @@ static void _rtl92ee_query_rxphystatus(struct ieee80211_hw *hw,
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct phy_status_rpt *p_phystrpt = (struct phy_status_rpt *)p_drvinfo;
-	s8 rx_pwr_all = 0, rx_pwr[4];
+	s8 rx_pwr_all, rx_pwr[4];
 	u8 rf_rx_num = 0, evm, pwdb_all;
 	u8 i, max_spatial_stream;
 	u32 rssi, total_rssi = 0;
@@ -95,6 +95,7 @@ static void _rtl92ee_query_rxphystatus(struct ieee80211_hw *hw,
 			rx_pwr_all = 14 - 2 * vga_idx;
 			break;
 		default:
+			rx_pwr_all = 0;
 			break;
 		}
 		rx_pwr_all += 16;
@@ -271,13 +272,14 @@ static void _rtl92ee_translate_rx_signal_stuff(struct ieee80211_hw *hw,
 }
 
 static void _rtl92ee_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
-				      u8 *virtualaddress)
+				      u8 *virtualaddress8)
 {
-	u32 dwtmp = 0;
+	u32 dwtmp;
+	__le32 *virtualaddress = (__le32 *)virtualaddress8;
 
 	memset(virtualaddress, 0, 8);
 
-	SET_EARLYMODE_PKTNUM(virtualaddress, ptcb_desc->empkt_num);
+	set_earlymode_pktnum(virtualaddress, ptcb_desc->empkt_num);
 	if (ptcb_desc->empkt_num == 1) {
 		dwtmp = ptcb_desc->empkt_len[0];
 	} else {
@@ -285,7 +287,7 @@ static void _rtl92ee_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
 		dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4;
 		dwtmp += ptcb_desc->empkt_len[1];
 	}
-	SET_EARLYMODE_LEN0(virtualaddress, dwtmp);
+	set_earlymode_len0(virtualaddress, dwtmp);
 
 	if (ptcb_desc->empkt_num <= 3) {
 		dwtmp = ptcb_desc->empkt_len[2];
@@ -294,7 +296,7 @@ static void _rtl92ee_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
 		dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4;
 		dwtmp += ptcb_desc->empkt_len[3];
 	}
-	SET_EARLYMODE_LEN1(virtualaddress, dwtmp);
+	set_earlymode_len1(virtualaddress, dwtmp);
 	if (ptcb_desc->empkt_num <= 5) {
 		dwtmp = ptcb_desc->empkt_len[4];
 	} else {
@@ -302,8 +304,8 @@ static void _rtl92ee_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
 		dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4;
 		dwtmp += ptcb_desc->empkt_len[5];
 	}
-	SET_EARLYMODE_LEN2_1(virtualaddress, dwtmp & 0xF);
-	SET_EARLYMODE_LEN2_2(virtualaddress, dwtmp >> 4);
+	set_earlymode_len2_1(virtualaddress, dwtmp & 0xF);
+	set_earlymode_len2_2(virtualaddress, dwtmp >> 4);
 	if (ptcb_desc->empkt_num <= 7) {
 		dwtmp = ptcb_desc->empkt_len[6];
 	} else {
@@ -311,7 +313,7 @@ static void _rtl92ee_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
 		dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4;
 		dwtmp += ptcb_desc->empkt_len[7];
 	}
-	SET_EARLYMODE_LEN3(virtualaddress, dwtmp);
+	set_earlymode_len3(virtualaddress, dwtmp);
 	if (ptcb_desc->empkt_num <= 9) {
 		dwtmp = ptcb_desc->empkt_len[8];
 	} else {
@@ -319,43 +321,44 @@ static void _rtl92ee_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
 		dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4;
 		dwtmp += ptcb_desc->empkt_len[9];
 	}
-	SET_EARLYMODE_LEN4(virtualaddress, dwtmp);
+	set_earlymode_len4(virtualaddress, dwtmp);
 }
 
 bool rtl92ee_rx_query_desc(struct ieee80211_hw *hw,
 			   struct rtl_stats *status,
 			   struct ieee80211_rx_status *rx_status,
-			   u8 *pdesc, struct sk_buff *skb)
+			   u8 *pdesc8, struct sk_buff *skb)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rx_fwinfo *p_drvinfo;
 	struct ieee80211_hdr *hdr;
-	u32 phystatus = GET_RX_DESC_PHYST(pdesc);
+	__le32 *pdesc = (__le32 *)pdesc8;
+	u32 phystatus = get_rx_desc_physt(pdesc);
 	u8 wake_match;
 
-	if (GET_RX_STATUS_DESC_RPT_SEL(pdesc) == 0)
+	if (get_rx_status_desc_rpt_sel(pdesc) == 0)
 		status->packet_report_type = NORMAL_RX;
 	else
 		status->packet_report_type = C2H_PACKET;
-	status->length = (u16)GET_RX_DESC_PKT_LEN(pdesc);
-	status->rx_drvinfo_size = (u8)GET_RX_DESC_DRV_INFO_SIZE(pdesc) *
-				  RX_DRV_INFO_SIZE_UNIT;
-	status->rx_bufshift = (u8)(GET_RX_DESC_SHIFT(pdesc) & 0x03);
-	status->icv = (u16)GET_RX_DESC_ICV(pdesc);
-	status->crc = (u16)GET_RX_DESC_CRC32(pdesc);
+	status->length = (u16)get_rx_desc_pkt_len(pdesc);
+	status->rx_drvinfo_size = (u8)get_rx_desc_drv_info_size(pdesc) *
+				  RX_DRV_INFO_SIZE_UNIT;
+	status->rx_bufshift = (u8)(get_rx_desc_shift(pdesc) & 0x03);
+	status->icv = (u16)get_rx_desc_icv(pdesc);
+	status->crc = (u16)get_rx_desc_crc32(pdesc);
 	status->hwerror = (status->crc | status->icv);
-	status->decrypted = !GET_RX_DESC_SWDEC(pdesc);
-	status->rate = (u8)GET_RX_DESC_RXMCS(pdesc);
-	status->isampdu = (bool)(GET_RX_DESC_PAGGR(pdesc) == 1);
-	status->timestamp_low = GET_RX_DESC_TSFL(pdesc);
+	status->decrypted = !get_rx_desc_swdec(pdesc);
+	status->rate = (u8)get_rx_desc_rxmcs(pdesc);
+	status->isampdu = (bool)(get_rx_desc_paggr(pdesc) == 1);
+	status->timestamp_low = get_rx_desc_tsfl(pdesc);
 	status->is_cck = RTL92EE_RX_HAL_IS_CCK_RATE(status->rate);
-	status->macid = GET_RX_DESC_MACID(pdesc);
-	if (GET_RX_STATUS_DESC_PATTERN_MATCH(pdesc))
+	status->macid = get_rx_desc_macid(pdesc);
+	if (get_rx_status_desc_pattern_match(pdesc))
 		wake_match = BIT(2);
-	else if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc))
+	else if (get_rx_status_desc_magic_match(pdesc))
 		wake_match = BIT(1);
-	else if (GET_RX_STATUS_DESC_UNICAST_MATCH(pdesc))
+	else if (get_rx_status_desc_unicast_match(pdesc))
 		wake_match = BIT(0);
 	else
 		wake_match = 0;
@@ -409,42 +412,43 @@ bool rtl92ee_rx_query_desc(struct ieee80211_hw *hw,
 		p_drvinfo = (struct rx_fwinfo *)(skb->data +
 						 status->rx_bufshift + 24);
 
-		_rtl92ee_translate_rx_signal_stuff(hw, skb, status, pdesc,
+		_rtl92ee_translate_rx_signal_stuff(hw, skb, status, pdesc8,
 						   p_drvinfo);
 	}
 	rx_status->signal = status->recvsignalpower + 10;
 	if (status->packet_report_type == TX_REPORT2) {
 		status->macid_valid_entry[0] =
-			GET_RX_RPT2_DESC_MACID_VALID_1(pdesc);
+			get_rx_rpt2_desc_macid_valid_1(pdesc);
 		status->macid_valid_entry[1] =
-			GET_RX_RPT2_DESC_MACID_VALID_2(pdesc);
+			get_rx_rpt2_desc_macid_valid_2(pdesc);
 	}
 	return true;
 }
 
 /*in Windows, this == Rx_92EE_Interrupt*/
-void rtl92ee_rx_check_dma_ok(struct ieee80211_hw *hw, u8 *header_desc,
+void rtl92ee_rx_check_dma_ok(struct ieee80211_hw *hw, u8 *header_desc8,
 			     u8 queue_index)
 {
 	u8 first_seg = 0;
 	u8 last_seg = 0;
 	u16 total_len = 0;
 	u16 read_cnt = 0;
+	__le32 *header_desc = (__le32 *)header_desc8;
 
 	if (header_desc == NULL)
 		return;
 
-	total_len = (u16)GET_RX_BUFFER_DESC_TOTAL_LENGTH(header_desc);
+	total_len = (u16)get_rx_buffer_desc_total_length(header_desc);
 
-	first_seg = (u8)GET_RX_BUFFER_DESC_FS(header_desc);
+	first_seg = (u8)get_rx_buffer_desc_fs(header_desc);
 
-	last_seg = (u8)GET_RX_BUFFER_DESC_LS(header_desc);
+	last_seg = (u8)get_rx_buffer_desc_ls(header_desc);
 
 	while (total_len == 0 && first_seg == 0 && last_seg == 0) {
 		read_cnt++;
-		total_len = (u16)GET_RX_BUFFER_DESC_TOTAL_LENGTH(header_desc);
-		first_seg = (u8)GET_RX_BUFFER_DESC_FS(header_desc);
-		last_seg = (u8)GET_RX_BUFFER_DESC_LS(header_desc);
+		total_len = (u16)get_rx_buffer_desc_total_length(header_desc);
+		first_seg = (u8)get_rx_buffer_desc_fs(header_desc);
+		last_seg = (u8)get_rx_buffer_desc_ls(header_desc);
 
 		if (read_cnt > 20)
 			break;
@@ -455,8 +459,8 @@ u16 rtl92ee_rx_desc_buff_remained_cnt(struct ieee80211_hw *hw, u8 queue_index)
 {
 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	u16 read_point = 0, write_point = 0, remind_cnt = 0;
-	u32 tmp_4byte = 0;
+	u16 read_point, write_point, remind_cnt;
+	u32 tmp_4byte;
 	static bool start_rx;
 
 	tmp_4byte = rtl_read_dword(rtlpriv, REG_RXQ_TXBD_IDX);
@@ -490,7 +494,7 @@ u16 rtl92ee_rx_desc_buff_remained_cnt(struct ieee80211_hw *hw, u8 queue_index)
 
 static u16 get_desc_addr_fr_q_idx(u16 queue_index)
 {
-	u16 desc_address = REG_BEQ_TXBD_IDX;
+	u16 desc_address;
 
 	switch (queue_index) {
 	case BK_QUEUE:
@@ -521,6 +525,7 @@ static u16 get_desc_addr_fr_q_idx(u16 queue_index)
 		desc_address = REG_BEQ_TXBD_IDX;
 		break;
 	default:
+		desc_address = REG_BEQ_TXBD_IDX;
 		break;
 	}
 	return desc_address;
@@ -530,7 +535,7 @@ u16 rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	u16 point_diff = 0;
-	u16 current_tx_read_point = 0, current_tx_write_point = 0;
+	u16 current_tx_read_point, current_tx_write_point;
 	u32 tmp_4byte;
 
 	tmp_4byte = rtl_read_dword(rtlpriv,
@@ -546,7 +551,7 @@ u16 rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx)
 }
 
 void rtl92ee_pre_fill_tx_bd_desc(struct ieee80211_hw *hw,
-				 u8 *tx_bd_desc, u8 *desc, u8 queue_index,
+				 u8 *tx_bd_desc8, u8 *desc8, u8 queue_index,
 				 struct sk_buff *skb, dma_addr_t addr)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -554,15 +559,17 @@ void rtl92ee_pre_fill_tx_bd_desc(struct ieee80211_hw *hw,
 	u32 pkt_len = skb->len;
 	u16 desc_size = 40; /*tx desc size*/
 	u32 psblen = 0;
-	u16 tx_page_size = 0;
-	u32 total_packet_size = 0;
+	u16 tx_page_size;
+	u32 total_packet_size;
 	u16 current_bd_desc;
-	u8 i = 0;
+	u8 i;
 	u16 real_desc_size = 0x28;
 	u16 append_early_mode_size = 0;
 	u8 segmentnum = 1 << (RTL8192EE_SEG_NUM + 1);
 	dma_addr_t desc_dma_addr;
 	bool dma64 = rtlpriv->cfg->mod_params->dma64;
+	__le32 *desc = (__le32 *)desc8;
+	__le32 *tx_bd_desc = (__le32 *)tx_bd_desc8;
 
 	tx_page_size = 2;
 	current_bd_desc = rtlpci->tx_ring[queue_index].cur_tx_wp;
@@ -589,48 +596,48 @@ void rtl92ee_pre_fill_tx_bd_desc(struct ieee80211_hw *hw,
 			     (current_bd_desc * TX_DESC_SIZE);
 
 	/* Reset */
-	SET_TX_BUFF_DESC_LEN_0(tx_bd_desc, 0);
-	SET_TX_BUFF_DESC_PSB(tx_bd_desc, 0);
-	SET_TX_BUFF_DESC_OWN(tx_bd_desc, 0);
+	set_tx_buff_desc_len_0(tx_bd_desc, 0);
+	set_tx_buff_desc_psb(tx_bd_desc, 0);
+	set_tx_buff_desc_own(tx_bd_desc, 0);
 
 	for (i = 1; i < segmentnum; i++) {
-		SET_TXBUFFER_DESC_LEN_WITH_OFFSET(tx_bd_desc, i, 0);
-		SET_TXBUFFER_DESC_AMSDU_WITH_OFFSET(tx_bd_desc, i, 0);
-		SET_TXBUFFER_DESC_ADD_LOW_WITH_OFFSET(tx_bd_desc, i, 0);
-		SET_TXBUFFER_DESC_ADD_HIGH_WITH_OFFSET(tx_bd_desc, i, 0, dma64);
+		set_txbuffer_desc_len_with_offset(tx_bd_desc, i, 0);
+		set_txbuffer_desc_amsdu_with_offset(tx_bd_desc, i, 0);
+		set_txbuffer_desc_add_low_with_offset(tx_bd_desc, i, 0);
+		set_txbuffer_desc_add_high_with_offset(tx_bd_desc, i, 0, dma64);
 	}
 
 	/* Clear all status */
-	CLEAR_PCI_TX_DESC_CONTENT(desc, TX_DESC_SIZE);
+	clear_pci_tx_desc_content(desc, TX_DESC_SIZE);
 
 	if (rtlpriv->rtlhal.earlymode_enable) {
 		if (queue_index < BEACON_QUEUE) {
 			/* This if needs braces */
-			SET_TX_BUFF_DESC_LEN_0(tx_bd_desc, desc_size + 8);
+			set_tx_buff_desc_len_0(tx_bd_desc, desc_size + 8);
 		} else {
-			SET_TX_BUFF_DESC_LEN_0(tx_bd_desc, desc_size);
+			set_tx_buff_desc_len_0(tx_bd_desc, desc_size);
 		}
 	} else {
-		SET_TX_BUFF_DESC_LEN_0(tx_bd_desc, desc_size);
+		set_tx_buff_desc_len_0(tx_bd_desc, desc_size);
 	}
-	SET_TX_BUFF_DESC_PSB(tx_bd_desc, psblen);
-	SET_TX_BUFF_DESC_ADDR_LOW_0(tx_bd_desc, desc_dma_addr);
-	SET_TX_BUFF_DESC_ADDR_HIGH_0(tx_bd_desc, ((u64)desc_dma_addr >> 32),
+	set_tx_buff_desc_psb(tx_bd_desc, psblen);
+	set_tx_buff_desc_addr_low_0(tx_bd_desc, desc_dma_addr);
+	set_tx_buff_desc_addr_high_0(tx_bd_desc, ((u64)desc_dma_addr >> 32),
 				     dma64);
 
-	SET_TXBUFFER_DESC_LEN_WITH_OFFSET(tx_bd_desc, 1, pkt_len);
+	set_txbuffer_desc_len_with_offset(tx_bd_desc, 1, pkt_len);
 	/* don't using extendsion mode. */
-	SET_TXBUFFER_DESC_AMSDU_WITH_OFFSET(tx_bd_desc, 1, 0);
-	SET_TXBUFFER_DESC_ADD_LOW_WITH_OFFSET(tx_bd_desc, 1, addr);
-	SET_TXBUFFER_DESC_ADD_HIGH_WITH_OFFSET(tx_bd_desc, 1,
+	set_txbuffer_desc_amsdu_with_offset(tx_bd_desc, 1, 0);
+	set_txbuffer_desc_add_low_with_offset(tx_bd_desc, 1, addr);
+	set_txbuffer_desc_add_high_with_offset(tx_bd_desc, 1,
 					       ((u64)addr >> 32), dma64);
 
-	SET_TX_DESC_PKT_SIZE(desc, (u16)(pkt_len));
-	SET_TX_DESC_TX_BUFFER_SIZE(desc, (u16)(pkt_len));
+	set_tx_desc_pkt_size(desc, (u16)(pkt_len));
+	set_tx_desc_tx_buffer_size(desc, (u16)(pkt_len));
 }
 
 void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw,
-			  struct ieee80211_hdr *hdr, u8 *pdesc_tx,
+			  struct ieee80211_hdr *hdr, u8 *pdesc8,
 			  u8 *pbd_desc_tx,
 			  struct ieee80211_tx_info *info,
 			  struct ieee80211_sta *sta,
@@ -642,10 +649,9 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw,
 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
 	struct rtlwifi_tx_info *tx_info = rtl_tx_skb_cb_info(skb);
-	u8 *pdesc = (u8 *)pdesc_tx;
 	u16 seq_number;
 	__le16 fc = hdr->frame_control;
-	unsigned int buf_len = 0;
+	unsigned int buf_len;
 	u8 fw_qsel = _rtl92ee_map_hwqueue_to_fwqueue(skb, hw_queue);
 	bool firstseg = ((hdr->seq_ctrl &
 			  cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0);
@@ -653,7 +659,8 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw,
 			 cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0);
 	dma_addr_t mapping;
 	u8 bw_40 = 0;
-	u8 short_gi = 0;
+	u8 short_gi;
+	__le32 *pdesc = (__le32 *)pdesc8;
 
 	if (mac->opmode == NL80211_IFTYPE_STATION) {
 		bw_40 = mac->bw_40;
@@ -680,7 +687,7 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw,
 	}
 
 	if (pbd_desc_tx != NULL)
-		rtl92ee_pre_fill_tx_bd_desc(hw, pbd_desc_tx, pdesc, hw_queue,
+		rtl92ee_pre_fill_tx_bd_desc(hw, pbd_desc_tx, pdesc8, hw_queue,
 					    skb, mapping);
 
 	if (ieee80211_is_nullfunc(fc) || ieee80211_is_ctl(fc)) {
@@ -689,8 +696,8 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw,
 	}
 	if (firstseg) {
 		if (rtlhal->earlymode_enable) {
-			SET_TX_DESC_PKT_OFFSET(pdesc, 1);
-			SET_TX_DESC_OFFSET(pdesc,
+			set_tx_desc_pkt_offset(pdesc, 1);
+			set_tx_desc_offset(pdesc,
 					   USB_HWDESC_HEADER_LEN + EM_HDR_LEN);
 			if (ptcb_desc->empkt_num) {
 				RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
@@ -700,18 +707,18 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw,
 							  (u8 *)(skb->data));
 			}
 		} else {
-			SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
+			set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN);
 		}
 
-		SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate);
+		set_tx_desc_tx_rate(pdesc, ptcb_desc->hw_rate);
 
 		if (ieee80211_is_mgmt(fc)) {
 			ptcb_desc->use_driver_rate = true;
 		} else {
 			if (rtlpriv->ra.is_special_data) {
 				ptcb_desc->use_driver_rate = true;
-				SET_TX_DESC_TX_RATE(pdesc, DESC_RATE11M);
+				set_tx_desc_tx_rate(pdesc, DESC_RATE11M);
 			} else {
 				ptcb_desc->use_driver_rate = false;
 			}
 		}
@@ -723,46 +730,46 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw,
 		short_gi = (ptcb_desc->use_shortpreamble) ? 1 : 0;
 
 		if (info->flags & IEEE80211_TX_CTL_AMPDU) {
-			SET_TX_DESC_AGG_ENABLE(pdesc, 1);
-			SET_TX_DESC_MAX_AGG_NUM(pdesc, 0x14);
+			set_tx_desc_agg_enable(pdesc, 1);
+			set_tx_desc_max_agg_num(pdesc, 0x14);
 		}
-		SET_TX_DESC_SEQ(pdesc, seq_number);
-		SET_TX_DESC_RTS_ENABLE(pdesc,
+		set_tx_desc_seq(pdesc, seq_number);
+		set_tx_desc_rts_enable(pdesc,
 				       ((ptcb_desc->rts_enable &&
					 !ptcb_desc->cts_enable) ? 1 : 0));
-		SET_TX_DESC_HW_RTS_ENABLE(pdesc, 0);
-		SET_TX_DESC_CTS2SELF(pdesc,
+		set_tx_desc_hw_rts_enable(pdesc, 0);
+		set_tx_desc_cts2self(pdesc,
				     ((ptcb_desc->cts_enable) ? 1 : 0));
-		SET_TX_DESC_RTS_RATE(pdesc, ptcb_desc->rts_rate);
-		SET_TX_DESC_RTS_SC(pdesc, ptcb_desc->rts_sc);
-		SET_TX_DESC_RTS_SHORT(pdesc,
+		set_tx_desc_rts_rate(pdesc, ptcb_desc->rts_rate);
+		set_tx_desc_rts_sc(pdesc, ptcb_desc->rts_sc);
+		set_tx_desc_rts_short(pdesc,
				      ((ptcb_desc->rts_rate <= DESC_RATE54M) ?
				       (ptcb_desc->rts_use_shortpreamble ? 1 : 0) :
				       (ptcb_desc->rts_use_shortgi ? 1 : 0)));
 
 		if (ptcb_desc->tx_enable_sw_calc_duration)
-			SET_TX_DESC_NAV_USE_HDR(pdesc, 1);
+			set_tx_desc_nav_use_hdr(pdesc, 1);
 
 		if (bw_40) {
 			if (ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_20_40) {
-				SET_TX_DESC_DATA_BW(pdesc, 1);
-				SET_TX_DESC_TX_SUB_CARRIER(pdesc, 3);
+				set_tx_desc_data_bw(pdesc, 1);
+				set_tx_desc_tx_sub_carrier(pdesc, 3);
 			} else {
-				SET_TX_DESC_DATA_BW(pdesc, 0);
-				SET_TX_DESC_TX_SUB_CARRIER(pdesc,
-							   mac->cur_40_prime_sc);
+				set_tx_desc_data_bw(pdesc, 0);
+				set_tx_desc_tx_sub_carrier(pdesc,
+							   mac->cur_40_prime_sc);
 			}
 		} else {
-			SET_TX_DESC_DATA_BW(pdesc, 0);
-			SET_TX_DESC_TX_SUB_CARRIER(pdesc, 0);
+			set_tx_desc_data_bw(pdesc, 0);
+			set_tx_desc_tx_sub_carrier(pdesc, 0);
 		}
 
-		SET_TX_DESC_LINIP(pdesc, 0);
+		set_tx_desc_linip(pdesc, 0);
 
 		if (sta) {
 			u8 ampdu_density = sta->ht_cap.ampdu_density;
 
-			SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density);
+			set_tx_desc_ampdu_density(pdesc, ampdu_density);
 		}
 		if (info->control.hw_key) {
 			struct ieee80211_key_conf *key = info->control.hw_key;
 
@@ -771,65 +778,65 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw,
 			case WLAN_CIPHER_SUITE_WEP40:
 			case WLAN_CIPHER_SUITE_WEP104:
 			case WLAN_CIPHER_SUITE_TKIP:
-				SET_TX_DESC_SEC_TYPE(pdesc, 0x1);
+				set_tx_desc_sec_type(pdesc, 0x1);
 				break;
 			case WLAN_CIPHER_SUITE_CCMP:
-				SET_TX_DESC_SEC_TYPE(pdesc, 0x3);
+				set_tx_desc_sec_type(pdesc, 0x3);
 				break;
 			default:
-				SET_TX_DESC_SEC_TYPE(pdesc, 0x0);
+				set_tx_desc_sec_type(pdesc, 0x0);
 				break;
 			}
 		}
 
-		SET_TX_DESC_QUEUE_SEL(pdesc, fw_qsel);
-		SET_TX_DESC_DATA_RATE_FB_LIMIT(pdesc, 0x1F);
-		SET_TX_DESC_RTS_RATE_FB_LIMIT(pdesc, 0xF);
-		SET_TX_DESC_DISABLE_FB(pdesc,
+		set_tx_desc_queue_sel(pdesc, fw_qsel);
+		set_tx_desc_data_rate_fb_limit(pdesc, 0x1F);
+		set_tx_desc_rts_rate_fb_limit(pdesc, 0xF);
+		set_tx_desc_disable_fb(pdesc,
 				       ptcb_desc->disable_ratefallback ? 1 : 0);
-		SET_TX_DESC_USE_RATE(pdesc, ptcb_desc->use_driver_rate ? 1 : 0);
+		set_tx_desc_use_rate(pdesc, ptcb_desc->use_driver_rate ? 1 : 0);
 
-		/*SET_TX_DESC_PWR_STATUS(pdesc, pwr_status);*/
+		/*set_tx_desc_pwr_status(pdesc, pwr_status);*/
 		/* Set TxRate and RTSRate in TxDesc */
 		/* This prevent Tx initial rate of new-coming packets */
 		/* from being overwritten by retried  packet rate.*/
 		if (!ptcb_desc->use_driver_rate) {
-			/*SET_TX_DESC_RTS_RATE(pdesc, 0x08); */
-			/* SET_TX_DESC_TX_RATE(pdesc, 0x0b); */
+			/*set_tx_desc_rts_rate(pdesc, 0x08); */
+			/* set_tx_desc_tx_rate(pdesc, 0x0b); */
 		}
 		if (ieee80211_is_data_qos(fc)) {
 			if (mac->rdg_en) {
 				RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
 					 "Enable RDG function.\n");
-				SET_TX_DESC_RDG_ENABLE(pdesc, 1);
-				SET_TX_DESC_HTC(pdesc, 1);
+				set_tx_desc_rdg_enable(pdesc, 1);
+				set_tx_desc_htc(pdesc, 1);
 			}
 		}
 		/* tx report */
-		rtl_set_tx_report(ptcb_desc, pdesc, hw, tx_info);
+		rtl_set_tx_report(ptcb_desc, pdesc8, hw, tx_info);
 	}
 
-	SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0));
-	SET_TX_DESC_LAST_SEG(pdesc, (lastseg ? 1 : 0));
-	SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
+	set_tx_desc_first_seg(pdesc, (firstseg ? 1 : 0));
+	set_tx_desc_last_seg(pdesc, (lastseg ? 1 : 0));
+	set_tx_desc_tx_buffer_address(pdesc, mapping);
 	if (rtlpriv->dm.useramask) {
-		SET_TX_DESC_RATE_ID(pdesc, ptcb_desc->ratr_index);
-		SET_TX_DESC_MACID(pdesc, ptcb_desc->mac_id);
+		set_tx_desc_rate_id(pdesc, ptcb_desc->ratr_index);
+		set_tx_desc_macid(pdesc, ptcb_desc->mac_id);
 	} else {
-		SET_TX_DESC_RATE_ID(pdesc, 0xC + ptcb_desc->ratr_index);
-		SET_TX_DESC_MACID(pdesc, ptcb_desc->ratr_index);
+		set_tx_desc_rate_id(pdesc, 0xC + ptcb_desc->ratr_index);
+		set_tx_desc_macid(pdesc, ptcb_desc->ratr_index);
 	}
 
-	SET_TX_DESC_MORE_FRAG(pdesc, (lastseg ? 0 : 1));
+	set_tx_desc_more_frag(pdesc, (lastseg ? 0 : 1));
 	if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
 	    is_broadcast_ether_addr(ieee80211_get_DA(hdr))) {
-		SET_TX_DESC_BMC(pdesc, 1);
+		set_tx_desc_bmc(pdesc, 1);
 	}
 
 	RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
 }
 
 void rtl92ee_tx_fill_cmddesc(struct ieee80211_hw *hw,
-			     u8 *pdesc, bool firstseg,
+			     u8 *pdesc8, bool firstseg,
 			     bool lastseg, struct sk_buff *skb)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -839,61 +846,63 @@ void rtl92ee_tx_fill_cmddesc(struct ieee80211_hw *hw,
 					    skb->data, skb->len,
 					    PCI_DMA_TODEVICE);
 	u8 txdesc_len = 40;
+	__le32 *pdesc = (__le32 *)pdesc8;
 
 	if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
 		RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
 			 "DMA mapping error\n");
 		return;
 	}
-	CLEAR_PCI_TX_DESC_CONTENT(pdesc, txdesc_len);
+	clear_pci_tx_desc_content(pdesc, txdesc_len);
 
 	if (firstseg)
-		SET_TX_DESC_OFFSET(pdesc, txdesc_len);
+		set_tx_desc_offset(pdesc, txdesc_len);
 
-	SET_TX_DESC_TX_RATE(pdesc, DESC_RATE1M);
+	set_tx_desc_tx_rate(pdesc, DESC_RATE1M);
 
-	SET_TX_DESC_SEQ(pdesc, 0);
+	set_tx_desc_seq(pdesc, 0);
 
-	SET_TX_DESC_LINIP(pdesc, 0);
+	set_tx_desc_linip(pdesc, 0);
 
-	SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue);
+	set_tx_desc_queue_sel(pdesc, fw_queue);
 
-	SET_TX_DESC_FIRST_SEG(pdesc, 1);
-	SET_TX_DESC_LAST_SEG(pdesc, 1);
+	set_tx_desc_first_seg(pdesc, 1);
+	set_tx_desc_last_seg(pdesc, 1);
 
-	SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len));
+	set_tx_desc_tx_buffer_size(pdesc, (u16)(skb->len));
 
-	SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
+	set_tx_desc_tx_buffer_address(pdesc, mapping);
 
-	SET_TX_DESC_RATE_ID(pdesc, 7);
-	SET_TX_DESC_MACID(pdesc, 0);
+	set_tx_desc_rate_id(pdesc, 7);
+	set_tx_desc_macid(pdesc, 0);
 
-	SET_TX_DESC_OWN(pdesc, 1);
+	set_tx_desc_own(pdesc, 1);
 
-	SET_TX_DESC_PKT_SIZE((u8 *)pdesc, (u16)(skb->len));
+	set_tx_desc_pkt_size(pdesc, (u16)(skb->len));
 
-	SET_TX_DESC_FIRST_SEG(pdesc, 1);
-	SET_TX_DESC_LAST_SEG(pdesc, 1);
+	set_tx_desc_first_seg(pdesc, 1);
+	set_tx_desc_last_seg(pdesc, 1);
 
-	SET_TX_DESC_OFFSET(pdesc, 40);
+	set_tx_desc_offset(pdesc, 40);
 
-	SET_TX_DESC_USE_RATE(pdesc, 1);
+	set_tx_desc_use_rate(pdesc, 1);
 
 	RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
 		      "H2C Tx Cmd Content\n", pdesc, txdesc_len);
 }
 
-void rtl92ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+void rtl92ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc8, bool istx,
 		      u8 desc_name, u8 *val)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	u8 q_idx = *val;
 	bool dma64 = rtlpriv->cfg->mod_params->dma64;
+	__le32 *pdesc = (__le32 *)pdesc8;
 
 	if (istx) {
 		switch (desc_name) {
 		case HW_DESC_TX_NEXTDESC_ADDR:
-			SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *)val);
+			set_tx_desc_next_desc_address(pdesc, *(u32 *)val);
 			break;
 		case HW_DESC_OWN:{
 			struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -903,7 +912,7 @@ void rtl92ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
 			if (q_idx == BEACON_QUEUE) {
 				ring->cur_tx_wp = 0;
 				ring->cur_tx_rp = 0;
-				SET_TX_BUFF_DESC_OWN(pdesc, 1);
+				set_tx_buff_desc_own(pdesc, 1);
 				return;
 			}
 
@@ -919,23 +928,23 @@ void rtl92ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
 	} else {
 		switch (desc_name) {
 		case HW_DESC_RX_PREPARE:
-			SET_RX_BUFFER_DESC_LS(pdesc, 0);
-			SET_RX_BUFFER_DESC_FS(pdesc, 0);
-			SET_RX_BUFFER_DESC_TOTAL_LENGTH(pdesc, 0);
+			set_rx_buffer_desc_ls(pdesc, 0);
+			set_rx_buffer_desc_fs(pdesc, 0);
+			set_rx_buffer_desc_total_length(pdesc, 0);
 
-			SET_RX_BUFFER_DESC_DATA_LENGTH(pdesc,
+			set_rx_buffer_desc_data_length(pdesc,
 						       MAX_RECEIVE_BUFFER_SIZE +
 						       RX_DESC_SIZE);
 
-			SET_RX_BUFFER_PHYSICAL_LOW(pdesc, (*(dma_addr_t *)val) &
+			set_rx_buffer_physical_low(pdesc, (*(dma_addr_t *)val) &
 						   DMA_BIT_MASK(32));
 
-			SET_RX_BUFFER_PHYSICAL_HIGH(pdesc,
+			set_rx_buffer_physical_high(pdesc,
 						    ((u64)(*(dma_addr_t *)val)
 						    >> 32),
 						    dma64);
 			break;
 		case HW_DESC_RXERO:
-			SET_RX_DESC_EOR(pdesc, 1);
+			set_rx_desc_eor(pdesc, 1);
 			break;
 		default:
 			WARN_ONCE(true,
@@ -947,20 +956,21 @@ void rtl92ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
 }
 
 u64 rtl92ee_get_desc(struct ieee80211_hw *hw,
-		     u8 *pdesc, bool istx, u8 desc_name)
+		     u8 *pdesc8, bool istx, u8 desc_name)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	u64 ret = 0;
 	bool dma64 = rtlpriv->cfg->mod_params->dma64;
+	__le32 *pdesc = (__le32 *)pdesc8;
 
 	if (istx) {
 		switch (desc_name) {
 		case HW_DESC_OWN:
-			ret = GET_TX_DESC_OWN(pdesc);
+			ret = get_tx_desc_own(pdesc);
 			break;
 		case HW_DESC_TXBUFF_ADDR:
-			ret = GET_TXBUFFER_DESC_ADDR_LOW(pdesc, 1);
-			ret |= (u64)GET_TXBUFFER_DESC_ADDR_HIGH(pdesc, 1,
+			ret = get_txbuffer_desc_addr_low(pdesc, 1);
+			ret |= (u64)get_txbuffer_desc_addr_high(pdesc, 1,
 								dma64) << 32;
 			break;
 		default:
@@ -972,13 +982,13 @@ u64 rtl92ee_get_desc(struct ieee80211_hw *hw,
 	} else {
 		switch (desc_name) {
 		case HW_DESC_OWN:
-			ret = GET_RX_DESC_OWN(pdesc);
+			ret = get_rx_desc_own(pdesc);
 			break;
 		case HW_DESC_RXPKT_LEN:
-			ret = GET_RX_DESC_PKT_LEN(pdesc);
+			ret = get_rx_desc_pkt_len(pdesc);
 			break;
 		case HW_DESC_RXBUFF_ADDR:
-			ret = GET_RX_DESC_BUFF_ADDR(pdesc);
+			ret = get_rx_desc_buff_addr(pdesc);
 			break;
 		default:
 			WARN_ONCE(true,
@@ -1001,7 +1011,7 @@ bool rtl92ee_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index)
 {
 	u16 cur_tx_rp, cur_tx_wp;
-	u32 tmpu32 = 0;
+	u32 tmpu32;
 
 	tmpu32 =
 		rtl_read_dword(rtlpriv,
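The buffer-descriptor helpers converted in the header that follows all index in units of whole segments: each Tx buffer descriptor segment is four __le32 words (16 bytes, which is why the old (__offset) * 16 byte arithmetic becomes 4 * __offset), with the length and the AMSDU bit in word 0, the DMA address split across words 1 and 2, and word 3 reserved. A sketch of filling one segment under that layout, using a hypothetical helper name rather than code from the patch:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/kernel.h>

static void fill_txbd_segment_sketch(__le32 *bd, u8 i, dma_addr_t addr,
				     u16 len, bool dma64)
{
	__le32 *seg = bd + 4 * i;	/* segment i starts 4 words in */

	le32p_replace_bits(seg, len, GENMASK(15, 0));	/* buffer length */
	le32p_replace_bits(seg, 0, BIT(31));		/* not an AMSDU */
	seg[1] = cpu_to_le32(lower_32_bits(addr));	/* address, low half */
	if (dma64)
		seg[2] = cpu_to_le32(upper_32_bits(addr));	/* high half */
	else
		seg[2] = 0;
}
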
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h
index a9e5e620a653..967cef3a9cbf 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h
@@ -14,466 +14,471 @@
 #define RX_DESC_SIZE				24
 #define MAX_RECEIVE_BUFFER_SIZE			8192
 
-#define SET_TX_DESC_PKT_SIZE(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc, 0, 16, __val)
-#define SET_TX_DESC_OFFSET(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc, 16, 8, __val)
-#define SET_TX_DESC_BMC(__pdesc, __val)			\
-	SET_BITS_TO_LE_4BYTE(__pdesc, 24, 1, __val)
-#define SET_TX_DESC_HTC(__pdesc, __val)			\
-	SET_BITS_TO_LE_4BYTE(__pdesc, 25, 1, __val)
-#define SET_TX_DESC_LAST_SEG(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc, 26, 1, __val)
-#define SET_TX_DESC_FIRST_SEG(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc, 27, 1, __val)
-#define SET_TX_DESC_LINIP(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc, 28, 1, __val)
-#define SET_TX_DESC_NO_ACM(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc, 29, 1, __val)
-#define SET_TX_DESC_GF(__pdesc, __val)			\
-	SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val)
-#define SET_TX_DESC_OWN(__pdesc, __val)			\
-	SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val)
-
-#define GET_TX_DESC_PKT_SIZE(__pdesc)			\
-	LE_BITS_TO_4BYTE(__pdesc, 0, 16)
-#define GET_TX_DESC_OFFSET(__pdesc)			\
-	LE_BITS_TO_4BYTE(__pdesc, 16, 8)
-#define GET_TX_DESC_BMC(__pdesc)			\
-	LE_BITS_TO_4BYTE(__pdesc, 24, 1)
-#define GET_TX_DESC_HTC(__pdesc)			\
-	LE_BITS_TO_4BYTE(__pdesc, 25, 1)
-#define GET_TX_DESC_LAST_SEG(__pdesc)			\
-	LE_BITS_TO_4BYTE(__pdesc, 26, 1)
-#define GET_TX_DESC_FIRST_SEG(__pdesc)			\
-	LE_BITS_TO_4BYTE(__pdesc, 27, 1)
-#define GET_TX_DESC_LINIP(__pdesc)			\
-	LE_BITS_TO_4BYTE(__pdesc, 28, 1)
-#define GET_TX_DESC_NO_ACM(__pdesc)			\
-	LE_BITS_TO_4BYTE(__pdesc, 29, 1)
-#define GET_TX_DESC_GF(__pdesc)				\
-	LE_BITS_TO_4BYTE(__pdesc, 30, 1)
-#define GET_TX_DESC_OWN(__pdesc)			\
-	LE_BITS_TO_4BYTE(__pdesc, 31, 1)
-
-#define SET_TX_DESC_MACID(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+4, 0, 7, __val)
-#define SET_TX_DESC_QUEUE_SEL(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+4, 8, 5, __val)
-#define SET_TX_DESC_RDG_NAV_EXT(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+4, 13, 1, __val)
-#define SET_TX_DESC_LSIG_TXOP_EN(__pdesc, __val)	\
-	SET_BITS_TO_LE_4BYTE(__pdesc+4, 14, 1, __val)
-#define SET_TX_DESC_PIFS(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+4, 15, 1, __val)
-#define SET_TX_DESC_RATE_ID(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+4, 16, 5, __val)
-#define SET_TX_DESC_EN_DESC_ID(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+4, 21, 1, __val)
-#define SET_TX_DESC_SEC_TYPE(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+4, 22, 2, __val)
-#define SET_TX_DESC_PKT_OFFSET(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+4, 24, 5, __val)
-#define SET_TX_DESC_MORE_DATA(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+4, 29, 1, __val)
-#define SET_TX_DESC_TXOP_PS_CAP(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+4, 30, 1, __val)
-#define SET_TX_DESC_TXOP_PS_MODE(__pdesc, __val)	\
-	SET_BITS_TO_LE_4BYTE(__pdesc+4, 31, 1, __val)
-
-#define GET_TX_DESC_MACID(__pdesc)			\
-	LE_BITS_TO_4BYTE(__pdesc+4, 0, 5)
-#define GET_TX_DESC_AGG_ENABLE(__pdesc)			\
-	LE_BITS_TO_4BYTE(__pdesc+4, 5, 1)
-#define GET_TX_DESC_AGG_BREAK(__pdesc)			\
-	LE_BITS_TO_4BYTE(__pdesc+4, 6, 1)
-#define GET_TX_DESC_RDG_ENABLE(__pdesc)			\
-	LE_BITS_TO_4BYTE(__pdesc+4, 7, 1)
-#define GET_TX_DESC_QUEUE_SEL(__pdesc)			\
-	LE_BITS_TO_4BYTE(__pdesc+4, 8, 5)
-#define GET_TX_DESC_RDG_NAV_EXT(__pdesc)		\
-	LE_BITS_TO_4BYTE(__pdesc+4, 13, 1)
-#define GET_TX_DESC_LSIG_TXOP_EN(__pdesc)		\
-	LE_BITS_TO_4BYTE(__pdesc+4, 14, 1)
-#define GET_TX_DESC_PIFS(__pdesc)			\
-	LE_BITS_TO_4BYTE(__pdesc+4, 15, 1)
-#define GET_TX_DESC_RATE_ID(__pdesc)			\
-	LE_BITS_TO_4BYTE(__pdesc+4, 16, 4)
-#define GET_TX_DESC_NAV_USE_HDR(__pdesc)		\
-	LE_BITS_TO_4BYTE(__pdesc+4, 20, 1)
-#define GET_TX_DESC_EN_DESC_ID(__pdesc)			\
-	LE_BITS_TO_4BYTE(__pdesc+4, 21, 1)
-#define GET_TX_DESC_SEC_TYPE(__pdesc)			\
-	LE_BITS_TO_4BYTE(__pdesc+4, 22, 2)
-#define GET_TX_DESC_PKT_OFFSET(__pdesc)			\
-	LE_BITS_TO_4BYTE(__pdesc+4, 24, 5)
-
-#define SET_TX_DESC_PAID(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+8, 0, 9, __val)
-#define SET_TX_DESC_CCA_RTS(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+8, 10, 2, __val)
-#define SET_TX_DESC_AGG_ENABLE(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+8, 12, 1, __val)
-#define SET_TX_DESC_RDG_ENABLE(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+8, 13, 1, __val)
-#define SET_TX_DESC_NULL_0(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+8, 14, 1, __val)
-#define SET_TX_DESC_NULL_1(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE((__pdesc) + 8, 15, 1, __val)
-#define SET_TX_DESC_BK(__pdesc, __val)			\
-	SET_BITS_TO_LE_4BYTE(__pdesc+8, 16, 1, __val)
-#define SET_TX_DESC_MORE_FRAG(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+8, 17, 1, __val)
-#define SET_TX_DESC_RAW(__pdesc, __val)			\
-	SET_BITS_TO_LE_4BYTE(__pdesc+8, 18, 1, __val)
-#define SET_TX_DESC_SPE_RPT(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE((__pdesc) + 8, 19, 1, __val)
-#define SET_TX_DESC_AMPDU_DENSITY(__pdesc, __val)	\
-	SET_BITS_TO_LE_4BYTE(__pdesc+8, 20, 3, __val)
-#define SET_TX_DESC_BT_NULL(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+8, 23, 1, __val)
-#define SET_TX_DESC_GID(__pdesc, __val)			\
-	SET_BITS_TO_LE_4BYTE(__pdesc+8, 24, 6, __val)
-
-#define SET_TX_DESC_WHEADER_LEN(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+12, 0, 4, __val)
-#define SET_TX_DESC_CHK_EN(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+12, 4, 1, __val)
-#define SET_TX_DESC_EARLY_RATE(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+12, 5, 1, __val)
-#define SET_TX_DESC_HWSEQ_SEL(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+12, 6, 2, __val)
-#define SET_TX_DESC_USE_RATE(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+12, 8, 1, __val)
-#define SET_TX_DESC_DISABLE_RTS_FB(__pdesc, __val)	\
-	SET_BITS_TO_LE_4BYTE(__pdesc+12, 9, 1, __val)
-#define SET_TX_DESC_DISABLE_FB(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+12, 10, 1, __val)
-#define SET_TX_DESC_CTS2SELF(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+12, 11, 1, __val)
-#define SET_TX_DESC_RTS_ENABLE(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+12, 12, 1, __val)
-#define SET_TX_DESC_HW_RTS_ENABLE(__pdesc, __val)	\
-	SET_BITS_TO_LE_4BYTE(__pdesc+12, 13, 1, __val)
-#define SET_TX_DESC_HW_PORT_ID(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+12, 14, 1, __val)
-#define SET_TX_DESC_NAV_USE_HDR(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+12, 15, 1, __val)
-#define SET_TX_DESC_USE_MAX_LEN(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+12, 16, 1, __val)
-#define SET_TX_DESC_MAX_AGG_NUM(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+12, 17, 5, __val)
-#define SET_TX_DESC_NDPA(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+12, 22, 2, __val)
-#define SET_TX_DESC_AMPDU_MAX_TIME(__pdesc, __val)	\
-	SET_BITS_TO_LE_4BYTE(__pdesc+12, 24, 8, __val)
+static inline void set_tx_desc_pkt_size(__le32 *__pdesc, u32 __val)
+{
+	le32p_replace_bits(__pdesc, __val, GENMASK(15, 0));
+}
+
+static inline void set_tx_desc_offset(__le32 *__pdesc, u32 __val)
+{
+	le32p_replace_bits(__pdesc, __val, GENMASK(23, 16));
+}
+
+static inline void set_tx_desc_bmc(__le32 *__pdesc, u32 __val)
+{
+	le32p_replace_bits(__pdesc, __val, BIT(24));
+}
+
+static inline void set_tx_desc_htc(__le32 *__pdesc, u32 __val)
+{
+	le32p_replace_bits(__pdesc, __val, BIT(25));
+}
+
+static inline void set_tx_desc_last_seg(__le32 *__pdesc, u32 __val)
+{
+	le32p_replace_bits(__pdesc, __val, BIT(26));
+}
+
+static inline void set_tx_desc_first_seg(__le32 *__pdesc, u32 __val)
+{
+	le32p_replace_bits(__pdesc, __val, BIT(27));
+}
+
+static inline void set_tx_desc_linip(__le32 *__pdesc, u32 __val)
+{
+	le32p_replace_bits(__pdesc, __val, BIT(28));
+}
+
+static inline void set_tx_desc_own(__le32 *__pdesc, u32 __val)
+{
+	le32p_replace_bits(__pdesc, __val, BIT(31));
+}
+
+static inline int get_tx_desc_own(__le32 *__pdesc)
+{
+	return le32_get_bits(*(__pdesc), BIT(31));
+}
+
+static inline void set_tx_desc_macid(__le32 *__pdesc, u32 __val)
+{
+	le32p_replace_bits((__pdesc + 1), __val, GENMASK(6, 0));
+}
+
+static inline void set_tx_desc_queue_sel(__le32 *__pdesc, u32 __val)
+{
+	le32p_replace_bits((__pdesc + 1), __val, GENMASK(12, 8));
+}
+
+static inline void set_tx_desc_rate_id(__le32 *__pdesc, u32 __val)
+{
+	le32p_replace_bits((__pdesc + 1), __val, GENMASK(20, 16));
+}
+
+static inline void set_tx_desc_sec_type(__le32 *__pdesc, u32 __val)
+{
+	le32p_replace_bits((__pdesc + 1), __val, GENMASK(23, 22));
+}
+
+static inline void set_tx_desc_pkt_offset(__le32 *__pdesc, u32 __val)
+{
+	le32p_replace_bits((__pdesc + 1), __val, GENMASK(28, 24));
+}
+
+static inline void set_tx_desc_agg_enable(__le32 *__pdesc, u32 __val)
+{
+	le32p_replace_bits((__pdesc + 2), __val, BIT(12));
+}
+
+static inline void set_tx_desc_rdg_enable(__le32 *__pdesc, u32 __val)
+{
+	le32p_replace_bits((__pdesc + 2), __val, BIT(13));
+}
+
+static inline void set_tx_desc_more_frag(__le32 *__pdesc, u32 __val)
+{
+	le32p_replace_bits((__pdesc + 2), __val, BIT(17));
+}
+
+static inline void set_tx_desc_ampdu_density(__le32 *__pdesc, u32 __val)
+{
+	le32p_replace_bits((__pdesc + 2), __val, GENMASK(22, 20));
+}
+
+static inline void set_tx_desc_use_rate(__le32 *__pdesc, u32 __val)
+{
+	le32p_replace_bits((__pdesc + 3), __val, BIT(8));
+}
+
+static inline void set_tx_desc_disable_fb(__le32 *__pdesc, u32 __val)
+{
+	le32p_replace_bits((__pdesc + 3), __val, BIT(10));
+}
+
+static inline void set_tx_desc_cts2self(__le32 *__pdesc, u32 __val)
+{
+	le32p_replace_bits((__pdesc + 3), __val, BIT(11));
+}
+
+static inline void set_tx_desc_rts_enable(__le32 *__pdesc, u32 __val)
+{
+	le32p_replace_bits((__pdesc + 3), __val, BIT(12));
+}
+
+static inline void set_tx_desc_hw_rts_enable(__le32 *__pdesc, u32 __val)
+{
+	le32p_replace_bits((__pdesc + 3), __val, BIT(13));
+}
+
+static inline void set_tx_desc_nav_use_hdr(__le32 *__pdesc, u32 __val)
+{
+	le32p_replace_bits((__pdesc + 3), __val, BIT(15));
+}
+
+static inline void set_tx_desc_max_agg_num(__le32 *__pdesc, u32 __val)
+{
+	le32p_replace_bits((__pdesc + 3), __val, GENMASK(21, 17));
+}
 
 /* Dword 4 */
-#define SET_TX_DESC_TX_RATE(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+16, 0, 7, __val)
-#define SET_TX_DESC_TRY_RATE(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+16, 7, 1, __val)
-#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc, __val)	\
-	SET_BITS_TO_LE_4BYTE(__pdesc+16, 8, 5, __val)
-#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc, __val)	\
-	SET_BITS_TO_LE_4BYTE(__pdesc+16, 13, 4, __val)
-#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc, __val)	\
-	SET_BITS_TO_LE_4BYTE(__pdesc+16, 17, 1, __val)
-#define SET_TX_DESC_DATA_RETRY_LIMIT(__pdesc, __val)	\
-	SET_BITS_TO_LE_4BYTE(__pdesc+16, 18, 6, __val)
-#define SET_TX_DESC_RTS_RATE(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+16, 24, 5, __val)
-#define SET_TX_DESC_PCTS_ENABLE(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+16, 29, 1, __val)
-#define SET_TX_DESC_PCTS_MASK_IDX(__pdesc, __val)	\
-	SET_BITS_TO_LE_4BYTE(__pdesc+16, 30, 2, __val)
+static inline void set_tx_desc_tx_rate(__le32 *__pdesc, u32 __val)
+{
+	le32p_replace_bits((__pdesc + 4), __val, GENMASK(6, 0));
+}
+
+static inline void set_tx_desc_data_rate_fb_limit(__le32 *__pdesc, u32 __val)
+{
+	le32p_replace_bits((__pdesc + 4), __val, GENMASK(12, 8));
+}
+
+static inline void set_tx_desc_rts_rate_fb_limit(__le32 *__pdesc, u32 __val)
+{
+	le32p_replace_bits((__pdesc + 4), __val, GENMASK(16, 13));
+}
+
+static inline void set_tx_desc_rts_rate(__le32 *__pdesc, u32 __val)
+{
+	le32p_replace_bits((__pdesc + 4), __val, GENMASK(28, 24));
+}
 
 /* Dword 5 */
-#define SET_TX_DESC_TX_SUB_CARRIER(__pdesc, __val)	\
-	SET_BITS_TO_LE_4BYTE(__pdesc+20, 0, 4, __val)
-#define SET_TX_DESC_DATA_SHORT(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+20, 4, 1, __val)
-#define SET_TX_DESC_DATA_BW(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+20, 5, 2, __val)
-#define SET_TX_DESC_DATA_LDPC(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+20, 7, 1, __val)
-#define SET_TX_DESC_DATA_STBC(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+20, 8, 2, __val)
-#define SET_TX_DESC_VCS_STBC(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+20, 10, 2, __val)
-#define SET_TX_DESC_RTS_SHORT(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+20, 12, 1, __val)
-#define SET_TX_DESC_RTS_SC(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+20, 13, 4, __val)
-#define SET_TX_DESC_TX_ANT(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+20, 24, 4, __val)
-#define SET_TX_DESC_TX_POWER_0_PSET(__pdesc, __val)	\
-	SET_BITS_TO_LE_4BYTE(__pdesc+20, 28, 3, __val)
-
-/* Dword 6 */
-#define SET_TX_DESC_SW_DEFINE(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE((__pdesc) + 24, 0, 12, __val)
-#define SET_TX_DESC_ANTSEL_A(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE((__pdesc) + 24, 16, 3, __val)
-#define SET_TX_DESC_ANTSEL_B(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE((__pdesc) + 24, 19, 3, __val)
-#define SET_TX_DESC_ANTSEL_C(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE((__pdesc) + 24, 22, 3, __val)
-#define SET_TX_DESC_ANTSEL_D(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE((__pdesc) + 24, 25, 3, __val)
+static inline void set_tx_desc_tx_sub_carrier(__le32 *__pdesc, u32 __val)
+{
+	le32p_replace_bits((__pdesc + 5), __val, GENMASK(3, 0));
+}
+
+static inline void set_tx_desc_data_bw(__le32 *__pdesc, u32 __val)
+{
+	le32p_replace_bits((__pdesc + 5), __val, GENMASK(6, 5));
+}
+
+static inline void set_tx_desc_rts_short(__le32 *__pdesc, u32 __val)
+{
+	le32p_replace_bits((__pdesc + 5), __val, BIT(12));
+}
+
+static inline void set_tx_desc_rts_sc(__le32 *__pdesc, u32 __val)
+{
+	le32p_replace_bits((__pdesc + 5), __val, GENMASK(16, 13));
+}
 
 /* Dword 7 */
-#define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val)	\
-	SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 16, __val)
-#define SET_TX_DESC_USB_TXAGG_NUM(__pdesc, __val)	\
-	SET_BITS_TO_LE_4BYTE(__pdesc+28, 24, 8, __val)
-
-/* Dword 8 */
-#define SET_TX_DESC_RTS_RC(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+32, 0, 6, __val)
-#define SET_TX_DESC_BAR_RTY_TH(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+32, 6, 2, __val)
-#define SET_TX_DESC_DATA_RC(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+32, 8, 6, __val)
-#define SET_TX_DESC_ENABLE_HW_SELECT(__pdesc, __val)	\
-	SET_BITS_TO_LE_4BYTE(__pdesc+32, 15, 1, __val)
-#define SET_TX_DESC_NEXT_HEAD_PAGE(__pdesc, __val)	\
-	SET_BITS_TO_LE_4BYTE(__pdesc+32, 16, 8, __val)
-#define SET_TX_DESC_TAIL_PAGE(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+32, 24, 8, __val)
+static inline void set_tx_desc_tx_buffer_size(__le32 *__pdesc, u32 __val)
+{
+	le32p_replace_bits((__pdesc + 7), __val, GENMASK(15, 0));
+}
 
 /* Dword 9 */
-#define SET_TX_DESC_PADDING_LENGTH(__pdesc, __val)	\
-	SET_BITS_TO_LE_4BYTE(__pdesc+36, 0, 11, __val)
-#define SET_TX_DESC_TXBF_PATH(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc+36, 11, 1, __val)
-#define SET_TX_DESC_SEQ(__pdesc, __val)			\
-	SET_BITS_TO_LE_4BYTE(__pdesc+36, 12, 12, __val)
-#define SET_TX_DESC_FINAL_DATA_RATE(__pdesc, __val)	\
-	SET_BITS_TO_LE_4BYTE(__pdesc+36, 24, 8, __val)
+static inline void set_tx_desc_seq(__le32 *__pdesc, u32 __val)
+{
+	le32p_replace_bits((__pdesc + 9), __val, GENMASK(23, 12));
+}
 
 /* Dword 10 */
-#define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val)	\
-	SET_BITS_TO_LE_4BYTE(__pdesc+40, 0, 32, __val)
+static inline void set_tx_desc_tx_buffer_address(__le32 *__pdesc, u32 __val)
+{
+	*(__pdesc + 10) = cpu_to_le32(__val);
+}
 
 /* Dword 11*/
-#define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val)	\
-	SET_BITS_TO_LE_4BYTE(__pdesc+48, 0, 32, __val)
-
-#define SET_EARLYMODE_PKTNUM(__paddr, __val)		\
-	SET_BITS_TO_LE_4BYTE(__paddr, 0, 4, __val)
-#define SET_EARLYMODE_LEN0(__paddr, __val)		\
-	SET_BITS_TO_LE_4BYTE(__paddr, 4, 15, __val)
-#define SET_EARLYMODE_LEN1(__paddr, __val)		\
-	SET_BITS_TO_LE_4BYTE(__paddr, 16, 2, __val)
-#define SET_EARLYMODE_LEN1_1(__paddr, __val)		\
-	SET_BITS_TO_LE_4BYTE(__paddr, 19, 13, __val)
-#define SET_EARLYMODE_LEN1_2(__paddr, __val)		\
-	SET_BITS_TO_LE_4BYTE(__paddr+4, 0, 2, __val)
-#define SET_EARLYMODE_LEN2(__paddr, __val)		\
-	SET_BITS_TO_LE_4BYTE(__paddr+4, 2, 15, __val)
-#define SET_EARLYMODE_LEN2_1(__paddr, __val)		\
-	SET_BITS_TO_LE_4BYTE(__paddr, 2, 4, __val)
-#define SET_EARLYMODE_LEN2_2(__paddr, __val)		\
-	SET_BITS_TO_LE_4BYTE(__paddr+4, 0, 8, __val)
-#define SET_EARLYMODE_LEN3(__paddr, __val)		\
-	SET_BITS_TO_LE_4BYTE(__paddr+4, 17, 15, __val)
-#define SET_EARLYMODE_LEN4(__paddr, __val)		\
-	SET_BITS_TO_LE_4BYTE(__paddr+4, 20, 12, __val)
+static inline void set_tx_desc_next_desc_address(__le32 *__pdesc, u32 __val)
+{
+	*(__pdesc + 12) = cpu_to_le32(__val);
+}
+
+static inline void set_earlymode_pktnum(__le32 *__paddr, u32 __val)
+{
+	le32p_replace_bits(__paddr, __val, GENMASK(3, 0));
+}
+
+static inline void set_earlymode_len0(__le32 *__paddr, u32 __val)
+{
+	le32p_replace_bits(__paddr, __val, GENMASK(18, 4));
+}
+
+static inline void set_earlymode_len1(__le32 *__paddr, u32 __val)
+{
+	le32p_replace_bits(__paddr, __val, GENMASK(17, 16));
+}
+
+static inline void set_earlymode_len2_1(__le32 *__paddr, u32 __val)
+{
+	le32p_replace_bits(__paddr, __val, GENMASK(5, 2));
+}
+
+static inline void set_earlymode_len2_2(__le32 *__paddr, u32 __val)
+{
+	le32p_replace_bits((__paddr + 1), __val, GENMASK(7, 0));
+}
+
+static inline void set_earlymode_len3(__le32 *__paddr, u32 __val)
+{
+	le32p_replace_bits((__paddr + 1), __val, GENMASK(31, 17));
+}
+
+static inline void set_earlymode_len4(__le32 *__paddr, u32 __val)
+{
+	le32p_replace_bits((__paddr + 1), __val, GENMASK(31, 20));
+}
 
 /* TX/RX buffer descriptor */
-#define SET_TX_EXTBUFF_DESC_LEN(__pdesc, __val, __set)	\
-	SET_BITS_TO_LE_4BYTE(__pdesc+(__set*16), 0, 16, __val)
-#define SET_TX_EXTBUFF_DESC_ADDR_LOW(__pdesc, __val, __set)\
-	SET_BITS_TO_LE_4BYTE(__pdesc+(__set*16)+4, 0, 32, __val)
-#define SET_TX_EXTBUFF_DESC_ADDR_HIGH(__pdesc, __val, __set)\
-	SET_BITS_TO_LE_4BYTE(__pdesc+(__set*16)+8, 0, 32, __val)
-
 /* for Txfilldescroptor92ee, fill the desc content. */
-#define SET_TXBUFFER_DESC_LEN_WITH_OFFSET(__pdesc, __offset, __val) \
-	SET_BITS_TO_LE_4BYTE((__pdesc) + ((__offset) * 16), 0, 16, __val)
-#define SET_TXBUFFER_DESC_AMSDU_WITH_OFFSET(__pdesc, __offset, __val) \
-	SET_BITS_TO_LE_4BYTE((__pdesc) + ((__offset) * 16), 31, 1, __val)
-#define SET_TXBUFFER_DESC_ADD_LOW_WITH_OFFSET(__pdesc, __offset, __val) \
-	SET_BITS_TO_LE_4BYTE((__pdesc) + ((__offset) * 16) + 4, 0, 32, __val)
-#define SET_TXBUFFER_DESC_ADD_HIGH_WITH_OFFSET(pbd, off, val, dma64) \
-	(dma64 ? SET_BITS_TO_LE_4BYTE((pbd) + ((off) * 16) + 8, 0, 32, val) : 0)
-#define GET_TXBUFFER_DESC_ADDR_LOW(__pdesc, __offset)	\
-	LE_BITS_TO_4BYTE((__pdesc) + ((__offset) * 16) + 4, 0, 32)
-#define GET_TXBUFFER_DESC_ADDR_HIGH(pbd, off, dma64)	\
-	(dma64 ? LE_BITS_TO_4BYTE((pbd) + ((off) * 16) + 8, 0, 32) : 0)
+static inline void set_txbuffer_desc_len_with_offset(__le32 *__pdesc,
+						     u8 __offset, u32 __val)
+{
+	le32p_replace_bits((__pdesc + 4 * __offset), __val,
+			   GENMASK(15, 0));
+}
+
+static inline void set_txbuffer_desc_amsdu_with_offset(__le32 *__pdesc,
+						       u8 __offset, u32 __val)
+{
+	le32p_replace_bits((__pdesc + 4 * __offset), __val, BIT(31));
+}
+
+static inline void set_txbuffer_desc_add_low_with_offset(__le32 *__pdesc,
+							 u8 __offset,
+							 u32 __val)
+{
+	*(__pdesc + 4 * __offset + 1) = cpu_to_le32(__val);
+}
+
+static inline void set_txbuffer_desc_add_high_with_offset(__le32 *pbd, u8 off,
+							  u32 val, bool dma64)
+{
+	if (dma64)
+		*(pbd + 4 * off + 2) = cpu_to_le32(val);
+	else
+		*(pbd + 4 * off + 2) = 0;
+}
+
+static inline u32 get_txbuffer_desc_addr_low(__le32 *__pdesc, u8 __offset)
+{
+	return le32_to_cpu(*((__pdesc + 4 * __offset + 1)));
+}
+
+static inline u32 get_txbuffer_desc_addr_high(__le32 *pbd, u32 off, bool dma64)
+{
+	if (dma64)
+		return le32_to_cpu(*((pbd + 4 * off + 2)));
+	return 0;
+}
 
 /* Dword 0 */
-#define SET_TX_BUFF_DESC_LEN_0(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc, 0, 14, __val)
-#define SET_TX_BUFF_DESC_PSB(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc, 16, 15, __val)
-#define SET_TX_BUFF_DESC_OWN(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val)
+static inline void set_tx_buff_desc_len_0(__le32 *__pdesc, u32 __val)
+{
+	le32p_replace_bits(__pdesc, __val, GENMASK(13, 0));
+}
+
+static inline void set_tx_buff_desc_psb(__le32 *__pdesc, u32 __val)
+{
+	le32p_replace_bits(__pdesc, __val, GENMASK(30, 16));
+}
+
+static inline void set_tx_buff_desc_own(__le32 *__pdesc, u32 __val)
+{
+	le32p_replace_bits(__pdesc, __val, BIT(31));
+}
 
 /* Dword 1 */
-#define SET_TX_BUFF_DESC_ADDR_LOW_0(__pdesc, __val)	\
-	SET_BITS_TO_LE_4BYTE((__pdesc) + 4, 0, 32, __val)
+static inline void set_tx_buff_desc_addr_low_0(__le32 *__pdesc, u32 __val)
+{
+	*(__pdesc + 1) = cpu_to_le32(__val);
+}
+
 /* Dword 2 */
-#define SET_TX_BUFF_DESC_ADDR_HIGH_0(bdesc, val, dma64)	\
-	SET_TXBUFFER_DESC_ADD_HIGH_WITH_OFFSET(bdesc, 0, val, dma64)
-/* Dword 3 / RESERVED 0 */
+static inline void set_tx_buff_desc_addr_high_0(__le32 *pdesc, u32 val,
+						bool dma64)
+{
+	if (dma64)
+		*(pdesc + 2) = cpu_to_le32(val);
+	else
+		*(pdesc + 2) = 0;
+}
 
 /* RX buffer  */
 /* DWORD 0 */
-#define SET_RX_BUFFER_DESC_DATA_LENGTH(__status, __val)	\
-	SET_BITS_TO_LE_4BYTE(__status, 0, 14, __val)
-#define SET_RX_BUFFER_DESC_LS(__status, __val)		\
-	SET_BITS_TO_LE_4BYTE(__status, 15, 1, __val)
-#define SET_RX_BUFFER_DESC_FS(__status, __val)		\
-	SET_BITS_TO_LE_4BYTE(__status, 16, 1, __val)
-#define SET_RX_BUFFER_DESC_TOTAL_LENGTH(__status, __val)	\
-	SET_BITS_TO_LE_4BYTE(__status, 16, 15, __val)
-
-#define GET_RX_BUFFER_DESC_OWN(__status)		\
-	LE_BITS_TO_4BYTE(__status, 31, 1)
-#define GET_RX_BUFFER_DESC_LS(__status)			\
-	LE_BITS_TO_4BYTE(__status, 15, 1)
-#define GET_RX_BUFFER_DESC_FS(__status)			\
-	LE_BITS_TO_4BYTE(__status, 16, 1)
-#define GET_RX_BUFFER_DESC_TOTAL_LENGTH(__status)	\
-	LE_BITS_TO_4BYTE(__status, 16, 15)
+static inline void set_rx_buffer_desc_data_length(__le32 *__status, u32 __val)
+{
+	le32p_replace_bits(__status, __val, GENMASK(13, 0));
+}
+
+static inline void set_rx_buffer_desc_ls(__le32 *__status, u32 __val)
+{
+	le32p_replace_bits(__status, __val, BIT(15));
+}
+
+static inline void set_rx_buffer_desc_fs(__le32 *__status, u32 __val)
+{
+	le32p_replace_bits(__status, __val, BIT(16));
+}
+
+static inline void set_rx_buffer_desc_total_length(__le32 *__status, u32 __val)
+{
+	le32p_replace_bits(__status, __val, GENMASK(30, 16));
+}
+
+static inline int get_rx_buffer_desc_ls(__le32 *__status)
+{
+	return le32_get_bits(*(__status), BIT(15));
+}
+
+static inline int get_rx_buffer_desc_fs(__le32 *__status)
+{
+	return le32_get_bits(*(__status), BIT(16));
+}
+
+static inline int get_rx_buffer_desc_total_length(__le32 *__status)
+{
+	return le32_get_bits(*(__status), GENMASK(30, 16));
+}
 
 /* DWORD 1 */
-#define SET_RX_BUFFER_PHYSICAL_LOW(__status, __val)	\
-	SET_BITS_TO_LE_4BYTE(__status+4, 0, 32, __val)
+static inline void set_rx_buffer_physical_low(__le32 *__status, u32 __val)
+{
+	*(__status + 1) = cpu_to_le32(__val);
+}
 
 /* DWORD 2 */
-#define SET_RX_BUFFER_PHYSICAL_HIGH(__rx_status_desc, __val, dma64)	\
-	(dma64 ? SET_BITS_TO_LE_4BYTE((__rx_status_desc) + 8, 0, 32, __val) : 0)
-
-#define GET_RX_DESC_PKT_LEN(__pdesc)			\
-	LE_BITS_TO_4BYTE(__pdesc, 0, 14)
-#define GET_RX_DESC_CRC32(__pdesc)			\
-	LE_BITS_TO_4BYTE(__pdesc, 14, 1)
-#define GET_RX_DESC_ICV(__pdesc)			\
-	LE_BITS_TO_4BYTE(__pdesc, 15, 1)
-#define GET_RX_DESC_DRV_INFO_SIZE(__pdesc)		\
-	LE_BITS_TO_4BYTE(__pdesc, 16, 4)
-#define GET_RX_DESC_SECURITY(__pdesc)			\
-	LE_BITS_TO_4BYTE(__pdesc, 20, 3)
-#define GET_RX_DESC_QOS(__pdesc)			\
-	LE_BITS_TO_4BYTE(__pdesc, 23, 1)
-#define GET_RX_DESC_SHIFT(__pdesc)			\
-	LE_BITS_TO_4BYTE(__pdesc, 24, 2)
-#define GET_RX_DESC_PHYST(__pdesc)			\
-	LE_BITS_TO_4BYTE(__pdesc, 26, 1)
-#define GET_RX_DESC_SWDEC(__pdesc)			\
-	LE_BITS_TO_4BYTE(__pdesc, 27, 1)
-#define GET_RX_DESC_LS(__pdesc)				\
-	LE_BITS_TO_4BYTE(__pdesc, 28, 1)
-#define GET_RX_DESC_FS(__pdesc)				\
-	LE_BITS_TO_4BYTE(__pdesc, 29, 1)
-#define GET_RX_DESC_EOR(__pdesc)			\
-	LE_BITS_TO_4BYTE(__pdesc, 30, 1)
-#define GET_RX_DESC_OWN(__pdesc)			\
-	LE_BITS_TO_4BYTE(__pdesc, 31, 1)
-
-#define SET_RX_DESC_PKT_LEN(__pdesc, __val)		\
-	SET_BITS_TO_LE_4BYTE(__pdesc, 0, 14, __val)
-#define SET_RX_DESC_EOR(__pdesc, __val)			\
-	SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val)
-#define SET_RX_DESC_OWN(__pdesc, __val)			\
-	SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val)
-
-#define GET_RX_DESC_MACID(__pdesc)			\
-	LE_BITS_TO_4BYTE(__pdesc+4, 0, 7)
-#define GET_RX_DESC_TID(__pdesc)			\
-	LE_BITS_TO_4BYTE(__pdesc+4, 8, 4)
-#define GET_RX_DESC_MACID_VLD(__pdesc)			\
-	LE_BITS_TO_4BYTE(__pdesc+4, 12, 1)
-#define GET_RX_DESC_AMSDU(__pdesc)			\
-	LE_BITS_TO_4BYTE(__pdesc+4, 13, 1)
-#define GET_RX_DESC_RXID_MATCH(__pdesc)			\
-	LE_BITS_TO_4BYTE(__pdesc+4, 14, 1)
-#define GET_RX_DESC_PAGGR(__pdesc)			\
-	LE_BITS_TO_4BYTE(__pdesc+4, 15, 1)
-#define GET_RX_DESC_A1_FIT(__pdesc)			\
-	LE_BITS_TO_4BYTE(__pdesc+4, 16, 4)
-#define
GET_RX_DESC_TCPOFFLOAD_CHKERR(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 20, 1) -#define GET_RX_DESC_TCPOFFLOAD_IPVER(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 21, 1) -#define GET_RX_DESC_TCPOFFLOAD_IS_TCPUDP(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 22, 1) -#define GET_RX_DESC_TCPOFFLOAD_CHK_VLD(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 23, 1) -#define GET_RX_DESC_PAM(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 24, 1) -#define GET_RX_DESC_PWR(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 25, 1) -#define GET_RX_DESC_MD(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 26, 1) -#define GET_RX_DESC_MF(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 27, 1) -#define GET_RX_DESC_TYPE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 28, 2) -#define GET_RX_DESC_MC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 30, 1) -#define GET_RX_DESC_BC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 31, 1) -#define GET_RX_DESC_SEQ(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 0, 12) -#define GET_RX_DESC_FRAG(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 12, 4) -#define GET_RX_DESC_RX_IS_QOS(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 16, 1) -#define GET_RX_STATUS_DESC_RPT_SEL(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 28, 1) - -#define GET_RX_DESC_RXMCS(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 0, 7) -#define GET_RX_DESC_HTC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 10, 1) -#define GET_RX_STATUS_DESC_EOSP(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 11, 1) -#define GET_RX_STATUS_DESC_BSSID_FIT(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 12, 2) -#define GET_RX_STATUS_DESC_DMA_AGG_NUM(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 16, 8) -#define GET_RX_STATUS_DESC_PATTERN_MATCH(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 29, 1) -#define GET_RX_STATUS_DESC_UNICAST_MATCH(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 30, 1) -#define GET_RX_STATUS_DESC_MAGIC_MATCH(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 31, 1) - -#define GET_RX_DESC_TSFL(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+20, 0, 32) - -#define GET_RX_DESC_BUFF_ADDR(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+24, 0, 32) -#define GET_RX_DESC_BUFF_ADDR64(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+28, 0, 32) - -#define SET_RX_DESC_BUFF_ADDR(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 32, __val) -#define SET_RX_DESC_BUFF_ADDR64(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 32, __val) +static inline void set_rx_buffer_physical_high(__le32 *__rx_status_desc, + u32 __val, bool dma64) +{ + if (dma64) + *(__rx_status_desc + 2) = cpu_to_le32(__val); + else + *(__rx_status_desc + 2) = 0; +} + +static inline int get_rx_desc_pkt_len(__le32 *__pdesc) +{ + return le32_get_bits(*__pdesc, GENMASK(13, 0)); +} + +static inline int get_rx_desc_crc32(__le32 *__pdesc) +{ + return le32_get_bits(*__pdesc, BIT(14)); +} + +static inline int get_rx_desc_icv(__le32 *__pdesc) +{ + return le32_get_bits(*__pdesc, BIT(15)); +} + +static inline int get_rx_desc_drv_info_size(__le32 *__pdesc) +{ + return le32_get_bits(*__pdesc, GENMASK(19, 16)); +} + +static inline int get_rx_desc_shift(__le32 *__pdesc) +{ + return le32_get_bits(*__pdesc, GENMASK(25, 24)); +} + +static inline int get_rx_desc_physt(__le32 *__pdesc) +{ + return le32_get_bits(*__pdesc, BIT(26)); +} + +static inline int get_rx_desc_swdec(__le32 *__pdesc) +{ + return le32_get_bits(*__pdesc, BIT(27)); +} + +static inline int get_rx_desc_own(__le32 *__pdesc) +{ + return le32_get_bits(*__pdesc, BIT(31)); +} + +static inline void set_rx_desc_eor(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, BIT(30)); +} + +static inline int get_rx_desc_macid(__le32 *__pdesc) +{ + return 
le32_get_bits(*(__pdesc + 1), GENMASK(6, 0)); +} + +static inline int get_rx_desc_paggr(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 1), BIT(15)); +} + +static inline int get_rx_status_desc_rpt_sel(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 2), BIT(28)); +} + +static inline int get_rx_desc_rxmcs(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 3), GENMASK(6, 0)); +} + +static inline int get_rx_status_desc_pattern_match(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 3), BIT(29)); +} + +static inline int get_rx_status_desc_unicast_match(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 3), BIT(30)); +} + +static inline int get_rx_status_desc_magic_match(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 3), BIT(31)); +} + +static inline u32 get_rx_desc_tsfl(__le32 *__pdesc) +{ + return le32_to_cpu(*((__pdesc + 5))); +} + +static inline u32 get_rx_desc_buff_addr(__le32 *__pdesc) +{ + return le32_to_cpu(*((__pdesc + 6))); +} /* TX report 2 format in Rx desc*/ -#define GET_RX_RPT2_DESC_PKT_LEN(__status) \ - LE_BITS_TO_4BYTE(__status, 0, 9) -#define GET_RX_RPT2_DESC_MACID_VALID_1(__status) \ - LE_BITS_TO_4BYTE(__status+16, 0, 32) -#define GET_RX_RPT2_DESC_MACID_VALID_2(__status) \ - LE_BITS_TO_4BYTE(__status+20, 0, 32) - -#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \ -do { \ - if (_size > TX_DESC_NEXT_DESC_OFFSET) \ - memset(__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET); \ - else \ - memset(__pdesc, 0, _size); \ -} while (0) +static inline u32 get_rx_rpt2_desc_macid_valid_1(__le32 *__status) +{ + return le32_to_cpu(*((__status + 4))); +} + +static inline u32 get_rx_rpt2_desc_macid_valid_2(__le32 *__status) +{ + return le32_to_cpu(*((__status + 5))); +} + +static inline void clear_pci_tx_desc_content(__le32 *__pdesc, int _size) +{ + if (_size > TX_DESC_NEXT_DESC_OFFSET) + memset(__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET); + else + memset(__pdesc, 0, _size); +} #define RTL92EE_RX_HAL_IS_CCK_RATE(rxmcs)\ (rxmcs == DESC_RATE1M ||\ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c index d1d84e7d47a4..1c7ee569f4bf 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c @@ -161,8 +161,6 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps; - rtlpriv->cfg->mod_params->sw_crypto = - rtlpriv->cfg->mod_params->sw_crypto; if (!rtlpriv->psc.inactiveps) pr_info("Power Save off (module option)\n"); if (!rtlpriv->psc.fwctrl_lps) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/def.h index 42958df6b5d4..84505a8500c0 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/def.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/def.h @@ -11,37 +11,6 @@ #define RX_MPDU_QUEUE 0 #define RX_CMD_QUEUE 1 -#define C2H_RX_CMD_HDR_LEN 8 -#define GET_C2H_CMD_CMD_LEN(__prxhdr) \ - LE_BITS_TO_4BYTE((__prxhdr), 0, 16) -#define GET_C2H_CMD_ELEMENT_ID(__prxhdr) \ - LE_BITS_TO_4BYTE((__prxhdr), 16, 8) -#define GET_C2H_CMD_CMD_SEQ(__prxhdr) \ - LE_BITS_TO_4BYTE((__prxhdr), 24, 7) -#define GET_C2H_CMD_CONTINUE(__prxhdr) \ - LE_BITS_TO_4BYTE((__prxhdr), 31, 1) -#define GET_C2H_CMD_CONTENT(__prxhdr) \ - ((u8 *)(__prxhdr) + C2H_RX_CMD_HDR_LEN) - -#define GET_C2H_CMD_FEEDBACK_ELEMENT_ID(__pcmdfbhdr) 
\ - LE_BITS_TO_4BYTE((__pcmdfbhdr), 0, 8) -#define GET_C2H_CMD_FEEDBACK_CCX_LEN(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE((__pcmdfbhdr), 8, 8) -#define GET_C2H_CMD_FEEDBACK_CCX_CMD_CNT(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE((__pcmdfbhdr), 16, 16) -#define GET_C2H_CMD_FEEDBACK_CCX_MAC_ID(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 0, 5) -#define GET_C2H_CMD_FEEDBACK_CCX_VALID(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 7, 1) -#define GET_C2H_CMD_FEEDBACK_CCX_RETRY_CNT(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 8, 5) -#define GET_C2H_CMD_FEEDBACK_CCX_TOK(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 15, 1) -#define GET_C2H_CMD_FEEDBACK_CCX_QSEL(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 16, 4) -#define GET_C2H_CMD_FEEDBACK_CCX_SEQ(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 20, 12) - #define CHIP_BONDING_IDENTIFIER(_value) (((_value)>>22)&0x3) #define CHIP_BONDING_92C_1T2R 0x1 diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c index 4b370410c83c..5702ac6deebf 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c @@ -129,10 +129,6 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps; rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; - rtlpriv->cfg->mod_params->sw_crypto = - rtlpriv->cfg->mod_params->sw_crypto; - rtlpriv->cfg->mod_params->disable_watchdog = - rtlpriv->cfg->mod_params->disable_watchdog; if (rtlpriv->cfg->mod_params->disable_watchdog) pr_info("watchdog disabled\n"); rtlpriv->psc.reg_fwctrl_lps = 3; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c index 90dc91b0d35b..a04ce15d5538 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c @@ -260,28 +260,29 @@ static void translate_rx_signal_stuff(struct ieee80211_hw *hw, bool rtl8723e_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *status, struct ieee80211_rx_status *rx_status, - u8 *pdesc, struct sk_buff *skb) + u8 *pdesc8, struct sk_buff *skb) { struct rx_fwinfo_8723e *p_drvinfo; struct ieee80211_hdr *hdr; - u32 phystatus = GET_RX_DESC_PHYST(pdesc); + __le32 *pdesc = (__le32 *)pdesc8; + u32 phystatus = get_rx_desc_physt(pdesc); - status->length = (u16)GET_RX_DESC_PKT_LEN(pdesc); - status->rx_drvinfo_size = (u8)GET_RX_DESC_DRV_INFO_SIZE(pdesc) * + status->length = (u16)get_rx_desc_pkt_len(pdesc); + status->rx_drvinfo_size = (u8)get_rx_desc_drv_info_size(pdesc) * RX_DRV_INFO_SIZE_UNIT; - status->rx_bufshift = (u8)(GET_RX_DESC_SHIFT(pdesc) & 0x03); - status->icv = (u16)GET_RX_DESC_ICV(pdesc); - status->crc = (u16)GET_RX_DESC_CRC32(pdesc); + status->rx_bufshift = (u8)(get_rx_desc_shift(pdesc) & 0x03); + status->icv = (u16)get_rx_desc_icv(pdesc); + status->crc = (u16)get_rx_desc_crc32(pdesc); status->hwerror = (status->crc | status->icv); - status->decrypted = !GET_RX_DESC_SWDEC(pdesc); - status->rate = (u8)GET_RX_DESC_RXMCS(pdesc); - status->shortpreamble = (u16)GET_RX_DESC_SPLCP(pdesc); - status->isampdu = (bool)(GET_RX_DESC_PAGGR(pdesc) == 1); - status->isfirst_ampdu = (bool)((GET_RX_DESC_PAGGR(pdesc) == 1) && - (GET_RX_DESC_FAGGR(pdesc) == 1)); - status->timestamp_low = GET_RX_DESC_TSFL(pdesc); - status->rx_is40mhzpacket = (bool)GET_RX_DESC_BW(pdesc); - 
status->is_ht = (bool)GET_RX_DESC_RXHT(pdesc); + status->decrypted = !get_rx_desc_swdec(pdesc); + status->rate = (u8)get_rx_desc_rxmcs(pdesc); + status->shortpreamble = (u16)get_rx_desc_splcp(pdesc); + status->isampdu = (bool)(get_rx_desc_paggr(pdesc) == 1); + status->isfirst_ampdu = (bool)((get_rx_desc_paggr(pdesc) == 1) && + (get_rx_desc_faggr(pdesc) == 1)); + status->timestamp_low = get_rx_desc_tsfl(pdesc); + status->rx_is40mhzpacket = (bool)get_rx_desc_bw(pdesc); + status->is_ht = (bool)get_rx_desc_rxht(pdesc); status->is_cck = RX_HAL_IS_CCK_RATE(status->rate); @@ -331,7 +332,7 @@ bool rtl8723e_rx_query_desc(struct ieee80211_hw *hw, p_drvinfo = (struct rx_fwinfo_8723e *)(skb->data + status->rx_bufshift); - translate_rx_signal_stuff(hw, skb, status, pdesc, p_drvinfo); + translate_rx_signal_stuff(hw, skb, status, pdesc8, p_drvinfo); } rx_status->signal = status->recvsignalpower + 10; return true; @@ -350,7 +351,8 @@ void rtl8723e_tx_fill_desc(struct ieee80211_hw *hw, struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); bool b_defaultadapter = true; /* bool b_trigger_ac = false; */ - u8 *pdesc = (u8 *)pdesc_tx; + u8 *pdesc8 = (u8 *)pdesc_tx; + __le32 *pdesc = (__le32 *)pdesc8; u16 seq_number; __le16 fc = hdr->frame_control; u8 fw_qsel = _rtl8723e_map_hwqueue_to_fwqueue(skb, hw_queue); @@ -383,7 +385,7 @@ void rtl8723e_tx_fill_desc(struct ieee80211_hw *hw, rtl_get_tcb_desc(hw, info, sta, skb, ptcb_desc); - CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_8723e)); + clear_pci_tx_desc_content(pdesc, sizeof(struct tx_desc_8723e)); if (ieee80211_is_nullfunc(fc) || ieee80211_is_ctl(fc)) { firstseg = true; @@ -391,58 +393,58 @@ void rtl8723e_tx_fill_desc(struct ieee80211_hw *hw, } if (firstseg) { - SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN); + set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN); - SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate); + set_tx_desc_tx_rate(pdesc, ptcb_desc->hw_rate); if (ptcb_desc->use_shortgi || ptcb_desc->use_shortpreamble) - SET_TX_DESC_DATA_SHORTGI(pdesc, 1); + set_tx_desc_data_shortgi(pdesc, 1); if (info->flags & IEEE80211_TX_CTL_AMPDU) { - SET_TX_DESC_AGG_BREAK(pdesc, 1); - SET_TX_DESC_MAX_AGG_NUM(pdesc, 0x14); + set_tx_desc_agg_break(pdesc, 1); + set_tx_desc_max_agg_num(pdesc, 0x14); } - SET_TX_DESC_SEQ(pdesc, seq_number); + set_tx_desc_seq(pdesc, seq_number); - SET_TX_DESC_RTS_ENABLE(pdesc, + set_tx_desc_rts_enable(pdesc, ((ptcb_desc->rts_enable && !ptcb_desc->cts_enable) ? 1 : 0)); - SET_TX_DESC_HW_RTS_ENABLE(pdesc, + set_tx_desc_hw_rts_enable(pdesc, ((ptcb_desc->rts_enable || ptcb_desc->cts_enable) ? 1 : 0)); - SET_TX_DESC_CTS2SELF(pdesc, + set_tx_desc_cts2self(pdesc, ((ptcb_desc->cts_enable) ? 1 : 0)); - SET_TX_DESC_RTS_STBC(pdesc, + set_tx_desc_rts_stbc(pdesc, ((ptcb_desc->rts_stbc) ? 1 : 0)); - SET_TX_DESC_RTS_RATE(pdesc, ptcb_desc->rts_rate); - SET_TX_DESC_RTS_BW(pdesc, 0); - SET_TX_DESC_RTS_SC(pdesc, ptcb_desc->rts_sc); - SET_TX_DESC_RTS_SHORT(pdesc, + set_tx_desc_rts_rate(pdesc, ptcb_desc->rts_rate); + set_tx_desc_rts_bw(pdesc, 0); + set_tx_desc_rts_sc(pdesc, ptcb_desc->rts_sc); + set_tx_desc_rts_short(pdesc, ((ptcb_desc->rts_rate <= DESC92C_RATE54M) ? (ptcb_desc->rts_use_shortpreamble ? 1 : 0) : (ptcb_desc->rts_use_shortgi ? 
1 : 0))); if (bw_40) { if (ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_20_40) { - SET_TX_DESC_DATA_BW(pdesc, 1); - SET_TX_DESC_TX_SUB_CARRIER(pdesc, 3); + set_tx_desc_data_bw(pdesc, 1); + set_tx_desc_tx_sub_carrier(pdesc, 3); } else { - SET_TX_DESC_DATA_BW(pdesc, 0); - SET_TX_DESC_TX_SUB_CARRIER(pdesc, + set_tx_desc_data_bw(pdesc, 0); + set_tx_desc_tx_sub_carrier(pdesc, mac->cur_40_prime_sc); } } else { - SET_TX_DESC_DATA_BW(pdesc, 0); - SET_TX_DESC_TX_SUB_CARRIER(pdesc, 0); + set_tx_desc_data_bw(pdesc, 0); + set_tx_desc_tx_sub_carrier(pdesc, 0); } - SET_TX_DESC_LINIP(pdesc, 0); - SET_TX_DESC_PKT_SIZE(pdesc, (u16) skb->len); + set_tx_desc_linip(pdesc, 0); + set_tx_desc_pkt_size(pdesc, (u16)skb->len); if (sta) { u8 ampdu_density = sta->ht_cap.ampdu_density; - SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density); + set_tx_desc_ampdu_density(pdesc, ampdu_density); } if (info->control.hw_key) { @@ -453,78 +455,79 @@ void rtl8723e_tx_fill_desc(struct ieee80211_hw *hw, case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: case WLAN_CIPHER_SUITE_TKIP: - SET_TX_DESC_SEC_TYPE(pdesc, 0x1); + set_tx_desc_sec_type(pdesc, 0x1); break; case WLAN_CIPHER_SUITE_CCMP: - SET_TX_DESC_SEC_TYPE(pdesc, 0x3); + set_tx_desc_sec_type(pdesc, 0x3); break; default: - SET_TX_DESC_SEC_TYPE(pdesc, 0x0); + set_tx_desc_sec_type(pdesc, 0x0); break; } } - SET_TX_DESC_PKT_ID(pdesc, 0); - SET_TX_DESC_QUEUE_SEL(pdesc, fw_qsel); + set_tx_desc_pkt_id(pdesc, 0); + set_tx_desc_queue_sel(pdesc, fw_qsel); - SET_TX_DESC_DATA_RATE_FB_LIMIT(pdesc, 0x1F); - SET_TX_DESC_RTS_RATE_FB_LIMIT(pdesc, 0xF); - SET_TX_DESC_DISABLE_FB(pdesc, 0); - SET_TX_DESC_USE_RATE(pdesc, ptcb_desc->use_driver_rate ? 1 : 0); + set_tx_desc_data_rate_fb_limit(pdesc, 0x1F); + set_tx_desc_rts_rate_fb_limit(pdesc, 0xF); + set_tx_desc_disable_fb(pdesc, 0); + set_tx_desc_use_rate(pdesc, ptcb_desc->use_driver_rate ? 1 : 0); if (ieee80211_is_data_qos(fc)) { if (mac->rdg_en) { RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "Enable RDG function.\n"); - SET_TX_DESC_RDG_ENABLE(pdesc, 1); - SET_TX_DESC_HTC(pdesc, 1); + set_tx_desc_rdg_enable(pdesc, 1); + set_tx_desc_htc(pdesc, 1); } } } - SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0)); - SET_TX_DESC_LAST_SEG(pdesc, (lastseg ? 1 : 0)); + set_tx_desc_first_seg(pdesc, (firstseg ? 1 : 0)); + set_tx_desc_last_seg(pdesc, (lastseg ? 1 : 0)); - SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) skb->len); + set_tx_desc_tx_buffer_size(pdesc, (u16)skb->len); - SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping); + set_tx_desc_tx_buffer_address(pdesc, mapping); if (rtlpriv->dm.useramask) { - SET_TX_DESC_RATE_ID(pdesc, ptcb_desc->ratr_index); - SET_TX_DESC_MACID(pdesc, ptcb_desc->mac_id); + set_tx_desc_rate_id(pdesc, ptcb_desc->ratr_index); + set_tx_desc_macid(pdesc, ptcb_desc->mac_id); } else { - SET_TX_DESC_RATE_ID(pdesc, 0xC + ptcb_desc->ratr_index); - SET_TX_DESC_MACID(pdesc, ptcb_desc->ratr_index); + set_tx_desc_rate_id(pdesc, 0xC + ptcb_desc->ratr_index); + set_tx_desc_macid(pdesc, ptcb_desc->ratr_index); } if ((!ieee80211_is_data_qos(fc)) && ppsc->fwctrl_lps) { - SET_TX_DESC_HWSEQ_EN_8723(pdesc, 1); - /* SET_TX_DESC_HWSEQ_EN(pdesc, 1); */ - /* SET_TX_DESC_PKT_ID(pdesc, 8); */ + set_tx_desc_hwseq_en_8723(pdesc, 1); + /* set_tx_desc_hwseq_en(pdesc, 1); */ + /* set_tx_desc_pkt_id(pdesc, 8); */ if (!b_defaultadapter) - SET_TX_DESC_HWSEQ_SEL_8723(pdesc, 1); - /* SET_TX_DESC_QOS(pdesc, 1); */ + set_tx_desc_hwseq_sel_8723(pdesc, 1); + /* set_tx_desc_qos(pdesc, 1); */ } - SET_TX_DESC_MORE_FRAG(pdesc, (lastseg ? 0 : 1)); + set_tx_desc_more_frag(pdesc, (lastseg ? 
0 : 1)); if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) || is_broadcast_ether_addr(ieee80211_get_DA(hdr))) { - SET_TX_DESC_BMC(pdesc, 1); + set_tx_desc_bmc(pdesc, 1); } RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n"); } void rtl8723e_tx_fill_cmddesc(struct ieee80211_hw *hw, - u8 *pdesc, bool firstseg, + u8 *pdesc8, bool firstseg, bool lastseg, struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); u8 fw_queue = QSLT_BEACON; + __le32 *pdesc = (__le32 *)pdesc8; dma_addr_t mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len, @@ -538,44 +541,44 @@ void rtl8723e_tx_fill_cmddesc(struct ieee80211_hw *hw, "DMA mapping error\n"); return; } - CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE); + clear_pci_tx_desc_content(pdesc, TX_DESC_SIZE); if (firstseg) - SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN); + set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN); - SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE1M); + set_tx_desc_tx_rate(pdesc, DESC92C_RATE1M); - SET_TX_DESC_SEQ(pdesc, 0); + set_tx_desc_seq(pdesc, 0); - SET_TX_DESC_LINIP(pdesc, 0); + set_tx_desc_linip(pdesc, 0); - SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue); + set_tx_desc_queue_sel(pdesc, fw_queue); - SET_TX_DESC_FIRST_SEG(pdesc, 1); - SET_TX_DESC_LAST_SEG(pdesc, 1); + set_tx_desc_first_seg(pdesc, 1); + set_tx_desc_last_seg(pdesc, 1); - SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) (skb->len)); + set_tx_desc_tx_buffer_size(pdesc, (u16)(skb->len)); - SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping); + set_tx_desc_tx_buffer_address(pdesc, mapping); - SET_TX_DESC_RATE_ID(pdesc, 7); - SET_TX_DESC_MACID(pdesc, 0); + set_tx_desc_rate_id(pdesc, 7); + set_tx_desc_macid(pdesc, 0); - SET_TX_DESC_OWN(pdesc, 1); + set_tx_desc_own(pdesc, 1); - SET_TX_DESC_PKT_SIZE((u8 *)pdesc, (u16)(skb->len)); + set_tx_desc_pkt_size(pdesc, (u16)(skb->len)); - SET_TX_DESC_FIRST_SEG(pdesc, 1); - SET_TX_DESC_LAST_SEG(pdesc, 1); + set_tx_desc_first_seg(pdesc, 1); + set_tx_desc_last_seg(pdesc, 1); - SET_TX_DESC_OFFSET(pdesc, 0x20); + set_tx_desc_offset(pdesc, 0x20); - SET_TX_DESC_USE_RATE(pdesc, 1); + set_tx_desc_use_rate(pdesc, 1); if (!ieee80211_is_data_qos(fc)) { - SET_TX_DESC_HWSEQ_EN_8723(pdesc, 1); - /* SET_TX_DESC_HWSEQ_EN(pdesc, 1); */ - /* SET_TX_DESC_PKT_ID(pdesc, 8); */ + set_tx_desc_hwseq_en_8723(pdesc, 1); + /* set_tx_desc_hwseq_en(pdesc, 1); */ + /* set_tx_desc_pkt_id(pdesc, 8); */ } RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, @@ -583,16 +586,18 @@ void rtl8723e_tx_fill_cmddesc(struct ieee80211_hw *hw, pdesc, TX_DESC_SIZE); } -void rtl8723e_set_desc(struct ieee80211_hw *hw, u8 *pdesc, +void rtl8723e_set_desc(struct ieee80211_hw *hw, u8 *pdesc8, bool istx, u8 desc_name, u8 *val) { + __le32 *pdesc = (__le32 *)pdesc8; + if (istx == true) { switch (desc_name) { case HW_DESC_OWN: - SET_TX_DESC_OWN(pdesc, 1); + set_tx_desc_own(pdesc, 1); break; case HW_DESC_TX_NEXTDESC_ADDR: - SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *) val); + set_tx_desc_next_desc_address(pdesc, *(u32 *)val); break; default: WARN_ONCE(true, "rtl8723ae: ERR txdesc :%d not processed\n", @@ -602,16 +607,16 @@ void rtl8723e_set_desc(struct ieee80211_hw *hw, u8 *pdesc, } else { switch (desc_name) { case HW_DESC_RXOWN: - SET_RX_DESC_OWN(pdesc, 1); + set_rx_desc_own(pdesc, 1); break; case HW_DESC_RXBUFF_ADDR: - SET_RX_DESC_BUFF_ADDR(pdesc, *(u32 *) val); + set_rx_desc_buff_addr(pdesc, *(u32 *)val); break; case HW_DESC_RXPKT_LEN: - SET_RX_DESC_PKT_LEN(pdesc, *(u32 *) val); + set_rx_desc_pkt_len(pdesc, *(u32 *)val); break; case HW_DESC_RXERO: - 
SET_RX_DESC_EOR(pdesc, 1); + set_rx_desc_eor(pdesc, 1); break; default: WARN_ONCE(true, "rtl8723ae: ERR rxdesc :%d not processed\n", @@ -622,17 +627,18 @@ void rtl8723e_set_desc(struct ieee80211_hw *hw, u8 *pdesc, } u64 rtl8723e_get_desc(struct ieee80211_hw *hw, - u8 *pdesc, bool istx, u8 desc_name) + u8 *pdesc8, bool istx, u8 desc_name) { u32 ret = 0; + __le32 *pdesc = (__le32 *)pdesc8; if (istx == true) { switch (desc_name) { case HW_DESC_OWN: - ret = GET_TX_DESC_OWN(pdesc); + ret = get_tx_desc_own(pdesc); break; case HW_DESC_TXBUFF_ADDR: - ret = GET_TX_DESC_TX_BUFFER_ADDRESS(pdesc); + ret = get_tx_desc_tx_buffer_address(pdesc); break; default: WARN_ONCE(true, "rtl8723ae: ERR txdesc :%d not processed\n", @@ -642,13 +648,13 @@ u64 rtl8723e_get_desc(struct ieee80211_hw *hw, } else { switch (desc_name) { case HW_DESC_OWN: - ret = GET_RX_DESC_OWN(pdesc); + ret = get_rx_desc_own(pdesc); break; case HW_DESC_RXPKT_LEN: - ret = GET_RX_DESC_PKT_LEN(pdesc); + ret = get_rx_desc_pkt_len(pdesc); break; case HW_DESC_RXBUFF_ADDR: - ret = GET_RX_DESC_BUFF_ADDR(pdesc); + ret = get_rx_desc_buff_addr(pdesc); break; default: WARN_ONCE(true, "rtl8723ae: ERR rxdesc :%d not processed\n", diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h index 4a19ea76b290..2d25f62a4d52 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h @@ -14,486 +14,324 @@ #define USB_HWDESC_HEADER_LEN 32 #define CRCLENGTH 4 -#define SET_TX_DESC_PKT_SIZE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 0, 16, __val) -#define SET_TX_DESC_OFFSET(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 16, 8, __val) -#define SET_TX_DESC_BMC(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 24, 1, __val) -#define SET_TX_DESC_HTC(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 25, 1, __val) -#define SET_TX_DESC_LAST_SEG(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 26, 1, __val) -#define SET_TX_DESC_FIRST_SEG(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 27, 1, __val) -#define SET_TX_DESC_LINIP(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 28, 1, __val) -#define SET_TX_DESC_NO_ACM(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 29, 1, __val) -#define SET_TX_DESC_GF(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val) -#define SET_TX_DESC_OWN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val) - -#define GET_TX_DESC_PKT_SIZE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 0, 16) -#define GET_TX_DESC_OFFSET(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 16, 8) -#define GET_TX_DESC_BMC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 24, 1) -#define GET_TX_DESC_HTC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 25, 1) -#define GET_TX_DESC_LAST_SEG(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 26, 1) -#define GET_TX_DESC_FIRST_SEG(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 27, 1) -#define GET_TX_DESC_LINIP(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 28, 1) -#define GET_TX_DESC_NO_ACM(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 29, 1) -#define GET_TX_DESC_GF(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 30, 1) -#define GET_TX_DESC_OWN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 31, 1) - -#define SET_TX_DESC_MACID(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 0, 5, __val) -#define SET_TX_DESC_AGG_BREAK(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 5, 1, __val) -#define SET_TX_DESC_BK(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 6, 1, __val) -#define SET_TX_DESC_RDG_ENABLE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 7, 
1, __val) -#define SET_TX_DESC_QUEUE_SEL(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 8, 5, __val) -#define SET_TX_DESC_RDG_NAV_EXT(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 13, 1, __val) -#define SET_TX_DESC_LSIG_TXOP_EN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 14, 1, __val) -#define SET_TX_DESC_PIFS(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 15, 1, __val) -#define SET_TX_DESC_RATE_ID(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 16, 4, __val) -#define SET_TX_DESC_NAV_USE_HDR(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 20, 1, __val) -#define SET_TX_DESC_EN_DESC_ID(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 21, 1, __val) -#define SET_TX_DESC_SEC_TYPE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 22, 2, __val) -#define SET_TX_DESC_PKT_OFFSET(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 24, 8, __val) - -#define GET_TX_DESC_MACID(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 0, 5) -#define GET_TX_DESC_AGG_ENABLE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 5, 1) -#define GET_TX_DESC_AGG_BREAK(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 6, 1) -#define GET_TX_DESC_RDG_ENABLE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 7, 1) -#define GET_TX_DESC_QUEUE_SEL(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 8, 5) -#define GET_TX_DESC_RDG_NAV_EXT(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 13, 1) -#define GET_TX_DESC_LSIG_TXOP_EN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 14, 1) -#define GET_TX_DESC_PIFS(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 15, 1) -#define GET_TX_DESC_RATE_ID(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 16, 4) -#define GET_TX_DESC_NAV_USE_HDR(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 20, 1) -#define GET_TX_DESC_EN_DESC_ID(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 21, 1) -#define GET_TX_DESC_SEC_TYPE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 22, 2) -#define GET_TX_DESC_PKT_OFFSET(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 24, 8) - -#define SET_TX_DESC_RTS_RC(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 0, 6, __val) -#define SET_TX_DESC_DATA_RC(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 6, 6, __val) -#define SET_TX_DESC_BAR_RTY_TH(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 14, 2, __val) -#define SET_TX_DESC_MORE_FRAG(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 17, 1, __val) -#define SET_TX_DESC_RAW(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 18, 1, __val) -#define SET_TX_DESC_CCX(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 19, 1, __val) -#define SET_TX_DESC_AMPDU_DENSITY(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 20, 3, __val) -#define SET_TX_DESC_ANTSEL_A(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 24, 1, __val) -#define SET_TX_DESC_ANTSEL_B(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 25, 1, __val) -#define SET_TX_DESC_TX_ANT_CCK(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 26, 2, __val) -#define SET_TX_DESC_TX_ANTL(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 28, 2, __val) -#define SET_TX_DESC_TX_ANT_HT(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 30, 2, __val) - -#define GET_TX_DESC_RTS_RC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 0, 6) -#define GET_TX_DESC_DATA_RC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 6, 6) -#define GET_TX_DESC_BAR_RTY_TH(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 14, 2) -#define GET_TX_DESC_MORE_FRAG(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 17, 1) -#define GET_TX_DESC_RAW(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 18, 1) -#define GET_TX_DESC_CCX(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 19, 1) -#define 
GET_TX_DESC_AMPDU_DENSITY(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 20, 3) -#define GET_TX_DESC_ANTSEL_A(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 24, 1) -#define GET_TX_DESC_ANTSEL_B(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 25, 1) -#define GET_TX_DESC_TX_ANT_CCK(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 26, 2) -#define GET_TX_DESC_TX_ANTL(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 28, 2) -#define GET_TX_DESC_TX_ANT_HT(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 30, 2) - -#define SET_TX_DESC_NEXT_HEAP_PAGE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+12, 0, 8, __val) -#define SET_TX_DESC_TAIL_PAGE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+12, 8, 8, __val) -#define SET_TX_DESC_SEQ(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+12, 16, 12, __val) -#define SET_TX_DESC_PKT_ID(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+12, 28, 4, __val) - -#define GET_TX_DESC_NEXT_HEAP_PAGE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 0, 8) -#define GET_TX_DESC_TAIL_PAGE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 8, 8) -#define GET_TX_DESC_SEQ(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 16, 12) -#define GET_TX_DESC_PKT_ID(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 28, 4) +static inline void set_tx_desc_pkt_size(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, GENMASK(15, 0)); +} + +static inline void set_tx_desc_offset(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, GENMASK(23, 16)); +} + +static inline void set_tx_desc_bmc(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, BIT(24)); +} + +static inline void set_tx_desc_htc(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, BIT(25)); +} + +static inline void set_tx_desc_last_seg(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, BIT(26)); +} + +static inline void set_tx_desc_first_seg(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, BIT(27)); +} + +static inline void set_tx_desc_linip(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, BIT(28)); +} + +static inline void set_tx_desc_own(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, BIT(31)); +} + +static inline u32 get_tx_desc_own(__le32 *__pdesc) +{ + return le32_get_bits(*__pdesc, BIT(31)); +} + +static inline void set_tx_desc_macid(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 1), __val, GENMASK(4, 0)); +} + +static inline void set_tx_desc_agg_break(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 1), __val, BIT(5)); +} + +static inline void set_tx_desc_rdg_enable(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 1), __val, BIT(7)); +} + +static inline void set_tx_desc_queue_sel(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 1), __val, GENMASK(12, 8)); +} + +static inline void set_tx_desc_rate_id(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 1), __val, GENMASK(19, 16)); +} + +static inline void set_tx_desc_sec_type(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 1), __val, GENMASK(23, 22)); +} + +static inline void set_tx_desc_more_frag(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 2), __val, BIT(17)); +} + +static inline void set_tx_desc_ampdu_density(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 2), __val, GENMASK(22, 20)); +} + +static inline void set_tx_desc_seq(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 3), __val, GENMASK(27, 16)); +} + +static inline void set_tx_desc_pkt_id(__le32 *__pdesc, u32 __val) +{ + 
le32p_replace_bits((__pdesc + 3), __val, GENMASK(31, 28)); +} /* For RTL8723 */ -#define SET_TX_DESC_TRIGGER_INT(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+12, 30, 1, __val) -#define SET_TX_DESC_HWSEQ_EN_8723(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+12, 31, 1, __val) -#define SET_TX_DESC_HWSEQ_SEL_8723(__txdesc, __value) \ - SET_BITS_TO_LE_4BYTE(__txdesc+16, 6, 2, __value) - -#define SET_TX_DESC_RTS_RATE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 0, 5, __val) -#define SET_TX_DESC_AP_DCFE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 5, 1, __val) -#define SET_TX_DESC_QOS(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 6, 1, __val) -#define SET_TX_DESC_HWSEQ_EN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 7, 1, __val) -#define SET_TX_DESC_USE_RATE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 8, 1, __val) -#define SET_TX_DESC_DISABLE_RTS_FB(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 9, 1, __val) -#define SET_TX_DESC_DISABLE_FB(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 10, 1, __val) -#define SET_TX_DESC_CTS2SELF(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 11, 1, __val) -#define SET_TX_DESC_RTS_ENABLE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 12, 1, __val) -#define SET_TX_DESC_HW_RTS_ENABLE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 13, 1, __val) -#define SET_TX_DESC_PORT_ID(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 14, 1, __val) -#define SET_TX_DESC_WAIT_DCTS(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 18, 1, __val) -#define SET_TX_DESC_CTS2AP_EN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 19, 1, __val) -#define SET_TX_DESC_TX_SUB_CARRIER(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 20, 2, __val) -#define SET_TX_DESC_TX_STBC(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 22, 2, __val) -#define SET_TX_DESC_DATA_SHORT(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 24, 1, __val) -#define SET_TX_DESC_DATA_BW(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 25, 1, __val) -#define SET_TX_DESC_RTS_SHORT(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 26, 1, __val) -#define SET_TX_DESC_RTS_BW(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 27, 1, __val) -#define SET_TX_DESC_RTS_SC(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 28, 2, __val) -#define SET_TX_DESC_RTS_STBC(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 30, 2, __val) - -#define GET_TX_DESC_RTS_RATE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 0, 5) -#define GET_TX_DESC_AP_DCFE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 5, 1) -#define GET_TX_DESC_QOS(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 6, 1) -#define GET_TX_DESC_HWSEQ_EN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 7, 1) -#define GET_TX_DESC_USE_RATE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 8, 1) -#define GET_TX_DESC_DISABLE_RTS_FB(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 9, 1) -#define GET_TX_DESC_DISABLE_FB(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 10, 1) -#define GET_TX_DESC_CTS2SELF(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 11, 1) -#define GET_TX_DESC_RTS_ENABLE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 12, 1) -#define GET_TX_DESC_HW_RTS_ENABLE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 13, 1) -#define GET_TX_DESC_PORT_ID(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 14, 1) -#define GET_TX_DESC_WAIT_DCTS(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 18, 1) -#define GET_TX_DESC_CTS2AP_EN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 19, 1) -#define GET_TX_DESC_TX_SUB_CARRIER(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 20, 2) 
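The read side is symmetric: LE_BITS_TO_4BYTE(__pdesc + byte_off, bit_off, width) becomes le32_get_bits(*(__pdesc + byte_off / 4), GENMASK(bit_off + width - 1, bit_off)). The many GET_* macros deleted in this header without an inline counterpart appear to have no remaining callers and are simply dropped rather than converted. A minimal sketch of a getter, mirroring the tx_sub_carrier field (byte offset 16, bits 20..21) handled nearby (the function name is illustrative, not from the tree):

#include <linux/types.h>	/* u32, __le32 */
#include <linux/bits.h>		/* GENMASK() */
#include <linux/bitfield.h>	/* le32_get_bits() */

/*
 * Old form: LE_BITS_TO_4BYTE(__pdesc + 16, 20, 2)
 * byte offset 16           -> dword index 16 / 4 = 4
 * (bit offset 20, width 2) -> GENMASK(20 + 2 - 1, 20) = GENMASK(21, 20)
 */
static inline u32 get_example_tx_sub_carrier(__le32 *pdesc)
{
	return le32_get_bits(*(pdesc + 4), GENMASK(21, 20));
}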
-#define GET_TX_DESC_TX_STBC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 22, 2) -#define GET_TX_DESC_DATA_SHORT(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 24, 1) -#define GET_TX_DESC_DATA_BW(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 25, 1) -#define GET_TX_DESC_RTS_SHORT(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 26, 1) -#define GET_TX_DESC_RTS_BW(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 27, 1) -#define GET_TX_DESC_RTS_SC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 28, 2) -#define GET_TX_DESC_RTS_STBC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 30, 2) - -#define SET_TX_DESC_TX_RATE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+20, 0, 6, __val) -#define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+20, 6, 1, __val) -#define SET_TX_DESC_CCX_TAG(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+20, 7, 1, __val) -#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+20, 8, 5, __val) -#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+20, 13, 4, __val) -#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+20, 17, 1, __val) -#define SET_TX_DESC_DATA_RETRY_LIMIT(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+20, 18, 6, __val) -#define SET_TX_DESC_USB_TXAGG_NUM(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+20, 24, 8, __val) - -#define GET_TX_DESC_TX_RATE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+20, 0, 6) -#define GET_TX_DESC_DATA_SHORTGI(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+20, 6, 1) -#define GET_TX_DESC_CCX_TAG(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+20, 7, 1) -#define GET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+20, 8, 5) -#define GET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+20, 13, 4) -#define GET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+20, 17, 1) -#define GET_TX_DESC_DATA_RETRY_LIMIT(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+20, 18, 6) -#define GET_TX_DESC_USB_TXAGG_NUM(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+20, 24, 8) - -#define SET_TX_DESC_TXAGC_A(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 5, __val) -#define SET_TX_DESC_TXAGC_B(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+24, 5, 5, __val) -#define SET_TX_DESC_USE_MAX_LEN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+24, 10, 1, __val) -#define SET_TX_DESC_MAX_AGG_NUM(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+24, 11, 5, __val) -#define SET_TX_DESC_MCSG1_MAX_LEN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+24, 16, 4, __val) -#define SET_TX_DESC_MCSG2_MAX_LEN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+24, 20, 4, __val) -#define SET_TX_DESC_MCSG3_MAX_LEN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+24, 24, 4, __val) -#define SET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc, __val)\ - SET_BITS_TO_LE_4BYTE(__pdesc+24, 28, 4, __val) - -#define GET_TX_DESC_TXAGC_A(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+24, 0, 5) -#define GET_TX_DESC_TXAGC_B(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+24, 5, 5) -#define GET_TX_DESC_USE_MAX_LEN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+24, 10, 1) -#define GET_TX_DESC_MAX_AGG_NUM(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+24, 11, 5) -#define GET_TX_DESC_MCSG1_MAX_LEN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+24, 16, 4) -#define GET_TX_DESC_MCSG2_MAX_LEN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+24, 20, 4) -#define GET_TX_DESC_MCSG3_MAX_LEN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+24, 24, 4) -#define GET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+24, 28, 4) - -#define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, 
__val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 16, __val) -#define SET_TX_DESC_MCSG4_MAX_LEN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+28, 16, 4, __val) -#define SET_TX_DESC_MCSG5_MAX_LEN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+28, 20, 4, __val) -#define SET_TX_DESC_MCSG6_MAX_LEN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+28, 24, 4, __val) -#define SET_TX_DESC_MCS15_SGI_MAX_LEN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+28, 28, 4, __val) - -#define GET_TX_DESC_TX_BUFFER_SIZE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+28, 0, 16) -#define GET_TX_DESC_MCSG4_MAX_LEN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+28, 16, 4) -#define GET_TX_DESC_MCSG5_MAX_LEN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+28, 20, 4) -#define GET_TX_DESC_MCSG6_MAX_LEN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+28, 24, 4) -#define GET_TX_DESC_MCS15_SGI_MAX_LEN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+28, 28, 4) - -#define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+32, 0, 32, __val) -#define SET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+36, 0, 32, __val) - -#define GET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+32, 0, 32) -#define GET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+36, 0, 32) - -#define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+40, 0, 32, __val) -#define SET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+44, 0, 32, __val) - -#define GET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+40, 0, 32) -#define GET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+44, 0, 32) - -#define GET_RX_DESC_PKT_LEN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 0, 14) -#define GET_RX_DESC_CRC32(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 14, 1) -#define GET_RX_DESC_ICV(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 15, 1) -#define GET_RX_DESC_DRV_INFO_SIZE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 16, 4) -#define GET_RX_DESC_SECURITY(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 20, 3) -#define GET_RX_DESC_QOS(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 23, 1) -#define GET_RX_DESC_SHIFT(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 24, 2) -#define GET_RX_DESC_PHYST(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 26, 1) -#define GET_RX_DESC_SWDEC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 27, 1) -#define GET_RX_DESC_LS(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 28, 1) -#define GET_RX_DESC_FS(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 29, 1) -#define GET_RX_DESC_EOR(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 30, 1) -#define GET_RX_DESC_OWN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 31, 1) - -#define SET_RX_DESC_PKT_LEN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 0, 14, __val) -#define SET_RX_DESC_EOR(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val) -#define SET_RX_DESC_OWN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val) - -#define GET_RX_DESC_MACID(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 0, 5) -#define GET_RX_DESC_TID(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 5, 4) -#define GET_RX_DESC_HWRSVD(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 9, 5) -#define GET_RX_DESC_PAGGR(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 14, 1) -#define GET_RX_DESC_FAGGR(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 15, 1) -#define GET_RX_DESC_A1_FIT(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 16, 4) -#define GET_RX_DESC_A2_FIT(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 20, 4) -#define GET_RX_DESC_PAM(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 24, 1) -#define GET_RX_DESC_PWR(__pdesc) \ - 
LE_BITS_TO_4BYTE(__pdesc+4, 25, 1) -#define GET_RX_DESC_MD(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 26, 1) -#define GET_RX_DESC_MF(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 27, 1) -#define GET_RX_DESC_TYPE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 28, 2) -#define GET_RX_DESC_MC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 30, 1) -#define GET_RX_DESC_BC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 31, 1) -#define GET_RX_DESC_SEQ(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 0, 12) -#define GET_RX_DESC_FRAG(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 12, 4) -#define GET_RX_DESC_NEXT_PKT_LEN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 16, 14) -#define GET_RX_DESC_NEXT_IND(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 30, 1) -#define GET_RX_DESC_RSVD(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 31, 1) - -#define GET_RX_DESC_RXMCS(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 0, 6) -#define GET_RX_DESC_RXHT(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 6, 1) -#define GET_RX_DESC_SPLCP(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 8, 1) -#define GET_RX_DESC_BW(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 9, 1) -#define GET_RX_DESC_HTC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 10, 1) -#define GET_RX_DESC_HWPC_ERR(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 14, 1) -#define GET_RX_DESC_HWPC_IND(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 15, 1) -#define GET_RX_DESC_IV0(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 16, 16) - -#define GET_RX_DESC_IV1(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 0, 32) -#define GET_RX_DESC_TSFL(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+20, 0, 32) - -#define GET_RX_DESC_BUFF_ADDR(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+24, 0, 32) -#define GET_RX_DESC_BUFF_ADDR64(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+28, 0, 32) - -#define SET_RX_DESC_BUFF_ADDR(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 32, __val) -#define SET_RX_DESC_BUFF_ADDR64(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 32, __val) - -#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \ -do { \ - if (_size > TX_DESC_NEXT_DESC_OFFSET) \ - memset(__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET); \ - else \ - memset(__pdesc, 0, _size); \ -} while (0) +static inline void set_tx_desc_hwseq_en_8723(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 3), __val, BIT(31)); +} + +static inline void set_tx_desc_hwseq_sel_8723(__le32 *__txdesc, u32 __value) +{ + le32p_replace_bits((__txdesc + 4), __value, GENMASK(7, 6)); +} + +static inline void set_tx_desc_rts_rate(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, GENMASK(4, 0)); +} + +static inline void set_tx_desc_use_rate(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, BIT(8)); +} + +static inline void set_tx_desc_disable_fb(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, BIT(10)); +} + +static inline void set_tx_desc_cts2self(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, BIT(11)); +} + +static inline void set_tx_desc_rts_enable(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, BIT(12)); +} + +static inline void set_tx_desc_hw_rts_enable(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, BIT(13)); +} + +static inline void set_tx_desc_tx_sub_carrier(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, GENMASK(21, 20)); +} + +static inline void set_tx_desc_data_bw(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, BIT(25)); +} + +static inline void set_tx_desc_rts_short(__le32 *__pdesc, u32 __val) +{ + 
le32p_replace_bits((__pdesc + 4), __val, BIT(26)); +} + +static inline void set_tx_desc_rts_bw(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, BIT(27)); +} + +static inline void set_tx_desc_rts_sc(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, GENMASK(29, 28)); +} + +static inline void set_tx_desc_rts_stbc(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, GENMASK(31, 30)); +} + +static inline void set_tx_desc_tx_rate(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 5), __val, GENMASK(5, 0)); +} + +static inline void set_tx_desc_data_shortgi(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 5), __val, BIT(6)); +} + +static inline void set_tx_desc_data_rate_fb_limit(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 5), __val, GENMASK(12, 8)); +} + +static inline void set_tx_desc_rts_rate_fb_limit(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 5), __val, GENMASK(16, 13)); +} + +static inline void set_tx_desc_max_agg_num(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 6), __val, GENMASK(15, 11)); +} + +static inline void set_tx_desc_tx_buffer_size(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 7), __val, GENMASK(15, 0)); +} + +static inline void set_tx_desc_tx_buffer_address(__le32 *__pdesc, u32 __val) +{ + *(__pdesc + 8) = cpu_to_le32(__val); +} + +static inline u32 get_tx_desc_tx_buffer_address(__le32 *__pdesc) +{ + return le32_to_cpu(*(__pdesc + 8)); +} + +static inline void set_tx_desc_next_desc_address(__le32 *__pdesc, u32 __val) +{ + *(__pdesc + 10) = cpu_to_le32(__val); +} + +static inline u32 get_rx_desc_pkt_len(__le32 *__pdesc) +{ + return le32_get_bits(*__pdesc, GENMASK(13, 0)); +} + +static inline u32 get_rx_desc_crc32(__le32 *__pdesc) +{ + return le32_get_bits(*__pdesc, BIT(14)); +} + +static inline u32 get_rx_desc_icv(__le32 *__pdesc) +{ + return le32_get_bits(*__pdesc, BIT(15)); +} + +static inline u32 get_rx_desc_drv_info_size(__le32 *__pdesc) +{ + return le32_get_bits(*__pdesc, GENMASK(19, 16)); +} + +static inline u32 get_rx_desc_shift(__le32 *__pdesc) +{ + return le32_get_bits(*__pdesc, GENMASK(25, 24)); +} + +static inline u32 get_rx_desc_physt(__le32 *__pdesc) +{ + return le32_get_bits(*__pdesc, BIT(26)); +} + +static inline u32 get_rx_desc_swdec(__le32 *__pdesc) +{ + return le32_get_bits(*__pdesc, BIT(27)); +} + +static inline u32 get_rx_desc_own(__le32 *__pdesc) +{ + return le32_get_bits(*__pdesc, BIT(31)); +} + +static inline void set_rx_desc_pkt_len(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, GENMASK(13, 0)); +} + +static inline void set_rx_desc_eor(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, BIT(30)); +} + +static inline void set_rx_desc_own(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, BIT(31)); +} + +static inline u32 get_rx_desc_paggr(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 1), BIT(14)); +} + +static inline u32 get_rx_desc_faggr(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 1), BIT(15)); +} + +static inline u32 get_rx_desc_rxmcs(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 3), GENMASK(5, 0)); +} + +static inline u32 get_rx_desc_rxht(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 3), BIT(6)); +} + +static inline u32 get_rx_desc_splcp(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 3), BIT(8)); +} + +static inline u32 get_rx_desc_bw(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 3), 
BIT(9)); +} + +static inline u32 get_rx_desc_tsfl(__le32 *__pdesc) +{ + return le32_to_cpu(*(__pdesc + 5)); +} + +static inline u32 get_rx_desc_buff_addr(__le32 *__pdesc) +{ + return le32_to_cpu(*(__pdesc + 6)); +} + +static inline void set_rx_desc_buff_addr(__le32 *__pdesc, u32 __val) +{ + *(__pdesc + 6) = cpu_to_le32(__val); +} + +static inline void clear_pci_tx_desc_content(__le32 *__pdesc, u32 _size) +{ + if (_size > TX_DESC_NEXT_DESC_OFFSET) + memset(__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET); + else + memset(__pdesc, 0, _size); +} struct rx_fwinfo_8723e { u8 gain_trsw[4]; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c index 00e6254bf82b..3c8528f0ecb3 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c @@ -128,10 +128,6 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps; rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; - rtlpriv->cfg->mod_params->sw_crypto = - rtlpriv->cfg->mod_params->sw_crypto; - rtlpriv->cfg->mod_params->disable_watchdog = - rtlpriv->cfg->mod_params->disable_watchdog; if (rtlpriv->cfg->mod_params->disable_watchdog) pr_info("watchdog disabled\n"); rtlpriv->psc.reg_fwctrl_lps = 2; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c index d87ba03fe78f..b8081e196cdf 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c @@ -26,7 +26,8 @@ static u8 _rtl8723be_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue) } static void _rtl8723be_query_rxphystatus(struct ieee80211_hw *hw, - struct rtl_stats *pstatus, u8 *pdesc, + struct rtl_stats *pstatus, + __le32 *pdesc, struct rx_fwinfo_8723be *p_drvinfo, bool bpacket_match_bssid, bool bpacket_toself, @@ -189,7 +190,7 @@ static void _rtl8723be_query_rxphystatus(struct ieee80211_hw *hw, static void _rtl8723be_translate_rx_signal_stuff(struct ieee80211_hw *hw, struct sk_buff *skb, struct rtl_stats *pstatus, - u8 *pdesc, + __le32 *pdesc, struct rx_fwinfo_8723be *p_drvinfo) { struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); @@ -242,12 +243,12 @@ static void _rtl8723be_translate_rx_signal_stuff(struct ieee80211_hw *hw, } static void _rtl8723be_insert_emcontent(struct rtl_tcb_desc *ptcb_desc, - u8 *virtualaddress) + __le32 *virtualaddress) { u32 dwtmp = 0; memset(virtualaddress, 0, 8); - SET_EARLYMODE_PKTNUM(virtualaddress, ptcb_desc->empkt_num); + set_earlymode_pktnum(virtualaddress, ptcb_desc->empkt_num); if (ptcb_desc->empkt_num == 1) { dwtmp = ptcb_desc->empkt_len[0]; } else { @@ -255,7 +256,7 @@ static void _rtl8723be_insert_emcontent(struct rtl_tcb_desc *ptcb_desc, dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4; dwtmp += ptcb_desc->empkt_len[1]; } - SET_EARLYMODE_LEN0(virtualaddress, dwtmp); + set_earlymode_len0(virtualaddress, dwtmp); if (ptcb_desc->empkt_num <= 3) { dwtmp = ptcb_desc->empkt_len[2]; @@ -264,7 +265,7 @@ static void _rtl8723be_insert_emcontent(struct rtl_tcb_desc *ptcb_desc, dwtmp += ((dwtmp % 4) ? 
(4 - dwtmp % 4) : 0) + 4; dwtmp += ptcb_desc->empkt_len[3]; } - SET_EARLYMODE_LEN1(virtualaddress, dwtmp); + set_earlymode_len1(virtualaddress, dwtmp); if (ptcb_desc->empkt_num <= 5) { dwtmp = ptcb_desc->empkt_len[4]; } else { @@ -272,8 +273,8 @@ static void _rtl8723be_insert_emcontent(struct rtl_tcb_desc *ptcb_desc, dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4; dwtmp += ptcb_desc->empkt_len[5]; } - SET_EARLYMODE_LEN2_1(virtualaddress, dwtmp & 0xF); - SET_EARLYMODE_LEN2_2(virtualaddress, dwtmp >> 4); + set_earlymode_len2_1(virtualaddress, dwtmp & 0xF); + set_earlymode_len2_2(virtualaddress, dwtmp >> 4); if (ptcb_desc->empkt_num <= 7) { dwtmp = ptcb_desc->empkt_len[6]; } else { @@ -281,7 +282,7 @@ static void _rtl8723be_insert_emcontent(struct rtl_tcb_desc *ptcb_desc, dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4; dwtmp += ptcb_desc->empkt_len[7]; } - SET_EARLYMODE_LEN3(virtualaddress, dwtmp); + set_earlymode_len3(virtualaddress, dwtmp); if (ptcb_desc->empkt_num <= 9) { dwtmp = ptcb_desc->empkt_len[8]; } else { @@ -289,51 +290,52 @@ static void _rtl8723be_insert_emcontent(struct rtl_tcb_desc *ptcb_desc, dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4; dwtmp += ptcb_desc->empkt_len[9]; } - SET_EARLYMODE_LEN4(virtualaddress, dwtmp); + set_earlymode_len4(virtualaddress, dwtmp); } bool rtl8723be_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *status, struct ieee80211_rx_status *rx_status, - u8 *pdesc, struct sk_buff *skb) + u8 *pdesc8, struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rx_fwinfo_8723be *p_drvinfo; struct ieee80211_hdr *hdr; u8 wake_match; - u32 phystatus = GET_RX_DESC_PHYST(pdesc); + __le32 *pdesc = (__le32 *)pdesc8; + u32 phystatus = get_rx_desc_physt(pdesc); - status->length = (u16)GET_RX_DESC_PKT_LEN(pdesc); - status->rx_drvinfo_size = (u8)GET_RX_DESC_DRV_INFO_SIZE(pdesc) * + status->length = (u16)get_rx_desc_pkt_len(pdesc); + status->rx_drvinfo_size = (u8)get_rx_desc_drv_info_size(pdesc) * RX_DRV_INFO_SIZE_UNIT; - status->rx_bufshift = (u8)(GET_RX_DESC_SHIFT(pdesc) & 0x03); - status->icv = (u16) GET_RX_DESC_ICV(pdesc); - status->crc = (u16) GET_RX_DESC_CRC32(pdesc); + status->rx_bufshift = (u8)(get_rx_desc_shift(pdesc) & 0x03); + status->icv = (u16)get_rx_desc_icv(pdesc); + status->crc = (u16)get_rx_desc_crc32(pdesc); status->hwerror = (status->crc | status->icv); - status->decrypted = !GET_RX_DESC_SWDEC(pdesc); - status->rate = (u8)GET_RX_DESC_RXMCS(pdesc); - status->shortpreamble = (u16)GET_RX_DESC_SPLCP(pdesc); - status->isampdu = (bool)(GET_RX_DESC_PAGGR(pdesc) == 1); - status->isfirst_ampdu = (bool)(GET_RX_DESC_PAGGR(pdesc) == 1); - status->timestamp_low = GET_RX_DESC_TSFL(pdesc); - status->rx_is40mhzpacket = (bool)GET_RX_DESC_BW(pdesc); - status->bandwidth = (u8)GET_RX_DESC_BW(pdesc); - status->macid = GET_RX_DESC_MACID(pdesc); - status->is_ht = (bool)GET_RX_DESC_RXHT(pdesc); + status->decrypted = !get_rx_desc_swdec(pdesc); + status->rate = (u8)get_rx_desc_rxmcs(pdesc); + status->shortpreamble = (u16)get_rx_desc_splcp(pdesc); + status->isampdu = (bool)(get_rx_desc_paggr(pdesc) == 1); + status->isfirst_ampdu = (bool)(get_rx_desc_paggr(pdesc) == 1); + status->timestamp_low = get_rx_desc_tsfl(pdesc); + status->rx_is40mhzpacket = (bool)get_rx_desc_bw(pdesc); + status->bandwidth = (u8)get_rx_desc_bw(pdesc); + status->macid = get_rx_desc_macid(pdesc); + status->is_ht = (bool)get_rx_desc_rxht(pdesc); status->is_cck = RX_HAL_IS_CCK_RATE(status->rate); - if (GET_RX_STATUS_DESC_RPT_SEL(pdesc)) + if (get_rx_status_desc_rpt_sel(pdesc)) 
status->packet_report_type = C2H_PACKET; else status->packet_report_type = NORMAL_RX; - if (GET_RX_STATUS_DESC_PATTERN_MATCH(pdesc)) + if (get_rx_status_desc_pattern_match(pdesc)) wake_match = BIT(2); - else if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc)) + else if (get_rx_status_desc_magic_match(pdesc)) wake_match = BIT(1); - else if (GET_RX_STATUS_DESC_UNICAST_MATCH(pdesc)) + else if (get_rx_status_desc_unicast_match(pdesc)) wake_match = BIT(0); else wake_match = 0; @@ -392,15 +394,15 @@ bool rtl8723be_rx_query_desc(struct ieee80211_hw *hw, rx_status->signal = status->recvsignalpower + 10; if (status->packet_report_type == TX_REPORT2) { status->macid_valid_entry[0] = - GET_RX_RPT2_DESC_MACID_VALID_1(pdesc); + get_rx_rpt2_desc_macid_valid_1(pdesc); status->macid_valid_entry[1] = - GET_RX_RPT2_DESC_MACID_VALID_2(pdesc); + get_rx_rpt2_desc_macid_valid_2(pdesc); } return true; } void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw, - struct ieee80211_hdr *hdr, u8 *pdesc_tx, + struct ieee80211_hdr *hdr, u8 *pdesc8, u8 *txbd, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, struct sk_buff *skb, u8 hw_queue, struct rtl_tcb_desc *ptcb_desc) @@ -410,7 +412,7 @@ void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw, struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtlpriv); struct rtlwifi_tx_info *tx_info = rtl_tx_skb_cb_info(skb); - u8 *pdesc = (u8 *)pdesc_tx; + __le32 *pdesc = (__le32 *)pdesc8; u16 seq_number; __le16 fc = hdr->frame_control; unsigned int buf_len = 0; @@ -446,78 +448,78 @@ void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw, RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "DMA mapping error\n"); return; } - CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_8723be)); + clear_pci_tx_desc_content(pdesc, sizeof(struct tx_desc_8723be)); if (ieee80211_is_nullfunc(fc) || ieee80211_is_ctl(fc)) { firstseg = true; lastseg = true; } if (firstseg) { if (rtlhal->earlymode_enable) { - SET_TX_DESC_PKT_OFFSET(pdesc, 1); - SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN + + set_tx_desc_pkt_offset(pdesc, 1); + set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN + EM_HDR_LEN); if (ptcb_desc->empkt_num) { RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "Insert 8 byte.pTcb->EMPktNum:%d\n", ptcb_desc->empkt_num); _rtl8723be_insert_emcontent(ptcb_desc, - (u8 *)(skb->data)); + (__le32 *)(skb->data)); } } else { - SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN); + set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN); } /* ptcb_desc->use_driver_rate = true; */ - SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate); + set_tx_desc_tx_rate(pdesc, ptcb_desc->hw_rate); if (ptcb_desc->hw_rate > DESC92C_RATEMCS0) short_gi = (ptcb_desc->use_shortgi) ? 1 : 0; else short_gi = (ptcb_desc->use_shortpreamble) ? 1 : 0; - SET_TX_DESC_DATA_SHORTGI(pdesc, short_gi); + set_tx_desc_data_shortgi(pdesc, short_gi); if (info->flags & IEEE80211_TX_CTL_AMPDU) { - SET_TX_DESC_AGG_ENABLE(pdesc, 1); - SET_TX_DESC_MAX_AGG_NUM(pdesc, 0x14); + set_tx_desc_agg_enable(pdesc, 1); + set_tx_desc_max_agg_num(pdesc, 0x14); } - SET_TX_DESC_SEQ(pdesc, seq_number); - SET_TX_DESC_RTS_ENABLE(pdesc, ((ptcb_desc->rts_enable && + set_tx_desc_seq(pdesc, seq_number); + set_tx_desc_rts_enable(pdesc, ((ptcb_desc->rts_enable && !ptcb_desc->cts_enable) ? 1 : 0)); - SET_TX_DESC_HW_RTS_ENABLE(pdesc, 0); - SET_TX_DESC_CTS2SELF(pdesc, ((ptcb_desc->cts_enable) ? + set_tx_desc_hw_rts_enable(pdesc, 0); + set_tx_desc_cts2self(pdesc, ((ptcb_desc->cts_enable) ? 
1 : 0)); - SET_TX_DESC_RTS_RATE(pdesc, ptcb_desc->rts_rate); + set_tx_desc_rts_rate(pdesc, ptcb_desc->rts_rate); - SET_TX_DESC_RTS_SC(pdesc, ptcb_desc->rts_sc); - SET_TX_DESC_RTS_SHORT(pdesc, + set_tx_desc_rts_sc(pdesc, ptcb_desc->rts_sc); + set_tx_desc_rts_short(pdesc, ((ptcb_desc->rts_rate <= DESC92C_RATE54M) ? (ptcb_desc->rts_use_shortpreamble ? 1 : 0) : (ptcb_desc->rts_use_shortgi ? 1 : 0))); if (ptcb_desc->tx_enable_sw_calc_duration) - SET_TX_DESC_NAV_USE_HDR(pdesc, 1); + set_tx_desc_nav_use_hdr(pdesc, 1); if (bw_40) { if (ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_20_40) { - SET_TX_DESC_DATA_BW(pdesc, 1); - SET_TX_DESC_TX_SUB_CARRIER(pdesc, 3); + set_tx_desc_data_bw(pdesc, 1); + set_tx_desc_tx_sub_carrier(pdesc, 3); } else { - SET_TX_DESC_DATA_BW(pdesc, 0); - SET_TX_DESC_TX_SUB_CARRIER(pdesc, mac->cur_40_prime_sc); + set_tx_desc_data_bw(pdesc, 0); + set_tx_desc_tx_sub_carrier(pdesc, mac->cur_40_prime_sc); } } else { - SET_TX_DESC_DATA_BW(pdesc, 0); - SET_TX_DESC_TX_SUB_CARRIER(pdesc, 0); + set_tx_desc_data_bw(pdesc, 0); + set_tx_desc_tx_sub_carrier(pdesc, 0); } - SET_TX_DESC_LINIP(pdesc, 0); - SET_TX_DESC_PKT_SIZE(pdesc, (u16) skb_len); + set_tx_desc_linip(pdesc, 0); + set_tx_desc_pkt_size(pdesc, (u16)skb_len); if (sta) { u8 ampdu_density = sta->ht_cap.ampdu_density; - SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density); + set_tx_desc_ampdu_density(pdesc, ampdu_density); } if (info->control.hw_key) { struct ieee80211_key_conf *keyconf = @@ -526,23 +528,23 @@ void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw, case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: case WLAN_CIPHER_SUITE_TKIP: - SET_TX_DESC_SEC_TYPE(pdesc, 0x1); + set_tx_desc_sec_type(pdesc, 0x1); break; case WLAN_CIPHER_SUITE_CCMP: - SET_TX_DESC_SEC_TYPE(pdesc, 0x3); + set_tx_desc_sec_type(pdesc, 0x3); break; default: - SET_TX_DESC_SEC_TYPE(pdesc, 0x0); + set_tx_desc_sec_type(pdesc, 0x0); break; } } - SET_TX_DESC_QUEUE_SEL(pdesc, fw_qsel); - SET_TX_DESC_DATA_RATE_FB_LIMIT(pdesc, 0x1F); - SET_TX_DESC_RTS_RATE_FB_LIMIT(pdesc, 0xF); - SET_TX_DESC_DISABLE_FB(pdesc, ptcb_desc->disable_ratefallback ? + set_tx_desc_queue_sel(pdesc, fw_qsel); + set_tx_desc_data_rate_fb_limit(pdesc, 0x1F); + set_tx_desc_rts_rate_fb_limit(pdesc, 0xF); + set_tx_desc_disable_fb(pdesc, ptcb_desc->disable_ratefallback ? 1 : 0); - SET_TX_DESC_USE_RATE(pdesc, ptcb_desc->use_driver_rate ? 1 : 0); + set_tx_desc_use_rate(pdesc, ptcb_desc->use_driver_rate ? 1 : 0); /* Set TxRate and RTSRate in TxDesc */ /* This prevent Tx initial rate of new-coming packets */ @@ -551,46 +553,47 @@ void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw, if (mac->rdg_en) { RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "Enable RDG function.\n"); - SET_TX_DESC_RDG_ENABLE(pdesc, 1); - SET_TX_DESC_HTC(pdesc, 1); + set_tx_desc_rdg_enable(pdesc, 1); + set_tx_desc_htc(pdesc, 1); } } /* tx report */ - rtl_set_tx_report(ptcb_desc, pdesc, hw, tx_info); + rtl_set_tx_report(ptcb_desc, pdesc8, hw, tx_info); } - SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0)); - SET_TX_DESC_LAST_SEG(pdesc, (lastseg ? 1 : 0)); - SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) buf_len); - SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping); + set_tx_desc_first_seg(pdesc, (firstseg ? 1 : 0)); + set_tx_desc_last_seg(pdesc, (lastseg ? 
1 : 0)); + set_tx_desc_tx_buffer_size(pdesc, (u16)buf_len); + set_tx_desc_tx_buffer_address(pdesc, mapping); /* if (rtlpriv->dm.useramask) { */ if (1) { - SET_TX_DESC_RATE_ID(pdesc, ptcb_desc->ratr_index); - SET_TX_DESC_MACID(pdesc, ptcb_desc->mac_id); + set_tx_desc_rate_id(pdesc, ptcb_desc->ratr_index); + set_tx_desc_macid(pdesc, ptcb_desc->mac_id); } else { - SET_TX_DESC_RATE_ID(pdesc, 0xC + ptcb_desc->ratr_index); - SET_TX_DESC_MACID(pdesc, ptcb_desc->mac_id); + set_tx_desc_rate_id(pdesc, 0xC + ptcb_desc->ratr_index); + set_tx_desc_macid(pdesc, ptcb_desc->mac_id); } if (!ieee80211_is_data_qos(fc)) { - SET_TX_DESC_HWSEQ_EN(pdesc, 1); - SET_TX_DESC_HWSEQ_SEL(pdesc, 0); + set_tx_desc_hwseq_en(pdesc, 1); + set_tx_desc_hwseq_sel(pdesc, 0); } - SET_TX_DESC_MORE_FRAG(pdesc, (lastseg ? 0 : 1)); + set_tx_desc_more_frag(pdesc, (lastseg ? 0 : 1)); if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) || is_broadcast_ether_addr(ieee80211_get_DA(hdr))) { - SET_TX_DESC_BMC(pdesc, 1); + set_tx_desc_bmc(pdesc, 1); } RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n"); } -void rtl8723be_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, +void rtl8723be_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc8, bool firstseg, bool lastseg, struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); u8 fw_queue = QSLT_BEACON; + __le32 *pdesc = (__le32 *)pdesc8; dma_addr_t mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len, @@ -601,51 +604,53 @@ void rtl8723be_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, "DMA mapping error\n"); return; } - CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE); + clear_pci_tx_desc_content(pdesc, TX_DESC_SIZE); - SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN); + set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN); - SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE1M); + set_tx_desc_tx_rate(pdesc, DESC92C_RATE1M); - SET_TX_DESC_SEQ(pdesc, 0); + set_tx_desc_seq(pdesc, 0); - SET_TX_DESC_LINIP(pdesc, 0); + set_tx_desc_linip(pdesc, 0); - SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue); + set_tx_desc_queue_sel(pdesc, fw_queue); - SET_TX_DESC_FIRST_SEG(pdesc, 1); - SET_TX_DESC_LAST_SEG(pdesc, 1); + set_tx_desc_first_seg(pdesc, 1); + set_tx_desc_last_seg(pdesc, 1); - SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len)); + set_tx_desc_tx_buffer_size(pdesc, (u16)(skb->len)); - SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping); + set_tx_desc_tx_buffer_address(pdesc, mapping); - SET_TX_DESC_RATE_ID(pdesc, 0); - SET_TX_DESC_MACID(pdesc, 0); + set_tx_desc_rate_id(pdesc, 0); + set_tx_desc_macid(pdesc, 0); - SET_TX_DESC_OWN(pdesc, 1); + set_tx_desc_own(pdesc, 1); - SET_TX_DESC_PKT_SIZE((u8 *)pdesc, (u16)(skb->len)); + set_tx_desc_pkt_size(pdesc, (u16)(skb->len)); - SET_TX_DESC_FIRST_SEG(pdesc, 1); - SET_TX_DESC_LAST_SEG(pdesc, 1); + set_tx_desc_first_seg(pdesc, 1); + set_tx_desc_last_seg(pdesc, 1); - SET_TX_DESC_USE_RATE(pdesc, 1); + set_tx_desc_use_rate(pdesc, 1); RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, "H2C Tx Cmd Content\n", pdesc, TX_DESC_SIZE); } -void rtl8723be_set_desc(struct ieee80211_hw *hw, u8 *pdesc, +void rtl8723be_set_desc(struct ieee80211_hw *hw, u8 *pdesc8, bool istx, u8 desc_name, u8 *val) { + __le32 *pdesc = (__le32 *)pdesc8; + if (istx) { switch (desc_name) { case HW_DESC_OWN: - SET_TX_DESC_OWN(pdesc, 1); + set_tx_desc_own(pdesc, 1); break; case HW_DESC_TX_NEXTDESC_ADDR: - SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *)val); + set_tx_desc_next_desc_address(pdesc, *(u32 *)val); break; default: WARN_ONCE(true, "rtl8723be: ERR txdesc :%d not 
processed\n", @@ -655,16 +660,16 @@ void rtl8723be_set_desc(struct ieee80211_hw *hw, u8 *pdesc, } else { switch (desc_name) { case HW_DESC_RXOWN: - SET_RX_DESC_OWN(pdesc, 1); + set_rx_desc_own(pdesc, 1); break; case HW_DESC_RXBUFF_ADDR: - SET_RX_DESC_BUFF_ADDR(pdesc, *(u32 *)val); + set_rx_desc_buff_addr(pdesc, *(u32 *)val); break; case HW_DESC_RXPKT_LEN: - SET_RX_DESC_PKT_LEN(pdesc, *(u32 *)val); + set_rx_desc_pkt_len(pdesc, *(u32 *)val); break; case HW_DESC_RXERO: - SET_RX_DESC_EOR(pdesc, 1); + set_rx_desc_eor(pdesc, 1); break; default: WARN_ONCE(true, "rtl8723be: ERR rxdesc :%d not process\n", @@ -675,17 +680,18 @@ void rtl8723be_set_desc(struct ieee80211_hw *hw, u8 *pdesc, } u64 rtl8723be_get_desc(struct ieee80211_hw *hw, - u8 *pdesc, bool istx, u8 desc_name) + u8 *pdesc8, bool istx, u8 desc_name) { u32 ret = 0; + __le32 *pdesc = (__le32 *)pdesc8; if (istx) { switch (desc_name) { case HW_DESC_OWN: - ret = GET_TX_DESC_OWN(pdesc); + ret = get_tx_desc_own(pdesc); break; case HW_DESC_TXBUFF_ADDR: - ret = GET_TX_DESC_TX_BUFFER_ADDRESS(pdesc); + ret = get_tx_desc_tx_buffer_address(pdesc); break; default: WARN_ONCE(true, "rtl8723be: ERR txdesc :%d not process\n", @@ -695,13 +701,13 @@ u64 rtl8723be_get_desc(struct ieee80211_hw *hw, } else { switch (desc_name) { case HW_DESC_OWN: - ret = GET_RX_DESC_OWN(pdesc); + ret = get_rx_desc_own(pdesc); break; case HW_DESC_RXPKT_LEN: - ret = GET_RX_DESC_PKT_LEN(pdesc); + ret = get_rx_desc_pkt_len(pdesc); break; case HW_DESC_RXBUFF_ADDR: - ret = GET_RX_DESC_BUFF_ADDR(pdesc); + ret = get_rx_desc_buff_addr(pdesc); break; default: WARN_ONCE(true, "rtl8723be: ERR rxdesc :%d not processed\n", diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h index 11e75a4e68bd..174aca20c7e1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h @@ -14,351 +14,385 @@ #define USB_HWDESC_HEADER_LEN 40 #define CRCLENGTH 4 -#define SET_TX_DESC_PKT_SIZE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 0, 16, __val) -#define SET_TX_DESC_OFFSET(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 16, 8, __val) -#define SET_TX_DESC_BMC(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 24, 1, __val) -#define SET_TX_DESC_HTC(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 25, 1, __val) -#define SET_TX_DESC_LAST_SEG(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 26, 1, __val) -#define SET_TX_DESC_FIRST_SEG(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 27, 1, __val) -#define SET_TX_DESC_LINIP(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 28, 1, __val) -#define SET_TX_DESC_NO_ACM(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 29, 1, __val) -#define SET_TX_DESC_GF(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val) -#define SET_TX_DESC_OWN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val) - -#define GET_TX_DESC_PKT_SIZE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 0, 16) -#define GET_TX_DESC_OFFSET(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 16, 8) -#define GET_TX_DESC_BMC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 24, 1) -#define GET_TX_DESC_HTC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 25, 1) -#define GET_TX_DESC_LAST_SEG(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 26, 1) -#define GET_TX_DESC_FIRST_SEG(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 27, 1) -#define GET_TX_DESC_LINIP(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 28, 1) -#define GET_TX_DESC_NO_ACM(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 29, 1) -#define GET_TX_DESC_GF(__pdesc) \ - 
LE_BITS_TO_4BYTE(__pdesc, 30, 1) -#define GET_TX_DESC_OWN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 31, 1) - -#define SET_TX_DESC_MACID(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 0, 7, __val) -#define SET_TX_DESC_QUEUE_SEL(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 8, 5, __val) -#define SET_TX_DESC_RDG_NAV_EXT(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 13, 1, __val) -#define SET_TX_DESC_LSIG_TXOP_EN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 14, 1, __val) -#define SET_TX_DESC_PIFS(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 15, 1, __val) -#define SET_TX_DESC_RATE_ID(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 16, 5, __val) -#define SET_TX_DESC_EN_DESC_ID(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 21, 1, __val) -#define SET_TX_DESC_SEC_TYPE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 22, 2, __val) -#define SET_TX_DESC_PKT_OFFSET(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 24, 5, __val) - - -#define SET_TX_DESC_PAID(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 0, 9, __val) -#define SET_TX_DESC_CCA_RTS(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 10, 2, __val) -#define SET_TX_DESC_AGG_ENABLE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 12, 1, __val) -#define SET_TX_DESC_RDG_ENABLE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 13, 1, __val) -#define SET_TX_DESC_BAR_RTY_TH(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE((__pdesc) + 8, 14, 2, __val) -#define SET_TX_DESC_AGG_BREAK(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 16, 1, __val) -#define SET_TX_DESC_MORE_FRAG(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 17, 1, __val) -#define SET_TX_DESC_RAW(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 18, 1, __val) -#define SET_TX_DESC_SPE_RPT(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE((__pdesc) + 8, 19, 1, __val) -#define SET_TX_DESC_AMPDU_DENSITY(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 20, 3, __val) -#define SET_TX_DESC_BT_INT(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 23, 1, __val) -#define SET_TX_DESC_GID(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 24, 6, __val) - - -#define SET_TX_DESC_WHEADER_LEN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+12, 0, 4, __val) -#define SET_TX_DESC_CHK_EN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+12, 4, 1, __val) -#define SET_TX_DESC_EARLY_MODE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+12, 5, 1, __val) -#define SET_TX_DESC_HWSEQ_SEL(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+12, 6, 2, __val) -#define SET_TX_DESC_USE_RATE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+12, 8, 1, __val) -#define SET_TX_DESC_DISABLE_RTS_FB(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+12, 9, 1, __val) -#define SET_TX_DESC_DISABLE_FB(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+12, 10, 1, __val) -#define SET_TX_DESC_CTS2SELF(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+12, 11, 1, __val) -#define SET_TX_DESC_RTS_ENABLE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+12, 12, 1, __val) -#define SET_TX_DESC_HW_RTS_ENABLE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+12, 13, 1, __val) -#define SET_TX_DESC_NAV_USE_HDR(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+12, 15, 1, __val) -#define SET_TX_DESC_USE_MAX_LEN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+12, 16, 1, __val) -#define SET_TX_DESC_MAX_AGG_NUM(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+12, 17, 5, __val) -#define SET_TX_DESC_NDPA(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+12, 22, 2, __val) -#define SET_TX_DESC_AMPDU_MAX_TIME(__pdesc, 
__val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+12, 24, 8, __val) - - -#define SET_TX_DESC_TX_RATE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 0, 7, __val) -#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 8, 5, __val) -#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 13, 4, __val) -#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 17, 1, __val) -#define SET_TX_DESC_DATA_RETRY_LIMIT(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 18, 6, __val) -#define SET_TX_DESC_RTS_RATE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 24, 5, __val) - - -#define SET_TX_DESC_TX_SUB_CARRIER(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+20, 0, 4, __val) -#define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+20, 4, 1, __val) -#define SET_TX_DESC_DATA_BW(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+20, 5, 2, __val) -#define SET_TX_DESC_DATA_LDPC(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+20, 7, 1, __val) -#define SET_TX_DESC_DATA_STBC(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+20, 8, 2, __val) -#define SET_TX_DESC_CTROL_STBC(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+20, 10, 2, __val) -#define SET_TX_DESC_RTS_SHORT(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+20, 12, 1, __val) -#define SET_TX_DESC_RTS_SC(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+20, 13, 4, __val) - -#define SET_TX_DESC_SW_DEFINE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE((__pdesc) + 24, 0, 12, __val) -#define SET_TX_DESC_MBSSID(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE((__pdesc) + 24, 12, 4, __val) -#define SET_TX_DESC_ANTSEL_A(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE((__pdesc) + 24, 16, 3, __val) -#define SET_TX_DESC_ANTSEL_B(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE((__pdesc) + 24, 19, 3, __val) -#define SET_TX_DESC_ANTSEL_C(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE((__pdesc) + 24, 22, 3, __val) -#define SET_TX_DESC_ANTSEL_D(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE((__pdesc) + 24, 25, 3, __val) - -#define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 16, __val) - -#define GET_TX_DESC_TX_BUFFER_SIZE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+28, 0, 16) - -#define SET_TX_DESC_HWSEQ_EN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+32, 15, 1, __val) - -#define SET_TX_DESC_SEQ(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+36, 12, 12, __val) - -#define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+40, 0, 32, __val) - -#define GET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+40, 0, 32) - - -#define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+48, 0, 32, __val) - -#define GET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+48, 0, 32) - -#define GET_RX_DESC_PKT_LEN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 0, 14) -#define GET_RX_DESC_CRC32(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 14, 1) -#define GET_RX_DESC_ICV(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 15, 1) -#define GET_RX_DESC_DRV_INFO_SIZE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 16, 4) -#define GET_RX_DESC_SECURITY(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 20, 3) -#define GET_RX_DESC_QOS(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 23, 1) -#define GET_RX_DESC_SHIFT(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 24, 2) -#define GET_RX_DESC_PHYST(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 26, 1) -#define GET_RX_DESC_SWDEC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 27, 1) -#define 
GET_RX_DESC_LS(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 28, 1) -#define GET_RX_DESC_FS(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 29, 1) -#define GET_RX_DESC_EOR(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 30, 1) -#define GET_RX_DESC_OWN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 31, 1) - -#define SET_RX_DESC_PKT_LEN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 0, 14, __val) -#define SET_RX_DESC_EOR(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val) -#define SET_RX_DESC_OWN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val) - -#define GET_RX_DESC_MACID(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 0, 7) -#define GET_RX_DESC_TID(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 8, 4) -#define GET_RX_DESC_AMSDU(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 13, 1) -#define GET_RX_STATUS_DESC_RXID_MATCH(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 14, 1) -#define GET_RX_DESC_PAGGR(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 15, 1) -#define GET_RX_DESC_A1_FIT(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 16, 4) -#define GET_RX_DESC_CHKERR(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 20, 1) -#define GET_RX_DESC_IPVER(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 21, 1) -#define GET_RX_STATUS_DESC_IS_TCPUDP(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 22, 1) -#define GET_RX_STATUS_DESC_CHK_VLD(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 23, 1) -#define GET_RX_DESC_PAM(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 24, 1) -#define GET_RX_DESC_PWR(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 25, 1) -#define GET_RX_DESC_MD(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 26, 1) -#define GET_RX_DESC_MF(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 27, 1) -#define GET_RX_DESC_TYPE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 28, 2) -#define GET_RX_DESC_MC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 30, 1) -#define GET_RX_DESC_BC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 31, 1) - - -#define GET_RX_DESC_SEQ(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 0, 12) -#define GET_RX_DESC_FRAG(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 12, 4) -#define GET_RX_STATUS_DESC_RX_IS_QOS(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 16, 1) -#define GET_RX_STATUS_DESC_WLANHD_IV_LEN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 18, 6) -#define GET_RX_STATUS_DESC_RPT_SEL(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 28, 1) - - -#define GET_RX_DESC_RXMCS(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 0, 7) -#define GET_RX_DESC_RXHT(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 6, 1) -#define GET_RX_STATUS_DESC_RX_GF(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 7, 1) -#define GET_RX_DESC_HTC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 10, 1) -#define GET_RX_STATUS_DESC_EOSP(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 11, 1) -#define GET_RX_STATUS_DESC_BSSID_FIT(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 12, 2) - -#define GET_RX_STATUS_DESC_PATTERN_MATCH(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 29, 1) -#define GET_RX_STATUS_DESC_UNICAST_MATCH(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 30, 1) -#define GET_RX_STATUS_DESC_MAGIC_MATCH(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 31, 1) - -#define GET_RX_DESC_SPLCP(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 0, 1) -#define GET_RX_STATUS_DESC_LDPC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 1, 1) -#define GET_RX_STATUS_DESC_STBC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 2, 1) -#define GET_RX_DESC_BW(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 4, 2) - -#define GET_RX_DESC_TSFL(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+20, 0, 32) - -#define GET_RX_DESC_BUFF_ADDR(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+24, 0, 32) -#define GET_RX_DESC_BUFF_ADDR64(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+28, 0, 32) - 
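/* Illustrative note (not part of the patch): the macro-to-inline
 * conversion in this hunk is mechanical.  The old accessors addressed
 * the descriptor in bytes and took an explicit (start bit, bit count)
 * pair, while the new helpers treat it as an array of __le32 words and
 * name the field with a mask: a byte offset of N becomes a word offset
 * of N / 4, and (start, size) becomes GENMASK(start + size - 1, start).
 * A minimal sketch of both shapes, assuming <linux/bitfield.h> for
 * le32p_replace_bits(); the example_* names are hypothetical:
 */
static inline void example_set_rx_desc_pkt_len(__le32 *pdesc, u32 val)
{
	/* old: SET_BITS_TO_LE_4BYTE(pdesc, 0, 14, val) */
	le32p_replace_bits(pdesc, val, GENMASK(13, 0));
}

static inline void example_set_rx_desc_buff_addr(__le32 *pdesc, u32 val)
{
	/* old: SET_BITS_TO_LE_4BYTE(pdesc + 24, 0, 32, val); a full
	 * 32-bit field needs no masking, only an endianness conversion
	 */
	*(pdesc + 6) = cpu_to_le32(val);
}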
-#define SET_RX_DESC_BUFF_ADDR(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 32, __val) -#define SET_RX_DESC_BUFF_ADDR64(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 32, __val) - +static inline void set_tx_desc_pkt_size(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, GENMASK(15, 0)); +} + +static inline void set_tx_desc_offset(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, GENMASK(23, 16)); +} + +static inline void set_tx_desc_bmc(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, BIT(24)); +} + +static inline void set_tx_desc_htc(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, BIT(25)); +} + +static inline void set_tx_desc_last_seg(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, BIT(26)); +} + +static inline void set_tx_desc_first_seg(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, BIT(27)); +} + +static inline void set_tx_desc_linip(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, BIT(28)); +} + +static inline void set_tx_desc_own(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, BIT(31)); +} + +static inline u32 get_tx_desc_own(__le32 *__pdesc) +{ + return le32_get_bits(*__pdesc, BIT(31)); +} + +static inline void set_tx_desc_macid(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 1), __val, GENMASK(6, 0)); +} + +static inline void set_tx_desc_queue_sel(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 1), __val, GENMASK(12, 8)); +} + +static inline void set_tx_desc_rate_id(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 1), __val, GENMASK(20, 16)); +} + +static inline void set_tx_desc_sec_type(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 1), __val, GENMASK(23, 22)); +} + +static inline void set_tx_desc_pkt_offset(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 1), __val, GENMASK(28, 24)); +} + +static inline void set_tx_desc_agg_enable(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 2), __val, BIT(12)); +} + +static inline void set_tx_desc_rdg_enable(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 2), __val, BIT(13)); +} + +static inline void set_tx_desc_more_frag(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 2), __val, BIT(17)); +} + +static inline void set_tx_desc_ampdu_density(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 2), __val, GENMASK(22, 20)); +} + +static inline void set_tx_desc_hwseq_sel(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 3), __val, GENMASK(7, 6)); +} + +static inline void set_tx_desc_use_rate(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 3), __val, BIT(8)); +} + +static inline void set_tx_desc_disable_fb(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 3), __val, BIT(10)); +} + +static inline void set_tx_desc_cts2self(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 3), __val, BIT(11)); +} + +static inline void set_tx_desc_rts_enable(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 3), __val, BIT(12)); +} + +static inline void set_tx_desc_hw_rts_enable(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 3), __val, BIT(13)); +} + +static inline void set_tx_desc_nav_use_hdr(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 3), __val, BIT(15)); +} + +static inline void set_tx_desc_max_agg_num(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 3), 
__val, GENMASK(21, 17)); +} + +static inline void set_tx_desc_tx_rate(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, GENMASK(6, 0)); +} + +static inline void set_tx_desc_data_rate_fb_limit(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, GENMASK(12, 8)); +} + +static inline void set_tx_desc_rts_rate_fb_limit(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, GENMASK(16, 13)); +} + +static inline void set_tx_desc_rts_rate(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, GENMASK(28, 24)); +} + +static inline void set_tx_desc_tx_sub_carrier(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 5), __val, GENMASK(3, 0)); +} + +static inline void set_tx_desc_data_shortgi(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 5), __val, BIT(4)); +} + +static inline void set_tx_desc_data_bw(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 5), __val, GENMASK(6, 5)); +} + +static inline void set_tx_desc_rts_short(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 5), __val, BIT(12)); +} + +static inline void set_tx_desc_rts_sc(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 5), __val, GENMASK(16, 13)); +} + +static inline void set_tx_desc_tx_buffer_size(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 7), __val, GENMASK(15, 0)); +} + +static inline void set_tx_desc_hwseq_en(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 8), __val, BIT(15)); +} + +static inline void set_tx_desc_seq(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 9), __val, GENMASK(23, 12)); +} + +static inline void set_tx_desc_tx_buffer_address(__le32 *__pdesc, u32 __val) +{ + *(__pdesc + 10) = cpu_to_le32(__val); +} + +static inline u32 get_tx_desc_tx_buffer_address(__le32 *__pdesc) +{ + return le32_to_cpu(*((__pdesc + 10))); +} + +static inline void set_tx_desc_next_desc_address(__le32 *__pdesc, u32 __val) +{ + *(__pdesc + 12) = cpu_to_le32(__val); +} + +static inline u32 get_rx_desc_pkt_len(__le32 *__pdesc) +{ + return le32_get_bits(*__pdesc, GENMASK(13, 0)); +} + +static inline u32 get_rx_desc_crc32(__le32 *__pdesc) +{ + return le32_get_bits(*__pdesc, BIT(14)); +} + +static inline u32 get_rx_desc_icv(__le32 *__pdesc) +{ + return le32_get_bits(*__pdesc, BIT(15)); +} + +static inline u32 get_rx_desc_drv_info_size(__le32 *__pdesc) +{ + return le32_get_bits(*__pdesc, GENMASK(19, 16)); +} + +static inline u32 get_rx_desc_shift(__le32 *__pdesc) +{ + return le32_get_bits(*__pdesc, GENMASK(25, 24)); +} + +static inline u32 get_rx_desc_physt(__le32 *__pdesc) +{ + return le32_get_bits(*__pdesc, BIT(26)); +} + +static inline u32 get_rx_desc_swdec(__le32 *__pdesc) +{ + return le32_get_bits(*__pdesc, BIT(27)); +} + +static inline u32 get_rx_desc_own(__le32 *__pdesc) +{ + return le32_get_bits(*__pdesc, BIT(31)); +} + +static inline void set_rx_desc_pkt_len(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, GENMASK(13, 0)); +} + +static inline void set_rx_desc_eor(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, BIT(30)); +} + +static inline void set_rx_desc_own(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, BIT(31)); +} + +static inline u32 get_rx_desc_macid(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 1), GENMASK(6, 0)); +} + +static inline u32 get_rx_desc_paggr(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 1), BIT(15)); +} + +static inline u32 
get_rx_status_desc_rpt_sel(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 2), BIT(28)); +} + +static inline u32 get_rx_desc_rxmcs(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 3), GENMASK(6, 0)); +} + +static inline u32 get_rx_desc_rxht(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 3), BIT(6)); +} + +static inline u32 get_rx_status_desc_pattern_match(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 3), BIT(29)); +} + +static inline u32 get_rx_status_desc_unicast_match(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 3), BIT(30)); +} + +static inline u32 get_rx_status_desc_magic_match(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 3), BIT(31)); +} + +static inline u32 get_rx_desc_splcp(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 4), BIT(0)); +} + +static inline u32 get_rx_desc_bw(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 4), GENMASK(5, 4)); +} + +static inline u32 get_rx_desc_tsfl(__le32 *__pdesc) +{ + return le32_to_cpu(*((__pdesc + 5))); +} + +static inline u32 get_rx_desc_buff_addr(__le32 *__pdesc) +{ + return le32_to_cpu(*((__pdesc + 6))); +} + +static inline void set_rx_desc_buff_addr(__le32 *__pdesc, u32 __val) +{ + *(__pdesc + 6) = cpu_to_le32(__val); +} /* TX report 2 format in Rx desc*/ -#define GET_RX_RPT2_DESC_PKT_LEN(__rxstatusdesc) \ - LE_BITS_TO_4BYTE(__rxstatusdesc, 0, 9) -#define GET_RX_RPT2_DESC_MACID_VALID_1(__rxstatusdesc) \ - LE_BITS_TO_4BYTE(__rxstatusdesc+16, 0, 32) -#define GET_RX_RPT2_DESC_MACID_VALID_2(__rxstatusdesc) \ - LE_BITS_TO_4BYTE(__rxstatusdesc+20, 0, 32) - -#define SET_EARLYMODE_PKTNUM(__paddr, __value) \ - SET_BITS_TO_LE_4BYTE(__paddr, 0, 4, __value) -#define SET_EARLYMODE_LEN0(__paddr, __value) \ - SET_BITS_TO_LE_4BYTE(__paddr, 4, 12, __value) -#define SET_EARLYMODE_LEN1(__paddr, __value) \ - SET_BITS_TO_LE_4BYTE(__paddr, 16, 12, __value) -#define SET_EARLYMODE_LEN2_1(__paddr, __value) \ - SET_BITS_TO_LE_4BYTE(__paddr, 28, 4, __value) -#define SET_EARLYMODE_LEN2_2(__paddr, __value) \ - SET_BITS_TO_LE_4BYTE(__paddr+4, 0, 8, __value) -#define SET_EARLYMODE_LEN3(__paddr, __value) \ - SET_BITS_TO_LE_4BYTE(__paddr+4, 8, 12, __value) -#define SET_EARLYMODE_LEN4(__paddr, __value) \ - SET_BITS_TO_LE_4BYTE(__paddr+4, 20, 12, __value) - -#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \ -do { \ - if (_size > TX_DESC_NEXT_DESC_OFFSET) \ - memset(__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET); \ - else \ - memset(__pdesc, 0, _size); \ -} while (0) +static inline u32 get_rx_rpt2_desc_macid_valid_1(__le32 *__rxstatusdesc) +{ + return le32_to_cpu(*((__rxstatusdesc + 4))); +} + +static inline u32 get_rx_rpt2_desc_macid_valid_2(__le32 *__rxstatusdesc) +{ + return le32_to_cpu(*((__rxstatusdesc + 5))); +} + +static inline void set_earlymode_pktnum(__le32 *__paddr, u32 __value) +{ + le32p_replace_bits(__paddr, __value, GENMASK(3, 0)); +} + +static inline void set_earlymode_len0(__le32 *__paddr, u32 __value) +{ + le32p_replace_bits(__paddr, __value, GENMASK(15, 4)); +} + +static inline void set_earlymode_len1(__le32 *__paddr, u32 __value) +{ + le32p_replace_bits(__paddr, __value, GENMASK(27, 16)); +} + +static inline void set_earlymode_len2_1(__le32 *__paddr, u32 __value) +{ + le32p_replace_bits(__paddr, __value, GENMASK(31, 28)); +} + +static inline void set_earlymode_len2_2(__le32 *__paddr, u32 __value) +{ + le32p_replace_bits((__paddr + 1), __value, GENMASK(7, 0)); +} + +static inline void set_earlymode_len3(__le32 *__paddr, u32 __value) +{ + le32p_replace_bits((__paddr + 1), __value, GENMASK(19, 8)); +} + 
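/* Illustrative note (not part of the patch): the early-mode header
 * packs 12-bit packet lengths into two __le32 words, so LEN2 straddles
 * the word boundary and is split between set_earlymode_len2_1() (bits
 * 31:28 of word 0) and set_earlymode_len2_2() (bits 7:0 of word 1).
 * A hypothetical wrapper mirroring how _rtl8723be_insert_emcontent()
 * in trx.c splits the value:
 */
static inline void example_set_earlymode_len2(__le32 *paddr, u32 len)
{
	set_earlymode_len2_1(paddr, len & 0xf);	/* low nibble */
	set_earlymode_len2_2(paddr, len >> 4);	/* high 8 bits */
}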
+static inline void set_earlymode_len4(__le32 *__paddr, u32 __value) +{ + le32p_replace_bits((__paddr + 1), __value, GENMASK(31, 20)); +} + +static inline void clear_pci_tx_desc_content(__le32 *__pdesc, u32 _size) +{ + if (_size > TX_DESC_NEXT_DESC_OFFSET) + memset(__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET); + else + memset(__pdesc, 0, _size); +} struct phy_rx_agc_info_t { #ifdef __LITTLE_ENDIAN diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/def.h index 827bc5f35d2a..235a7965675c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/def.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/def.h @@ -107,37 +107,6 @@ #define MAX_RX_DMA_BUFFER_SIZE_8812 0x3E80 -#define C2H_RX_CMD_HDR_LEN 8 -#define GET_C2H_CMD_CMD_LEN(__prxhdr) \ - LE_BITS_TO_4BYTE((__prxhdr), 0, 16) -#define GET_C2H_CMD_ELEMENT_ID(__prxhdr) \ - LE_BITS_TO_4BYTE((__prxhdr), 16, 8) -#define GET_C2H_CMD_CMD_SEQ(__prxhdr) \ - LE_BITS_TO_4BYTE((__prxhdr), 24, 7) -#define GET_C2H_CMD_CONTINUE(__prxhdr) \ - LE_BITS_TO_4BYTE((__prxhdr), 31, 1) -#define GET_C2H_CMD_CONTENT(__prxhdr) \ - ((u8 *)(__prxhdr) + C2H_RX_CMD_HDR_LEN) - -#define GET_C2H_CMD_FEEDBACK_ELEMENT_ID(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE((__pcmdfbhdr), 0, 8) -#define GET_C2H_CMD_FEEDBACK_CCX_LEN(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE((__pcmdfbhdr), 8, 8) -#define GET_C2H_CMD_FEEDBACK_CCX_CMD_CNT(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE((__pcmdfbhdr), 16, 16) -#define GET_C2H_CMD_FEEDBACK_CCX_MAC_ID(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 0, 5) -#define GET_C2H_CMD_FEEDBACK_CCX_VALID(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 7, 1) -#define GET_C2H_CMD_FEEDBACK_CCX_RETRY_CNT(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 8, 5) -#define GET_C2H_CMD_FEEDBACK_CCX_TOK(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 15, 1) -#define GET_C2H_CMD_FEEDBACK_CCX_QSEL(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 16, 4) -#define GET_C2H_CMD_FEEDBACK_CCX_SEQ(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 20, 12) - #define CHIP_BONDING_IDENTIFIER(_value) (((_value)>>22)&0x3) #define CHIP_8812 BIT(2) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c index 408af144098e..979e434a4e73 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c @@ -3613,14 +3613,14 @@ u8 rtl8821ae_phy_sw_chnl(struct ieee80211_hw *hw) u8 _rtl8812ae_get_right_chnl_place_for_iqk(u8 chnl) { - u8 channel_all[TARGET_CHNL_NUM_2G_5G_8812] = { + static const u8 channel_all[TARGET_CHNL_NUM_2G_5G_8812] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 136, 138, 140, 149, 151, 153, 155, 157, 159, 161, 163, 165}; - u8 place = chnl; + u8 place; if (chnl > 14) { for (place = 14; place < sizeof(channel_all); place++) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c index eec7c4ecf3ad..3def6a2b3450 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c @@ -145,10 +145,6 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps; rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; rtlpci->int_clear = 
rtlpriv->cfg->mod_params->int_clear; - rtlpriv->cfg->mod_params->sw_crypto = - rtlpriv->cfg->mod_params->sw_crypto; - rtlpriv->cfg->mod_params->disable_watchdog = - rtlpriv->cfg->mod_params->disable_watchdog; if (rtlpriv->cfg->mod_params->disable_watchdog) pr_info("watchdog disabled\n"); rtlpriv->psc.reg_fwctrl_lps = 2; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h index 81951f0c80b6..a9ed6fd41089 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h @@ -214,7 +214,7 @@ static inline void set_tx_desc_tx_buffer_address(__le32 *__pdesc, u32 __val) *(__pdesc + 10) = cpu_to_le32(__val); } -static inline int get_tx_desc_tx_buffer_address(__le32 *__pdesc) +static inline u32 get_tx_desc_tx_buffer_address(__le32 *__pdesc) { return le32_to_cpu(*(__pdesc + 10)); } @@ -324,12 +324,12 @@ static inline int get_rx_desc_bw(__le32 *__pdesc) return le32_get_bits(*(__pdesc + 4), GENMASK(5, 4)); } -static inline int get_rx_desc_tsfl(__le32 *__pdesc) +static inline u32 get_rx_desc_tsfl(__le32 *__pdesc) { return le32_to_cpu(*(__pdesc + 5)); } -static inline int get_rx_desc_buff_addr(__le32 *__pdesc) +static inline u32 get_rx_desc_buff_addr(__le32 *__pdesc) { return le32_to_cpu(*(__pdesc + 6)); } @@ -341,12 +341,12 @@ static inline void set_rx_desc_buff_addr(__le32 *__pdesc, u32 __val) /* TX report 2 format in Rx desc*/ -static inline int get_rx_rpt2_desc_macid_valid_1(__le32 *__status) +static inline u32 get_rx_rpt2_desc_macid_valid_1(__le32 *__status) { return le32_to_cpu(*(__status + 4)); } -static inline int get_rx_rpt2_desc_macid_valid_2(__le32 *__status) +static inline u32 get_rx_rpt2_desc_macid_valid_2(__le32 *__status) { return le32_to_cpu(*(__status + 5)); } diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c index 34d68dbf4b4c..4b59f3b46b28 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c @@ -239,10 +239,7 @@ static void _rtl_usb_io_handler_release(struct ieee80211_hw *hw) mutex_destroy(&rtlpriv->io.bb_mutex); } -/** - * - * Default aggregation handler. Do nothing and just return the oldest skb. - */ +/* Default aggregation handler. Do nothing and just return the oldest skb. */ static struct sk_buff *_none_usb_tx_aggregate_hdl(struct ieee80211_hw *hw, struct sk_buff_head *list) { @@ -756,11 +753,6 @@ static int rtl_usb_start(struct ieee80211_hw *hw) return err; } -/** - * - * - */ - /*======================= tx =========================================*/ static void rtl_usb_cleanup(struct ieee80211_hw *hw) { @@ -786,11 +778,7 @@ static void rtl_usb_cleanup(struct ieee80211_hw *hw) usb_kill_anchored_urbs(&rtlusb->tx_submitted); } -/** - * - * We may add some struct into struct rtl_usb later. Do deinit here. - * - */ +/* We may add some struct into struct rtl_usb later. Do deinit here. 
*/ static void rtl_usb_deinit(struct ieee80211_hw *hw) { rtl_usb_cleanup(hw); diff --git a/drivers/net/wireless/realtek/rtw88/Makefile b/drivers/net/wireless/realtek/rtw88/Makefile index e0bfefd154af..77edee2df8b8 100644 --- a/drivers/net/wireless/realtek/rtw88/Makefile +++ b/drivers/net/wireless/realtek/rtw88/Makefile @@ -9,6 +9,7 @@ rtw88-y += main.o \ rx.o \ mac.o \ phy.o \ + coex.o \ efuse.o \ fw.o \ ps.o \ diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c new file mode 100644 index 000000000000..793b40bdbf7c --- /dev/null +++ b/drivers/net/wireless/realtek/rtw88/coex.c @@ -0,0 +1,2502 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2018-2019 Realtek Corporation + */ + +#include "main.h" +#include "coex.h" +#include "fw.h" +#include "ps.h" +#include "debug.h" +#include "reg.h" + +static u8 rtw_coex_next_rssi_state(struct rtw_dev *rtwdev, u8 pre_state, + u8 rssi, u8 rssi_thresh) +{ + struct rtw_chip_info *chip = rtwdev->chip; + u8 tol = chip->rssi_tolerance; + u8 next_state; + + if (pre_state == COEX_RSSI_STATE_LOW || + pre_state == COEX_RSSI_STATE_STAY_LOW) { + if (rssi >= (rssi_thresh + tol)) + next_state = COEX_RSSI_STATE_HIGH; + else + next_state = COEX_RSSI_STATE_STAY_LOW; + } else { + if (rssi < rssi_thresh) + next_state = COEX_RSSI_STATE_LOW; + else + next_state = COEX_RSSI_STATE_STAY_HIGH; + } + + return next_state; +} + +static void rtw_coex_limited_tx(struct rtw_dev *rtwdev, + bool tx_limit_en, bool ampdu_limit_en) +{ + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + bool wifi_under_b_mode = false; + + if (!chip->scbd_support) + return; + + /* force max tx retry limit = 8 */ + if (coex_stat->wl_tx_limit_en == tx_limit_en && + coex_stat->wl_ampdu_limit_en == ampdu_limit_en) + return; + + if (!coex_stat->wl_tx_limit_en) { + coex_stat->darfrc = rtw_read32(rtwdev, REG_DARFRC); + coex_stat->darfrch = rtw_read32(rtwdev, REG_DARFRCH); + coex_stat->retry_limit = rtw_read16(rtwdev, REG_RETRY_LIMIT); + } + + if (!coex_stat->wl_ampdu_limit_en) + coex_stat->ampdu_max_time = + rtw_read8(rtwdev, REG_AMPDU_MAX_TIME_V1); + + coex_stat->wl_tx_limit_en = tx_limit_en; + coex_stat->wl_ampdu_limit_en = ampdu_limit_en; + + if (tx_limit_en) { + /* set BT polluted packet on for tx rate adaptive, + * not including tx retry broken by PTA + */ + rtw_write8_set(rtwdev, REG_TX_HANG_CTRL, BIT_EN_GNT_BT_AWAKE); + + /* set queue life time to avoid can't reach tx retry limit + * if tx is always broken by GNT_BT + */ + rtw_write8_set(rtwdev, REG_LIFETIME_EN, 0xf); + rtw_write16(rtwdev, REG_RETRY_LIMIT, 0x0808); + + /* auto rate fallback step within 8 retries */ + if (wifi_under_b_mode) { + rtw_write32(rtwdev, REG_DARFRC, 0x1000000); + rtw_write32(rtwdev, REG_DARFRCH, 0x1010101); + } else { + rtw_write32(rtwdev, REG_DARFRC, 0x1000000); + rtw_write32(rtwdev, REG_DARFRCH, 0x4030201); + } + } else { + rtw_write8_clr(rtwdev, REG_TX_HANG_CTRL, BIT_EN_GNT_BT_AWAKE); + rtw_write8_clr(rtwdev, REG_LIFETIME_EN, 0xf); + + rtw_write16(rtwdev, REG_RETRY_LIMIT, coex_stat->retry_limit); + rtw_write32(rtwdev, REG_DARFRC, coex_stat->darfrc); + rtw_write32(rtwdev, REG_DARFRCH, coex_stat->darfrch); + } + + if (ampdu_limit_en) + rtw_write8(rtwdev, REG_AMPDU_MAX_TIME_V1, 0x20); + else + rtw_write8(rtwdev, REG_AMPDU_MAX_TIME_V1, + coex_stat->ampdu_max_time); +} + +static void rtw_coex_limited_wl(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct 
rtw_coex_dm *coex_dm = &coex->dm; + struct rtw_coex_stat *coex_stat = &coex->stat; + bool tx_limit = false; + bool tx_agg_ctrl = false; + + if (coex->under_5g || + coex_dm->bt_status == COEX_BTSTATUS_NCON_IDLE) { + /* no need to limit tx */ + } else { + tx_limit = true; + if (coex_stat->bt_hid_exist || coex_stat->bt_hfp_exist || + coex_stat->bt_hid_pair_num > 0) + tx_agg_ctrl = true; + } + + rtw_coex_limited_tx(rtwdev, tx_limit, tx_agg_ctrl); +} + +static void rtw_coex_wl_ccklock_action(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + u8 para[6] = {0}; + + if (coex->stop_dm) + return; + + para[0] = COEX_H2C69_WL_LEAKAP; + + if (coex_stat->tdma_timer_base == 3 && coex_stat->wl_slot_extend) { + para[1] = PARA1_H2C69_DIS_5MS; /* disable 5ms extend */ + rtw_fw_bt_wifi_control(rtwdev, para[0], ¶[1]); + coex_stat->wl_slot_extend = false; + coex_stat->cnt_wl[COEX_CNT_WL_5MS_NOEXTEND] = 0; + return; + } + + if (coex_stat->wl_slot_extend && coex_stat->wl_force_lps_ctrl && + !coex_stat->wl_cck_lock_ever) { + if (coex_stat->wl_fw_dbg_info[7] <= 5) + coex_stat->cnt_wl[COEX_CNT_WL_5MS_NOEXTEND]++; + else + coex_stat->cnt_wl[COEX_CNT_WL_5MS_NOEXTEND] = 0; + + if (coex_stat->cnt_wl[COEX_CNT_WL_5MS_NOEXTEND] == 7) { + para[1] = 0x1; /* disable 5ms extend */ + rtw_fw_bt_wifi_control(rtwdev, para[0], ¶[1]); + coex_stat->wl_slot_extend = false; + coex_stat->cnt_wl[COEX_CNT_WL_5MS_NOEXTEND] = 0; + } + } else if (!coex_stat->wl_slot_extend && coex_stat->wl_cck_lock) { + para[1] = 0x0; /* enable 5ms extend */ + rtw_fw_bt_wifi_control(rtwdev, para[0], ¶[1]); + coex_stat->wl_slot_extend = true; + } +} + +static void rtw_coex_wl_ccklock_detect(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + + /* TODO: wait for rx_rate_change_notify implement */ + coex_stat->wl_cck_lock = false; + coex_stat->wl_cck_lock_pre = false; + coex_stat->wl_cck_lock_ever = false; +} + +static void rtw_coex_wl_noisy_detect(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + u32 cnt_cck; + + /* wifi noisy environment identification */ + cnt_cck = dm_info->cck_ok_cnt + dm_info->cck_err_cnt; + + if (!coex_stat->wl_gl_busy) { + if (cnt_cck > 250) { + if (coex_stat->cnt_wl[COEX_CNT_WL_NOISY2] < 5) + coex_stat->cnt_wl[COEX_CNT_WL_NOISY2]++; + + if (coex_stat->cnt_wl[COEX_CNT_WL_NOISY2] == 5) { + coex_stat->cnt_wl[COEX_CNT_WL_NOISY0] = 0; + coex_stat->cnt_wl[COEX_CNT_WL_NOISY1] = 0; + } + } else if (cnt_cck < 100) { + if (coex_stat->cnt_wl[COEX_CNT_WL_NOISY0] < 5) + coex_stat->cnt_wl[COEX_CNT_WL_NOISY0]++; + + if (coex_stat->cnt_wl[COEX_CNT_WL_NOISY0] == 5) { + coex_stat->cnt_wl[COEX_CNT_WL_NOISY1] = 0; + coex_stat->cnt_wl[COEX_CNT_WL_NOISY2] = 0; + } + } else { + if (coex_stat->cnt_wl[COEX_CNT_WL_NOISY1] < 5) + coex_stat->cnt_wl[COEX_CNT_WL_NOISY1]++; + + if (coex_stat->cnt_wl[COEX_CNT_WL_NOISY1] == 5) { + coex_stat->cnt_wl[COEX_CNT_WL_NOISY0] = 0; + coex_stat->cnt_wl[COEX_CNT_WL_NOISY2] = 0; + } + } + + if (coex_stat->cnt_wl[COEX_CNT_WL_NOISY2] == 5) + coex_stat->wl_noisy_level = 2; + else if (coex_stat->cnt_wl[COEX_CNT_WL_NOISY1] == 5) + coex_stat->wl_noisy_level = 1; + else + coex_stat->wl_noisy_level = 0; + } +} + +static void rtw_coex_tdma_timer_base(struct rtw_dev *rtwdev, u8 type) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + u8 para[2] = 
{0}; + + if (coex_stat->tdma_timer_base == type) + return; + + coex_stat->tdma_timer_base = type; + + para[0] = COEX_H2C69_TDMA_SLOT; + + if (type == 3) /* 4-slot */ + para[1] = PARA1_H2C69_TDMA_4SLOT; /* 4-slot */ + else /* 2-slot */ + para[1] = PARA1_H2C69_TDMA_2SLOT; + + rtw_fw_bt_wifi_control(rtwdev, para[0], ¶[1]); + + /* no 5ms_wl_slot_extend for 4-slot mode */ + if (coex_stat->tdma_timer_base == 3) + rtw_coex_wl_ccklock_action(rtwdev); +} + +static void rtw_coex_set_wl_pri_mask(struct rtw_dev *rtwdev, u8 bitmap, + u8 data) +{ + u32 addr; + + addr = REG_BT_COEX_TABLE_H + (bitmap / 8); + bitmap = bitmap % 8; + + rtw_write8_mask(rtwdev, addr, BIT(bitmap), data); +} + +void rtw_coex_write_scbd(struct rtw_dev *rtwdev, u16 bitpos, bool set) +{ + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + u16 val = 0x2; + + if (!chip->scbd_support) + return; + + val |= coex_stat->score_board; + + /* for 8822b, scbd[10] is CQDDR on + * for 8822c, scbd[10] is no fix 2M + */ + if (!chip->new_scbd10_def && (bitpos & COEX_SCBD_FIX2M)) { + if (set) + val &= ~COEX_SCBD_FIX2M; + else + val |= COEX_SCBD_FIX2M; + } else { + if (set) + val |= bitpos; + else + val &= ~bitpos; + } + + if (val != coex_stat->score_board) { + coex_stat->score_board = val; + val |= BIT_BT_INT_EN; + rtw_write16(rtwdev, REG_WIFI_BT_INFO, val); + } +} + +static u16 rtw_coex_read_scbd(struct rtw_dev *rtwdev) +{ + struct rtw_chip_info *chip = rtwdev->chip; + + if (!chip->scbd_support) + return 0; + + return (rtw_read16(rtwdev, REG_WIFI_BT_INFO)) & ~(BIT_BT_INT_EN); +} + +static void rtw_coex_check_rfk(struct rtw_dev *rtwdev) +{ + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_coex_rfe *coex_rfe = &coex->rfe; + u8 cnt = 0; + u32 wait_cnt; + bool btk, wlk; + + if (coex_rfe->wlg_at_btg && chip->scbd_support && + coex_stat->bt_iqk_state != 0xff) { + wait_cnt = COEX_RFK_TIMEOUT / COEX_MIN_DELAY; + do { + /* BT RFK */ + btk = !!(rtw_coex_read_scbd(rtwdev) & COEX_SCBD_BT_RFK); + + /* WL RFK */ + wlk = !!(rtw_read8(rtwdev, REG_ARFR4) & BIT_WL_RFK); + + if (!btk && !wlk) + break; + + mdelay(COEX_MIN_DELAY); + } while (++cnt < wait_cnt); + + if (cnt >= wait_cnt) + coex_stat->bt_iqk_state = 0xff; + } +} + +static void rtw_coex_query_bt_info(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + + if (coex_stat->bt_disabled) + return; + + rtw_fw_query_bt_info(rtwdev); +} + +static void rtw_coex_monitor_bt_enable(struct rtw_dev *rtwdev) +{ + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_coex_dm *coex_dm = &coex->dm; + bool bt_disabled = false; + u16 score_board; + + if (chip->scbd_support) { + score_board = rtw_coex_read_scbd(rtwdev); + bt_disabled = !(score_board & COEX_SCBD_ONOFF); + } + + if (coex_stat->bt_disabled != bt_disabled) { + rtw_dbg(rtwdev, RTW_DBG_COEX, "coex: BT state changed (%d) -> (%d)\n", + coex_stat->bt_disabled, bt_disabled); + + coex_stat->bt_disabled = bt_disabled; + coex_stat->bt_ble_scan_type = 0; + coex_dm->cur_bt_lna_lvl = 0; + } + + if (!coex_stat->bt_disabled) { + coex_stat->bt_reenable = true; + ieee80211_queue_delayed_work(rtwdev->hw, + &coex->bt_reenable_work, 15 * HZ); + } else { + coex_stat->bt_mailbox_reply = false; + coex_stat->bt_reenable = false; + } +} + +static void 
rtw_coex_update_wl_link_info(struct rtw_dev *rtwdev, u8 reason) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_coex_dm *coex_dm = &coex->dm; + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_traffic_stats *stats = &rtwdev->stats; + bool is_5G = false; + bool scan = false, link = false; + int i; + u8 rssi_state; + u8 rssi_step; + u8 rssi; + + scan = rtw_flag_check(rtwdev, RTW_FLAG_SCANNING); + coex_stat->wl_connected = !!rtwdev->sta_cnt; + coex_stat->wl_gl_busy = rtw_flag_check(rtwdev, RTW_FLAG_BUSY_TRAFFIC); + + if (stats->tx_throughput > stats->rx_throughput) + coex_stat->wl_tput_dir = COEX_WL_TPUT_TX; + else + coex_stat->wl_tput_dir = COEX_WL_TPUT_RX; + + if (scan || link || reason == COEX_RSN_2GCONSTART || + reason == COEX_RSN_2GSCANSTART || reason == COEX_RSN_2GSWITCHBAND) + coex_stat->wl_linkscan_proc = true; + else + coex_stat->wl_linkscan_proc = false; + + rtw_coex_wl_noisy_detect(rtwdev); + + for (i = 0; i < 4; i++) { + rssi_state = coex_dm->wl_rssi_state[i]; + rssi_step = chip->wl_rssi_step[i]; + rssi = rtwdev->dm_info.min_rssi; + rssi_state = rtw_coex_next_rssi_state(rtwdev, rssi_state, + rssi, rssi_step); + coex_dm->wl_rssi_state[i] = rssi_state; + } + + switch (reason) { + case COEX_RSN_5GSCANSTART: + case COEX_RSN_5GSWITCHBAND: + case COEX_RSN_5GCONSTART: + + is_5G = true; + break; + case COEX_RSN_2GSCANSTART: + case COEX_RSN_2GSWITCHBAND: + case COEX_RSN_2GCONSTART: + + is_5G = false; + break; + default: + if (rtwdev->hal.current_band_type == RTW_BAND_5G) + is_5G = true; + else + is_5G = false; + break; + } + + coex->under_5g = is_5G; +} + +static inline u8 *get_payload_from_coex_resp(struct sk_buff *resp) +{ + struct rtw_c2h_cmd *c2h; + u32 pkt_offset; + + pkt_offset = *((u32 *)resp->cb); + c2h = (struct rtw_c2h_cmd *)(resp->data + pkt_offset); + + return c2h->payload; +} + +void rtw_coex_info_response(struct rtw_dev *rtwdev, struct sk_buff *skb) +{ + struct rtw_coex *coex = &rtwdev->coex; + u8 *payload = get_payload_from_coex_resp(skb); + + if (payload[0] != COEX_RESP_ACK_BY_WL_FW) + return; + + skb_queue_tail(&coex->queue, skb); + wake_up(&coex->wait); +} + +static struct sk_buff *rtw_coex_info_request(struct rtw_dev *rtwdev, + struct rtw_coex_info_req *req) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct sk_buff *skb_resp = NULL; + + mutex_lock(&coex->mutex); + + rtw_fw_query_bt_mp_info(rtwdev, req); + + if (!wait_event_timeout(coex->wait, !skb_queue_empty(&coex->queue), + COEX_REQUEST_TIMEOUT)) { + rtw_err(rtwdev, "coex request time out\n"); + goto out; + } + + skb_resp = skb_dequeue(&coex->queue); + if (!skb_resp) { + rtw_err(rtwdev, "failed to get coex info response\n"); + goto out; + } + +out: + mutex_unlock(&coex->mutex); + return skb_resp; +} + +static bool rtw_coex_get_bt_scan_type(struct rtw_dev *rtwdev, u8 *scan_type) +{ + struct rtw_coex_info_req req = {0}; + struct sk_buff *skb; + u8 *payload; + bool ret = false; + + req.op_code = BT_MP_INFO_OP_SCAN_TYPE; + skb = rtw_coex_info_request(rtwdev, &req); + if (!skb) + goto out; + + payload = get_payload_from_coex_resp(skb); + *scan_type = GET_COEX_RESP_BT_SCAN_TYPE(payload); + dev_kfree_skb_any(skb); + ret = true; + +out: + return ret; +} + +static bool rtw_coex_set_lna_constrain_level(struct rtw_dev *rtwdev, + u8 lna_constrain_level) +{ + struct rtw_coex_info_req req = {0}; + struct sk_buff *skb; + bool ret = false; + + req.op_code = BT_MP_INFO_OP_LNA_CONSTRAINT; + req.para1 = lna_constrain_level; + skb = rtw_coex_info_request(rtwdev, &req); + 
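	/* As in rtw_coex_get_bt_scan_type() above: rtw_coex_info_request()
	 * sends the BT_MP H2C query and sleeps on coex->wait until
	 * rtw_coex_info_response() queues the matching C2H reply, so a
	 * NULL skb here means the firmware did not answer within
	 * COEX_REQUEST_TIMEOUT and the request is reported as failed.
	 */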
if (!skb) + goto out; + + dev_kfree_skb_any(skb); + ret = true; + +out: + return ret; +} + +static void rtw_coex_update_bt_link_info(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_coex_dm *coex_dm = &coex->dm; + struct rtw_chip_info *chip = rtwdev->chip; + u8 i; + u8 rssi_state; + u8 rssi_step; + u8 rssi; + + /* update wl/bt rssi by btinfo */ + for (i = 0; i < COEX_RSSI_STEP; i++) { + rssi_state = coex_dm->bt_rssi_state[i]; + rssi_step = chip->bt_rssi_step[i]; + rssi = coex_stat->bt_rssi; + rssi_state = rtw_coex_next_rssi_state(rtwdev, rssi_state, + rssi, rssi_step); + coex_dm->bt_rssi_state[i] = rssi_state; + } + + for (i = 0; i < COEX_RSSI_STEP; i++) { + rssi_state = coex_dm->wl_rssi_state[i]; + rssi_step = chip->wl_rssi_step[i]; + rssi = rtwdev->dm_info.min_rssi; + rssi_state = rtw_coex_next_rssi_state(rtwdev, rssi_state, + rssi, rssi_step); + coex_dm->wl_rssi_state[i] = rssi_state; + } + + if (coex_stat->bt_ble_scan_en && + coex_stat->cnt_bt[COEX_CNT_BT_INFOUPDATE] % 3 == 0) { + u8 scan_type; + + if (rtw_coex_get_bt_scan_type(rtwdev, &scan_type)) { + coex_stat->bt_ble_scan_type = scan_type; + if ((coex_stat->bt_ble_scan_type & 0x1) == 0x1) + coex_stat->bt_init_scan = true; + else + coex_stat->bt_init_scan = false; + } + } + + coex_stat->bt_profile_num = 0; + + /* set link exist status */ + if (!(coex_stat->bt_info_lb2 & COEX_INFO_CONNECTION)) { + coex_stat->bt_link_exist = false; + coex_stat->bt_pan_exist = false; + coex_stat->bt_a2dp_exist = false; + coex_stat->bt_hid_exist = false; + coex_stat->bt_hfp_exist = false; + } else { + /* connection exists */ + coex_stat->bt_link_exist = true; + if (coex_stat->bt_info_lb2 & COEX_INFO_FTP) { + coex_stat->bt_pan_exist = true; + coex_stat->bt_profile_num++; + } else { + coex_stat->bt_pan_exist = false; + } + + if (coex_stat->bt_info_lb2 & COEX_INFO_A2DP) { + coex_stat->bt_a2dp_exist = true; + coex_stat->bt_profile_num++; + } else { + coex_stat->bt_a2dp_exist = false; + } + + if (coex_stat->bt_info_lb2 & COEX_INFO_HID) { + coex_stat->bt_hid_exist = true; + coex_stat->bt_profile_num++; + } else { + coex_stat->bt_hid_exist = false; + } + + if (coex_stat->bt_info_lb2 & COEX_INFO_SCO_ESCO) { + coex_stat->bt_hfp_exist = true; + coex_stat->bt_profile_num++; + } else { + coex_stat->bt_hfp_exist = false; + } + } + + if (coex_stat->bt_info_lb2 & COEX_INFO_INQ_PAGE) { + coex_dm->bt_status = COEX_BTSTATUS_INQ_PAGE; + } else if (!(coex_stat->bt_info_lb2 & COEX_INFO_CONNECTION)) { + coex_dm->bt_status = COEX_BTSTATUS_NCON_IDLE; + } else if (coex_stat->bt_info_lb2 == COEX_INFO_CONNECTION) { + coex_dm->bt_status = COEX_BTSTATUS_CON_IDLE; + } else if ((coex_stat->bt_info_lb2 & COEX_INFO_SCO_ESCO) || + (coex_stat->bt_info_lb2 & COEX_INFO_SCO_BUSY)) { + if (coex_stat->bt_info_lb2 & COEX_INFO_ACL_BUSY) + coex_dm->bt_status = COEX_BTSTATUS_ACL_SCO_BUSY; + else + coex_dm->bt_status = COEX_BTSTATUS_SCO_BUSY; + } else if (coex_stat->bt_info_lb2 & COEX_INFO_ACL_BUSY) { + coex_dm->bt_status = COEX_BTSTATUS_ACL_BUSY; + } else { + coex_dm->bt_status = COEX_BTSTATUS_MAX; + } + + coex_stat->cnt_bt[COEX_CNT_BT_INFOUPDATE]++; + + rtw_dbg(rtwdev, RTW_DBG_COEX, "coex: bt status(%d)\n", coex_dm->bt_status); +} + +static void rtw_coex_update_wl_ch_info(struct rtw_dev *rtwdev, u8 type) +{ + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_coex_dm *coex_dm = &rtwdev->coex.dm; + struct rtw_efuse *efuse = &rtwdev->efuse; + u8 link = 0; + u8 center_chan = 0; + u8 bw; + int i; + + bw = 
rtwdev->hal.current_band_width; + + if (type != COEX_MEDIA_DISCONNECT) + center_chan = rtwdev->hal.current_channel; + + if (center_chan == 0 || (efuse->share_ant && center_chan <= 14)) { + link = 0; + } else if (center_chan <= 14) { + link = 0x1; + + if (bw == RTW_CHANNEL_WIDTH_40) + bw = chip->bt_afh_span_bw40; + else + bw = chip->bt_afh_span_bw20; + } else if (chip->afh_5g_num > 1) { + for (i = 0; i < chip->afh_5g_num; i++) { + if (center_chan == chip->afh_5g[i].wl_5g_ch) { + link = 0x3; + center_chan = chip->afh_5g[i].bt_skip_ch; + bw = chip->afh_5g[i].bt_skip_span; + break; + } + } + } + + coex_dm->wl_ch_info[0] = link; + coex_dm->wl_ch_info[1] = center_chan; + coex_dm->wl_ch_info[2] = bw; + + rtw_fw_wl_ch_info(rtwdev, link, center_chan, bw); +} + +static void rtw_coex_set_bt_tx_power(struct rtw_dev *rtwdev, u8 bt_pwr_dec_lvl) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_dm *coex_dm = &coex->dm; + + if (bt_pwr_dec_lvl == coex_dm->cur_bt_pwr_lvl) + return; + + coex_dm->cur_bt_pwr_lvl = bt_pwr_dec_lvl; + + rtw_fw_force_bt_tx_power(rtwdev, bt_pwr_dec_lvl); +} + +static void rtw_coex_set_bt_rx_gain(struct rtw_dev *rtwdev, u8 bt_lna_lvl) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_dm *coex_dm = &coex->dm; + + if (bt_lna_lvl == coex_dm->cur_bt_lna_lvl) + return; + + coex_dm->cur_bt_lna_lvl = bt_lna_lvl; + + /* notify BT rx gain table changed */ + if (bt_lna_lvl < 7) { + rtw_coex_set_lna_constrain_level(rtwdev, bt_lna_lvl); + rtw_coex_write_scbd(rtwdev, COEX_SCBD_RXGAIN, true); + } else { + rtw_coex_write_scbd(rtwdev, COEX_SCBD_RXGAIN, false); + } +} + +static void rtw_coex_set_rf_para(struct rtw_dev *rtwdev, + struct coex_rf_para para) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + u8 offset = 0; + + if (coex->freerun && coex_stat->wl_noisy_level <= 1) + offset = 3; + + rtw_coex_set_wl_tx_power(rtwdev, para.wl_pwr_dec_lvl); + rtw_coex_set_bt_tx_power(rtwdev, para.bt_pwr_dec_lvl + offset); + rtw_coex_set_wl_rx_gain(rtwdev, para.wl_low_gain_en); + rtw_coex_set_bt_rx_gain(rtwdev, para.bt_lna_lvl); +} + +u32 rtw_coex_read_indirect_reg(struct rtw_dev *rtwdev, u16 addr) +{ + u32 val; + + if (!ltecoex_read_reg(rtwdev, addr, &val)) { + rtw_err(rtwdev, "failed to read indirect register\n"); + return 0; + } + + return val; +} + +void rtw_coex_write_indirect_reg(struct rtw_dev *rtwdev, u16 addr, + u32 mask, u32 val) +{ + u32 shift = __ffs(mask); + u32 tmp; + + tmp = rtw_coex_read_indirect_reg(rtwdev, addr); + tmp = (tmp & (~mask)) | ((val << shift) & mask); + + if (!ltecoex_reg_write(rtwdev, addr, tmp)) + rtw_err(rtwdev, "failed to write indirect register\n"); +} + +static void rtw_coex_coex_ctrl_owner(struct rtw_dev *rtwdev, bool wifi_control) +{ + if (wifi_control) + rtw_write32_set(rtwdev, REG_SYS_SDIO_CTRL, BIT_LTE_MUX_CTRL_PATH); + else + rtw_write32_clr(rtwdev, REG_SYS_SDIO_CTRL, BIT_LTE_MUX_CTRL_PATH); +} + +static void rtw_coex_set_gnt_bt(struct rtw_dev *rtwdev, u8 state) +{ + rtw_coex_write_indirect_reg(rtwdev, 0x38, 0xc000, state); + rtw_coex_write_indirect_reg(rtwdev, 0x38, 0x0c00, state); +} + +static void rtw_coex_set_gnt_wl(struct rtw_dev *rtwdev, u8 state) +{ + rtw_coex_write_indirect_reg(rtwdev, 0x38, 0x3000, state); + rtw_coex_write_indirect_reg(rtwdev, 0x38, 0x0300, state); +} + +static void rtw_coex_set_table(struct rtw_dev *rtwdev, u32 table0, u32 table1) +{ +#define DEF_BRK_TABLE_VAL 0xf0ffffff + rtw_write32(rtwdev, REG_BT_COEX_TABLE0, table0); + rtw_write32(rtwdev, REG_BT_COEX_TABLE1, table1); + 
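rtw_coex_write_indirect_reg() above is a textbook read-modify-write: __ffs(mask) yields the shift that places val under mask, and everything outside the mask is preserved. A self-contained sketch of the same arithmetic, with __builtin_ctz standing in for __ffs and a plain variable standing in for the LTE-coex indirect register at 0x38 (the 0xc000/0x0c00 masks and the 0x3 value match rtw_coex_set_gnt_bt() and COEX_GNT_SET_SW_HIGH):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t reg38;          /* stand-in for the indirect register */

    /* __ffs() equivalent: bit index of the lowest set bit (mask != 0) */
    static unsigned int lowest_bit(uint32_t mask)
    {
        return (unsigned int)__builtin_ctz(mask);
    }

    /* same update as rtw_coex_write_indirect_reg() */
    static void write_masked(uint32_t *reg, uint32_t mask, uint32_t val)
    {
        unsigned int shift = lowest_bit(mask);

        *reg = (*reg & ~mask) | ((val << shift) & mask);
    }

    int main(void)
    {
        /* GNT_BT: the driver writes the same state to two mask fields */
        write_masked(&reg38, 0xc000, 0x3);  /* COEX_GNT_SET_SW_HIGH */
        write_masked(&reg38, 0x0c00, 0x3);
        printf("reg38=0x%08x\n", (unsigned int)reg38);  /* 0x0000cc00 */
        return 0;
    }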
rtw_write32(rtwdev, REG_BT_COEX_BRK_TABLE, DEF_BRK_TABLE_VAL); +} + +static void rtw_coex_table(struct rtw_dev *rtwdev, u8 type) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_dm *coex_dm = &coex->dm; + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_efuse *efuse = &rtwdev->efuse; + + coex_dm->cur_table = type; + + if (efuse->share_ant) { + if (type < chip->table_sant_num) + rtw_coex_set_table(rtwdev, + chip->table_sant[type].bt, + chip->table_sant[type].wl); + } else { + type = type - 100; + if (type < chip->table_nsant_num) + rtw_coex_set_table(rtwdev, + chip->table_nsant[type].bt, + chip->table_nsant[type].wl); + } +} + +static void rtw_coex_ignore_wlan_act(struct rtw_dev *rtwdev, bool enable) +{ + struct rtw_coex *coex = &rtwdev->coex; + + if (coex->stop_dm) + return; + + rtw_fw_bt_ignore_wlan_action(rtwdev, enable); +} + +static void rtw_coex_power_save_state(struct rtw_dev *rtwdev, u8 ps_type, + u8 lps_val, u8 rpwm_val) +{ + struct rtw_lps_conf *lps_conf = &rtwdev->lps_conf; + struct rtw_vif *rtwvif; + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + u8 lps_mode = 0x0; + + lps_mode = rtwdev->lps_conf.mode; + + switch (ps_type) { + case COEX_PS_WIFI_NATIVE: + /* recover to original 32k low power setting */ + coex_stat->wl_force_lps_ctrl = false; + + rtwvif = lps_conf->rtwvif; + if (rtwvif && rtw_in_lps(rtwdev)) + rtw_leave_lps(rtwdev, rtwvif); + break; + case COEX_PS_LPS_OFF: + coex_stat->wl_force_lps_ctrl = true; + if (lps_mode) + rtw_fw_coex_tdma_type(rtwdev, 0x8, 0, 0, 0, 0); + + rtwvif = lps_conf->rtwvif; + if (rtwvif && rtw_in_lps(rtwdev)) + rtw_leave_lps(rtwdev, rtwvif); + break; + default: + break; + } +} + +static void rtw_coex_set_tdma(struct rtw_dev *rtwdev, u8 byte1, u8 byte2, + u8 byte3, u8 byte4, u8 byte5) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_dm *coex_dm = &coex->dm; + struct rtw_chip_info *chip = rtwdev->chip; + u8 ps_type = COEX_PS_WIFI_NATIVE; + bool ap_enable = false; + + if (ap_enable && (byte1 & BIT(4) && !(byte1 & BIT(5)))) { + byte1 &= ~BIT(4); + byte1 |= BIT(5); + + byte5 |= BIT(5); + byte5 &= ~BIT(6); + + ps_type = COEX_PS_WIFI_NATIVE; + rtw_coex_power_save_state(rtwdev, ps_type, 0x0, 0x0); + } else if (byte1 & BIT(4) && !(byte1 & BIT(5))) { + if (chip->pstdma_type == COEX_PSTDMA_FORCE_LPSOFF) + ps_type = COEX_PS_LPS_OFF; + else + ps_type = COEX_PS_LPS_ON; + rtw_coex_power_save_state(rtwdev, ps_type, 0x50, 0x4); + } else { + ps_type = COEX_PS_WIFI_NATIVE; + rtw_coex_power_save_state(rtwdev, ps_type, 0x0, 0x0); + } + + coex_dm->ps_tdma_para[0] = byte1; + coex_dm->ps_tdma_para[1] = byte2; + coex_dm->ps_tdma_para[2] = byte3; + coex_dm->ps_tdma_para[3] = byte4; + coex_dm->ps_tdma_para[4] = byte5; + + rtw_fw_coex_tdma_type(rtwdev, byte1, byte2, byte3, byte4, byte5); +} + +static void rtw_coex_tdma(struct rtw_dev *rtwdev, bool force, u32 tcase) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_dm *coex_dm = &coex->dm; + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_efuse *efuse = &rtwdev->efuse; + u8 n, type; + bool turn_on; + + if (tcase & TDMA_4SLOT)/* 4-slot (50ms) mode */ + rtw_coex_tdma_timer_base(rtwdev, 3); + else + rtw_coex_tdma_timer_base(rtwdev, 0); + + type = (u8)(tcase & 0xff); + + turn_on = (type == 0 || type == 100) ? 
false : true; + + if (!force) { + if (turn_on == coex_dm->cur_ps_tdma_on && + type == coex_dm->cur_ps_tdma) { + return; + } + } + + if (turn_on) { + /* enable TBTT interrupt */ + rtw_write8_set(rtwdev, REG_BCN_CTRL, BIT_EN_BCN_FUNCTION); + rtw_coex_write_scbd(rtwdev, COEX_SCBD_TDMA, true); + } else { + rtw_coex_write_scbd(rtwdev, COEX_SCBD_TDMA, false); + } + + if (efuse->share_ant) { + if (type < chip->tdma_sant_num) + rtw_coex_set_tdma(rtwdev, + chip->tdma_sant[type].para[0], + chip->tdma_sant[type].para[1], + chip->tdma_sant[type].para[2], + chip->tdma_sant[type].para[3], + chip->tdma_sant[type].para[4]); + } else { + n = type - 100; + if (n < chip->tdma_nsant_num) + rtw_coex_set_tdma(rtwdev, + chip->tdma_nsant[n].para[0], + chip->tdma_nsant[n].para[1], + chip->tdma_nsant[n].para[2], + chip->tdma_nsant[n].para[3], + chip->tdma_nsant[n].para[4]); + } + + /* update pre state */ + coex_dm->cur_ps_tdma_on = turn_on; + coex_dm->cur_ps_tdma = type; + + rtw_dbg(rtwdev, RTW_DBG_COEX, "coex: coex tdma type (%d)\n", type); +} + +static void rtw_coex_set_ant_path(struct rtw_dev *rtwdev, bool force, u8 phase) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_coex_dm *coex_dm = &coex->dm; + u8 ctrl_type = COEX_SWITCH_CTRL_MAX; + u8 pos_type = COEX_SWITCH_TO_MAX; + + if (!force && coex_dm->cur_ant_pos_type == phase) + return; + + coex_dm->cur_ant_pos_type = phase; + + /* avoid switch coex_ctrl_owner during BT IQK */ + rtw_coex_check_rfk(rtwdev); + + switch (phase) { + case COEX_SET_ANT_POWERON: + /* set path control owner to BT at power-on */ + if (coex_stat->bt_disabled) + rtw_coex_coex_ctrl_owner(rtwdev, true); + else + rtw_coex_coex_ctrl_owner(rtwdev, false); + + ctrl_type = COEX_SWITCH_CTRL_BY_BBSW; + pos_type = COEX_SWITCH_TO_BT; + break; + case COEX_SET_ANT_INIT: + if (coex_stat->bt_disabled) { + /* set GNT_BT to SW low */ + rtw_coex_set_gnt_bt(rtwdev, COEX_GNT_SET_SW_LOW); + + /* set GNT_WL to SW high */ + rtw_coex_set_gnt_wl(rtwdev, COEX_GNT_SET_SW_HIGH); + } else { + /* set GNT_BT to SW high */ + rtw_coex_set_gnt_bt(rtwdev, COEX_GNT_SET_SW_HIGH); + + /* set GNT_WL to SW low */ + rtw_coex_set_gnt_wl(rtwdev, COEX_GNT_SET_SW_LOW); + } + + /* set path control owner to wl at initial step */ + rtw_coex_coex_ctrl_owner(rtwdev, true); + + ctrl_type = COEX_SWITCH_CTRL_BY_BBSW; + pos_type = COEX_SWITCH_TO_BT; + break; + case COEX_SET_ANT_WONLY: + /* set GNT_BT to SW Low */ + rtw_coex_set_gnt_bt(rtwdev, COEX_GNT_SET_SW_LOW); + + /* Set GNT_WL to SW high */ + rtw_coex_set_gnt_wl(rtwdev, COEX_GNT_SET_SW_HIGH); + + /* set path control owner to wl at initial step */ + rtw_coex_coex_ctrl_owner(rtwdev, true); + + ctrl_type = COEX_SWITCH_CTRL_BY_BBSW; + pos_type = COEX_SWITCH_TO_WLG; + break; + case COEX_SET_ANT_WOFF: + /* set path control owner to BT */ + rtw_coex_coex_ctrl_owner(rtwdev, false); + + ctrl_type = COEX_SWITCH_CTRL_BY_BT; + pos_type = COEX_SWITCH_TO_NOCARE; + break; + case COEX_SET_ANT_2G: + /* set GNT_BT to PTA */ + rtw_coex_set_gnt_bt(rtwdev, COEX_GNT_SET_HW_PTA); + + /* set GNT_WL to PTA */ + rtw_coex_set_gnt_wl(rtwdev, COEX_GNT_SET_HW_PTA); + + /* set path control owner to wl at runtime step */ + rtw_coex_coex_ctrl_owner(rtwdev, true); + + ctrl_type = COEX_SWITCH_CTRL_BY_PTA; + pos_type = COEX_SWITCH_TO_NOCARE; + break; + case COEX_SET_ANT_5G: + /* set GNT_BT to PTA */ + rtw_coex_set_gnt_bt(rtwdev, COEX_GNT_SET_SW_HIGH); + + /* set GNT_WL to SW high */ + rtw_coex_set_gnt_wl(rtwdev, COEX_GNT_SET_SW_HIGH); + + /* set path control owner to 
wl at runtime step */ + rtw_coex_coex_ctrl_owner(rtwdev, true); + + ctrl_type = COEX_SWITCH_CTRL_BY_BBSW; + pos_type = COEX_SWITCH_TO_WLA; + break; + case COEX_SET_ANT_2G_FREERUN: + /* set GNT_BT to SW high */ + rtw_coex_set_gnt_bt(rtwdev, COEX_GNT_SET_SW_HIGH); + + /* Set GNT_WL to SW high */ + rtw_coex_set_gnt_wl(rtwdev, COEX_GNT_SET_SW_HIGH); + + /* set path control owner to wl at runtime step */ + rtw_coex_coex_ctrl_owner(rtwdev, true); + + ctrl_type = COEX_SWITCH_CTRL_BY_BBSW; + pos_type = COEX_SWITCH_TO_WLG_BT; + break; + case COEX_SET_ANT_2G_WLBT: + /* set GNT_BT to SW high */ + rtw_coex_set_gnt_bt(rtwdev, COEX_GNT_SET_HW_PTA); + + /* Set GNT_WL to SW high */ + rtw_coex_set_gnt_wl(rtwdev, COEX_GNT_SET_HW_PTA); + + /* set path control owner to wl at runtime step */ + rtw_coex_coex_ctrl_owner(rtwdev, true); + + ctrl_type = COEX_SWITCH_CTRL_BY_BBSW; + pos_type = COEX_SWITCH_TO_WLG_BT; + break; + default: + WARN(1, "unknown phase when setting antenna path\n"); + return; + } + + if (ctrl_type < COEX_SWITCH_CTRL_MAX && pos_type < COEX_SWITCH_TO_MAX) + rtw_coex_set_ant_switch(rtwdev, ctrl_type, pos_type); +} + +static u8 rtw_coex_algorithm(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + u8 algorithm = COEX_ALGO_NOPROFILE; + u8 profile_map = 0; + + if (coex_stat->bt_hfp_exist) + profile_map |= BPM_HFP; + if (coex_stat->bt_hid_exist) + profile_map |= BPM_HID; + if (coex_stat->bt_a2dp_exist) + profile_map |= BPM_A2DP; + if (coex_stat->bt_pan_exist) + profile_map |= BPM_PAN; + + switch (profile_map) { + case BPM_HFP: + algorithm = COEX_ALGO_HFP; + break; + case BPM_HID: + case BPM_HFP + BPM_HID: + algorithm = COEX_ALGO_HID; + break; + case BPM_HFP + BPM_A2DP: + case BPM_HID + BPM_A2DP: + case BPM_HFP + BPM_HID + BPM_A2DP: + algorithm = COEX_ALGO_A2DP_HID; + break; + case BPM_HFP + BPM_PAN: + case BPM_HID + BPM_PAN: + case BPM_HFP + BPM_HID + BPM_PAN: + algorithm = COEX_ALGO_PAN_HID; + break; + case BPM_HFP + BPM_A2DP + BPM_PAN: + case BPM_HID + BPM_A2DP + BPM_PAN: + case BPM_HFP + BPM_HID + BPM_A2DP + BPM_PAN: + algorithm = COEX_ALGO_A2DP_PAN_HID; + break; + case BPM_PAN: + algorithm = COEX_ALGO_PAN; + break; + case BPM_A2DP + BPM_PAN: + algorithm = COEX_ALGO_A2DP_PAN; + break; + case BPM_A2DP: + if (coex_stat->bt_multi_link) { + if (coex_stat->bt_hid_pair_num > 0) + algorithm = COEX_ALGO_A2DP_HID; + else + algorithm = COEX_ALGO_A2DP_PAN; + } else { + algorithm = COEX_ALGO_A2DP; + } + break; + default: + algorithm = COEX_ALGO_NOPROFILE; + break; + } + + return algorithm; +} + +static void rtw_coex_action_coex_all_off(struct rtw_dev *rtwdev) +{ + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; + u8 table_case, tdma_case; + + if (efuse->share_ant) { + /* Shared-Ant */ + table_case = 2; + tdma_case = 0; + } else { + /* Non-Shared-Ant */ + table_case = 100; + tdma_case = 100; + } + + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + rtw_coex_table(rtwdev, table_case); + rtw_coex_tdma(rtwdev, false, tdma_case); +} + +static void rtw_coex_action_freerun(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_coex_dm *coex_dm = &coex->dm; + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; + u8 level = 0; + + if (efuse->share_ant) + return; + + coex->freerun = true; + + if (coex_stat->wl_connected) + rtw_coex_update_wl_ch_info(rtwdev, COEX_MEDIA_CONNECT); + + 
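rtw_coex_algorithm() above folds the four profile-exist flags into a bitmap and maps every combination onto one tuning strategy, so the action dispatch below stays a flat switch. A compilable sketch of the same reduction, minus the A2DP multi-link special case (the enum numbering here is illustrative, not the driver's COEX_ALGO_* values):

    #include <stdint.h>
    #include <stdio.h>

    #define BPM_HFP  (1 << 0)
    #define BPM_HID  (1 << 1)
    #define BPM_A2DP (1 << 2)
    #define BPM_PAN  (1 << 3)

    enum algo { NOPROFILE, HFP, HID, A2DP, PAN,
                A2DP_HID, A2DP_PAN, PAN_HID, A2DP_PAN_HID };

    static enum algo pick_algo(uint8_t map)
    {
        switch (map) {
        case BPM_HFP:                           return HFP;
        case BPM_HID:
        case BPM_HFP + BPM_HID:                 return HID;
        case BPM_HFP + BPM_A2DP:
        case BPM_HID + BPM_A2DP:
        case BPM_HFP + BPM_HID + BPM_A2DP:      return A2DP_HID;
        case BPM_HFP + BPM_PAN:
        case BPM_HID + BPM_PAN:
        case BPM_HFP + BPM_HID + BPM_PAN:       return PAN_HID;
        case BPM_HFP + BPM_A2DP + BPM_PAN:
        case BPM_HID + BPM_A2DP + BPM_PAN:
        case BPM_HFP + BPM_HID + BPM_A2DP + BPM_PAN:
                                                return A2DP_PAN_HID;
        case BPM_PAN:                           return PAN;
        case BPM_A2DP + BPM_PAN:                return A2DP_PAN;
        case BPM_A2DP:                          return A2DP;
        default:                                return NOPROFILE;
        }
    }

    int main(void)
    {
        printf("%d\n", pick_algo(BPM_HID | BPM_A2DP));  /* A2DP_HID (5) */
        return 0;
    }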
rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G_FREERUN); + + rtw_coex_write_scbd(rtwdev, COEX_SCBD_FIX2M, false); + + if (COEX_RSSI_HIGH(coex_dm->wl_rssi_state[0])) + level = 2; + else if (COEX_RSSI_HIGH(coex_dm->wl_rssi_state[1])) + level = 3; + else if (COEX_RSSI_HIGH(coex_dm->wl_rssi_state[2])) + level = 4; + else + level = 5; + + if (level > chip->wl_rf_para_num - 1) + level = chip->wl_rf_para_num - 1; + + if (coex_stat->wl_tput_dir == COEX_WL_TPUT_TX) + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_tx[level]); + else + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[level]); + + rtw_coex_table(rtwdev, 100); + rtw_coex_tdma(rtwdev, false, 100); +} + +static void rtw_coex_action_bt_whql_test(struct rtw_dev *rtwdev) +{ + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; + u8 table_case, tdma_case; + + if (efuse->share_ant) { + /* Shared-Ant */ + table_case = 2; + tdma_case = 0; + } else { + /* Non-Shared-Ant */ + table_case = 100; + tdma_case = 100; + } + + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + rtw_coex_table(rtwdev, table_case); + rtw_coex_tdma(rtwdev, false, tdma_case); +} + +static void rtw_coex_action_bt_relink(struct rtw_dev *rtwdev) +{ + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; + u8 table_case, tdma_case; + + if (efuse->share_ant) { + /* Shared-Ant */ + table_case = 1; + tdma_case = 0; + } else { + /* Non-Shared-Ant */ + table_case = 100; + tdma_case = 100; + } + + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + rtw_coex_table(rtwdev, table_case); + rtw_coex_tdma(rtwdev, false, tdma_case); +} + +static void rtw_coex_action_bt_idle(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_coex_dm *coex_dm = &coex->dm; + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_coex_rfe *coex_rfe = &coex->rfe; + u8 table_case = 0xff, tdma_case = 0xff; + + if (coex_rfe->ant_switch_with_bt && + coex_dm->bt_status == COEX_BTSTATUS_NCON_IDLE) { + if (efuse->share_ant && + COEX_RSSI_HIGH(coex_dm->wl_rssi_state[1])) { + table_case = 0; + tdma_case = 0; + } else if (!efuse->share_ant) { + table_case = 100; + tdma_case = 100; + } + } + + if (table_case != 0xff && tdma_case != 0xff) { + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G_FREERUN); + rtw_coex_table(rtwdev, table_case); + rtw_coex_tdma(rtwdev, false, tdma_case); + return; + } + + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + + if (efuse->share_ant) { + /* Shared-Ant */ + if (!coex_stat->wl_gl_busy) { + table_case = 10; + tdma_case = 3; + } else if (coex_dm->bt_status == COEX_BTSTATUS_NCON_IDLE) { + table_case = 6; + tdma_case = 7; + } else { + table_case = 12; + tdma_case = 7; + } + } else { + /* Non-Shared-Ant */ + if (!coex_stat->wl_gl_busy) { + table_case = 112; + tdma_case = 104; + } else if ((coex_stat->bt_ble_scan_type & 0x2) && + coex_dm->bt_status == COEX_BTSTATUS_NCON_IDLE) { + table_case = 114; + tdma_case = 103; + } else { + table_case = 112; + tdma_case = 103; + } + } + + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + rtw_coex_table(rtwdev, table_case); + rtw_coex_tdma(rtwdev, false, tdma_case); +} + +static void rtw_coex_action_bt_inquiry(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + 
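The ladder at the top of rtw_coex_action_freerun() maps the WL RSSI thresholds onto progressively stronger RF parameter sets and then clamps to the per-chip table size. A standalone sketch of that selection, with the table size assumed rather than read from rtw_chip_info:

    #include <stdbool.h>
    #include <stdio.h>

    #define WL_RF_PARA_NUM 5        /* per-chip table size; assumed */

    static int freerun_level(const bool rssi_high[3])
    {
        int level;

        /* first threshold that reads "high" wins, as in the driver */
        if (rssi_high[0])
            level = 2;
        else if (rssi_high[1])
            level = 3;
        else if (rssi_high[2])
            level = 4;
        else
            level = 5;

        if (level > WL_RF_PARA_NUM - 1)     /* clamp to the table */
            level = WL_RF_PARA_NUM - 1;
        return level;
    }

    int main(void)
    {
        bool states[3] = { false, true, false };

        printf("level=%d\n", freerun_level(states));    /* 3 */
        return 0;
    }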
struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; + bool wl_hi_pri = false; + u8 table_case, tdma_case; + + if (coex_stat->wl_linkscan_proc || coex_stat->wl_hi_pri_task1 || + coex_stat->wl_hi_pri_task2) + wl_hi_pri = true; + + if (efuse->share_ant) { + /* Shared-Ant */ + if (wl_hi_pri) { + table_case = 15; + if (coex_stat->bt_a2dp_exist && + !coex_stat->bt_pan_exist) + tdma_case = 11; + else if (coex_stat->wl_hi_pri_task1) + tdma_case = 6; + else if (!coex_stat->bt_page) + tdma_case = 8; + else + tdma_case = 9; + } else if (coex_stat->wl_connected) { + table_case = 10; + tdma_case = 10; + } else { + table_case = 1; + tdma_case = 0; + } + } else { + /* Non-Shared-Ant */ + if (wl_hi_pri) { + table_case = 113; + if (coex_stat->bt_a2dp_exist && + !coex_stat->bt_pan_exist) + tdma_case = 111; + else if (coex_stat->wl_hi_pri_task1) + tdma_case = 106; + else if (!coex_stat->bt_page) + tdma_case = 108; + else + tdma_case = 109; + } else if (coex_stat->wl_connected) { + table_case = 101; + tdma_case = 110; + } else { + table_case = 100; + tdma_case = 100; + } + } + + rtw_dbg(rtwdev, RTW_DBG_COEX, "coex: wifi hi(%d), bt page(%d)\n", + wl_hi_pri, coex_stat->bt_page); + + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + rtw_coex_table(rtwdev, table_case); + rtw_coex_tdma(rtwdev, false, tdma_case); +} + +static void rtw_coex_action_bt_hfp(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; + u8 table_case, tdma_case; + + if (efuse->share_ant) { + /* Shared-Ant */ + if (coex_stat->bt_multi_link) { + table_case = 10; + tdma_case = 17; + } else { + table_case = 10; + tdma_case = 5; + } + } else { + /* Non-Shared-Ant */ + if (coex_stat->bt_multi_link) { + table_case = 112; + tdma_case = 117; + } else { + table_case = 105; + tdma_case = 100; + } + } + + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + rtw_coex_table(rtwdev, table_case); + rtw_coex_tdma(rtwdev, false, tdma_case); +} + +static void rtw_coex_action_bt_hid(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; + u8 table_case, tdma_case; + u32 wl_bw; + + wl_bw = rtwdev->hal.current_band_width; + + if (efuse->share_ant) { + /* Shared-Ant */ + if (coex_stat->bt_ble_exist) { + /* RCU */ + if (!coex_stat->wl_gl_busy) + table_case = 14; + else + table_case = 15; + + if (coex_stat->bt_a2dp_active || wl_bw == 0) + tdma_case = 18; + else if (coex_stat->wl_gl_busy) + tdma_case = 8; + else + tdma_case = 4; + } else { + if (coex_stat->bt_a2dp_active || wl_bw == 0) { + table_case = 8; + tdma_case = 4; + } else { + /* for 4/18 HID */ + if (coex_stat->bt_418_hid_exist && + coex_stat->wl_gl_busy) + table_case = 12; + else + table_case = 10; + tdma_case = 4; + } + } + } else { + /* Non-Shared-Ant */ + if (coex_stat->bt_a2dp_active) { + table_case = 113; + tdma_case = 118; + } else if (coex_stat->bt_ble_exist) { + /* BLE */ + table_case = 113; + + if (coex_stat->wl_gl_busy) + tdma_case = 106; + else + tdma_case = 104; + } else { + table_case = 113; + tdma_case = 104; + } + } + + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); +
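These action handlers encode the antenna configuration in the case number itself: values below 100 index the shared-antenna tables, values of 100 and up index the non-shared tables after the offset is subtracted, which is exactly how rtw_coex_table() and rtw_coex_tdma() decode them. A sketch of the convention, with made-up table sizes:

    #include <stdint.h>
    #include <stdio.h>

    #define TABLE_SANT_NUM  30      /* per-chip sizes; values assumed */
    #define TABLE_NSANT_NUM 30

    /* returns the table index, or -1 when the case is out of range */
    static int resolve_case(uint8_t type, int share_ant)
    {
        if (share_ant)
            return type < TABLE_SANT_NUM ? type : -1;
        type = (uint8_t)(type - 100);   /* u8 wrap, as in the driver */
        return type < TABLE_NSANT_NUM ? type : -1;
    }

    int main(void)
    {
        /* shared-ant case 10, non-shared case 113 -> indexes 10 and 13 */
        printf("%d %d\n", resolve_case(10, 1), resolve_case(113, 0));
        return 0;
    }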
rtw_coex_table(rtwdev, table_case); + rtw_coex_tdma(rtwdev, false, tdma_case); +} + +static void rtw_coex_action_bt_a2dp(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_coex_dm *coex_dm = &coex->dm; + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; + u8 table_case, tdma_case; + u32 slot_type = 0; + + if (efuse->share_ant) { + /* Shared-Ant */ + if (coex_stat->wl_gl_busy && coex_stat->wl_noisy_level == 0) + table_case = 10; + else + table_case = 9; + + slot_type = TDMA_4SLOT; + + if (coex_stat->wl_gl_busy) + tdma_case = 13; + else + tdma_case = 14; + } else { + /* Non-Shared-Ant */ + table_case = 112; + + if (COEX_RSSI_HIGH(coex_dm->wl_rssi_state[1])) + tdma_case = 112; + else + tdma_case = 113; + } + + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + rtw_coex_table(rtwdev, table_case); + rtw_coex_tdma(rtwdev, false, tdma_case | slot_type); +} + +static void rtw_coex_action_bt_a2dpsink(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; + u8 table_case, tdma_case; + bool ap_enable = false; + + if (efuse->share_ant) { /* Shared-Ant */ + if (ap_enable) { + table_case = 2; + tdma_case = 0; + } else if (coex_stat->wl_gl_busy) { + table_case = 28; + tdma_case = 20; + } else { + table_case = 28; + tdma_case = 26; + } + } else { /* Non-Shared-Ant */ + if (ap_enable) { + table_case = 100; + tdma_case = 100; + } else { + table_case = 119; + tdma_case = 120; + } + } + + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + rtw_coex_table(rtwdev, table_case); + rtw_coex_tdma(rtwdev, false, tdma_case); +} + +static void rtw_coex_action_bt_pan(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; + u8 table_case, tdma_case; + + if (efuse->share_ant) { + /* Shared-Ant */ + if (coex_stat->wl_gl_busy && coex_stat->wl_noisy_level == 0) + table_case = 14; + else + table_case = 10; + + if (coex_stat->wl_gl_busy) + tdma_case = 17; + else + tdma_case = 19; + } else { + /* Non-Shared-Ant */ + table_case = 112; + + if (coex_stat->wl_gl_busy) + tdma_case = 117; + else + tdma_case = 119; + } + + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + rtw_coex_table(rtwdev, table_case); + rtw_coex_tdma(rtwdev, false, tdma_case); +} + +static void rtw_coex_action_bt_a2dp_hid(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_coex_dm *coex_dm = &coex->dm; + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; + u8 table_case, tdma_case; + u32 slot_type = 0; + + if (efuse->share_ant) { + /* Shared-Ant */ + if (coex_stat->bt_ble_exist) + table_case = 26; + else + table_case = 9; + + if (coex_stat->wl_gl_busy) { + slot_type = TDMA_4SLOT; + tdma_case = 13; + } else { + tdma_case = 14; + } + } else { + /* Non-Shared-Ant */ + if (coex_stat->bt_ble_exist) + table_case = 121; + else + table_case = 113; + + if (COEX_RSSI_HIGH(coex_dm->wl_rssi_state[1])) + tdma_case = 112; + else + tdma_case = 113; + } + + 
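Note how the A2DP handlers hand rtw_coex_tdma() a case number OR'ed with a slot flag; the decoder pulls the flag back out with TDMA_4SLOT (BIT(8)), keeps the low byte as the case, and treats cases 0 and 100 as "TDMA off". A standalone decode sketch:

    #include <stdint.h>
    #include <stdio.h>

    #define TDMA_4SLOT (1u << 8)    /* BIT(8), from coex.h */

    static void decode_tcase(uint32_t tcase)
    {
        uint8_t type = (uint8_t)(tcase & 0xff);
        int four_slot = !!(tcase & TDMA_4SLOT);
        /* cases 0 and 100 mean "off" in both antenna configs */
        int turn_on = !(type == 0 || type == 100);

        printf("case=%u 4slot=%d on=%d\n", type, four_slot, turn_on);
    }

    int main(void)
    {
        decode_tcase(13 | TDMA_4SLOT);  /* busy shared-ant A2DP */
        decode_tcase(100);              /* non-shared "all off" */
        return 0;
    }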
rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + rtw_coex_table(rtwdev, table_case); + rtw_coex_tdma(rtwdev, false, tdma_case | slot_type); +} + +static void rtw_coex_action_bt_a2dp_pan(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; + u8 table_case, tdma_case; + + if (efuse->share_ant) { + /* Shared-Ant */ + if (coex_stat->wl_gl_busy && + coex_stat->wl_noisy_level == 0) + table_case = 14; + else + table_case = 10; + + if (coex_stat->wl_gl_busy) + tdma_case = 15; + else + tdma_case = 20; + } else { + /* Non-Shared-Ant */ + table_case = 112; + + if (coex_stat->wl_gl_busy) + tdma_case = 115; + else + tdma_case = 120; + } + + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + rtw_coex_table(rtwdev, table_case); + rtw_coex_tdma(rtwdev, false, tdma_case); +} + +static void rtw_coex_action_bt_pan_hid(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; + u8 table_case, tdma_case; + + if (efuse->share_ant) { + /* Shared-Ant */ + table_case = 9; + + if (coex_stat->wl_gl_busy) + tdma_case = 18; + else + tdma_case = 19; + } else { + /* Non-Shared-Ant */ + table_case = 113; + + if (coex_stat->wl_gl_busy) + tdma_case = 117; + else + tdma_case = 119; + } + + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + rtw_coex_table(rtwdev, table_case); + rtw_coex_tdma(rtwdev, false, tdma_case); +} + +static void rtw_coex_action_bt_a2dp_pan_hid(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; + u8 table_case, tdma_case; + + if (efuse->share_ant) { + /* Shared-Ant */ + table_case = 10; + + if (coex_stat->wl_gl_busy) + tdma_case = 15; + else + tdma_case = 20; + } else { + /* Non-Shared-Ant */ + table_case = 113; + + if (coex_stat->wl_gl_busy) + tdma_case = 115; + else + tdma_case = 120; + } + + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + rtw_coex_table(rtwdev, table_case); + rtw_coex_tdma(rtwdev, false, tdma_case); +} + +static void rtw_coex_action_wl_under5g(struct rtw_dev *rtwdev) +{ + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; + u8 table_case, tdma_case; + + rtw_coex_write_scbd(rtwdev, COEX_SCBD_FIX2M, false); + + if (efuse->share_ant) { + /* Shared-Ant */ + table_case = 0; + tdma_case = 0; + } else { + /* Non-Shared-Ant */ + table_case = 100; + tdma_case = 100; + } + + rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_5G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + rtw_coex_table(rtwdev, table_case); + rtw_coex_tdma(rtwdev, false, tdma_case); +} + +static void rtw_coex_action_wl_only(struct rtw_dev *rtwdev) +{ + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; + u8 table_case, tdma_case; + + if (efuse->share_ant) { + /* Shared-Ant */ + table_case = 2; + tdma_case = 0; + } else { + /* Non-Shared-Ant */ + table_case = 100; + tdma_case = 100; + } + + rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_2G); + 
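Scoreboard writes such as the COEX_SCBD_FIX2M clear in rtw_coex_action_wl_under5g() publish the WL state to BT through a 16-bit bitmap. rtw_coex_write_scbd() itself is declared in coex.h but not shown here, so the sketch below only assumes the obvious shape: cache the bitmap, flip the requested bits, and skip the hardware write when nothing changed:

    #include <stdint.h>
    #include <stdio.h>

    /* a few WL->BT bits from enum coex_wl2bt_scoreboard */
    #define SCBD_ACTIVE (1u << 0)
    #define SCBD_SCAN   (1u << 2)
    #define SCBD_FIX2M  (1u << 10)

    static uint16_t scoreboard;     /* cached copy of the register */

    static void write_scbd(uint16_t bitpos, int set)
    {
        uint16_t val = set ? (scoreboard | bitpos)
                           : (scoreboard & (uint16_t)~bitpos);

        if (val == scoreboard)      /* assumed: skip redundant writes */
            return;
        scoreboard = val;
        /* the real driver would push the value to the scoreboard register */
    }

    int main(void)
    {
        write_scbd(SCBD_ACTIVE | SCBD_SCAN, 1);
        write_scbd(SCBD_FIX2M, 0);
        printf("scbd=0x%04x\n", (unsigned int)scoreboard);  /* 0x0005 */
        return 0;
    }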
rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + rtw_coex_table(rtwdev, table_case); + rtw_coex_tdma(rtwdev, false, tdma_case); +} + +static void rtw_coex_action_wl_native_lps(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; + u8 table_case, tdma_case; + + if (coex->under_5g) + return; + + if (efuse->share_ant) { + /* Shared-Ant */ + table_case = 28; + tdma_case = 0; + } else { + /* Non-Shared-Ant */ + table_case = 100; + tdma_case = 100; + } + + rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + rtw_coex_table(rtwdev, table_case); + rtw_coex_tdma(rtwdev, false, tdma_case); +} + +static void rtw_coex_action_wl_linkscan(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; + u8 table_case, tdma_case; + + if (efuse->share_ant) { + /* Shared-Ant */ + if (coex_stat->bt_a2dp_exist) { + table_case = 9; + tdma_case = 11; + } else { + table_case = 9; + tdma_case = 7; + } + } else { + /* Non-Shared-Ant */ + if (coex_stat->bt_a2dp_exist) { + table_case = 112; + tdma_case = 111; + } else { + table_case = 112; + tdma_case = 107; + } + } + + rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + rtw_coex_table(rtwdev, table_case); + rtw_coex_tdma(rtwdev, false, tdma_case); +} + +static void rtw_coex_action_wl_not_connected(struct rtw_dev *rtwdev) +{ + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; + u8 table_case, tdma_case; + + if (efuse->share_ant) { + /* Shared-Ant */ + table_case = 1; + tdma_case = 0; + } else { + /* Non-Shared-Ant */ + table_case = 100; + tdma_case = 100; + } + + rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + rtw_coex_table(rtwdev, table_case); + rtw_coex_tdma(rtwdev, false, tdma_case); +} + +static void rtw_coex_action_wl_connected(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_coex_dm *coex_dm = &coex->dm; + struct rtw_efuse *efuse = &rtwdev->efuse; + u8 algorithm; + + /* Non-Shared-Ant */ + if (!efuse->share_ant && coex_stat->wl_gl_busy && + COEX_RSSI_HIGH(coex_dm->wl_rssi_state[3]) && + COEX_RSSI_HIGH(coex_dm->bt_rssi_state[0])) { + rtw_coex_action_freerun(rtwdev); + return; + } + + algorithm = rtw_coex_algorithm(rtwdev); + + switch (algorithm) { + case COEX_ALGO_HFP: + rtw_coex_action_bt_hfp(rtwdev); + break; + case COEX_ALGO_HID: + rtw_coex_action_bt_hid(rtwdev); + break; + case COEX_ALGO_A2DP: + if (coex_stat->bt_a2dp_sink) + rtw_coex_action_bt_a2dpsink(rtwdev); + else + rtw_coex_action_bt_a2dp(rtwdev); + break; + case COEX_ALGO_PAN: + rtw_coex_action_bt_pan(rtwdev); + break; + case COEX_ALGO_A2DP_HID: + rtw_coex_action_bt_a2dp_hid(rtwdev); + break; + case COEX_ALGO_A2DP_PAN: + rtw_coex_action_bt_a2dp_pan(rtwdev); + break; + case COEX_ALGO_PAN_HID: + rtw_coex_action_bt_pan_hid(rtwdev); + break; + case COEX_ALGO_A2DP_PAN_HID: + rtw_coex_action_bt_a2dp_pan_hid(rtwdev); + break; + default: + case COEX_ALGO_NOPROFILE: + rtw_coex_action_bt_idle(rtwdev); + break; + } +} + +static void rtw_coex_run_coex(struct rtw_dev *rtwdev, u8 reason) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_dm *coex_dm = 
&coex->dm; + struct rtw_coex_stat *coex_stat = &coex->stat; + + lockdep_assert_held(&rtwdev->mutex); + + coex_dm->reason = reason; + + /* update wifi_link_info_ext variable */ + rtw_coex_update_wl_link_info(rtwdev, reason); + + rtw_coex_monitor_bt_enable(rtwdev); + + if (coex->stop_dm) + return; + + if (coex_stat->wl_under_ips) + return; + + if (coex->freeze && !coex_stat->bt_setup_link) + return; + + coex_stat->cnt_wl[COEX_CNT_WL_COEXRUN]++; + coex->freerun = false; + + /* Pure-5G Coex Process */ + if (coex->under_5g) { + coex_stat->wl_coex_mode = COEX_WLINK_5G; + rtw_coex_action_wl_under5g(rtwdev); + goto exit; + } + + coex_stat->wl_coex_mode = COEX_WLINK_2G1PORT; + rtw_coex_write_scbd(rtwdev, COEX_SCBD_FIX2M, false); + if (coex_stat->bt_disabled) { + rtw_coex_action_wl_only(rtwdev); + goto exit; + } + + if (coex_stat->wl_under_lps && !coex_stat->wl_force_lps_ctrl) { + rtw_coex_action_wl_native_lps(rtwdev); + goto exit; + } + + if (coex_stat->bt_whck_test) { + rtw_coex_action_bt_whql_test(rtwdev); + goto exit; + } + + if (coex_stat->bt_setup_link) { + rtw_coex_action_bt_relink(rtwdev); + goto exit; + } + + if (coex_stat->bt_inq_page) { + rtw_coex_action_bt_inquiry(rtwdev); + goto exit; + } + + if ((coex_dm->bt_status == COEX_BTSTATUS_NCON_IDLE || + coex_dm->bt_status == COEX_BTSTATUS_CON_IDLE) && + coex_stat->wl_connected) { + rtw_coex_action_bt_idle(rtwdev); + goto exit; + } + + if (coex_stat->wl_linkscan_proc) { + rtw_coex_action_wl_linkscan(rtwdev); + goto exit; + } + + if (coex_stat->wl_connected) + rtw_coex_action_wl_connected(rtwdev); + else + rtw_coex_action_wl_not_connected(rtwdev); + +exit: + rtw_coex_set_gnt_fix(rtwdev); + rtw_coex_limited_wl(rtwdev); +} + +static void rtw_coex_init_coex_var(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_coex_dm *coex_dm = &coex->dm; + u8 i; + + memset(coex_dm, 0, sizeof(*coex_dm)); + memset(coex_stat, 0, sizeof(*coex_stat)); + + for (i = 0; i < COEX_CNT_WL_MAX; i++) + coex_stat->cnt_wl[i] = 0; + + for (i = 0; i < COEX_CNT_BT_MAX; i++) + coex_stat->cnt_bt[i] = 0; + + for (i = 0; i < ARRAY_SIZE(coex_dm->bt_rssi_state); i++) + coex_dm->bt_rssi_state[i] = COEX_RSSI_STATE_LOW; + + for (i = 0; i < ARRAY_SIZE(coex_dm->wl_rssi_state); i++) + coex_dm->wl_rssi_state[i] = COEX_RSSI_STATE_LOW; + + coex_stat->wl_coex_mode = COEX_WLINK_MAX; +} + +static void __rtw_coex_init_hw_config(struct rtw_dev *rtwdev, bool wifi_only) +{ + struct rtw_coex *coex = &rtwdev->coex; + + rtw_coex_init_coex_var(rtwdev); + rtw_coex_monitor_bt_enable(rtwdev); + rtw_coex_set_rfe_type(rtwdev); + rtw_coex_set_init(rtwdev); + + /* set Tx response = Hi-Pri (ex: Transmitting ACK,BA,CTS) */ + rtw_coex_set_wl_pri_mask(rtwdev, COEX_WLPRI_TX_RSP, 1); + + /* set Tx beacon = Hi-Pri */ + rtw_coex_set_wl_pri_mask(rtwdev, COEX_WLPRI_TX_BEACON, 1); + + /* set Tx beacon queue = Hi-Pri */ + rtw_coex_set_wl_pri_mask(rtwdev, COEX_WLPRI_TX_BEACONQ, 1); + + /* antenna config */ + if (coex->wl_rf_off) { + rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_WOFF); + rtw_coex_write_scbd(rtwdev, COEX_SCBD_ALL, false); + coex->stop_dm = true; + } else if (wifi_only) { + rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_WONLY); + rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE | COEX_SCBD_SCAN, + true); + coex->stop_dm = true; + } else { + rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_INIT); + rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE | COEX_SCBD_SCAN, + true); + coex->stop_dm = false; + coex->freeze = true; + } + + /* PTA 
parameter */ + rtw_coex_table(rtwdev, 0); + rtw_coex_tdma(rtwdev, true, 0); + rtw_coex_query_bt_info(rtwdev); +} + +void rtw_coex_power_on_setting(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + + coex->stop_dm = true; + coex->wl_rf_off = false; + + /* enable BB, we can write 0x948 */ + rtw_write8_set(rtwdev, REG_SYS_FUNC_EN, BIT(0) | BIT(1)); + + rtw_coex_monitor_bt_enable(rtwdev); + rtw_coex_set_rfe_type(rtwdev); + + /* set antenna path to BT */ + rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_POWERON); + + /* red x issue */ + rtw_write8(rtwdev, 0xff1a, 0x0); +} + +void rtw_coex_init_hw_config(struct rtw_dev *rtwdev, bool wifi_only) +{ + __rtw_coex_init_hw_config(rtwdev, wifi_only); +} + +void rtw_coex_ips_notify(struct rtw_dev *rtwdev, u8 type) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + + if (coex->stop_dm) + return; + + if (type == COEX_IPS_ENTER) { + coex_stat->wl_under_ips = true; + + /* for lps off */ + rtw_coex_write_scbd(rtwdev, COEX_SCBD_ALL, false); + + rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_WOFF); + rtw_coex_action_coex_all_off(rtwdev); + } else if (type == COEX_IPS_LEAVE) { + rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE | COEX_SCBD_ONOFF, true); + + /* run init hw config (exclude wifi only) */ + __rtw_coex_init_hw_config(rtwdev, false); + /* sw all off */ + + coex_stat->wl_under_ips = false; + } +} + +void rtw_coex_lps_notify(struct rtw_dev *rtwdev, u8 type) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + + if (coex->stop_dm) + return; + + if (type == COEX_LPS_ENABLE) { + coex_stat->wl_under_lps = true; + + if (coex_stat->wl_force_lps_ctrl) { + /* for ps-tdma */ + rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE, true); + } else { + /* for native ps */ + rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE, false); + + rtw_coex_run_coex(rtwdev, COEX_RSN_LPS); + } + } else if (type == COEX_LPS_DISABLE) { + coex_stat->wl_under_lps = false; + + /* for lps off */ + rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE, true); + + if (!coex_stat->wl_force_lps_ctrl) + rtw_coex_query_bt_info(rtwdev); + } +} + +void rtw_coex_scan_notify(struct rtw_dev *rtwdev, u8 type) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + + if (coex->stop_dm) + return; + + coex->freeze = false; + + if (type != COEX_SCAN_FINISH) + rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE | COEX_SCBD_SCAN | + COEX_SCBD_ONOFF, true); + + if (type == COEX_SCAN_START_5G) { + rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_5G); + rtw_coex_run_coex(rtwdev, COEX_RSN_5GSCANSTART); + } else if ((type == COEX_SCAN_START_2G) || (type == COEX_SCAN_START)) { + coex_stat->wl_hi_pri_task2 = true; + + /* Force antenna setup for no scan result issue */ + rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_2G); + rtw_coex_run_coex(rtwdev, COEX_RSN_2GSCANSTART); + } else { + coex_stat->wl_hi_pri_task2 = false; + rtw_coex_run_coex(rtwdev, COEX_RSN_SCANFINISH); + } +} + +void rtw_coex_switchband_notify(struct rtw_dev *rtwdev, u8 type) +{ + struct rtw_coex *coex = &rtwdev->coex; + + if (coex->stop_dm) + return; + + if (type == COEX_SWITCH_TO_5G) + rtw_coex_run_coex(rtwdev, COEX_RSN_5GSWITCHBAND); + else if (type == COEX_SWITCH_TO_24G_NOFORSCAN) + rtw_coex_run_coex(rtwdev, COEX_RSN_2GSWITCHBAND); + else + rtw_coex_scan_notify(rtwdev, COEX_SCAN_START_2G); +} + +void rtw_coex_connect_notify(struct rtw_dev *rtwdev, u8 type) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat 
*coex_stat = &coex->stat; + + if (coex->stop_dm) + return; + + rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE | COEX_SCBD_SCAN | + COEX_SCBD_ONOFF, true); + + if (type == COEX_ASSOCIATE_5G_START) { + rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_5G); + rtw_coex_run_coex(rtwdev, COEX_RSN_5GCONSTART); + } else if (type == COEX_ASSOCIATE_5G_FINISH) { + rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_5G); + rtw_coex_run_coex(rtwdev, COEX_RSN_5GCONFINISH); + } else if (type == COEX_ASSOCIATE_START) { + coex_stat->wl_hi_pri_task1 = true; + coex_stat->cnt_wl[COEX_CNT_WL_CONNPKT] = 2; + + /* Force antenna setup for no scan result issue */ + rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_2G); + + rtw_coex_run_coex(rtwdev, COEX_RSN_2GCONSTART); + + /* To keep TDMA case during connect process, + * to avoid changed by Btinfo and runcoexmechanism + */ + coex->freeze = true; + ieee80211_queue_delayed_work(rtwdev->hw, &coex->defreeze_work, + 5 * HZ); + } else { + coex_stat->wl_hi_pri_task1 = false; + coex->freeze = false; + + rtw_coex_run_coex(rtwdev, COEX_RSN_2GCONFINISH); + } +} + +void rtw_coex_media_status_notify(struct rtw_dev *rtwdev, u8 type) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + u8 para[6] = {0}; + + if (coex->stop_dm) + return; + + if (type == COEX_MEDIA_CONNECT_5G) { + rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE, true); + + rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_5G); + rtw_coex_run_coex(rtwdev, COEX_RSN_5GMEDIA); + } else if (type == COEX_MEDIA_CONNECT) { + rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE, true); + + /* Force antenna setup for no scan result issue */ + rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_2G); + + /* Set CCK Rx high Pri */ + rtw_coex_set_wl_pri_mask(rtwdev, COEX_WLPRI_RX_CCK, 1); + + /* always enable 5ms extend if connect */ + para[0] = COEX_H2C69_WL_LEAKAP; + para[1] = PARA1_H2C69_EN_5MS; /* enable 5ms extend */ + rtw_fw_bt_wifi_control(rtwdev, para[0], ¶[1]); + coex_stat->wl_slot_extend = true; + rtw_coex_run_coex(rtwdev, COEX_RSN_2GMEDIA); + } else { + rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE, false); + + rtw_coex_set_wl_pri_mask(rtwdev, COEX_WLPRI_RX_CCK, 0); + + rtw_coex_run_coex(rtwdev, COEX_RSN_MEDIADISCON); + } + + rtw_coex_update_wl_ch_info(rtwdev, type); +} + +void rtw_coex_bt_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_chip_info *chip = rtwdev->chip; + unsigned long bt_relink_time; + u8 i, rsp_source = 0, type; + + rsp_source = buf[0] & 0xf; + if (rsp_source >= COEX_BTINFO_SRC_MAX) + rsp_source = COEX_BTINFO_SRC_WL_FW; + + if (rsp_source == COEX_BTINFO_SRC_BT_IQK) { + coex_stat->bt_iqk_state = buf[1]; + if (coex_stat->bt_iqk_state == 1) + coex_stat->cnt_bt[COEX_CNT_BT_IQK]++; + else if (coex_stat->bt_iqk_state == 2) + coex_stat->cnt_bt[COEX_CNT_BT_IQKFAIL]++; + + return; + } + + if (rsp_source == COEX_BTINFO_SRC_BT_SCBD) { + rtw_coex_monitor_bt_enable(rtwdev); + if (coex_stat->bt_disabled != coex_stat->bt_disabled_pre) { + coex_stat->bt_disabled_pre = coex_stat->bt_disabled; + rtw_coex_run_coex(rtwdev, COEX_RSN_BTINFO); + } + return; + } + + if (rsp_source == COEX_BTINFO_SRC_BT_RSP || + rsp_source == COEX_BTINFO_SRC_BT_ACT) { + if (coex_stat->bt_disabled) { + coex_stat->bt_disabled = false; + coex_stat->bt_reenable = true; + ieee80211_queue_delayed_work(rtwdev->hw, + &coex->bt_reenable_work, + 15 * HZ); + } + } + + for (i = 0; i < length; i++) { + if (i < 
COEX_BTINFO_LENGTH_MAX) + coex_stat->bt_info_c2h[rsp_source][i] = buf[i]; + else + break; + } + + if (rsp_source == COEX_BTINFO_SRC_WL_FW) { + rtw_coex_update_bt_link_info(rtwdev); + rtw_coex_run_coex(rtwdev, COEX_RSN_BTINFO); + return; + } + + /* get the same info from bt, skip it */ + if (coex_stat->bt_info_c2h[rsp_source][1] == coex_stat->bt_info_lb2 && + coex_stat->bt_info_c2h[rsp_source][2] == coex_stat->bt_info_lb3 && + coex_stat->bt_info_c2h[rsp_source][3] == coex_stat->bt_info_hb0 && + coex_stat->bt_info_c2h[rsp_source][4] == coex_stat->bt_info_hb1 && + coex_stat->bt_info_c2h[rsp_source][5] == coex_stat->bt_info_hb2 && + coex_stat->bt_info_c2h[rsp_source][6] == coex_stat->bt_info_hb3) + return; + + coex_stat->bt_info_lb2 = coex_stat->bt_info_c2h[rsp_source][1]; + coex_stat->bt_info_lb3 = coex_stat->bt_info_c2h[rsp_source][2]; + coex_stat->bt_info_hb0 = coex_stat->bt_info_c2h[rsp_source][3]; + coex_stat->bt_info_hb1 = coex_stat->bt_info_c2h[rsp_source][4]; + coex_stat->bt_info_hb2 = coex_stat->bt_info_c2h[rsp_source][5]; + coex_stat->bt_info_hb3 = coex_stat->bt_info_c2h[rsp_source][6]; + + /* 0xff means BT is under WHCK test */ + coex_stat->bt_whck_test = (coex_stat->bt_info_lb2 == 0xff); + coex_stat->bt_inq_page = ((coex_stat->bt_info_lb2 & BIT(2)) == BIT(2)); + coex_stat->bt_acl_busy = ((coex_stat->bt_info_lb2 & BIT(3)) == BIT(3)); + coex_stat->cnt_bt[COEX_CNT_BT_RETRY] = coex_stat->bt_info_lb3 & 0xf; + if (coex_stat->cnt_bt[COEX_CNT_BT_RETRY] >= 1) + coex_stat->cnt_bt[COEX_CNT_BT_POPEVENT]++; + + coex_stat->bt_fix_2M = ((coex_stat->bt_info_lb3 & BIT(4)) == BIT(4)); + coex_stat->bt_inq = ((coex_stat->bt_info_lb3 & BIT(5)) == BIT(5)); + if (coex_stat->bt_inq) + coex_stat->cnt_bt[COEX_CNT_BT_INQ]++; + + coex_stat->bt_page = ((coex_stat->bt_info_lb3 & BIT(7)) == BIT(7)); + if (coex_stat->bt_page) { + coex_stat->cnt_bt[COEX_CNT_BT_PAGE]++; + if (coex_stat->wl_linkscan_proc || + coex_stat->wl_hi_pri_task1 || + coex_stat->wl_hi_pri_task2 || coex_stat->wl_gl_busy) + rtw_coex_write_scbd(rtwdev, COEX_SCBD_SCAN, true); + else + rtw_coex_write_scbd(rtwdev, COEX_SCBD_SCAN, false); + } else { + rtw_coex_write_scbd(rtwdev, COEX_SCBD_SCAN, false); + } + + /* unit: % (value-100 to translate to unit: dBm in coex info) */ + if (chip->bt_rssi_type == COEX_BTRSSI_RATIO) { + coex_stat->bt_rssi = coex_stat->bt_info_hb0 * 2 + 10; + } else { /* original unit: dbm -> unit: % -> value-100 in coex info */ + if (coex_stat->bt_info_hb0 <= 127) + coex_stat->bt_rssi = 100; + else if (256 - coex_stat->bt_info_hb0 <= 100) + coex_stat->bt_rssi = 100 - (256 - coex_stat->bt_info_hb0); + else + coex_stat->bt_rssi = 0; + } + + coex_stat->bt_ble_exist = ((coex_stat->bt_info_hb1 & BIT(0)) == BIT(0)); + if (coex_stat->bt_info_hb1 & BIT(1)) + coex_stat->cnt_bt[COEX_CNT_BT_REINIT]++; + + if (coex_stat->bt_info_hb1 & BIT(2)) { + coex_stat->cnt_bt[COEX_CNT_BT_SETUPLINK]++; + coex_stat->bt_setup_link = true; + if (coex_stat->bt_reenable) + bt_relink_time = 6 * HZ; + else + bt_relink_time = 2 * HZ; + + ieee80211_queue_delayed_work(rtwdev->hw, + &coex->bt_relink_work, + bt_relink_time); + } + + if (coex_stat->bt_info_hb1 & BIT(3)) + coex_stat->cnt_bt[COEX_CNT_BT_IGNWLANACT]++; + + coex_stat->bt_ble_voice = ((coex_stat->bt_info_hb1 & BIT(4)) == BIT(4)); + coex_stat->bt_ble_scan_en = ((coex_stat->bt_info_hb1 & BIT(5)) == BIT(5)); + if (coex_stat->bt_info_hb1 & BIT(6)) + coex_stat->cnt_bt[COEX_CNT_BT_ROLESWITCH]++; + + coex_stat->bt_multi_link = ((coex_stat->bt_info_hb1 & BIT(7)) == BIT(7)); + /* resend wifi info to bt, it is 
reset and lost the info */ + if ((coex_stat->bt_info_hb1 & BIT(1))) { + if (coex_stat->wl_connected) + type = COEX_MEDIA_CONNECT; + else + type = COEX_MEDIA_DISCONNECT; + rtw_coex_update_wl_ch_info(rtwdev, type); + } + + /* if ignore_wlan_act && not set_up_link */ + if ((coex_stat->bt_info_hb1 & BIT(3)) && + (!(coex_stat->bt_info_hb1 & BIT(2)))) + rtw_coex_ignore_wlan_act(rtwdev, false); + + coex_stat->bt_opp_exist = ((coex_stat->bt_info_hb2 & BIT(0)) == BIT(0)); + if (coex_stat->bt_info_hb2 & BIT(1)) + coex_stat->cnt_bt[COEX_CNT_BT_AFHUPDATE]++; + + coex_stat->bt_a2dp_active = (coex_stat->bt_info_hb2 & BIT(2)) == BIT(2); + coex_stat->bt_slave = ((coex_stat->bt_info_hb2 & BIT(3)) == BIT(3)); + coex_stat->bt_hid_slot = (coex_stat->bt_info_hb2 & 0x30) >> 4; + coex_stat->bt_hid_pair_num = (coex_stat->bt_info_hb2 & 0xc0) >> 6; + if (coex_stat->bt_hid_pair_num > 0 && coex_stat->bt_hid_slot >= 2) + coex_stat->bt_418_hid_exist = true; + else if (coex_stat->bt_hid_pair_num == 0) + coex_stat->bt_418_hid_exist = false; + + if ((coex_stat->bt_info_lb2 & 0x49) == 0x49) + coex_stat->bt_a2dp_bitpool = (coex_stat->bt_info_hb3 & 0x7f); + else + coex_stat->bt_a2dp_bitpool = 0; + + coex_stat->bt_a2dp_sink = ((coex_stat->bt_info_hb3 & BIT(7)) == BIT(7)); + + rtw_coex_update_bt_link_info(rtwdev); + rtw_coex_run_coex(rtwdev, COEX_RSN_BTINFO); +} + +void rtw_coex_wl_fwdbginfo_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + u8 val; + int i; + + if (WARN(length < 8, "invalid wl info c2h length\n")) + return; + + if (buf[0] != 0x08) + return; + + for (i = 1; i < 8; i++) { + val = coex_stat->wl_fw_dbg_info_pre[i]; + if (buf[i] >= val) + coex_stat->wl_fw_dbg_info[i] = buf[i] - val; + else + coex_stat->wl_fw_dbg_info[i] = val - buf[i]; + + coex_stat->wl_fw_dbg_info_pre[i] = buf[i]; + } + + coex_stat->cnt_wl[COEX_CNT_WL_FW_NOTIFY]++; + rtw_coex_wl_ccklock_action(rtwdev); + rtw_coex_wl_ccklock_detect(rtwdev); +} + +void rtw_coex_wl_status_change_notify(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + + if (coex->stop_dm) + return; + + rtw_coex_run_coex(rtwdev, COEX_RSN_WLSTATUS); +} + +void rtw_coex_bt_relink_work(struct work_struct *work) +{ + struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, + coex.bt_relink_work.work); + struct rtw_coex_stat *coex_stat = &rtwdev->coex.stat; + + mutex_lock(&rtwdev->mutex); + coex_stat->bt_setup_link = false; + rtw_coex_run_coex(rtwdev, COEX_RSN_WLSTATUS); + mutex_unlock(&rtwdev->mutex); +} + +void rtw_coex_bt_reenable_work(struct work_struct *work) +{ + struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, + coex.bt_reenable_work.work); + struct rtw_coex_stat *coex_stat = &rtwdev->coex.stat; + + mutex_lock(&rtwdev->mutex); + coex_stat->bt_reenable = false; + mutex_unlock(&rtwdev->mutex); +} + +void rtw_coex_defreeze_work(struct work_struct *work) +{ + struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, + coex.defreeze_work.work); + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &rtwdev->coex.stat; + + mutex_lock(&rtwdev->mutex); + coex->freeze = false; + coex_stat->wl_hi_pri_task1 = false; + rtw_coex_run_coex(rtwdev, COEX_RSN_WLSTATUS); + mutex_unlock(&rtwdev->mutex); +} diff --git a/drivers/net/wireless/realtek/rtw88/coex.h b/drivers/net/wireless/realtek/rtw88/coex.h new file mode 100644 index 000000000000..008d1af5996b --- /dev/null +++ b/drivers/net/wireless/realtek/rtw88/coex.h @@ -0,0 +1,370 @@ +/* 
SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2018-2019 Realtek Corporation + */ + +#ifndef __RTW_COEX_H__ +#define __RTW_COEX_H__ + +/* BT profile map bit definition */ +#define BPM_HFP BIT(0) +#define BPM_HID BIT(1) +#define BPM_A2DP BIT(2) +#define BPM_PAN BIT(3) + +#define COEX_RESP_ACK_BY_WL_FW 0x1 +#define COEX_REQUEST_TIMEOUT msecs_to_jiffies(10) + +#define COEX_MIN_DELAY 10 /* delay unit in ms */ +#define COEX_RFK_TIMEOUT 600 /* RFK timeout in ms */ + +#define COEX_RF_OFF 0x0 +#define COEX_RF_ON 0x1 + +#define COEX_H2C69_WL_LEAKAP 0xc +#define PARA1_H2C69_DIS_5MS 0x1 +#define PARA1_H2C69_EN_5MS 0x0 + +#define COEX_H2C69_TDMA_SLOT 0xb +#define PARA1_H2C69_TDMA_4SLOT 0xc1 +#define PARA1_H2C69_TDMA_2SLOT 0x1 + +#define TDMA_4SLOT BIT(8) + +#define COEX_RSSI_STEP 4 +#define COEX_RSSI_HIGH(rssi) \ + ({ typeof(rssi) __rssi__ = rssi; \ + (__rssi__ == COEX_RSSI_STATE_HIGH || \ + __rssi__ == COEX_RSSI_STATE_STAY_HIGH ? true : false); }) + +#define COEX_RSSI_MEDIUM(rssi) \ + ({ typeof(rssi) __rssi__ = rssi; \ + (__rssi__ == COEX_RSSI_STATE_MEDIUM || \ + __rssi__ == COEX_RSSI_STATE_STAY_MEDIUM ? true : false); }) + +#define COEX_RSSI_LOW(rssi) \ + ({ typeof(rssi) __rssi__ = rssi; \ + (__rssi__ == COEX_RSSI_STATE_LOW || \ + __rssi__ == COEX_RSSI_STATE_STAY_LOW ? true : false); }) + +#define GET_COEX_RESP_BT_SCAN_TYPE(payload) \ + le64_get_bits(*((__le64 *)(payload)), GENMASK(31, 24)) + +enum coex_mp_info_op { + BT_MP_INFO_OP_PATCH_VER = 0x00, + BT_MP_INFO_OP_READ_REG = 0x11, + BT_MP_INFO_OP_SUPP_FEAT = 0x2a, + BT_MP_INFO_OP_SUPP_VER = 0x2b, + BT_MP_INFO_OP_SCAN_TYPE = 0x2d, + BT_MP_INFO_OP_LNA_CONSTRAINT = 0x32, +}; + +enum coex_set_ant_phase { + COEX_SET_ANT_INIT, + COEX_SET_ANT_WONLY, + COEX_SET_ANT_WOFF, + COEX_SET_ANT_2G, + COEX_SET_ANT_5G, + COEX_SET_ANT_POWERON, + COEX_SET_ANT_2G_WLBT, + COEX_SET_ANT_2G_FREERUN, + + COEX_SET_ANT_MAX +}; + +enum coex_runreason { + COEX_RSN_2GSCANSTART = 0, + COEX_RSN_5GSCANSTART = 1, + COEX_RSN_SCANFINISH = 2, + COEX_RSN_2GSWITCHBAND = 3, + COEX_RSN_5GSWITCHBAND = 4, + COEX_RSN_2GCONSTART = 5, + COEX_RSN_5GCONSTART = 6, + COEX_RSN_2GCONFINISH = 7, + COEX_RSN_5GCONFINISH = 8, + COEX_RSN_2GMEDIA = 9, + COEX_RSN_5GMEDIA = 10, + COEX_RSN_MEDIADISCON = 11, + COEX_RSN_BTINFO = 12, + COEX_RSN_LPS = 13, + COEX_RSN_WLSTATUS = 14, + + COEX_RSN_MAX +}; + +enum coex_lte_coex_table_type { + COEX_CTT_WL_VS_LTE, + COEX_CTT_BT_VS_LTE, +}; + +enum coex_gnt_setup_state { + COEX_GNT_SET_HW_PTA = 0x0, + COEX_GNT_SET_SW_LOW = 0x1, + COEX_GNT_SET_SW_HIGH = 0x3, +}; + +enum coex_ext_ant_switch_pos_type { + COEX_SWITCH_TO_BT, + COEX_SWITCH_TO_WLG, + COEX_SWITCH_TO_WLA, + COEX_SWITCH_TO_NOCARE, + COEX_SWITCH_TO_WLG_BT, + + COEX_SWITCH_TO_MAX +}; + +enum coex_ext_ant_switch_ctrl_type { + COEX_SWITCH_CTRL_BY_BBSW, + COEX_SWITCH_CTRL_BY_PTA, + COEX_SWITCH_CTRL_BY_ANTDIV, + COEX_SWITCH_CTRL_BY_MAC, + COEX_SWITCH_CTRL_BY_BT, + COEX_SWITCH_CTRL_BY_FW, + + COEX_SWITCH_CTRL_MAX +}; + +enum coex_algorithm { + COEX_ALGO_NOPROFILE = 0, + COEX_ALGO_HFP = 1, + COEX_ALGO_HID = 2, + COEX_ALGO_A2DP = 3, + COEX_ALGO_PAN = 4, + COEX_ALGO_A2DP_HID = 5, + COEX_ALGO_A2DP_PAN = 6, + COEX_ALGO_PAN_HID = 7, + COEX_ALGO_A2DP_PAN_HID = 8, + + COEX_ALGO_MAX +}; + +enum coex_wl_link_mode { + COEX_WLINK_2G1PORT = 0x0, + COEX_WLINK_5G = 0x3, + COEX_WLINK_MAX +}; + +enum coex_wl2bt_scoreboard { + COEX_SCBD_ACTIVE = BIT(0), + COEX_SCBD_ONOFF = BIT(1), + COEX_SCBD_SCAN = BIT(2), + COEX_SCBD_UNDERTEST = BIT(3), + COEX_SCBD_RXGAIN = BIT(4), + COEX_SCBD_BT_RFK = BIT(5), + COEX_SCBD_WLBUSY = 
BIT(6), + COEX_SCBD_EXTFEM = BIT(8), + COEX_SCBD_TDMA = BIT(9), + COEX_SCBD_FIX2M = BIT(10), + COEX_SCBD_ALL = GENMASK(15, 0), +}; + +enum coex_power_save_type { + COEX_PS_WIFI_NATIVE = 0, + COEX_PS_LPS_ON = 1, + COEX_PS_LPS_OFF = 2, +}; + +enum coex_rssi_state { + COEX_RSSI_STATE_HIGH, + COEX_RSSI_STATE_MEDIUM, + COEX_RSSI_STATE_LOW, + COEX_RSSI_STATE_STAY_HIGH, + COEX_RSSI_STATE_STAY_MEDIUM, + COEX_RSSI_STATE_STAY_LOW, +}; + +enum coex_notify_type_ips { + COEX_IPS_LEAVE = 0x0, + COEX_IPS_ENTER = 0x1, +}; + +enum coex_notify_type_lps { + COEX_LPS_DISABLE = 0x0, + COEX_LPS_ENABLE = 0x1, +}; + +enum coex_notify_type_scan { + COEX_SCAN_FINISH, + COEX_SCAN_START, + COEX_SCAN_START_2G, + COEX_SCAN_START_5G, +}; + +enum coex_notify_type_switchband { + COEX_NOT_SWITCH, + COEX_SWITCH_TO_24G, + COEX_SWITCH_TO_5G, + COEX_SWITCH_TO_24G_NOFORSCAN, +}; + +enum coex_notify_type_associate { + COEX_ASSOCIATE_FINISH, + COEX_ASSOCIATE_START, + COEX_ASSOCIATE_5G_FINISH, + COEX_ASSOCIATE_5G_START, +}; + +enum coex_notify_type_media_status { + COEX_MEDIA_DISCONNECT, + COEX_MEDIA_CONNECT, + COEX_MEDIA_CONNECT_5G, +}; + +enum coex_bt_status { + COEX_BTSTATUS_NCON_IDLE = 0, + COEX_BTSTATUS_CON_IDLE = 1, + COEX_BTSTATUS_INQ_PAGE = 2, + COEX_BTSTATUS_ACL_BUSY = 3, + COEX_BTSTATUS_SCO_BUSY = 4, + COEX_BTSTATUS_ACL_SCO_BUSY = 5, + + COEX_BTSTATUS_MAX +}; + +enum coex_wl_tput_dir { + COEX_WL_TPUT_TX = 0x0, + COEX_WL_TPUT_RX = 0x1, + COEX_WL_TPUT_MAX +}; + +enum coex_wl_priority_mask { + COEX_WLPRI_RX_RSP = 2, + COEX_WLPRI_TX_RSP = 3, + COEX_WLPRI_TX_BEACON = 4, + COEX_WLPRI_TX_OFDM = 11, + COEX_WLPRI_TX_CCK = 12, + COEX_WLPRI_TX_BEACONQ = 27, + COEX_WLPRI_RX_CCK = 28, + COEX_WLPRI_RX_OFDM = 29, + COEX_WLPRI_MAX +}; + +enum coex_commom_chip_setup { + COEX_CSETUP_INIT_HW = 0x0, + COEX_CSETUP_ANT_SWITCH = 0x1, + COEX_CSETUP_GNT_FIX = 0x2, + COEX_CSETUP_GNT_DEBUG = 0x3, + COEX_CSETUP_RFE_TYPE = 0x4, + COEX_CSETUP_COEXINFO_HW = 0x5, + COEX_CSETUP_WL_TX_POWER = 0x6, + COEX_CSETUP_WL_RX_GAIN = 0x7, + COEX_CSETUP_WLAN_ACT_IPS = 0x8, + COEX_CSETUP_MAX +}; + +enum coex_indirect_reg_type { + COEX_INDIRECT_1700 = 0x0, + COEX_INDIRECT_7C0 = 0x1, + COEX_INDIRECT_MAX +}; + +enum coex_pstdma_type { + COEX_PSTDMA_FORCE_LPSOFF = 0x0, + COEX_PSTDMA_FORCE_LPSON = 0x1, + COEX_PSTDMA_MAX +}; + +enum coex_btrssi_type { + COEX_BTRSSI_RATIO = 0x0, + COEX_BTRSSI_DBM = 0x1, + COEX_BTRSSI_MAX +}; + +struct coex_table_para { + u32 bt; + u32 wl; +}; + +struct coex_tdma_para { + u8 para[5]; +}; + +struct coex_5g_afh_map { + u32 wl_5g_ch; + u8 bt_skip_ch; + u8 bt_skip_span; +}; + +struct coex_rf_para { + u8 wl_pwr_dec_lvl; + u8 bt_pwr_dec_lvl; + bool wl_low_gain_en; + u8 bt_lna_lvl; +}; + +static inline void rtw_coex_set_init(struct rtw_dev *rtwdev) +{ + struct rtw_chip_info *chip = rtwdev->chip; + + chip->ops->coex_set_init(rtwdev); +} + +static inline +void rtw_coex_set_ant_switch(struct rtw_dev *rtwdev, u8 ctrl_type, u8 pos_type) +{ + struct rtw_chip_info *chip = rtwdev->chip; + + if (!chip->ops->coex_set_ant_switch) + return; + + chip->ops->coex_set_ant_switch(rtwdev, ctrl_type, pos_type); +} + +static inline void rtw_coex_set_gnt_fix(struct rtw_dev *rtwdev) +{ + struct rtw_chip_info *chip = rtwdev->chip; + + chip->ops->coex_set_gnt_fix(rtwdev); +} + +static inline void rtw_coex_set_gnt_debug(struct rtw_dev *rtwdev) +{ + struct rtw_chip_info *chip = rtwdev->chip; + + chip->ops->coex_set_gnt_debug(rtwdev); +} + +static inline void rtw_coex_set_rfe_type(struct rtw_dev *rtwdev) +{ + struct rtw_chip_info *chip = rtwdev->chip; + + 
chip->ops->coex_set_rfe_type(rtwdev); +} + +static inline void rtw_coex_set_wl_tx_power(struct rtw_dev *rtwdev, u8 wl_pwr) +{ + struct rtw_chip_info *chip = rtwdev->chip; + + chip->ops->coex_set_wl_tx_power(rtwdev, wl_pwr); +} + +static inline +void rtw_coex_set_wl_rx_gain(struct rtw_dev *rtwdev, bool low_gain) +{ + struct rtw_chip_info *chip = rtwdev->chip; + + chip->ops->coex_set_wl_rx_gain(rtwdev, low_gain); +} + +void rtw_coex_info_response(struct rtw_dev *rtwdev, struct sk_buff *skb); +u32 rtw_coex_read_indirect_reg(struct rtw_dev *rtwdev, u16 addr); +void rtw_coex_write_indirect_reg(struct rtw_dev *rtwdev, u16 addr, + u32 mask, u32 val); +void rtw_coex_write_scbd(struct rtw_dev *rtwdev, u16 bitpos, bool set); + +void rtw_coex_bt_relink_work(struct work_struct *work); +void rtw_coex_bt_reenable_work(struct work_struct *work); +void rtw_coex_defreeze_work(struct work_struct *work); + +void rtw_coex_power_on_setting(struct rtw_dev *rtwdev); +void rtw_coex_init_hw_config(struct rtw_dev *rtwdev, bool wifi_only); +void rtw_coex_ips_notify(struct rtw_dev *rtwdev, u8 type); +void rtw_coex_lps_notify(struct rtw_dev *rtwdev, u8 type); +void rtw_coex_scan_notify(struct rtw_dev *rtwdev, u8 type); +void rtw_coex_connect_notify(struct rtw_dev *rtwdev, u8 action); +void rtw_coex_media_status_notify(struct rtw_dev *rtwdev, u8 status); +void rtw_coex_bt_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 len); +void rtw_coex_wl_fwdbginfo_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length); +void rtw_coex_switchband_notify(struct rtw_dev *rtwdev, u8 type); +void rtw_coex_wl_status_change_notify(struct rtw_dev *rtwdev); + +#endif diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c index f0ae26018f97..6ad985e98e42 100644 --- a/drivers/net/wireless/realtek/rtw88/debug.c +++ b/drivers/net/wireless/realtek/rtw88/debug.c @@ -8,6 +8,7 @@ #include "sec.h" #include "fw.h" #include "debug.h" +#include "phy.h" #ifdef CONFIG_RTW88_DEBUGFS @@ -76,7 +77,7 @@ static const struct file_operations file_ops_single_r = { .open = rtw_debugfs_single_open_rw, .read = seq_read, .llseek = seq_lseek, - .release = seq_release, + .release = single_release, }; static const struct file_operations file_ops_single_rw = { @@ -460,6 +461,112 @@ static int rtw_debug_get_rf_dump(struct seq_file *m, void *v) return 0; } +static void rtw_print_cck_rate_txt(struct seq_file *m, u8 rate) +{ + static const char * const + cck_rate[] = {"1M", "2M", "5.5M", "11M"}; + u8 idx = rate - DESC_RATE1M; + + seq_printf(m, " CCK_%-5s", cck_rate[idx]); +} + +static void rtw_print_ofdm_rate_txt(struct seq_file *m, u8 rate) +{ + static const char * const + ofdm_rate[] = {"6M", "9M", "12M", "18M", "24M", "36M", "48M", "54M"}; + u8 idx = rate - DESC_RATE6M; + + seq_printf(m, " OFDM_%-4s", ofdm_rate[idx]); +} + +static void rtw_print_ht_rate_txt(struct seq_file *m, u8 rate) +{ + u8 mcs_n = rate - DESC_RATEMCS0; + + seq_printf(m, " MCS%-6u", mcs_n); +} + +static void rtw_print_vht_rate_txt(struct seq_file *m, u8 rate) +{ + u8 idx = rate - DESC_RATEVHT1SS_MCS0; + u8 n_ss, mcs_n; + + /* n spatial stream */ + n_ss = 1 + idx / 10; + /* MCS n */ + mcs_n = idx % 10; + seq_printf(m, " VHT%uSMCS%u", n_ss, mcs_n); +} + +static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v) +{ + struct rtw_debugfs_priv *debugfs_priv = m->private; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + struct rtw_hal *hal = &rtwdev->hal; + void (*print_rate)(struct seq_file *, u8) = NULL; + u8 path, rate; + struct rtw_power_params 
pwr_param = {0}; + u8 bw = hal->current_band_width; + u8 ch = hal->current_channel; + u8 regd = rtwdev->regd.txpwr_regd; + + seq_printf(m, "%-4s %-10s %-3s%6s %-4s %4s (%-4s %-4s)\n", + "path", "rate", "pwr", "", "base", "", "byr", "lmt"); + + mutex_lock(&hal->tx_power_mutex); + for (path = RF_PATH_A; path <= RF_PATH_B; path++) { + /* CCK rates are not used in 5G */ + if (hal->current_band_type == RTW_BAND_5G) + rate = DESC_RATE6M; + else + rate = DESC_RATE1M; + + /* VHT 3SS and 4SS are not supported yet */ + for (; rate <= DESC_RATEVHT2SS_MCS9; rate++) { + /* HT 3SS and 4SS are not supported yet */ + if (rate > DESC_RATEMCS15 && + rate < DESC_RATEVHT1SS_MCS0) + continue; + + switch (rate) { + case DESC_RATE1M...DESC_RATE11M: + print_rate = rtw_print_cck_rate_txt; + break; + case DESC_RATE6M...DESC_RATE54M: + print_rate = rtw_print_ofdm_rate_txt; + break; + case DESC_RATEMCS0...DESC_RATEMCS15: + print_rate = rtw_print_ht_rate_txt; + break; + case DESC_RATEVHT1SS_MCS0...DESC_RATEVHT2SS_MCS9: + print_rate = rtw_print_vht_rate_txt; + break; + default: + print_rate = NULL; + break; + } + + rtw_get_tx_power_params(rtwdev, path, rate, bw, + ch, regd, &pwr_param); + + seq_printf(m, "%4c ", path + 'A'); + if (print_rate) + print_rate(m, rate); + seq_printf(m, " %3u(0x%02x) %4u %4d (%4d %4d)\n", + hal->tx_pwr_tbl[path][rate], + hal->tx_pwr_tbl[path][rate], + pwr_param.pwr_base, + min_t(s8, pwr_param.pwr_offset, + pwr_param.pwr_limit), + pwr_param.pwr_offset, pwr_param.pwr_limit); + } + } + + mutex_unlock(&hal->tx_power_mutex); + + return 0; +} + #define rtw_debug_impl_mac(page, addr) \ static struct rtw_debugfs_priv rtw_debug_priv_mac_ ##page = { \ .cb_read = rtw_debug_get_mac_page, \ @@ -514,6 +621,10 @@ static struct rtw_debugfs_priv rtw_debug_priv_rf_dump = { .cb_read = rtw_debug_get_rf_dump, }; +static struct rtw_debugfs_priv rtw_debug_priv_tx_pwr_tbl = { + .cb_read = rtw_debugfs_get_tx_pwr_tbl, +}; + static struct rtw_debugfs_priv rtw_debug_priv_write_reg = { .cb_write = rtw_debugfs_set_write_reg, }; @@ -561,7 +672,7 @@ static struct rtw_debugfs_priv rtw_debug_priv_rsvd_page = { void rtw_debugfs_init(struct rtw_dev *rtwdev) { - struct dentry *debugfs_topdir = rtwdev->debugfs; + struct dentry *debugfs_topdir; debugfs_topdir = debugfs_create_dir("rtw88", rtwdev->hw->wiphy->debugfsdir); @@ -610,6 +721,7 @@ void rtw_debugfs_init(struct rtw_dev *rtwdev) rtw_debugfs_add_r(bb_41); } rtw_debugfs_add_r(rf_dump); + rtw_debugfs_add_r(tx_pwr_tbl); } #endif /* CONFIG_RTW88_DEBUGFS */ diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c index 628477971213..b082e2cc95f5 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.c +++ b/drivers/net/wireless/realtek/rtw88/fw.c @@ -3,6 +3,7 @@ */ #include "main.h" +#include "coex.h" #include "fw.h" #include "tx.h" #include "reg.h" @@ -36,17 +37,51 @@ void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb) c2h = (struct rtw_c2h_cmd *)(skb->data + pkt_offset); len = skb->len - pkt_offset - 2; - rtw_dbg(rtwdev, RTW_DBG_FW, "recv C2H, id=0x%02x, seq=0x%02x, len=%d\n", - c2h->id, c2h->seq, len); + mutex_lock(&rtwdev->mutex); switch (c2h->id) { + case C2H_BT_INFO: + rtw_coex_bt_info_notify(rtwdev, c2h->payload, len); + break; + case C2H_WLAN_INFO: + rtw_coex_wl_fwdbginfo_notify(rtwdev, c2h->payload, len); + break; case C2H_HALMAC: + rtw_fw_c2h_cmd_handle_ext(rtwdev, skb); break; default: break; } + + mutex_unlock(&rtwdev->mutex); +} + +void rtw_fw_c2h_cmd_rx_irqsafe(struct rtw_dev *rtwdev, u32 pkt_offset, + struct
sk_buff *skb) +{ + struct rtw_c2h_cmd *c2h; + u8 len; + + c2h = (struct rtw_c2h_cmd *)(skb->data + pkt_offset); + len = skb->len - pkt_offset - 2; + *((u32 *)skb->cb) = pkt_offset; + + rtw_dbg(rtwdev, RTW_DBG_FW, "recv C2H, id=0x%02x, seq=0x%02x, len=%d\n", + c2h->id, c2h->seq, len); + + switch (c2h->id) { + case C2H_BT_MP_INFO: + rtw_coex_info_response(rtwdev, skb); + break; + default: + /* pass offset for further operation */ + *((u32 *)skb->cb) = pkt_offset; + skb_queue_tail(&rtwdev->c2h_queue, skb); + ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work); + break; + } } +EXPORT_SYMBOL(rtw_fw_c2h_cmd_rx_irqsafe); static void rtw_fw_send_h2c_command(struct rtw_dev *rtwdev, u8 *h2c) @@ -181,6 +216,102 @@ void rtw_fw_do_iqk(struct rtw_dev *rtwdev, struct rtw_iqk_para *para) rtw_fw_send_h2c_packet(rtwdev, h2c_pkt); } +void rtw_fw_query_bt_info(struct rtw_dev *rtwdev) +{ + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; + + SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_QUERY_BT_INFO); + + SET_QUERY_BT_INFO(h2c_pkt, true); + + rtw_fw_send_h2c_command(rtwdev, h2c_pkt); +} + +void rtw_fw_wl_ch_info(struct rtw_dev *rtwdev, u8 link, u8 ch, u8 bw) +{ + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; + + SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WL_CH_INFO); + + SET_WL_CH_INFO_LINK(h2c_pkt, link); + SET_WL_CH_INFO_CHNL(h2c_pkt, ch); + SET_WL_CH_INFO_BW(h2c_pkt, bw); + + rtw_fw_send_h2c_command(rtwdev, h2c_pkt); +} + +void rtw_fw_query_bt_mp_info(struct rtw_dev *rtwdev, + struct rtw_coex_info_req *req) +{ + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; + + SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_QUERY_BT_MP_INFO); + + SET_BT_MP_INFO_SEQ(h2c_pkt, req->seq); + SET_BT_MP_INFO_OP_CODE(h2c_pkt, req->op_code); + SET_BT_MP_INFO_PARA1(h2c_pkt, req->para1); + SET_BT_MP_INFO_PARA2(h2c_pkt, req->para2); + SET_BT_MP_INFO_PARA3(h2c_pkt, req->para3); + + rtw_fw_send_h2c_command(rtwdev, h2c_pkt); +} + +void rtw_fw_force_bt_tx_power(struct rtw_dev *rtwdev, u8 bt_pwr_dec_lvl) +{ + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; + u8 index = 0 - bt_pwr_dec_lvl; + + SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_FORCE_BT_TX_POWER); + + SET_BT_TX_POWER_INDEX(h2c_pkt, index); + + rtw_fw_send_h2c_command(rtwdev, h2c_pkt); +} + +void rtw_fw_bt_ignore_wlan_action(struct rtw_dev *rtwdev, bool enable) +{ + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; + + SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_IGNORE_WLAN_ACTION); + + SET_IGNORE_WLAN_ACTION_EN(h2c_pkt, enable); + + rtw_fw_send_h2c_command(rtwdev, h2c_pkt); +} + +void rtw_fw_coex_tdma_type(struct rtw_dev *rtwdev, + u8 para1, u8 para2, u8 para3, u8 para4, u8 para5) +{ + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; + + SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_COEX_TDMA_TYPE); + + SET_COEX_TDMA_TYPE_PARA1(h2c_pkt, para1); + SET_COEX_TDMA_TYPE_PARA2(h2c_pkt, para2); + SET_COEX_TDMA_TYPE_PARA3(h2c_pkt, para3); + SET_COEX_TDMA_TYPE_PARA4(h2c_pkt, para4); + SET_COEX_TDMA_TYPE_PARA5(h2c_pkt, para5); + + rtw_fw_send_h2c_command(rtwdev, h2c_pkt); +} + +void rtw_fw_bt_wifi_control(struct rtw_dev *rtwdev, u8 op_code, u8 *data) +{ + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; + + SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BT_WIFI_CONTROL); + + SET_BT_WIFI_CONTROL_OP_CODE(h2c_pkt, op_code); + + SET_BT_WIFI_CONTROL_DATA1(h2c_pkt, *data); + SET_BT_WIFI_CONTROL_DATA2(h2c_pkt, *(data + 1)); + SET_BT_WIFI_CONTROL_DATA3(h2c_pkt, *(data + 2)); + SET_BT_WIFI_CONTROL_DATA4(h2c_pkt, *(data + 3)); + SET_BT_WIFI_CONTROL_DATA5(h2c_pkt, *(data + 4)); + + rtw_fw_send_h2c_command(rtwdev, h2c_pkt); +} + void rtw_fw_send_rssi_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si) { u8 h2c_pkt[H2C_PKT_SIZE] = {0}; diff --git 
a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h index 703466393ecb..e95d85bd097f 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.h +++ b/drivers/net/wireless/realtek/rtw88/fw.h @@ -35,7 +35,9 @@ enum rtw_c2h_cmd_id { C2H_BT_INFO = 0x09, + C2H_BT_MP_INFO = 0x0b, C2H_HW_FEATURE_REPORT = 0x19, + C2H_WLAN_INFO = 0x27, C2H_HW_FEATURE_DUMP = 0xfd, C2H_HALMAC = 0xff, }; @@ -71,6 +73,14 @@ enum rtw_fw_rf_type { FW_RF_MAX_TYPE = 0xF, }; +struct rtw_coex_info_req { + u8 seq; + u8 op_code; + u8 para1; + u8 para2; + u8 para3; +}; + struct rtw_iqk_para { u8 clear; u8 segment_iqk; @@ -139,6 +149,14 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id) #define H2C_CMD_RA_INFO 0x40 #define H2C_CMD_RSSI_MONITOR 0x42 +#define H2C_CMD_COEX_TDMA_TYPE 0x60 +#define H2C_CMD_QUERY_BT_INFO 0x61 +#define H2C_CMD_FORCE_BT_TX_POWER 0x62 +#define H2C_CMD_IGNORE_WLAN_ACTION 0x63 +#define H2C_CMD_WL_CH_INFO 0x66 +#define H2C_CMD_QUERY_BT_MP_INFO 0x67 +#define H2C_CMD_BT_WIFI_CONTROL 0x69 + #define SET_H2C_CMD_ID_CLASS(h2c_pkt, value) \ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(7, 0)) @@ -191,6 +209,50 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id) le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(23, 16)) #define SET_RA_INFO_RA_MASK3(h2c_pkt, value) \ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(31, 24)) +#define SET_QUERY_BT_INFO(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8)) +#define SET_WL_CH_INFO_LINK(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8)) +#define SET_WL_CH_INFO_CHNL(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16)) +#define SET_WL_CH_INFO_BW(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24)) +#define SET_BT_MP_INFO_SEQ(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 12)) +#define SET_BT_MP_INFO_OP_CODE(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16)) +#define SET_BT_MP_INFO_PARA1(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24)) +#define SET_BT_MP_INFO_PARA2(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(7, 0)) +#define SET_BT_MP_INFO_PARA3(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(15, 8)) +#define SET_BT_TX_POWER_INDEX(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8)) +#define SET_IGNORE_WLAN_ACTION_EN(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8)) +#define SET_COEX_TDMA_TYPE_PARA1(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8)) +#define SET_COEX_TDMA_TYPE_PARA2(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16)) +#define SET_COEX_TDMA_TYPE_PARA3(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24)) +#define SET_COEX_TDMA_TYPE_PARA4(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(7, 0)) +#define SET_COEX_TDMA_TYPE_PARA5(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(15, 8)) +#define SET_BT_WIFI_CONTROL_OP_CODE(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8)) +#define SET_BT_WIFI_CONTROL_DATA1(h2c_pkt, value) \ + 
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16)) +#define SET_BT_WIFI_CONTROL_DATA2(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24)) +#define SET_BT_WIFI_CONTROL_DATA3(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(7, 0)) +#define SET_BT_WIFI_CONTROL_DATA4(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(15, 8)) +#define SET_BT_WIFI_CONTROL_DATA5(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(23, 16)) static inline struct rtw_c2h_cmd *get_c2h_from_skb(struct sk_buff *skb) { @@ -200,12 +262,23 @@ static inline struct rtw_c2h_cmd *get_c2h_from_skb(struct sk_buff *skb) return (struct rtw_c2h_cmd *)(skb->data + pkt_offset); } +void rtw_fw_c2h_cmd_rx_irqsafe(struct rtw_dev *rtwdev, u32 pkt_offset, + struct sk_buff *skb); void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb); void rtw_fw_send_general_info(struct rtw_dev *rtwdev); void rtw_fw_send_phydm_info(struct rtw_dev *rtwdev); void rtw_fw_do_iqk(struct rtw_dev *rtwdev, struct rtw_iqk_para *para); void rtw_fw_set_pwr_mode(struct rtw_dev *rtwdev); +void rtw_fw_query_bt_info(struct rtw_dev *rtwdev); +void rtw_fw_wl_ch_info(struct rtw_dev *rtwdev, u8 link, u8 ch, u8 bw); +void rtw_fw_query_bt_mp_info(struct rtw_dev *rtwdev, + struct rtw_coex_info_req *req); +void rtw_fw_force_bt_tx_power(struct rtw_dev *rtwdev, u8 bt_pwr_dec_lvl); +void rtw_fw_bt_ignore_wlan_action(struct rtw_dev *rtwdev, bool enable); +void rtw_fw_coex_tdma_type(struct rtw_dev *rtwdev, + u8 para1, u8 para2, u8 para3, u8 para4, u8 para5); +void rtw_fw_bt_wifi_control(struct rtw_dev *rtwdev, u8 op_code, u8 *data); void rtw_fw_send_rssi_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si); void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si); void rtw_fw_media_status_report(struct rtw_dev *rtwdev, u8 mac_id, bool conn); diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c index abe6a148673b..e5e3605bb693 100644 --- a/drivers/net/wireless/realtek/rtw88/mac80211.c +++ b/drivers/net/wireless/realtek/rtw88/mac80211.c @@ -7,6 +7,7 @@ #include "tx.h" #include "fw.h" #include "mac.h" +#include "coex.h" #include "ps.h" #include "reg.h" #include "debug.h" @@ -253,8 +254,9 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw, enum rtw_net_type net_type; if (conf->assoc) { + rtw_coex_connect_notify(rtwdev, COEX_ASSOCIATE_FINISH); net_type = RTW_NET_MGD_LINKED; - chip->ops->do_iqk(rtwdev); + chip->ops->phy_calibration(rtwdev); rtwvif->aid = conf->aid; rtw_add_rsvd_page(rtwdev, RSVD_PS_POLL, true); @@ -262,6 +264,7 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw, rtw_add_rsvd_page(rtwdev, RSVD_NULL, true); rtw_fw_download_rsvd_page(rtwdev, vif); rtw_send_rsvd_page_h2c(rtwdev); + rtw_coex_media_status_notify(rtwdev, conf->assoc); } else { net_type = RTW_NET_NO_LINK; rtwvif->aid = 0; @@ -469,6 +472,8 @@ static void rtw_ops_sw_scan_start(struct ieee80211_hw *hw, config |= PORT_SET_MAC_ADDR; rtw_vif_port_config(rtwdev, rtwvif, config); + rtw_coex_scan_notify(rtwdev, COEX_SCAN_START); + rtw_flag_set(rtwdev, RTW_FLAG_DIG_DISABLE); rtw_flag_set(rtwdev, RTW_FLAG_SCANNING); @@ -491,6 +496,19 @@ static void rtw_ops_sw_scan_complete(struct ieee80211_hw *hw, config |= PORT_SET_MAC_ADDR; rtw_vif_port_config(rtwdev, rtwvif, config); + rtw_coex_scan_notify(rtwdev, COEX_SCAN_FINISH); + + mutex_unlock(&rtwdev->mutex); +} + 
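/*
 * Illustration (not part of the patch): every SET_* accessor added to
 * fw.h above packs one H2C parameter into a little-endian dword at a
 * fixed bit position via le32p_replace_bits(). The stand-alone,
 * user-space sketch below mimics that layout for H2C_CMD_QUERY_BT_INFO
 * (0x61) using plain shift/mask arithmetic; the 8-byte buffer size and
 * the put_field() helper are assumptions of the sketch, not driver API.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define H2C_PKT_SIZE 8	/* sketch-local; the driver's real size may differ */

/* Poor man's le32p_replace_bits(): write 'val' into the bit field
 * described by 'mask' inside little-endian dword 'dword' of 'pkt'.
 * Assumes a little-endian host to keep the sketch short.
 */
static void put_field(uint8_t *pkt, int dword, uint32_t mask, uint32_t val)
{
	uint32_t word;

	memcpy(&word, pkt + 4 * dword, sizeof(word));
	word &= ~mask;
	word |= (val * (mask & -mask)) & mask;	/* shift val up to the mask's LSB */
	memcpy(pkt + 4 * dword, &word, sizeof(word));
}

int main(void)
{
	uint8_t h2c[H2C_PKT_SIZE] = {0};
	int i;

	put_field(h2c, 0, 0x000000ff, 0x61);	/* SET_H2C_CMD_ID_CLASS: GENMASK(7, 0) */
	put_field(h2c, 0, 0x00000100, 1);	/* SET_QUERY_BT_INFO: BIT(8) */

	for (i = 0; i < H2C_PKT_SIZE; i++)
		printf("%02x ", h2c[i]);
	printf("\n");
	return 0;
}
/* On a little-endian host this prints "61 01 00 00 00 00 00 00", the
 * same bytes rtw_fw_query_bt_info() hands to rtw_fw_send_h2c_command().
 */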
+static void rtw_ops_mgd_prepare_tx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u16 duration) +{ + struct rtw_dev *rtwdev = hw->priv; + + mutex_lock(&rtwdev->mutex); + rtw_coex_connect_notify(rtwdev, COEX_ASSOCIATE_START); mutex_unlock(&rtwdev->mutex); } @@ -509,5 +527,6 @@ const struct ieee80211_ops rtw_ops = { .ampdu_action = rtw_ops_ampdu_action, .sw_scan_start = rtw_ops_sw_scan_start, .sw_scan_complete = rtw_ops_sw_scan_complete, + .mgd_prepare_tx = rtw_ops_mgd_prepare_tx, }; EXPORT_SYMBOL(rtw_ops); diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c index 5a2c06267d07..fc8f6213fc8f 100644 --- a/drivers/net/wireless/realtek/rtw88/main.c +++ b/drivers/net/wireless/realtek/rtw88/main.c @@ -8,6 +8,7 @@ #include "ps.h" #include "sec.h" #include "mac.h" +#include "coex.h" #include "phy.h" #include "reg.h" #include "efuse.h" @@ -149,6 +150,7 @@ static void rtw_watch_dog_work(struct work_struct *work) struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, watch_dog_work.work); struct rtw_watch_dog_iter_data data = {}; + bool busy_traffic = rtw_flag_check(rtwdev, RTW_FLAG_BUSY_TRAFFIC); if (!rtw_flag_check(rtwdev, RTW_FLAG_RUNNING)) return; @@ -156,6 +158,14 @@ static void rtw_watch_dog_work(struct work_struct *work) ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->watch_dog_work, RTW_WATCH_DOG_DELAY_TIME); + if (rtwdev->stats.tx_cnt > 100 || rtwdev->stats.rx_cnt > 100) + rtw_flag_set(rtwdev, RTW_FLAG_BUSY_TRAFFIC); + else + rtw_flag_clear(rtwdev, RTW_FLAG_BUSY_TRAFFIC); + + if (busy_traffic != rtw_flag_check(rtwdev, RTW_FLAG_BUSY_TRAFFIC)) + rtw_coex_wl_status_change_notify(rtwdev); + /* reset tx/rx statistics */ rtwdev->stats.tx_unicast = 0; rtwdev->stats.rx_unicast = 0; @@ -298,6 +308,15 @@ void rtw_set_channel(struct rtw_dev *rtwdev) chip->ops->set_channel(rtwdev, center_chan, bandwidth, primary_chan_idx); + if (hal->current_band_type == RTW_BAND_5G) { + rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_5G); + } else { + if (rtw_flag_check(rtwdev, RTW_FLAG_SCANNING)) + rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_24G); + else + rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_24G_NOFORSCAN); + } + rtw_phy_set_tx_power_level(rtwdev, center_chan); } @@ -641,6 +660,7 @@ static int rtw_power_on(struct rtw_dev *rtwdev) { struct rtw_chip_info *chip = rtwdev->chip; struct rtw_fw_state *fw = &rtwdev->fw; + bool wifi_only; int ret; ret = rtw_hci_setup(rtwdev); @@ -684,6 +704,10 @@ static int rtw_power_on(struct rtw_dev *rtwdev) goto err_off; } + wifi_only = !rtwdev->efuse.btcoex; + rtw_coex_power_on_setting(rtwdev); + rtw_coex_init_hw_config(rtwdev, wifi_only); + return 0; err_off: @@ -722,10 +746,15 @@ static void rtw_power_off(struct rtw_dev *rtwdev) void rtw_core_stop(struct rtw_dev *rtwdev) { + struct rtw_coex *coex = &rtwdev->coex; + rtw_flag_clear(rtwdev, RTW_FLAG_RUNNING); rtw_flag_clear(rtwdev, RTW_FLAG_FW_RUNNING); cancel_delayed_work_sync(&rtwdev->watch_dog_work); + cancel_delayed_work_sync(&coex->bt_relink_work); + cancel_delayed_work_sync(&coex->bt_reenable_work); + cancel_delayed_work_sync(&coex->defreeze_work); rtw_power_off(rtwdev); } @@ -876,7 +905,6 @@ static int rtw_chip_parameter_setup(struct rtw_dev *rtwdev) struct rtw_chip_info *chip = rtwdev->chip; struct rtw_hal *hal = &rtwdev->hal; struct rtw_efuse *efuse = &rtwdev->efuse; - u32 wl_bt_pwr_ctrl; int ret = 0; switch (rtw_hci_type(rtwdev)) { @@ -888,9 +916,6 @@ static int rtw_chip_parameter_setup(struct rtw_dev *rtwdev) return -EINVAL; } - wl_bt_pwr_ctrl =
rtw_read32(rtwdev, REG_WL_BT_PWR_CTRL); - if (wl_bt_pwr_ctrl & BIT_BT_FUNC_EN) - rtwdev->efuse.btcoex = true; hal->chip_version = rtw_read32(rtwdev, REG_SYS_CFG1); hal->fab_version = BIT_GET_VENDOR_ID(hal->chip_version) >> 2; hal->cut_version = BIT_GET_CHIP_VER(hal->chip_version); @@ -1044,11 +1069,14 @@ static int rtw_chip_efuse_info_setup(struct rtw_dev *rtwdev) efuse->lna_type_5g = 0; if (efuse->channel_plan == 0xff) efuse->channel_plan = 0x7f; + if (efuse->rf_board_option == 0xff) + efuse->rf_board_option = 0; if (efuse->bt_setting & BIT(0)) efuse->share_ant = true; if (efuse->regd == 0xff) efuse->regd = 0; + efuse->btcoex = (efuse->rf_board_option & 0xe0) == 0x20; efuse->ext_pa_2g = efuse->pa_type_2g & BIT(4) ? 1 : 0; efuse->ext_lna_2g = efuse->lna_type_2g & BIT(3) ? 1 : 0; efuse->ext_pa_5g = efuse->pa_type_5g & BIT(0) ? 1 : 0; @@ -1111,6 +1139,7 @@ EXPORT_SYMBOL(rtw_chip_info_setup); int rtw_core_init(struct rtw_dev *rtwdev) { + struct rtw_coex *coex = &rtwdev->coex; int ret; INIT_LIST_HEAD(&rtwdev->rsvd_page_list); @@ -1120,8 +1149,12 @@ int rtw_core_init(struct rtw_dev *rtwdev) INIT_DELAYED_WORK(&rtwdev->watch_dog_work, rtw_watch_dog_work); INIT_DELAYED_WORK(&rtwdev->lps_work, rtw_lps_work); + INIT_DELAYED_WORK(&coex->bt_relink_work, rtw_coex_bt_relink_work); + INIT_DELAYED_WORK(&coex->bt_reenable_work, rtw_coex_bt_reenable_work); + INIT_DELAYED_WORK(&coex->defreeze_work, rtw_coex_defreeze_work); INIT_WORK(&rtwdev->c2h_work, rtw_c2h_work); skb_queue_head_init(&rtwdev->c2h_queue); + skb_queue_head_init(&rtwdev->coex.queue); skb_queue_head_init(&rtwdev->tx_report.queue); spin_lock_init(&rtwdev->dm_lock); @@ -1130,8 +1163,11 @@ int rtw_core_init(struct rtw_dev *rtwdev) spin_lock_init(&rtwdev->tx_report.q_lock); mutex_init(&rtwdev->mutex); + mutex_init(&rtwdev->coex.mutex); mutex_init(&rtwdev->hal.tx_power_mutex); + init_waitqueue_head(&rtwdev->coex.wait); + rtwdev->sec.total_cam_num = 32; rtwdev->hal.current_channel = 1; set_bit(RTW_BC_MC_MACID, rtwdev->mac_id_map); @@ -1174,6 +1210,7 @@ void rtw_core_deinit(struct rtw_dev *rtwdev) } mutex_destroy(&rtwdev->mutex); + mutex_destroy(&rtwdev->coex.mutex); mutex_destroy(&rtwdev->hal.tx_power_mutex); } EXPORT_SYMBOL(rtw_core_deinit); @@ -1199,6 +1236,7 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw) ieee80211_hw_set(hw, SUPPORTS_PS); ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS); ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); + ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP) | diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h index 8fa05751836b..bede3f38516e 100644 --- a/drivers/net/wireless/realtek/rtw88/main.h +++ b/drivers/net/wireless/realtek/rtw88/main.h @@ -310,6 +310,7 @@ enum rtw_flags { RTW_FLAG_INACTIVE_PS, RTW_FLAG_LEISURE_PS, RTW_FLAG_DIG_DISABLE, + RTW_FLAG_BUSY_TRAFFIC, NUM_OF_RTW_FLAGS, }; @@ -639,7 +640,19 @@ struct rtw_chip_ops { u8 antenna_rx); void (*cfg_ldo25)(struct rtw_dev *rtwdev, bool enable); void (*false_alarm_statistics)(struct rtw_dev *rtwdev); - void (*do_iqk)(struct rtw_dev *rtwdev); + void (*phy_calibration)(struct rtw_dev *rtwdev); + void (*dpk_track)(struct rtw_dev *rtwdev); + void (*cck_pd_set)(struct rtw_dev *rtwdev, u8 level); + + /* for coex */ + void (*coex_set_init)(struct rtw_dev *rtwdev); + void (*coex_set_ant_switch)(struct rtw_dev *rtwdev, + u8 ctrl_type, u8 pos_type); + void (*coex_set_gnt_fix)(struct rtw_dev *rtwdev); + void (*coex_set_gnt_debug)(struct 
rtw_dev *rtwdev); + void (*coex_set_rfe_type)(struct rtw_dev *rtwdev); + void (*coex_set_wl_tx_power)(struct rtw_dev *rtwdev, u8 wl_pwr); + void (*coex_set_wl_rx_gain)(struct rtw_dev *rtwdev, bool low_gain); }; #define RTW_PWR_POLLING_CNT 20000 @@ -852,6 +865,257 @@ struct rtw_chip_info { const struct rtw_rfe_def *rfe_defs; u32 rfe_defs_size; + + bool en_dis_dpd; + u16 dpd_ratemask; + + /* coex paras */ + u32 coex_para_ver; + u8 bt_desired_ver; + bool scbd_support; + bool new_scbd10_def; /* true: fix 2M(8822c) */ + u8 pstdma_type; /* 0: LPSoff, 1:LPSon */ + u8 bt_rssi_type; + u8 ant_isolation; + u8 rssi_tolerance; + u8 table_sant_num; + u8 table_nsant_num; + u8 tdma_sant_num; + u8 tdma_nsant_num; + u8 bt_afh_span_bw20; + u8 bt_afh_span_bw40; + u8 afh_5g_num; + u8 wl_rf_para_num; + const u8 *bt_rssi_step; + const u8 *wl_rssi_step; + const struct coex_table_para *table_nsant; + const struct coex_table_para *table_sant; + const struct coex_tdma_para *tdma_sant; + const struct coex_tdma_para *tdma_nsant; + const struct coex_rf_para *wl_rf_para_tx; + const struct coex_rf_para *wl_rf_para_rx; + const struct coex_5g_afh_map *afh_5g; +}; + +enum rtw_coex_bt_state_cnt { + COEX_CNT_BT_RETRY, + COEX_CNT_BT_REINIT, + COEX_CNT_BT_REENABLE, + COEX_CNT_BT_POPEVENT, + COEX_CNT_BT_SETUPLINK, + COEX_CNT_BT_IGNWLANACT, + COEX_CNT_BT_INQ, + COEX_CNT_BT_PAGE, + COEX_CNT_BT_ROLESWITCH, + COEX_CNT_BT_AFHUPDATE, + COEX_CNT_BT_INFOUPDATE, + COEX_CNT_BT_IQK, + COEX_CNT_BT_IQKFAIL, + + COEX_CNT_BT_MAX +}; + +enum rtw_coex_wl_state_cnt { + COEX_CNT_WL_CONNPKT, + COEX_CNT_WL_COEXRUN, + COEX_CNT_WL_NOISY0, + COEX_CNT_WL_NOISY1, + COEX_CNT_WL_NOISY2, + COEX_CNT_WL_5MS_NOEXTEND, + COEX_CNT_WL_FW_NOTIFY, + + COEX_CNT_WL_MAX +}; + +struct rtw_coex_rfe { + bool ant_switch_exist; + bool ant_switch_diversity; + bool ant_switch_with_bt; + u8 rfe_module_type; + u8 ant_switch_polarity; + + /* true if WLG at BTG, else at WLAG */ + bool wlg_at_btg; +}; + +struct rtw_coex_dm { + bool cur_ps_tdma_on; + bool cur_wl_rx_low_gain_en; + + u8 reason; + u8 bt_rssi_state[4]; + u8 wl_rssi_state[4]; + u8 wl_ch_info[3]; + u8 cur_ps_tdma; + u8 cur_table; + u8 ps_tdma_para[5]; + u8 cur_bt_pwr_lvl; + u8 cur_bt_lna_lvl; + u8 cur_wl_pwr_lvl; + u8 bt_status; + u32 cur_ant_pos_type; + u32 cur_switch_status; + u32 setting_tdma; +}; + +#define COEX_BTINFO_SRC_WL_FW 0x0 +#define COEX_BTINFO_SRC_BT_RSP 0x1 +#define COEX_BTINFO_SRC_BT_ACT 0x2 +#define COEX_BTINFO_SRC_BT_IQK 0x3 +#define COEX_BTINFO_SRC_BT_SCBD 0x4 +#define COEX_BTINFO_SRC_MAX 0x5 + +#define COEX_INFO_FTP BIT(7) +#define COEX_INFO_A2DP BIT(6) +#define COEX_INFO_HID BIT(5) +#define COEX_INFO_SCO_BUSY BIT(4) +#define COEX_INFO_ACL_BUSY BIT(3) +#define COEX_INFO_INQ_PAGE BIT(2) +#define COEX_INFO_SCO_ESCO BIT(1) +#define COEX_INFO_CONNECTION BIT(0) +#define COEX_BTINFO_LENGTH_MAX 10 + +struct rtw_coex_stat { + bool bt_disabled; + bool bt_disabled_pre; + bool bt_link_exist; + bool bt_whck_test; + bool bt_inq_page; + bool bt_inq; + bool bt_page; + bool bt_ble_voice; + bool bt_ble_exist; + bool bt_hfp_exist; + bool bt_a2dp_exist; + bool bt_hid_exist; + bool bt_pan_exist; /* PAN or OPP */ + bool bt_opp_exist; /* OPP only */ + bool bt_acl_busy; + bool bt_fix_2M; + bool bt_setup_link; + bool bt_multi_link; + bool bt_a2dp_sink; + bool bt_a2dp_active; + bool bt_reenable; + bool bt_ble_scan_en; + bool bt_init_scan; + bool bt_slave; + bool bt_418_hid_exist; + bool bt_mailbox_reply; + + bool wl_under_lps; + bool wl_under_ips; + bool wl_hi_pri_task1; + bool wl_hi_pri_task2; + bool wl_force_lps_ctrl; + 
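/*
 * The wl_* flags in this struct are coex-side snapshots of WLAN state;
 * most are refreshed from the rtw_coex_*_notify() hooks wired up in
 * this patch (scan, connect, LPS/IPS, switch-band, and the watchdog
 * busy-traffic check) rather than derived from mac80211 at decision
 * time.
 */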
bool wl_gl_busy; + bool wl_linkscan_proc; + bool wl_ps_state_fail; + bool wl_tx_limit_en; + bool wl_ampdu_limit_en; + bool wl_connected; + bool wl_slot_extend; + bool wl_cck_lock; + bool wl_cck_lock_pre; + bool wl_cck_lock_ever; + + u32 bt_supported_version; + u32 bt_supported_feature; + s8 bt_rssi; + u8 kt_ver; + u8 gnt_workaround_state; + u8 tdma_timer_base; + u8 bt_profile_num; + u8 bt_info_c2h[COEX_BTINFO_SRC_MAX][COEX_BTINFO_LENGTH_MAX]; + u8 bt_info_lb2; + u8 bt_info_lb3; + u8 bt_info_hb0; + u8 bt_info_hb1; + u8 bt_info_hb2; + u8 bt_info_hb3; + u8 bt_ble_scan_type; + u8 bt_hid_pair_num; + u8 bt_hid_slot; + u8 bt_a2dp_bitpool; + u8 bt_iqk_state; + + u8 wl_noisy_level; + u8 wl_fw_dbg_info[10]; + u8 wl_fw_dbg_info_pre[10]; + u8 wl_coex_mode; + u8 ampdu_max_time; + u8 wl_tput_dir; + + u16 score_board; + u16 retry_limit; + + /* counters to record bt states */ + u32 cnt_bt[COEX_CNT_BT_MAX]; + + /* counters to record wifi states */ + u32 cnt_wl[COEX_CNT_WL_MAX]; + + u32 darfrc; + u32 darfrch; +}; + +struct rtw_coex { + /* protects coex info request section */ + struct mutex mutex; + struct sk_buff_head queue; + wait_queue_head_t wait; + + bool under_5g; + bool stop_dm; + bool freeze; + bool freerun; + bool wl_rf_off; + + struct rtw_coex_stat stat; + struct rtw_coex_dm dm; + struct rtw_coex_rfe rfe; + + struct delayed_work bt_relink_work; + struct delayed_work bt_reenable_work; + struct delayed_work defreeze_work; +}; + +#define DPK_RF_REG_NUM 7 +#define DPK_RF_PATH_NUM 2 +#define DPK_BB_REG_NUM 18 +#define DPK_CHANNEL_WIDTH_80 1 + +DECLARE_EWMA(thermal, 10, 4); + +struct rtw_dpk_info { + bool is_dpk_pwr_on; + bool is_reload; + + DECLARE_BITMAP(dpk_path_ok, DPK_RF_PATH_NUM); + + u8 thermal_dpk[DPK_RF_PATH_NUM]; + struct ewma_thermal avg_thermal[DPK_RF_PATH_NUM]; + + u32 gnt_control; + u32 gnt_value; + + u8 result[RTW_RF_PATH_MAX]; + u8 dpk_txagc[RTW_RF_PATH_MAX]; + u32 coef[RTW_RF_PATH_MAX][20]; + u16 dpk_gs[RTW_RF_PATH_MAX]; + u8 thermal_dpk_delta[RTW_RF_PATH_MAX]; + u8 pre_pwsf[RTW_RF_PATH_MAX]; + + u8 dpk_band; + u8 dpk_ch; + u8 dpk_bw; +}; + +struct rtw_phy_cck_pd_reg { + u32 reg_pd; + u32 mask_pd; + u32 reg_cs; + u32 mask_cs; }; #define DACK_MSBK_BACKUP_NUM 0xf @@ -861,6 +1125,16 @@ struct rtw_dm_info { u32 cck_fa_cnt; u32 ofdm_fa_cnt; u32 total_fa_cnt; + + u32 cck_ok_cnt; + u32 cck_err_cnt; + u32 ofdm_ok_cnt; + u32 ofdm_err_cnt; + u32 ht_ok_cnt; + u32 ht_err_cnt; + u32 vht_ok_cnt; + u32 vht_err_cnt; + u8 min_rssi; u8 pre_min_rssi; u16 fa_history[4]; @@ -877,6 +1151,12 @@ struct rtw_dm_info { u32 dack_adck[RTW_RF_PATH_MAX]; u16 dack_msbk[RTW_RF_PATH_MAX][2][DACK_MSBK_BACKUP_NUM]; u8 dack_dck[RTW_RF_PATH_MAX][2][DACK_DCK_BACKUP_NUM]; + + struct rtw_dpk_info dpk_info; + + /* [bandwidth 0:20M/1:40M][number of path] */ + u8 cck_pd_lv[2][RTW_RF_PATH_MAX]; + u32 cck_fa_avg; }; struct rtw_efuse { @@ -888,6 +1168,7 @@ struct rtw_efuse { u8 addr[ETH_ALEN]; u8 channel_plan; u8 country_code[2]; + u8 rf_board_option; u8 rfe_option; u8 thermal_meter; u8 crystal_cap; @@ -1047,6 +1328,7 @@ struct rtw_dev { struct rtw_regulatory regd; struct rtw_dm_info dm_info; + struct rtw_coex coex; /* ensures exclusive access from mac80211 callbacks */ struct mutex mutex; @@ -1111,6 +1393,11 @@ static inline void rtw_flag_set(struct rtw_dev *rtwdev, enum rtw_flags flag) set_bit(flag, rtwdev->flags); } +static inline bool rtw_is_assoc(struct rtw_dev *rtwdev) +{ + return !!rtwdev->sta_cnt; +} + void rtw_get_channel_params(struct cfg80211_chan_def *chandef, struct rtw_channel_params *ch_param); bool 
check_hw_ready(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 target); diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c index 353871c27779..3fdb52a5789a 100644 --- a/drivers/net/wireless/realtek/rtw88/pci.c +++ b/drivers/net/wireless/realtek/rtw88/pci.c @@ -8,8 +8,13 @@ #include "pci.h" #include "tx.h" #include "rx.h" +#include "fw.h" #include "debug.h" +static bool rtw_disable_msi; +module_param_named(disable_msi, rtw_disable_msi, bool, 0644); +MODULE_PARM_DESC(disable_msi, "Set Y to disable MSI interrupt support"); + static u32 rtw_pci_tx_queue_idx_addr[] = { [RTW_TX_QUEUE_BK] = RTK_PCI_TXBD_IDX_BKQ, [RTW_TX_QUEUE_BE] = RTK_PCI_TXBD_IDX_BEQ, @@ -206,6 +211,23 @@ static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb, return 0; } +static void rtw_pci_sync_rx_desc_device(struct rtw_dev *rtwdev, dma_addr_t dma, + struct rtw_pci_rx_ring *rx_ring, + u32 idx, u32 desc_sz) +{ + struct device *dev = rtwdev->dev; + struct rtw_pci_rx_buffer_desc *buf_desc; + int buf_sz = RTK_PCI_RX_BUF_SIZE; + + dma_sync_single_for_device(dev, dma, buf_sz, DMA_FROM_DEVICE); + + buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head + + idx * desc_sz); + memset(buf_desc, 0, sizeof(*buf_desc)); + buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE); + buf_desc->dma = cpu_to_le32(dma); +} + static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev, struct rtw_pci_rx_ring *rx_ring, u8 desc_size, u32 len) @@ -765,6 +787,7 @@ static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci, u32 pkt_offset; u32 pkt_desc_sz = chip->rx_pkt_desc_sz; u32 buf_desc_sz = chip->rx_buf_desc_sz; + u32 new_len; u8 *rx_desc; dma_addr_t dma; @@ -783,8 +806,8 @@ static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci, rtw_pci_dma_check(rtwdev, ring, cur_rp); skb = ring->buf[cur_rp]; dma = *((dma_addr_t *)skb->cb); - pci_unmap_single(rtwpci->pdev, dma, RTK_PCI_RX_BUF_SIZE, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(rtwdev->dev, dma, RTK_PCI_RX_BUF_SIZE, + DMA_FROM_DEVICE); rx_desc = skb->data; chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status); @@ -792,40 +815,32 @@ static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci, pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz + pkt_stat.shift; - if (pkt_stat.is_c2h) { - /* keep rx_desc, halmac needs it */ - skb_put(skb, pkt_stat.pkt_len + pkt_offset); + /* allocate a new skb for this frame, + * discard the frame if none available + */ + new_len = pkt_stat.pkt_len + pkt_offset; + new = dev_alloc_skb(new_len); + if (WARN_ONCE(!new, "rx routine starvation\n")) + goto next_rp; + + /* put the DMA data including rx_desc from phy to new skb */ + skb_put_data(new, skb->data, new_len); - /* pass offset for further operation */ - *((u32 *)skb->cb) = pkt_offset; - skb_queue_tail(&rtwdev->c2h_queue, skb); - ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work); + if (pkt_stat.is_c2h) { + rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, new); } else { - /* remove rx_desc, maybe use skb_pull? 
*/ - skb_put(skb, pkt_stat.pkt_len); - skb_reserve(skb, pkt_offset); - - /* alloc a smaller skb to mac80211 */ - new = dev_alloc_skb(pkt_stat.pkt_len); - if (!new) { - new = skb; - } else { - skb_put_data(new, skb->data, skb->len); - dev_kfree_skb_any(skb); - } - /* TODO: merge into rx.c */ - rtw_rx_stats(rtwdev, pkt_stat.vif, skb); + /* remove rx_desc */ + skb_pull(new, pkt_offset); + + rtw_rx_stats(rtwdev, pkt_stat.vif, new); memcpy(new->cb, &rx_status, sizeof(rx_status)); ieee80211_rx_irqsafe(rtwdev->hw, new); } - /* skb delivered to mac80211, alloc a new one in rx ring */ - new = dev_alloc_skb(RTK_PCI_RX_BUF_SIZE); - if (WARN(!new, "rx routine starvation\n")) - return; - - ring->buf[cur_rp] = new; - rtw_pci_reset_rx_desc(rtwdev, new, ring, cur_rp, buf_desc_sz); +next_rp: + /* new skb delivered to mac80211, re-enable original skb DMA */ + rtw_pci_sync_rx_desc_device(rtwdev, dma, ring, cur_rp, + buf_desc_sz); /* host read next element in ring */ if (++cur_rp >= ring->r.len) @@ -855,12 +870,34 @@ static irqreturn_t rtw_pci_interrupt_handler(int irq, void *dev) { struct rtw_dev *rtwdev = dev; struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; - u32 irq_status[4]; spin_lock(&rtwpci->irq_lock); if (!rtwpci->irq_enabled) goto out; + /* disable RTW PCI interrupt to avoid more interrupts before the end of + * thread function + * + * disable HIMR here to also avoid new HISR flag being raised before + * the HISRs have been Write-1-cleared for MSI. If not all of the HISRs + * are cleared, the edge-triggered interrupt will not be generated when + * a new HISR flag is set. + */ + rtw_pci_disable_interrupt(rtwdev, rtwpci); +out: + spin_unlock(&rtwpci->irq_lock); + + return IRQ_WAKE_THREAD; +} + +static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev) +{ + struct rtw_dev *rtwdev = dev; + struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; + unsigned long flags; + u32 irq_status[4]; + + spin_lock_irqsave(&rtwpci->irq_lock, flags); rtw_pci_irq_recognized(rtwdev, rtwpci, irq_status); if (irq_status[0] & IMR_MGNTDOK) @@ -880,8 +917,9 @@ static irqreturn_t rtw_pci_interrupt_handler(int irq, void *dev) if (irq_status[0] & IMR_ROK) rtw_pci_rx_isr(rtwdev, rtwpci, RTW_RX_QUEUE_MPDU); -out: - spin_unlock(&rtwpci->irq_lock); + /* all of the jobs for this interrupt have been done */ + rtw_pci_enable_interrupt(rtwdev, rtwpci); + spin_unlock_irqrestore(&rtwpci->irq_lock, flags); return IRQ_HANDLED; } @@ -977,7 +1015,6 @@ static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev) u16 cut; u16 value; u16 offset; - u16 ip_sel; int i; cut = BIT(0) << rtwdev->hal.cut_version; @@ -990,7 +1027,6 @@ static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev) break; offset = para->offset; value = para->value; - ip_sel = para->ip_sel; if (para->ip_sel == RTW_IP_SEL_PHY) rtw_mdio_write(rtwdev, offset, value, true); else @@ -1005,7 +1041,6 @@ static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev) break; offset = para->offset; value = para->value; - ip_sel = para->ip_sel; if (para->ip_sel == RTW_IP_SEL_PHY) rtw_mdio_write(rtwdev, offset, value, false); else @@ -1090,6 +1125,38 @@ static struct rtw_hci_ops rtw_pci_ops = { .write_data_h2c = rtw_pci_write_data_h2c, }; +static int rtw_pci_request_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev) +{ + unsigned int flags = PCI_IRQ_LEGACY; + int ret; + + if (!rtw_disable_msi) + flags |= PCI_IRQ_MSI; + + ret = pci_alloc_irq_vectors(pdev, 1, 1, flags); + if (ret < 0) { + rtw_err(rtwdev, "failed to alloc PCI irq vectors\n"); + return ret; + } + + ret = 
devm_request_threaded_irq(rtwdev->dev, pdev->irq, + rtw_pci_interrupt_handler, + rtw_pci_interrupt_threadfn, + IRQF_SHARED, KBUILD_MODNAME, rtwdev); + if (ret) { + rtw_err(rtwdev, "failed to request irq %d\n", ret); + pci_free_irq_vectors(pdev); + } + + return ret; +} + +static void rtw_pci_free_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev) +{ + devm_free_irq(rtwdev->dev, pdev->irq, rtwdev); + pci_free_irq_vectors(pdev); +} + static int rtw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { @@ -1144,8 +1211,7 @@ static int rtw_pci_probe(struct pci_dev *pdev, goto err_destroy_pci; } - ret = request_irq(pdev->irq, &rtw_pci_interrupt_handler, - IRQF_SHARED, KBUILD_MODNAME, rtwdev); + ret = rtw_pci_request_irq(rtwdev, pdev); if (ret) { ieee80211_unregister_hw(hw); goto err_destroy_pci; @@ -1184,7 +1250,7 @@ static void rtw_pci_remove(struct pci_dev *pdev) rtw_pci_disable_interrupt(rtwdev, rtwpci); rtw_pci_destroy(rtwdev, pdev); rtw_pci_declaim(rtwdev, pdev); - free_irq(rtwpci->pdev->irq, rtwdev); + rtw_pci_free_irq(rtwdev, pdev); rtw_core_deinit(rtwdev); ieee80211_free_hw(hw); } diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c index 4ec8dcf17361..d3d3f40de75e 100644 --- a/drivers/net/wireless/realtek/rtw88/phy.c +++ b/drivers/net/wireless/realtek/rtw88/phy.c @@ -29,15 +29,6 @@ struct phy_pg_cfg_pair { u32 data; }; -struct txpwr_lmt_cfg_pair { - u8 regd; - u8 band; - u8 bw; - u8 rs; - u8 ch; - s8 txpwr_lmt; -}; - static const u32 db_invert_table[12][8] = { {10, 13, 16, 20, 25, 32, 40, 50}, @@ -120,6 +111,19 @@ enum rtw_phy_band_type { PHY_BAND_5G = 1, }; +static void rtw_phy_cck_pd_init(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + u8 i, j; + + for (i = 0; i <= RTW_CHANNEL_WIDTH_40; i++) { + for (j = 0; j < RTW_RF_PATH_MAX; j++) + dm_info->cck_pd_lv[i][j] = 0; + } + + dm_info->cck_fa_avg = CCK_FA_AVG_RESET; +} + void rtw_phy_init(struct rtw_dev *rtwdev) { struct rtw_chip_info *chip = rtwdev->chip; @@ -138,6 +142,7 @@ void rtw_phy_init(struct rtw_dev *rtwdev) addr = chip->dig[0].addr; mask = chip->dig[0].mask; dm_info->igi_history[0] = rtw_read32_mask(rtwdev, addr, mask); + rtw_phy_cck_pd_init(rtwdev); } void rtw_phy_dig_write(struct rtw_dev *rtwdev, u8 igi) @@ -448,12 +453,100 @@ static void rtw_phy_ra_info_update(struct rtw_dev *rtwdev) rtw_iterate_stas_atomic(rtwdev, rtw_phy_ra_info_update_iter, rtwdev); } +static void rtw_phy_dpk_track(struct rtw_dev *rtwdev) +{ + struct rtw_chip_info *chip = rtwdev->chip; + + if (chip->ops->dpk_track) + chip->ops->dpk_track(rtwdev); +} + +#define CCK_PD_LV_MAX 5 +#define CCK_PD_FA_LV1_MIN 1000 +#define CCK_PD_FA_LV0_MAX 500 + +static u8 rtw_phy_cck_pd_lv_unlink(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + u32 cck_fa_avg = dm_info->cck_fa_avg; + + if (cck_fa_avg > CCK_PD_FA_LV1_MIN) + return 1; + + if (cck_fa_avg < CCK_PD_FA_LV0_MAX) + return 0; + + return CCK_PD_LV_MAX; +} + +#define CCK_PD_IGI_LV4_VAL 0x38 +#define CCK_PD_IGI_LV3_VAL 0x2a +#define CCK_PD_IGI_LV2_VAL 0x24 +#define CCK_PD_RSSI_LV4_VAL 32 +#define CCK_PD_RSSI_LV3_VAL 32 +#define CCK_PD_RSSI_LV2_VAL 24 + +static u8 rtw_phy_cck_pd_lv_link(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + u8 igi = dm_info->igi_history[0]; + u8 rssi = dm_info->min_rssi; + u32 cck_fa_avg = dm_info->cck_fa_avg; + + if (igi > CCK_PD_IGI_LV4_VAL && rssi > CCK_PD_RSSI_LV4_VAL) + return 4; + if (igi > CCK_PD_IGI_LV3_VAL && rssi > CCK_PD_RSSI_LV3_VAL) + return 
3; + if (igi > CCK_PD_IGI_LV2_VAL || rssi > CCK_PD_RSSI_LV2_VAL) + return 2; + if (cck_fa_avg > CCK_PD_FA_LV1_MIN) + return 1; + if (cck_fa_avg < CCK_PD_FA_LV0_MAX) + return 0; + + return CCK_PD_LV_MAX; +} + +static u8 rtw_phy_cck_pd_lv(struct rtw_dev *rtwdev) +{ + if (!rtw_is_assoc(rtwdev)) + return rtw_phy_cck_pd_lv_unlink(rtwdev); + else + return rtw_phy_cck_pd_lv_link(rtwdev); +} + +static void rtw_phy_cck_pd(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + struct rtw_chip_info *chip = rtwdev->chip; + u32 cck_fa = dm_info->cck_fa_cnt; + u8 level; + + if (rtwdev->hal.current_band_type != RTW_BAND_2G) + return; + + if (dm_info->cck_fa_avg == CCK_FA_AVG_RESET) + dm_info->cck_fa_avg = cck_fa; + else + dm_info->cck_fa_avg = (dm_info->cck_fa_avg * 3 + cck_fa) >> 2; + + level = rtw_phy_cck_pd_lv(rtwdev); + + if (level >= CCK_PD_LV_MAX) + return; + + if (chip->ops->cck_pd_set) + chip->ops->cck_pd_set(rtwdev, level); +} + void rtw_phy_dynamic_mechanism(struct rtw_dev *rtwdev) { /* for further calculation */ rtw_phy_statistics(rtwdev); rtw_phy_dig(rtwdev); + rtw_phy_cck_pd(rtwdev); rtw_phy_ra_info_update(rtwdev); + rtw_phy_dpk_track(rtwdev); } #define FRAC_BITS 3 @@ -1267,10 +1360,8 @@ static void rtw_xref_txpwr_lmt(struct rtw_dev *rtwdev) void rtw_parse_tbl_txpwr_lmt(struct rtw_dev *rtwdev, const struct rtw_table *tbl) { - const struct txpwr_lmt_cfg_pair *p = tbl->data; - const struct txpwr_lmt_cfg_pair *end = p + tbl->size / 6; - - BUILD_BUG_ON(sizeof(struct txpwr_lmt_cfg_pair) != sizeof(u8) * 6); + const struct rtw_txpwr_lmt_cfg_pair *p = tbl->data; + const struct rtw_txpwr_lmt_cfg_pair *end = p + tbl->size; for (; p < end; p++) { rtw_phy_set_tx_power_limit(rtwdev, p->regd, p->band, @@ -1327,11 +1418,20 @@ void rtw_phy_cfg_rf(struct rtw_dev *rtwdev, const struct rtw_table *tbl, static void rtw_load_rfk_table(struct rtw_dev *rtwdev) { struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info; if (!chip->rfk_init_tbl) return; + rtw_write32_mask(rtwdev, 0x1e24, BIT(17), 0x1); + rtw_write32_mask(rtwdev, 0x1cd0, BIT(28), 0x1); + rtw_write32_mask(rtwdev, 0x1cd0, BIT(29), 0x1); + rtw_write32_mask(rtwdev, 0x1cd0, BIT(30), 0x1); + rtw_write32_mask(rtwdev, 0x1cd0, BIT(31), 0x0); + rtw_load_table(rtwdev, chip->rfk_init_tbl); + + dpk_info->is_dpk_pwr_on = 1; } void rtw_phy_load_tables(struct rtw_dev *rtwdev) @@ -1441,6 +1541,37 @@ static u8 rtw_get_channel_group(u8 channel) } } +static s8 rtw_phy_get_dis_dpd_by_rate_diff(struct rtw_dev *rtwdev, u16 rate) +{ + struct rtw_chip_info *chip = rtwdev->chip; + s8 dpd_diff = 0; + + if (!chip->en_dis_dpd) + return 0; + +#define RTW_DPD_RATE_CHECK(_rate) \ + case DESC_RATE ## _rate: \ + if (DIS_DPD_RATE ## _rate & chip->dpd_ratemask) \ + dpd_diff = -6 * chip->txgi_factor; \ + break + + switch (rate) { + RTW_DPD_RATE_CHECK(6M); + RTW_DPD_RATE_CHECK(9M); + RTW_DPD_RATE_CHECK(MCS0); + RTW_DPD_RATE_CHECK(MCS1); + RTW_DPD_RATE_CHECK(MCS8); + RTW_DPD_RATE_CHECK(MCS9); + RTW_DPD_RATE_CHECK(VHT1SS_MCS0); + RTW_DPD_RATE_CHECK(VHT1SS_MCS1); + RTW_DPD_RATE_CHECK(VHT2SS_MCS0); + RTW_DPD_RATE_CHECK(VHT2SS_MCS1); + } +#undef RTW_DPD_RATE_CHECK + + return dpd_diff; +} + static u8 rtw_phy_get_2g_tx_power_index(struct rtw_dev *rtwdev, struct rtw_2g_txpwr_idx *pwr_idx_2g, enum rtw_bandwidth bandwidth, @@ -1649,6 +1780,9 @@ rtw_phy_get_tx_power_index(struct rtw_dev *rtwdev, u8 rf_path, u8 rate, tx_power = pwr_param.pwr_base; offset = min_t(s8, pwr_param.pwr_offset, pwr_param.pwr_limit); + if 
(rtwdev->chip->en_dis_dpd) + offset += rtw_phy_get_dis_dpd_by_rate_diff(rtwdev, rate); + tx_power += offset; if (tx_power > rtwdev->chip->max_power_index) diff --git a/drivers/net/wireless/realtek/rtw88/phy.h b/drivers/net/wireless/realtek/rtw88/phy.h index 7c8eb732b13c..e79b084628e7 100644 --- a/drivers/net/wireless/realtek/rtw88/phy.h +++ b/drivers/net/wireless/realtek/rtw88/phy.h @@ -45,6 +45,15 @@ void rtw_phy_set_tx_power_level(struct rtw_dev *rtwdev, u8 channel); void rtw_phy_tx_power_by_rate_config(struct rtw_hal *hal); void rtw_phy_tx_power_limit_config(struct rtw_hal *hal); +struct rtw_txpwr_lmt_cfg_pair { + u8 regd; + u8 band; + u8 bw; + u8 rs; + u8 ch; + s8 txpwr_lmt; +}; + #define RTW_DECL_TABLE_PHY_COND_CORE(name, cfg, path) \ const struct rtw_table name ## _tbl = { \ .data = name, \ @@ -137,4 +146,6 @@ rtw_get_tx_power_params(struct rtw_dev *rtwdev, u8 path, #define MASKBYTE3LOWNIBBLE 0x0f000000 #define MASKL3BYTES 0x00ffffff +#define CCK_FA_AVG_RESET 0xffffffff + #endif diff --git a/drivers/net/wireless/realtek/rtw88/ps.c b/drivers/net/wireless/realtek/rtw88/ps.c index 607bfa4317d9..9ecd14feb76b 100644 --- a/drivers/net/wireless/realtek/rtw88/ps.c +++ b/drivers/net/wireless/realtek/rtw88/ps.c @@ -6,6 +6,7 @@ #include "fw.h" #include "ps.h" #include "mac.h" +#include "coex.h" #include "debug.h" static int rtw_ips_pwr_up(struct rtw_dev *rtwdev) @@ -26,6 +27,8 @@ int rtw_enter_ips(struct rtw_dev *rtwdev) { rtw_flag_set(rtwdev, RTW_FLAG_INACTIVE_PS); + rtw_coex_ips_notify(rtwdev, COEX_IPS_ENTER); + rtw_core_stop(rtwdev); return 0; @@ -53,6 +56,8 @@ int rtw_leave_ips(struct rtw_dev *rtwdev) rtw_iterate_vifs_atomic(rtwdev, rtw_restore_port_cfg_iter, rtwdev); + rtw_coex_ips_notify(rtwdev, COEX_IPS_LEAVE); + return 0; } @@ -67,6 +72,8 @@ static void rtw_leave_lps_core(struct rtw_dev *rtwdev) rtw_fw_set_pwr_mode(rtwdev); rtw_flag_clear(rtwdev, RTW_FLAG_LEISURE_PS); + + rtw_coex_lps_notify(rtwdev, COEX_LPS_DISABLE); } static void rtw_enter_lps_core(struct rtw_dev *rtwdev) @@ -78,6 +85,8 @@ static void rtw_enter_lps_core(struct rtw_dev *rtwdev) conf->rlbm = 1; conf->smart_ps = 2; + rtw_coex_lps_notify(rtwdev, COEX_LPS_ENABLE); + rtw_fw_set_pwr_mode(rtwdev); rtw_flag_set(rtwdev, RTW_FLAG_LEISURE_PS); } diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h index e2628f05812c..fe793e270d22 100644 --- a/drivers/net/wireless/realtek/rtw88/reg.h +++ b/drivers/net/wireless/realtek/rtw88/reg.h @@ -37,17 +37,28 @@ #define REG_GPIO_MUXCFG 0x0040 #define BIT_FSPI_EN BIT(19) +#define BIT_BT_AOD_GPIO3 BIT(9) +#define BIT_BT_PTA_EN BIT(5) #define BIT_WLRFE_4_5_EN BIT(2) #define REG_LED_CFG 0x004C #define BIT_LNAON_SEL_EN BIT(26) #define BIT_PAPE_SEL_EN BIT(25) +#define BIT_DPDT_WL_SEL BIT(24) +#define BIT_DPDT_SEL_EN BIT(23) #define REG_PAD_CTRL1 0x0064 #define BIT_PAPE_WLBT_SEL BIT(29) #define BIT_LNAON_WLBT_SEL BIT(28) +#define BIT_BTGP_JTAG_EN BIT(24) +#define BIT_BTGP_SPI_EN BIT(20) +#define BIT_LED1DIS BIT(15) +#define BIT_SW_DPDT_SEL_DATA BIT(0) #define REG_WL_BT_PWR_CTRL 0x0068 #define BIT_BT_FUNC_EN BIT(18) #define BIT_BT_DIG_CLK_EN BIT(8) +#define REG_SYS_SDIO_CTRL 0x0070 +#define BIT_DBG_GNT_WL_BT BIT(27) +#define BIT_LTE_MUX_CTRL_PATH BIT(26) #define REG_HCI_OPT_CTRL 0x0074 #define REG_MCUFW_CTRL 0x0080 @@ -70,6 +81,8 @@ #define FW_READY_MASK 0xffff #define REG_WLRF1 0x00EC +#define REG_WIFI_BT_INFO 0x00AA +#define BIT_BT_INT_EN BIT(15) #define REG_SYS_CFG1 0x00F0 #define BIT_RTL_ID BIT(23) #define BIT_RF_TYPE_ID BIT(27) @@ -180,6 +193,8 @@ 
#define REG_H2C_READ_ADDR 0x024C #define REG_H2C_INFO 0x0254 +#define REG_INT_MIG 0x0304 + #define REG_FWHW_TXQ_CTRL 0x0420 #define BIT_EN_BCNQ_DL BIT(22) #define BIT_EN_WR_FREE_TAIL BIT(20) @@ -187,6 +202,7 @@ #define REG_LIFETIME_EN 0x0426 #define BIT_BA_PARSER_EN BIT(5) #define REG_SPEC_SIFS 0x0428 +#define REG_RETRY_LIMIT 0x042a #define REG_DARFRC 0x0430 #define REG_DARFRCH 0x0434 #define REG_RARFRCH 0x043C @@ -199,18 +215,25 @@ #define REG_AMPDU_MAX_TIME_V1 0x0455 #define REG_BCNQ1_BDNY_V1 0x0456 #define REG_TX_HANG_CTRL 0x045E +#define BIT_EN_GNT_BT_AWAKE BIT(3) #define BIT_EN_EOF_V1 BIT(2) #define REG_DATA_SC 0x0483 #define REG_ARFR4 0x049C +#define BIT_WL_RFK BIT(0) #define REG_ARFRH4 0x04A0 #define REG_ARFR5 0x04A4 #define REG_ARFRH5 0x04A8 #define REG_SW_AMPDU_BURST_MODE_CTRL 0x04BC #define BIT_PRE_TX_CMD BIT(6) +#define REG_QUEUE_CTRL 0x04C6 +#define BIT_PTA_WL_TX_EN BIT(4) +#define BIT_PTA_EDCCA_EN BIT(5) #define REG_PROT_MODE_CTRL 0x04C8 #define REG_BAR_MODE_CTRL 0x04CC #define REG_PRECNT_CTRL 0x04E5 +#define BIT_BTCCA_CTRL (BIT(0) | BIT(1)) #define BIT_EN_PRECNT BIT(11) +#define REG_DUMMY_PAGE4_V1 0x04FC #define REG_EDCA_VO_PARAM 0x0500 #define REG_EDCA_VI_PARAM 0x0504 @@ -297,11 +320,48 @@ #define REG_RXFLTMAP0 0x06A0 #define REG_RXFLTMAP1 0x06A2 #define REG_RXFLTMAP2 0x06A4 +#define REG_BT_COEX_TABLE0 0x06C0 +#define REG_BT_COEX_TABLE1 0x06C4 +#define REG_BT_COEX_BRK_TABLE 0x06C8 +#define REG_BT_COEX_TABLE_H 0x06CC +#define REG_BT_COEX_TABLE_H1 0x06CD +#define REG_BT_COEX_TABLE_H2 0x06CE +#define REG_BT_COEX_TABLE_H3 0x06CF #define REG_BBPSF_CTRL 0x06DC +#define REG_BT_COEX_V2 0x0763 +#define BIT_GNT_BT_POLARITY BIT(4) +#define BIT_LTE_COEX_EN BIT(7) +#define REG_BT_STAT_CTRL 0x0778 +#define REG_BT_TDMA_TIME 0x0790 #define REG_WMAC_OPTION_FUNCTION 0x07D0 #define REG_WMAC_OPTION_FUNCTION_1 0x07D4 +#define REG_RX_GAIN_EN 0x081c + +#define REG_RFE_CTRL_E 0x0974 + +#define REG_DIS_DPD 0x0a70 +#define DIS_DPD_MASK GENMASK(9, 0) +#define DIS_DPD_RATE6M BIT(0) +#define DIS_DPD_RATE9M BIT(1) +#define DIS_DPD_RATEMCS0 BIT(2) +#define DIS_DPD_RATEMCS1 BIT(3) +#define DIS_DPD_RATEMCS8 BIT(4) +#define DIS_DPD_RATEMCS9 BIT(5) +#define DIS_DPD_RATEVHT1SS_MCS0 BIT(6) +#define DIS_DPD_RATEVHT1SS_MCS1 BIT(7) +#define DIS_DPD_RATEVHT2SS_MCS0 BIT(8) +#define DIS_DPD_RATEVHT2SS_MCS1 BIT(9) +#define DIS_DPD_RATEALL GENMASK(9, 0) + +#define REG_RFE_CTRL8 0x0cb4 +#define BIT_MASK_RFE_SEL89 GENMASK(7, 0) +#define REG_RFE_INV8 0x0cbd +#define BIT_MASK_RFE_INV89 GENMASK(1, 0) +#define REG_RFE_INV16 0x0cbe +#define BIT_RFE_BUF_EN BIT(3) + #define REG_ANAPAR_XTAL_0 0x1040 #define REG_CPU_DMEM_CON 0x1080 #define BIT_WL_PLATFORM_RST BIT(16) @@ -407,15 +467,34 @@ #define LTECOEX_WRITE_DATA REG_WL2LTECOEX_INDIRECT_ACCESS_WRITE_DATA_V1 #define LTECOEX_READ_DATA REG_WL2LTECOEX_INDIRECT_ACCESS_READ_DATA_V1 +#define REG_IGN_GNT_BT1 0x1860 + +#define REG_RFESEL_CTRL 0x1990 + +#define REG_NOMASK_TXBT 0x1ca7 +#define REG_ANAPAR 0x1c30 +#define BIT_ANAPAR_BTPS BIT(22) +#define REG_RSTB_SEL 0x1c38 + +#define REG_IGN_GNTBT4 0x4160 + +#define RF_MODOPT 0x01 #define RF_DTXLOK 0x08 #define RF_CFGCH 0x18 +#define RF_RCK 0x1d #define RF_LUTWA 0x33 #define RF_LUTWD1 0x3e #define RF_LUTWD0 0x3f +#define RF_T_METER 0x42 #define RF_XTALX2 0xb8 #define RF_MALSEL 0xbe +#define RF_RCKD 0xde #define RF_LUTDBG 0xdf #define RF_LUTWE2 0xee #define RF_LUTWE 0xef +#define LTE_COEX_CTRL 0x38 +#define LTE_WL_TRX_CTRL 0xa0 +#define LTE_BT_TRX_CTRL 0xa4 + #endif diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c 
b/drivers/net/wireless/realtek/rtw88/rtw8822b.c index 1172f6c0605b..63abda3b0ebf 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c @@ -3,6 +3,7 @@ */ #include "main.h" +#include "coex.h" #include "fw.h" #include "tx.h" #include "rx.h" @@ -31,6 +32,7 @@ static int rtw8822b_read_efuse(struct rtw_dev *rtwdev, u8 *log_map) map = (struct rtw8822b_efuse *)log_map; efuse->rfe_option = map->rfe_option; + efuse->rf_board_option = map->rf_board_option; efuse->crystal_cap = map->xtal_k; efuse->pa_type_2g = map->pa_type; efuse->pa_type_5g = map->pa_type; @@ -104,24 +106,6 @@ static void rtw8822b_phy_set_param(struct rtw_dev *rtwdev) rtw_phy_init(rtwdev); rtw8822b_phy_rfe_init(rtwdev); - - /* wifi path controller */ - rtw_write32_mask(rtwdev, 0x70, 0x4000000, 1); - /* BB control */ - rtw_write32_mask(rtwdev, 0x4c, 0x01800000, 0x2); - /* antenna mux switch */ - rtw_write8(rtwdev, 0x974, 0xff); - rtw_write32_mask(rtwdev, 0x1990, 0x300, 0); - rtw_write32_mask(rtwdev, 0xcbc, 0x80000, 0x0); - /* SW control */ - rtw_write8(rtwdev, 0xcb4, 0x77); - /* switch to WL side controller and gnt_wl gnt_bt debug signal */ - rtw_write32_mask(rtwdev, 0x70, 0xff000000, 0x0e); - /* gnt_wl = 1, gnt_bt = 0 */ - rtw_write32(rtwdev, 0x1704, 0x7700); - rtw_write32(rtwdev, 0x1700, 0xc00f0038); - /* switch for WL 2G */ - rtw_write8(rtwdev, 0xcbd, 0x2); } #define WLAN_SLOT_TIME 0x09 @@ -782,6 +766,7 @@ static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status, s8 min_rx_power = -120; u8 pwdb = GET_PHY_STAT_P0_PWDB(phy_status); + /* 8822B uses only 1 antenna to RX CCK rates */ pkt_stat->rx_power[RF_PATH_A] = pwdb - 110; pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 1); pkt_stat->bw = RTW_CHANNEL_WIDTH_20; @@ -960,6 +945,7 @@ static void rtw8822b_false_alarm_statistics(struct rtw_dev *rtwdev) u32 cck_enable; u32 cck_fa_cnt; u32 ofdm_fa_cnt; + u32 crc32_cnt; cck_enable = rtw_read32(rtwdev, 0x808) & BIT(28); cck_fa_cnt = rtw_read16(rtwdev, 0xa5c); @@ -970,6 +956,19 @@ static void rtw8822b_false_alarm_statistics(struct rtw_dev *rtwdev) dm_info->total_fa_cnt = ofdm_fa_cnt; dm_info->total_fa_cnt += cck_enable ? 
cck_fa_cnt : 0; + crc32_cnt = rtw_read32(rtwdev, 0xf04); + dm_info->cck_ok_cnt = crc32_cnt & 0xffff; + dm_info->cck_err_cnt = (crc32_cnt & 0xffff0000) >> 16; + crc32_cnt = rtw_read32(rtwdev, 0xf14); + dm_info->ofdm_ok_cnt = crc32_cnt & 0xffff; + dm_info->ofdm_err_cnt = (crc32_cnt & 0xffff0000) >> 16; + crc32_cnt = rtw_read32(rtwdev, 0xf10); + dm_info->ht_ok_cnt = crc32_cnt & 0xffff; + dm_info->ht_err_cnt = (crc32_cnt & 0xffff0000) >> 16; + crc32_cnt = rtw_read32(rtwdev, 0xf0c); + dm_info->vht_ok_cnt = crc32_cnt & 0xffff; + dm_info->vht_err_cnt = (crc32_cnt & 0xffff0000) >> 16; + rtw_write32_set(rtwdev, 0x9a4, BIT(17)); rtw_write32_clr(rtwdev, 0x9a4, BIT(17)); rtw_write32_clr(rtwdev, 0xa2c, BIT(15)); @@ -997,12 +996,265 @@ static void rtw8822b_do_iqk(struct rtw_dev *rtwdev) rtw_write_rf(rtwdev, RF_PATH_A, RF_DTXLOK, RFREG_MASK, 0x0); reload = !!rtw_read32_mask(rtwdev, REG_IQKFAILMSK, BIT(16)); - iqk_fail_mask = rtw_read32_mask(rtwdev, REG_IQKFAILMSK, GENMASK(0, 7)); + iqk_fail_mask = rtw_read32_mask(rtwdev, REG_IQKFAILMSK, GENMASK(7, 0)); rtw_dbg(rtwdev, RTW_DBG_PHY, "iqk counter=%d reload=%d do_iqk_cnt=%d n_iqk_fail(mask)=0x%02x\n", counter, reload, ++do_iqk_cnt, iqk_fail_mask); } +static void rtw8822b_phy_calibration(struct rtw_dev *rtwdev) +{ + rtw8822b_do_iqk(rtwdev); +} + +static void rtw8822b_coex_cfg_init(struct rtw_dev *rtwdev) +{ + /* enable TBTT interrupt */ + rtw_write8_set(rtwdev, REG_BCN_CTRL, BIT_EN_BCN_FUNCTION); + + /* BT report packet sample rate */ + /* 0x790[5:0]=0x5 */ + rtw_write8_set(rtwdev, REG_BT_TDMA_TIME, 0x05); + + /* enable BT counter statistics */ + rtw_write8(rtwdev, REG_BT_STAT_CTRL, 0x1); + + /* enable PTA (3-wire function from the BT side) */ + rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_BT_PTA_EN); + rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_BT_AOD_GPIO3); + + /* enable PTA (tx/rx signal from the WiFi side) */ + rtw_write8_set(rtwdev, REG_QUEUE_CTRL, BIT_PTA_WL_TX_EN); + /* WL TX signal to PTA does not include the EDCCA case */ + rtw_write8_clr(rtwdev, REG_QUEUE_CTRL, BIT_PTA_EDCCA_EN); + /* GNT_BT = 1 while both are selected */ + rtw_write8_set(rtwdev, REG_BT_COEX_V2, BIT_GNT_BT_POLARITY); +} + +static void rtw8822b_coex_cfg_ant_switch(struct rtw_dev *rtwdev, + u8 ctrl_type, u8 pos_type) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_dm *coex_dm = &coex->dm; + struct rtw_coex_rfe *coex_rfe = &coex->rfe; + bool polarity_inverse; + u8 regval = 0; + + if (((ctrl_type << 8) + pos_type) == coex_dm->cur_switch_status) + return; + + coex_dm->cur_switch_status = (ctrl_type << 8) + pos_type; + + if (coex_rfe->ant_switch_diversity && + ctrl_type == COEX_SWITCH_CTRL_BY_BBSW) + ctrl_type = COEX_SWITCH_CTRL_BY_ANTDIV; + + polarity_inverse = (coex_rfe->ant_switch_polarity == 1); + + switch (ctrl_type) { + default: + case COEX_SWITCH_CTRL_BY_BBSW: + /* 0x4c[23] = 0 */ + rtw_write8_mask(rtwdev, REG_LED_CFG + 2, BIT_DPDT_SEL_EN >> 16, 0x0); + /* 0x4c[24] = 1 */ + rtw_write8_mask(rtwdev, REG_LED_CFG + 3, BIT_DPDT_WL_SEL >> 24, 0x1); + /* BB SW, DPDT use RFE_ctrl8 and RFE_ctrl9 as ctrl pin */ + rtw_write8_mask(rtwdev, REG_RFE_CTRL8, BIT_MASK_RFE_SEL89, 0x77); + + if (pos_type == COEX_SWITCH_TO_WLG_BT) { + if (coex_rfe->rfe_module_type != 0x4 && + coex_rfe->rfe_module_type != 0x2) + regval = 0x3; + else + regval = (!polarity_inverse ? 0x2 : 0x1); + } else if (pos_type == COEX_SWITCH_TO_WLG) { + regval = (!polarity_inverse ? 0x2 : 0x1); + } else { + regval = (!polarity_inverse ?
0x1 : 0x2); + } + + rtw_write8_mask(rtwdev, REG_RFE_INV8, BIT_MASK_RFE_INV89, regval); + break; + case COEX_SWITCH_CTRL_BY_PTA: + /* 0x4c[23] = 0 */ + rtw_write8_mask(rtwdev, REG_LED_CFG + 2, BIT_DPDT_SEL_EN >> 16, 0x0); + /* 0x4c[24] = 1 */ + rtw_write8_mask(rtwdev, REG_LED_CFG + 3, BIT_DPDT_WL_SEL >> 24, 0x1); + /* PTA, DPDT use RFE_ctrl8 and RFE_ctrl9 as ctrl pin */ + rtw_write8_mask(rtwdev, REG_RFE_CTRL8, BIT_MASK_RFE_SEL89, 0x66); + + regval = (!polarity_inverse ? 0x2 : 0x1); + rtw_write8_mask(rtwdev, REG_RFE_INV8, BIT_MASK_RFE_INV89, regval); + break; + case COEX_SWITCH_CTRL_BY_ANTDIV: + /* 0x4c[23] = 0 */ + rtw_write8_mask(rtwdev, REG_LED_CFG + 2, BIT_DPDT_SEL_EN >> 16, 0x0); + /* 0x4c[24] = 1 */ + rtw_write8_mask(rtwdev, REG_LED_CFG + 3, BIT_DPDT_WL_SEL >> 24, 0x1); + rtw_write8_mask(rtwdev, REG_RFE_CTRL8, BIT_MASK_RFE_SEL89, 0x88); + break; + case COEX_SWITCH_CTRL_BY_MAC: + /* 0x4c[23] = 1 */ + rtw_write8_mask(rtwdev, REG_LED_CFG + 2, BIT_DPDT_SEL_EN >> 16, 0x1); + + regval = (!polarity_inverse ? 0x0 : 0x1); + rtw_write8_mask(rtwdev, REG_PAD_CTRL1, BIT_SW_DPDT_SEL_DATA, regval); + break; + case COEX_SWITCH_CTRL_BY_FW: + /* 0x4c[23] = 0 */ + rtw_write8_mask(rtwdev, REG_LED_CFG + 2, BIT_DPDT_SEL_EN >> 16, 0x0); + /* 0x4c[24] = 1 */ + rtw_write8_mask(rtwdev, REG_LED_CFG + 3, BIT_DPDT_WL_SEL >> 24, 0x1); + break; + case COEX_SWITCH_CTRL_BY_BT: + /* 0x4c[23] = 0 */ + rtw_write8_mask(rtwdev, REG_LED_CFG + 2, BIT_DPDT_SEL_EN >> 16, 0x0); + /* 0x4c[24] = 0 */ + rtw_write8_mask(rtwdev, REG_LED_CFG + 3, BIT_DPDT_WL_SEL >> 24, 0x0); + break; + } +} + +static void rtw8822b_coex_cfg_gnt_fix(struct rtw_dev *rtwdev) +{ +} + +static void rtw8822b_coex_cfg_gnt_debug(struct rtw_dev *rtwdev) +{ + rtw_write8_mask(rtwdev, REG_PAD_CTRL1 + 2, BIT_BTGP_SPI_EN >> 16, 0); + rtw_write8_mask(rtwdev, REG_PAD_CTRL1 + 3, BIT_BTGP_JTAG_EN >> 24, 0); + rtw_write8_mask(rtwdev, REG_GPIO_MUXCFG + 2, BIT_FSPI_EN >> 16, 0); + rtw_write8_mask(rtwdev, REG_PAD_CTRL1 + 1, BIT_LED1DIS >> 8, 0); + rtw_write8_mask(rtwdev, REG_SYS_SDIO_CTRL + 3, BIT_DBG_GNT_WL_BT >> 24, 0); +} + +static void rtw8822b_coex_cfg_rfe_type(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_rfe *coex_rfe = &coex->rfe; + struct rtw_efuse *efuse = &rtwdev->efuse; + bool is_ext_fem = false; + + coex_rfe->rfe_module_type = efuse->rfe_option; + coex_rfe->ant_switch_polarity = 0; + coex_rfe->ant_switch_diversity = false; + if (coex_rfe->rfe_module_type == 0x12 || + coex_rfe->rfe_module_type == 0x15 || + coex_rfe->rfe_module_type == 0x16) + coex_rfe->ant_switch_exist = false; + else + coex_rfe->ant_switch_exist = true; + + if (coex_rfe->rfe_module_type == 2 || + coex_rfe->rfe_module_type == 4) { + rtw_coex_write_scbd(rtwdev, COEX_SCBD_EXTFEM, true); + is_ext_fem = true; + } else { + rtw_coex_write_scbd(rtwdev, COEX_SCBD_EXTFEM, false); + } + + coex_rfe->wlg_at_btg = false; + + if (efuse->share_ant && + coex_rfe->ant_switch_exist && !is_ext_fem) + coex_rfe->ant_switch_with_bt = true; + else + coex_rfe->ant_switch_with_bt = false; + + /* Ext switch buffer mux */ + rtw_write8(rtwdev, REG_RFE_CTRL_E, 0xff); + rtw_write8_mask(rtwdev, REG_RFESEL_CTRL + 1, 0x3, 0x0); + rtw_write8_mask(rtwdev, REG_RFE_INV16, BIT_RFE_BUF_EN, 0x0); + + /* Disable the LTE coex function on the WiFi side */ + rtw_coex_write_indirect_reg(rtwdev, LTE_COEX_CTRL, BIT_LTE_COEX_EN, 0); + + /* BTC_CTT_WL_VS_LTE */ + rtw_coex_write_indirect_reg(rtwdev, LTE_WL_TRX_CTRL, MASKLWORD, 0xffff); + + /* BTC_CTT_BT_VS_LTE */ + rtw_coex_write_indirect_reg(rtwdev,
LTE_BT_TRX_CTRL, MASKLWORD, 0xffff); +} + +static void rtw8822b_coex_cfg_wl_tx_power(struct rtw_dev *rtwdev, u8 wl_pwr) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_dm *coex_dm = &coex->dm; + static const u16 reg_addr[] = {0xc58, 0xe58}; + static const u8 wl_tx_power[] = {0xd8, 0xd4, 0xd0, 0xcc, 0xc8}; + u8 i, pwr; + + if (wl_pwr == coex_dm->cur_wl_pwr_lvl) + return; + + coex_dm->cur_wl_pwr_lvl = wl_pwr; + + if (coex_dm->cur_wl_pwr_lvl >= ARRAY_SIZE(wl_tx_power)) + coex_dm->cur_wl_pwr_lvl = ARRAY_SIZE(wl_tx_power) - 1; + + pwr = wl_tx_power[coex_dm->cur_wl_pwr_lvl]; + + for (i = 0; i < ARRAY_SIZE(reg_addr); i++) + rtw_write8_mask(rtwdev, reg_addr[i], 0xff, pwr); +} + +static void rtw8822b_coex_cfg_wl_rx_gain(struct rtw_dev *rtwdev, bool low_gain) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_dm *coex_dm = &coex->dm; + /* WL Rx Low gain on */ + static const u32 wl_rx_low_gain_on[] = { + 0xff000003, 0xbd120003, 0xbe100003, 0xbf080003, 0xbf060003, + 0xbf050003, 0xbc140003, 0xbb160003, 0xba180003, 0xb91a0003, + 0xb81c0003, 0xb71e0003, 0xb4200003, 0xb5220003, 0xb4240003, + 0xb3260003, 0xb2280003, 0xb12a0003, 0xb02c0003, 0xaf2e0003, + 0xae300003, 0xad320003, 0xac340003, 0xab360003, 0x8d380003, + 0x8c3a0003, 0x8b3c0003, 0x8a3e0003, 0x6e400003, 0x6d420003, + 0x6c440003, 0x6b460003, 0x6a480003, 0x694a0003, 0x684c0003, + 0x674e0003, 0x66500003, 0x65520003, 0x64540003, 0x64560003, + 0x007e0403 + }; + + /* WL Rx Low gain off */ + static const u32 wl_rx_low_gain_off[] = { + 0xff000003, 0xf4120003, 0xf5100003, 0xf60e0003, 0xf70c0003, + 0xf80a0003, 0xf3140003, 0xf2160003, 0xf1180003, 0xf01a0003, + 0xef1c0003, 0xee1e0003, 0xed200003, 0xec220003, 0xeb240003, + 0xea260003, 0xe9280003, 0xe82a0003, 0xe72c0003, 0xe62e0003, + 0xe5300003, 0xc8320003, 0xc7340003, 0xc6360003, 0xc5380003, + 0xc43a0003, 0xc33c0003, 0xc23e0003, 0xc1400003, 0xc0420003, + 0xa5440003, 0xa4460003, 0xa3480003, 0xa24a0003, 0xa14c0003, + 0x834e0003, 0x82500003, 0x81520003, 0x80540003, 0x65560003, + 0x007e0403 + }; + u8 i; + + if (low_gain == coex_dm->cur_wl_rx_low_gain_en) + return; + + coex_dm->cur_wl_rx_low_gain_en = low_gain; + + if (coex_dm->cur_wl_rx_low_gain_en) { + for (i = 0; i < ARRAY_SIZE(wl_rx_low_gain_on); i++) + rtw_write32(rtwdev, REG_RX_GAIN_EN, wl_rx_low_gain_on[i]); + + /* set Rx filter corner RCK offset */ + rtw_write_rf(rtwdev, RF_PATH_A, RF_RCKD, 0x2, 0x1); + rtw_write_rf(rtwdev, RF_PATH_A, RF_RCK, 0x3f, 0x3f); + rtw_write_rf(rtwdev, RF_PATH_B, RF_RCKD, 0x2, 0x1); + rtw_write_rf(rtwdev, RF_PATH_B, RF_RCK, 0x3f, 0x3f); + } else { + for (i = 0; i < ARRAY_SIZE(wl_rx_low_gain_off); i++) + rtw_write32(rtwdev, 0x81c, wl_rx_low_gain_off[i]); + + /* set Rx filter corner RCK offset */ + rtw_write_rf(rtwdev, RF_PATH_A, RF_RCK, 0x3f, 0x4); + rtw_write_rf(rtwdev, RF_PATH_A, RF_RCKD, 0x2, 0x0); + rtw_write_rf(rtwdev, RF_PATH_B, RF_RCK, 0x3f, 0x4); + rtw_write_rf(rtwdev, RF_PATH_B, RF_RCKD, 0x2, 0x0); + } +} + static struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822b[] = { {0x0086, RTW_PWR_CUT_ALL_MSK, @@ -1548,9 +1800,161 @@ static struct rtw_chip_ops rtw8822b_ops = { .set_antenna = rtw8822b_set_antenna, .cfg_ldo25 = rtw8822b_cfg_ldo25, .false_alarm_statistics = rtw8822b_false_alarm_statistics, - .do_iqk = rtw8822b_do_iqk, + .phy_calibration = rtw8822b_phy_calibration, + + .coex_set_init = rtw8822b_coex_cfg_init, + .coex_set_ant_switch = rtw8822b_coex_cfg_ant_switch, + .coex_set_gnt_fix = rtw8822b_coex_cfg_gnt_fix, + .coex_set_gnt_debug = rtw8822b_coex_cfg_gnt_debug, + .coex_set_rfe_type = 
rtw8822b_coex_cfg_rfe_type, + .coex_set_wl_tx_power = rtw8822b_coex_cfg_wl_tx_power, + .coex_set_wl_rx_gain = rtw8822b_coex_cfg_wl_rx_gain, +}; + +/* Shared-Antenna Coex Table */ +static const struct coex_table_para table_sant_8822b[] = { + {0xffffffff, 0xffffffff}, /* case-0 */ + {0x55555555, 0x55555555}, + {0x66555555, 0x66555555}, + {0xaaaaaaaa, 0xaaaaaaaa}, + {0x5a5a5a5a, 0x5a5a5a5a}, + {0xfafafafa, 0xfafafafa}, /* case-5 */ + {0x6a5a6a5a, 0xaaaaaaaa}, + {0x6a5a56aa, 0x6a5a56aa}, + {0x6a5a5a5a, 0x6a5a5a5a}, + {0x66555555, 0x5a5a5a5a}, + {0x66555555, 0x6a5a5a5a}, /* case-10 */ + {0x66555555, 0xfafafafa}, + {0x66555555, 0x6a5a5aaa}, + {0x66555555, 0x5aaa5aaa}, + {0x66555555, 0xaaaa5aaa}, + {0x66555555, 0xaaaaaaaa}, /* case-15 */ + {0xffff55ff, 0xfafafafa}, + {0xffff55ff, 0x6afa5afa}, + {0xaaffffaa, 0xfafafafa}, + {0xaa5555aa, 0x5a5a5a5a}, + {0xaa5555aa, 0x6a5a5a5a}, /* case-20 */ + {0xaa5555aa, 0xaaaaaaaa}, + {0xffffffff, 0x5a5a5a5a}, + {0xffffffff, 0x6a5a5a5a}, + {0xffffffff, 0x55555555}, + {0xffffffff, 0x6a5a5aaa}, /* case-25 */ + {0x55555555, 0x5a5a5a5a}, + {0x55555555, 0xaaaaaaaa}, + {0x55555555, 0x6a5a6a5a}, + {0x66556655, 0x66556655} +}; + +/* Non-Shared-Antenna Coex Table */ +static const struct coex_table_para table_nsant_8822b[] = { + {0xffffffff, 0xffffffff}, /* case-100 */ + {0x55555555, 0x55555555}, + {0x66555555, 0x66555555}, + {0xaaaaaaaa, 0xaaaaaaaa}, + {0x5a5a5a5a, 0x5a5a5a5a}, + {0xfafafafa, 0xfafafafa}, /* case-105 */ + {0x5afa5afa, 0x5afa5afa}, + {0x55555555, 0xfafafafa}, + {0x66555555, 0xfafafafa}, + {0x66555555, 0x5a5a5a5a}, + {0x66555555, 0x6a5a5a5a}, /* case-110 */ + {0x66555555, 0xaaaaaaaa}, + {0xffff55ff, 0xfafafafa}, + {0xffff55ff, 0x5afa5afa}, + {0xffff55ff, 0xaaaaaaaa}, + {0xaaffffaa, 0xfafafafa}, /* case-115 */ + {0xaaffffaa, 0x5afa5afa}, + {0xaaffffaa, 0xaaaaaaaa}, + {0xffffffff, 0xfafafafa}, + {0xffffffff, 0x5afa5afa}, + {0xffffffff, 0xaaaaaaaa}, /* case-120 */ + {0x55ff55ff, 0x5afa5afa}, + {0x55ff55ff, 0xaaaaaaaa}, + {0x55ff55ff, 0x55ff55ff} }; +/* Shared-Antenna TDMA */ +static const struct coex_tdma_para tdma_sant_8822b[] = { + { {0x00, 0x00, 0x00, 0x00, 0x00} }, /* case-0 */ + { {0x61, 0x45, 0x03, 0x11, 0x11} }, + { {0x61, 0x3a, 0x03, 0x11, 0x11} }, + { {0x61, 0x30, 0x03, 0x11, 0x11} }, + { {0x61, 0x20, 0x03, 0x11, 0x11} }, + { {0x61, 0x10, 0x03, 0x11, 0x11} }, /* case-5 */ + { {0x61, 0x45, 0x03, 0x11, 0x10} }, + { {0x61, 0x3a, 0x03, 0x11, 0x10} }, + { {0x61, 0x30, 0x03, 0x11, 0x10} }, + { {0x61, 0x20, 0x03, 0x11, 0x10} }, + { {0x61, 0x10, 0x03, 0x11, 0x10} }, /* case-10 */ + { {0x61, 0x08, 0x03, 0x11, 0x14} }, + { {0x61, 0x08, 0x03, 0x10, 0x14} }, + { {0x51, 0x08, 0x03, 0x10, 0x54} }, + { {0x51, 0x08, 0x03, 0x10, 0x55} }, + { {0x51, 0x08, 0x07, 0x10, 0x54} }, /* case-15 */ + { {0x51, 0x45, 0x03, 0x10, 0x10} }, + { {0x51, 0x3a, 0x03, 0x10, 0x50} }, + { {0x51, 0x30, 0x03, 0x10, 0x50} }, + { {0x51, 0x20, 0x03, 0x10, 0x50} }, + { {0x51, 0x10, 0x03, 0x10, 0x50} }, /* case-20 */ + { {0x51, 0x4a, 0x03, 0x10, 0x50} }, + { {0x51, 0x0c, 0x03, 0x10, 0x54} }, + { {0x55, 0x08, 0x03, 0x10, 0x54} }, + { {0x65, 0x10, 0x03, 0x11, 0x11} }, + { {0x51, 0x10, 0x03, 0x10, 0x51} }, /* case-25 */ + { {0x51, 0x08, 0x03, 0x10, 0x50} } +}; + +/* Non-Shared-Antenna TDMA */ +static const struct coex_tdma_para tdma_nsant_8822b[] = { + { {0x00, 0x00, 0x00, 0x00, 0x00} }, /* case-100 */ + { {0x61, 0x45, 0x03, 0x11, 0x11} }, + { {0x61, 0x3a, 0x03, 0x11, 0x11} }, + { {0x61, 0x30, 0x03, 0x11, 0x11} }, + { {0x61, 0x20, 0x03, 0x11, 0x11} }, + { {0x61, 0x10, 0x03, 0x11, 0x11} }, /* 
case-105 */ + { {0x61, 0x45, 0x03, 0x11, 0x10} }, + { {0x61, 0x3a, 0x03, 0x11, 0x10} }, + { {0x61, 0x30, 0x03, 0x11, 0x10} }, + { {0x61, 0x20, 0x03, 0x11, 0x10} }, + { {0x61, 0x10, 0x03, 0x11, 0x10} }, /* case-110 */ + { {0x61, 0x08, 0x03, 0x11, 0x14} }, + { {0x61, 0x08, 0x03, 0x10, 0x14} }, + { {0x51, 0x08, 0x03, 0x10, 0x54} }, + { {0x51, 0x08, 0x03, 0x10, 0x55} }, + { {0x51, 0x08, 0x07, 0x10, 0x54} }, /* case-115 */ + { {0x51, 0x45, 0x03, 0x10, 0x50} }, + { {0x51, 0x3a, 0x03, 0x10, 0x50} }, + { {0x51, 0x30, 0x03, 0x10, 0x50} }, + { {0x51, 0x20, 0x03, 0x10, 0x50} }, + { {0x51, 0x10, 0x03, 0x10, 0x50} } /* case-120 */ +}; + +/* rssi in percentage % (dbm = % - 100) */ +static const u8 wl_rssi_step_8822b[] = {60, 50, 44, 30}; +static const u8 bt_rssi_step_8822b[] = {30, 30, 30, 30}; +static const struct coex_5g_afh_map afh_5g_8822b[] = { {0, 0, 0} }; + +/* wl_tx_dec_power, bt_tx_dec_power, wl_rx_gain, bt_rx_lna_constrain */ +static const struct coex_rf_para rf_para_tx_8822b[] = { + {0, 0, false, 7}, /* for normal */ + {0, 16, false, 7}, /* for WL-CPT */ + {4, 0, true, 1}, + {3, 6, true, 1}, + {2, 9, true, 1}, + {1, 13, true, 1} +}; + +static const struct coex_rf_para rf_para_rx_8822b[] = { + {0, 0, false, 7}, /* for normal */ + {0, 16, false, 7}, /* for WL-CPT */ + {4, 0, true, 1}, + {3, 6, true, 1}, + {2, 9, true, 1}, + {1, 13, true, 1} +}; + +static_assert(ARRAY_SIZE(rf_para_tx_8822b) == ARRAY_SIZE(rf_para_rx_8822b)); + struct rtw_chip_info rtw8822b_hw_spec = { .ops = &rtw8822b_ops, .id = RTW_CHIP_TYPE_8822B, @@ -1588,6 +1992,32 @@ struct rtw_chip_info rtw8822b_hw_spec = { .rf_tbl = {&rtw8822b_rf_a_tbl, &rtw8822b_rf_b_tbl}, .rfe_defs = rtw8822b_rfe_defs, .rfe_defs_size = ARRAY_SIZE(rtw8822b_rfe_defs), + + .coex_para_ver = 0x19062706, + .bt_desired_ver = 0x6, + .scbd_support = true, + .new_scbd10_def = false, + .pstdma_type = COEX_PSTDMA_FORCE_LPSOFF, + .bt_rssi_type = COEX_BTRSSI_RATIO, + .ant_isolation = 15, + .rssi_tolerance = 2, + .wl_rssi_step = wl_rssi_step_8822b, + .bt_rssi_step = bt_rssi_step_8822b, + .table_sant_num = ARRAY_SIZE(table_sant_8822b), + .table_sant = table_sant_8822b, + .table_nsant_num = ARRAY_SIZE(table_nsant_8822b), + .table_nsant = table_nsant_8822b, + .tdma_sant_num = ARRAY_SIZE(tdma_sant_8822b), + .tdma_sant = tdma_sant_8822b, + .tdma_nsant_num = ARRAY_SIZE(tdma_nsant_8822b), + .tdma_nsant = tdma_nsant_8822b, + .wl_rf_para_num = ARRAY_SIZE(rf_para_tx_8822b), + .wl_rf_para_tx = rf_para_tx_8822b, + .wl_rf_para_rx = rf_para_rx_8822b, + .bt_afh_span_bw20 = 0x24, + .bt_afh_span_bw40 = 0x36, + .afh_5g_num = ARRAY_SIZE(afh_5g_8822b), + .afh_5g = afh_5g_8822b, }; EXPORT_SYMBOL(rtw8822b_hw_spec); diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b_table.c b/drivers/net/wireless/realtek/rtw88/rtw8822b_table.c index 2d2dfb495ce1..465f58411cab 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822b_table.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822b_table.c @@ -20382,402 +20382,1182 @@ static const u32 rtw8822b_rf_b[] = { RTW_DECL_TABLE_RF_RADIO(rtw8822b_rf_b, B); -static const u8 rtw8822b_txpwr_lmt_type2[] = { - 0, 0, 0, 0, 1, 32, 2, 0, 0, 0, 1, 28, 1, 0, 0, 0, 1, 30, - 0, 0, 0, 0, 2, 32, 2, 0, 0, 0, 2, 28, 1, 0, 0, 0, 2, 30, - 0, 0, 0, 0, 3, 32, 2, 0, 0, 0, 3, 28, 1, 0, 0, 0, 3, 30, - 0, 0, 0, 0, 4, 32, 2, 0, 0, 0, 4, 28, 1, 0, 0, 0, 4, 30, - 0, 0, 0, 0, 5, 32, 2, 0, 0, 0, 5, 28, 1, 0, 0, 0, 5, 30, - 0, 0, 0, 0, 6, 32, 2, 0, 0, 0, 6, 28, 1, 0, 0, 0, 6, 30, - 0, 0, 0, 0, 7, 32, 2, 0, 0, 0, 7, 28, 1, 0, 0, 0, 7, 30, - 0, 0, 0, 0, 8, 32, 2, 0, 0, 0, 8, 28, 1, 0, 
0, 0, 8, 30, - 0, 0, 0, 0, 9, 32, 2, 0, 0, 0, 9, 28, 1, 0, 0, 0, 9, 30, - 0, 0, 0, 0, 10, 32, 2, 0, 0, 0, 10, 28, 1, 0, 0, 0, 10, 30, - 0, 0, 0, 0, 11, 32, 2, 0, 0, 0, 11, 28, 1, 0, 0, 0, 11, 30, - 0, 0, 0, 0, 12, 26, 2, 0, 0, 0, 12, 28, 1, 0, 0, 0, 12, 30, - 0, 0, 0, 0, 13, 20, 2, 0, 0, 0, 13, 28, 1, 0, 0, 0, 13, 28, - 0, 0, 0, 0, 14, 63, 2, 0, 0, 0, 14, 63, 1, 0, 0, 0, 14, 32, - 0, 0, 0, 1, 1, 26, 2, 0, 0, 1, 1, 30, 1, 0, 0, 1, 1, 34, - 0, 0, 0, 1, 2, 30, 2, 0, 0, 1, 2, 30, 1, 0, 0, 1, 2, 34, - 0, 0, 0, 1, 3, 32, 2, 0, 0, 1, 3, 30, 1, 0, 0, 1, 3, 34, - 0, 0, 0, 1, 4, 34, 2, 0, 0, 1, 4, 30, 1, 0, 0, 1, 4, 34, - 0, 0, 0, 1, 5, 34, 2, 0, 0, 1, 5, 30, 1, 0, 0, 1, 5, 34, - 0, 0, 0, 1, 6, 34, 2, 0, 0, 1, 6, 30, 1, 0, 0, 1, 6, 34, - 0, 0, 0, 1, 7, 34, 2, 0, 0, 1, 7, 30, 1, 0, 0, 1, 7, 34, - 0, 0, 0, 1, 8, 34, 2, 0, 0, 1, 8, 30, 1, 0, 0, 1, 8, 34, - 0, 0, 0, 1, 9, 32, 2, 0, 0, 1, 9, 30, 1, 0, 0, 1, 9, 34, - 0, 0, 0, 1, 10, 30, 2, 0, 0, 1, 10, 30, 1, 0, 0, 1, 10, 34, - 0, 0, 0, 1, 11, 28, 2, 0, 0, 1, 11, 30, 1, 0, 0, 1, 11, 34, - 0, 0, 0, 1, 12, 22, 2, 0, 0, 1, 12, 30, 1, 0, 0, 1, 12, 34, - 0, 0, 0, 1, 13, 14, 2, 0, 0, 1, 13, 30, 1, 0, 0, 1, 13, 34, - 0, 0, 0, 1, 14, 63, 2, 0, 0, 1, 14, 63, 1, 0, 0, 1, 14, 63, - 0, 0, 0, 2, 1, 26, 2, 0, 0, 2, 1, 30, 1, 0, 0, 2, 1, 34, - 0, 0, 0, 2, 2, 30, 2, 0, 0, 2, 2, 30, 1, 0, 0, 2, 2, 34, - 0, 0, 0, 2, 3, 32, 2, 0, 0, 2, 3, 30, 1, 0, 0, 2, 3, 34, - 0, 0, 0, 2, 4, 34, 2, 0, 0, 2, 4, 30, 1, 0, 0, 2, 4, 34, - 0, 0, 0, 2, 5, 34, 2, 0, 0, 2, 5, 30, 1, 0, 0, 2, 5, 34, - 0, 0, 0, 2, 6, 34, 2, 0, 0, 2, 6, 30, 1, 0, 0, 2, 6, 34, - 0, 0, 0, 2, 7, 34, 2, 0, 0, 2, 7, 30, 1, 0, 0, 2, 7, 34, - 0, 0, 0, 2, 8, 34, 2, 0, 0, 2, 8, 30, 1, 0, 0, 2, 8, 34, - 0, 0, 0, 2, 9, 32, 2, 0, 0, 2, 9, 30, 1, 0, 0, 2, 9, 34, - 0, 0, 0, 2, 10, 30, 2, 0, 0, 2, 10, 30, 1, 0, 0, 2, 10, 34, - 0, 0, 0, 2, 11, 26, 2, 0, 0, 2, 11, 30, 1, 0, 0, 2, 11, 34, - 0, 0, 0, 2, 12, 20, 2, 0, 0, 2, 12, 30, 1, 0, 0, 2, 12, 34, - 0, 0, 0, 2, 13, 14, 2, 0, 0, 2, 13, 30, 1, 0, 0, 2, 13, 34, - 0, 0, 0, 2, 14, 63, 2, 0, 0, 2, 14, 63, 1, 0, 0, 2, 14, 63, - 0, 0, 0, 3, 1, 26, 2, 0, 0, 3, 1, 18, 1, 0, 0, 3, 1, 30, - 0, 0, 0, 3, 2, 28, 2, 0, 0, 3, 2, 18, 1, 0, 0, 3, 2, 30, - 0, 0, 0, 3, 3, 30, 2, 0, 0, 3, 3, 18, 1, 0, 0, 3, 3, 30, - 0, 0, 0, 3, 4, 30, 2, 0, 0, 3, 4, 18, 1, 0, 0, 3, 4, 30, - 0, 0, 0, 3, 5, 32, 2, 0, 0, 3, 5, 18, 1, 0, 0, 3, 5, 30, - 0, 0, 0, 3, 6, 32, 2, 0, 0, 3, 6, 18, 1, 0, 0, 3, 6, 30, - 0, 0, 0, 3, 7, 32, 2, 0, 0, 3, 7, 18, 1, 0, 0, 3, 7, 30, - 0, 0, 0, 3, 8, 30, 2, 0, 0, 3, 8, 18, 1, 0, 0, 3, 8, 30, - 0, 0, 0, 3, 9, 30, 2, 0, 0, 3, 9, 18, 1, 0, 0, 3, 9, 30, - 0, 0, 0, 3, 10, 28, 2, 0, 0, 3, 10, 18, 1, 0, 0, 3, 10, 30, - 0, 0, 0, 3, 11, 26, 2, 0, 0, 3, 11, 18, 1, 0, 0, 3, 11, 30, - 0, 0, 0, 3, 12, 20, 2, 0, 0, 3, 12, 18, 1, 0, 0, 3, 12, 30, - 0, 0, 0, 3, 13, 14, 2, 0, 0, 3, 13, 18, 1, 0, 0, 3, 13, 30, - 0, 0, 0, 3, 14, 63, 2, 0, 0, 3, 14, 63, 1, 0, 0, 3, 14, 63, - 0, 0, 1, 2, 1, 63, 2, 0, 1, 2, 1, 63, 1, 0, 1, 2, 1, 63, - 0, 0, 1, 2, 2, 63, 2, 0, 1, 2, 2, 63, 1, 0, 1, 2, 2, 63, - 0, 0, 1, 2, 3, 26, 2, 0, 1, 2, 3, 30, 1, 0, 1, 2, 3, 34, - 0, 0, 1, 2, 4, 26, 2, 0, 1, 2, 4, 30, 1, 0, 1, 2, 4, 34, - 0, 0, 1, 2, 5, 30, 2, 0, 1, 2, 5, 30, 1, 0, 1, 2, 5, 34, - 0, 0, 1, 2, 6, 32, 2, 0, 1, 2, 6, 30, 1, 0, 1, 2, 6, 34, - 0, 0, 1, 2, 7, 30, 2, 0, 1, 2, 7, 30, 1, 0, 1, 2, 7, 34, - 0, 0, 1, 2, 8, 26, 2, 0, 1, 2, 8, 30, 1, 0, 1, 2, 8, 34, - 0, 0, 1, 2, 9, 26, 2, 0, 1, 2, 9, 30, 1, 0, 1, 2, 9, 34, - 0, 0, 1, 2, 10, 20, 2, 0, 1, 2, 10, 30, 1, 0, 1, 2, 10, 34, - 0, 0, 1, 2, 11, 14, 2, 0, 1, 2, 11, 30, 1, 0, 1, 2, 
11, 34, - 0, 0, 1, 2, 12, 63, 2, 0, 1, 2, 12, 63, 1, 0, 1, 2, 12, 63, - 0, 0, 1, 2, 13, 63, 2, 0, 1, 2, 13, 63, 1, 0, 1, 2, 13, 63, - 0, 0, 1, 2, 14, 63, 2, 0, 1, 2, 14, 63, 1, 0, 1, 2, 14, 63, - 0, 0, 1, 3, 1, 63, 2, 0, 1, 3, 1, 63, 1, 0, 1, 3, 1, 63, - 0, 0, 1, 3, 2, 63, 2, 0, 1, 3, 2, 63, 1, 0, 1, 3, 2, 63, - 0, 0, 1, 3, 3, 24, 2, 0, 1, 3, 3, 18, 1, 0, 1, 3, 3, 30, - 0, 0, 1, 3, 4, 24, 2, 0, 1, 3, 4, 18, 1, 0, 1, 3, 4, 30, - 0, 0, 1, 3, 5, 26, 2, 0, 1, 3, 5, 18, 1, 0, 1, 3, 5, 30, - 0, 0, 1, 3, 6, 28, 2, 0, 1, 3, 6, 18, 1, 0, 1, 3, 6, 30, - 0, 0, 1, 3, 7, 26, 2, 0, 1, 3, 7, 18, 1, 0, 1, 3, 7, 30, - 0, 0, 1, 3, 8, 26, 2, 0, 1, 3, 8, 18, 1, 0, 1, 3, 8, 30, - 0, 0, 1, 3, 9, 26, 2, 0, 1, 3, 9, 18, 1, 0, 1, 3, 9, 30, - 0, 0, 1, 3, 10, 20, 2, 0, 1, 3, 10, 18, 1, 0, 1, 3, 10, 30, - 0, 0, 1, 3, 11, 14, 2, 0, 1, 3, 11, 18, 1, 0, 1, 3, 11, 30, - 0, 0, 1, 3, 12, 63, 2, 0, 1, 3, 12, 63, 1, 0, 1, 3, 12, 63, - 0, 0, 1, 3, 13, 63, 2, 0, 1, 3, 13, 63, 1, 0, 1, 3, 13, 63, - 0, 0, 1, 3, 14, 63, 2, 0, 1, 3, 14, 63, 1, 0, 1, 3, 14, 63, - 0, 1, 0, 1, 36, 36, 2, 1, 0, 1, 36, 32, 1, 1, 0, 1, 36, 30, - 0, 1, 0, 1, 40, 38, 2, 1, 0, 1, 40, 32, 1, 1, 0, 1, 40, 30, - 0, 1, 0, 1, 44, 38, 2, 1, 0, 1, 44, 32, 1, 1, 0, 1, 44, 30, - 0, 1, 0, 1, 48, 38, 2, 1, 0, 1, 48, 32, 1, 1, 0, 1, 48, 30, - 0, 1, 0, 1, 52, 38, 2, 1, 0, 1, 52, 32, 1, 1, 0, 1, 52, 28, - 0, 1, 0, 1, 56, 38, 2, 1, 0, 1, 56, 32, 1, 1, 0, 1, 56, 28, - 0, 1, 0, 1, 60, 38, 2, 1, 0, 1, 60, 32, 1, 1, 0, 1, 60, 28, - 0, 1, 0, 1, 64, 34, 2, 1, 0, 1, 64, 32, 1, 1, 0, 1, 64, 28, - 0, 1, 0, 1, 100, 32, 2, 1, 0, 1, 100, 32, 1, 1, 0, 1, 100, 32, - 0, 1, 0, 1, 104, 38, 2, 1, 0, 1, 104, 32, 1, 1, 0, 1, 104, 32, - 0, 1, 0, 1, 108, 38, 2, 1, 0, 1, 108, 32, 1, 1, 0, 1, 108, 32, - 0, 1, 0, 1, 112, 38, 2, 1, 0, 1, 112, 32, 1, 1, 0, 1, 112, 32, - 0, 1, 0, 1, 116, 38, 2, 1, 0, 1, 116, 32, 1, 1, 0, 1, 116, 32, - 0, 1, 0, 1, 120, 38, 2, 1, 0, 1, 120, 32, 1, 1, 0, 1, 120, 32, - 0, 1, 0, 1, 124, 38, 2, 1, 0, 1, 124, 32, 1, 1, 0, 1, 124, 32, - 0, 1, 0, 1, 128, 38, 2, 1, 0, 1, 128, 32, 1, 1, 0, 1, 128, 32, - 0, 1, 0, 1, 132, 38, 2, 1, 0, 1, 132, 32, 1, 1, 0, 1, 132, 32, - 0, 1, 0, 1, 136, 38, 2, 1, 0, 1, 136, 32, 1, 1, 0, 1, 136, 32, - 0, 1, 0, 1, 140, 34, 2, 1, 0, 1, 140, 32, 1, 1, 0, 1, 140, 32, - 0, 1, 0, 1, 144, 34, 2, 1, 0, 1, 144, 32, 1, 1, 0, 1, 144, 63, - 0, 1, 0, 1, 149, 38, 2, 1, 0, 1, 149, 63, 1, 1, 0, 1, 149, 63, - 0, 1, 0, 1, 153, 38, 2, 1, 0, 1, 153, 63, 1, 1, 0, 1, 153, 63, - 0, 1, 0, 1, 157, 38, 2, 1, 0, 1, 157, 63, 1, 1, 0, 1, 157, 63, - 0, 1, 0, 1, 161, 38, 2, 1, 0, 1, 161, 63, 1, 1, 0, 1, 161, 63, - 0, 1, 0, 1, 165, 38, 2, 1, 0, 1, 165, 63, 1, 1, 0, 1, 165, 63, - 0, 1, 0, 2, 36, 36, 2, 1, 0, 2, 36, 32, 1, 1, 0, 2, 36, 28, - 0, 1, 0, 2, 40, 38, 2, 1, 0, 2, 40, 32, 1, 1, 0, 2, 40, 28, - 0, 1, 0, 2, 44, 38, 2, 1, 0, 2, 44, 32, 1, 1, 0, 2, 44, 28, - 0, 1, 0, 2, 48, 38, 2, 1, 0, 2, 48, 32, 1, 1, 0, 2, 48, 28, - 0, 1, 0, 2, 52, 38, 2, 1, 0, 2, 52, 32, 1, 1, 0, 2, 52, 28, - 0, 1, 0, 2, 56, 38, 2, 1, 0, 2, 56, 32, 1, 1, 0, 2, 56, 28, - 0, 1, 0, 2, 60, 38, 2, 1, 0, 2, 60, 32, 1, 1, 0, 2, 60, 28, - 0, 1, 0, 2, 64, 34, 2, 1, 0, 2, 64, 32, 1, 1, 0, 2, 64, 28, - 0, 1, 0, 2, 100, 32, 2, 1, 0, 2, 100, 32, 1, 1, 0, 2, 100, 32, - 0, 1, 0, 2, 104, 38, 2, 1, 0, 2, 104, 32, 1, 1, 0, 2, 104, 32, - 0, 1, 0, 2, 108, 38, 2, 1, 0, 2, 108, 32, 1, 1, 0, 2, 108, 32, - 0, 1, 0, 2, 112, 38, 2, 1, 0, 2, 112, 32, 1, 1, 0, 2, 112, 32, - 0, 1, 0, 2, 116, 38, 2, 1, 0, 2, 116, 32, 1, 1, 0, 2, 116, 32, - 0, 1, 0, 2, 120, 38, 2, 1, 0, 2, 120, 32, 1, 1, 0, 2, 120, 32, - 0, 1, 0, 2, 124, 38, 2, 1, 0, 
2, 124, 32, 1, 1, 0, 2, 124, 32, - 0, 1, 0, 2, 128, 38, 2, 1, 0, 2, 128, 32, 1, 1, 0, 2, 128, 32, - 0, 1, 0, 2, 132, 38, 2, 1, 0, 2, 132, 32, 1, 1, 0, 2, 132, 32, - 0, 1, 0, 2, 136, 38, 2, 1, 0, 2, 136, 32, 1, 1, 0, 2, 136, 32, - 0, 1, 0, 2, 140, 32, 2, 1, 0, 2, 140, 32, 1, 1, 0, 2, 140, 32, - 0, 1, 0, 2, 144, 26, 2, 1, 0, 2, 144, 63, 1, 1, 0, 2, 144, 63, - 0, 1, 0, 2, 149, 38, 2, 1, 0, 2, 149, 63, 1, 1, 0, 2, 149, 63, - 0, 1, 0, 2, 153, 38, 2, 1, 0, 2, 153, 63, 1, 1, 0, 2, 153, 63, - 0, 1, 0, 2, 157, 38, 2, 1, 0, 2, 157, 63, 1, 1, 0, 2, 157, 63, - 0, 1, 0, 2, 161, 38, 2, 1, 0, 2, 161, 63, 1, 1, 0, 2, 161, 63, - 0, 1, 0, 2, 165, 38, 2, 1, 0, 2, 165, 63, 1, 1, 0, 2, 165, 63, - 0, 1, 0, 3, 36, 34, 2, 1, 0, 3, 36, 20, 1, 1, 0, 3, 36, 22, - 0, 1, 0, 3, 40, 36, 2, 1, 0, 3, 40, 20, 1, 1, 0, 3, 40, 22, - 0, 1, 0, 3, 44, 36, 2, 1, 0, 3, 44, 20, 1, 1, 0, 3, 44, 22, - 0, 1, 0, 3, 48, 36, 2, 1, 0, 3, 48, 20, 1, 1, 0, 3, 48, 22, - 0, 1, 0, 3, 52, 36, 2, 1, 0, 3, 52, 20, 1, 1, 0, 3, 52, 22, - 0, 1, 0, 3, 56, 36, 2, 1, 0, 3, 56, 20, 1, 1, 0, 3, 56, 22, - 0, 1, 0, 3, 60, 36, 2, 1, 0, 3, 60, 20, 1, 1, 0, 3, 60, 22, - 0, 1, 0, 3, 64, 34, 2, 1, 0, 3, 64, 20, 1, 1, 0, 3, 64, 22, - 0, 1, 0, 3, 100, 32, 2, 1, 0, 3, 100, 20, 1, 1, 0, 3, 100, 30, - 0, 1, 0, 3, 104, 36, 2, 1, 0, 3, 104, 20, 1, 1, 0, 3, 104, 30, - 0, 1, 0, 3, 108, 38, 2, 1, 0, 3, 108, 20, 1, 1, 0, 3, 108, 30, - 0, 1, 0, 3, 112, 38, 2, 1, 0, 3, 112, 20, 1, 1, 0, 3, 112, 30, - 0, 1, 0, 3, 116, 38, 2, 1, 0, 3, 116, 20, 1, 1, 0, 3, 116, 30, - 0, 1, 0, 3, 120, 38, 2, 1, 0, 3, 120, 20, 1, 1, 0, 3, 120, 30, - 0, 1, 0, 3, 124, 38, 2, 1, 0, 3, 124, 20, 1, 1, 0, 3, 124, 30, - 0, 1, 0, 3, 128, 38, 2, 1, 0, 3, 128, 20, 1, 1, 0, 3, 128, 30, - 0, 1, 0, 3, 132, 38, 2, 1, 0, 3, 132, 20, 1, 1, 0, 3, 132, 30, - 0, 1, 0, 3, 136, 36, 2, 1, 0, 3, 136, 20, 1, 1, 0, 3, 136, 30, - 0, 1, 0, 3, 140, 32, 2, 1, 0, 3, 140, 20, 1, 1, 0, 3, 140, 30, - 0, 1, 0, 3, 144, 26, 2, 1, 0, 3, 144, 63, 1, 1, 0, 3, 144, 63, - 0, 1, 0, 3, 149, 38, 2, 1, 0, 3, 149, 63, 1, 1, 0, 3, 149, 63, - 0, 1, 0, 3, 153, 38, 2, 1, 0, 3, 153, 63, 1, 1, 0, 3, 153, 63, - 0, 1, 0, 3, 157, 38, 2, 1, 0, 3, 157, 63, 1, 1, 0, 3, 157, 63, - 0, 1, 0, 3, 161, 38, 2, 1, 0, 3, 161, 63, 1, 1, 0, 3, 161, 63, - 0, 1, 0, 3, 165, 38, 2, 1, 0, 3, 165, 63, 1, 1, 0, 3, 165, 63, - 0, 1, 1, 2, 38, 28, 2, 1, 1, 2, 38, 30, 1, 1, 1, 2, 38, 30, - 0, 1, 1, 2, 46, 36, 2, 1, 1, 2, 46, 30, 1, 1, 1, 2, 46, 30, - 0, 1, 1, 2, 54, 36, 2, 1, 1, 2, 54, 30, 1, 1, 1, 2, 54, 30, - 0, 1, 1, 2, 62, 30, 2, 1, 1, 2, 62, 30, 1, 1, 1, 2, 62, 30, - 0, 1, 1, 2, 102, 30, 2, 1, 1, 2, 102, 30, 1, 1, 1, 2, 102, 30, - 0, 1, 1, 2, 110, 36, 2, 1, 1, 2, 110, 30, 1, 1, 1, 2, 110, 30, - 0, 1, 1, 2, 118, 36, 2, 1, 1, 2, 118, 30, 1, 1, 1, 2, 118, 30, - 0, 1, 1, 2, 126, 36, 2, 1, 1, 2, 126, 30, 1, 1, 1, 2, 126, 30, - 0, 1, 1, 2, 134, 36, 2, 1, 1, 2, 134, 30, 1, 1, 1, 2, 134, 30, - 0, 1, 1, 2, 142, 30, 2, 1, 1, 2, 142, 63, 1, 1, 1, 2, 142, 63, - 0, 1, 1, 2, 151, 36, 2, 1, 1, 2, 151, 63, 1, 1, 1, 2, 151, 63, - 0, 1, 1, 2, 159, 36, 2, 1, 1, 2, 159, 63, 1, 1, 1, 2, 159, 63, - 0, 1, 1, 3, 38, 26, 2, 1, 1, 3, 38, 20, 1, 1, 1, 3, 38, 22, - 0, 1, 1, 3, 46, 36, 2, 1, 1, 3, 46, 20, 1, 1, 1, 3, 46, 22, - 0, 1, 1, 3, 54, 36, 2, 1, 1, 3, 54, 20, 1, 1, 1, 3, 54, 22, - 0, 1, 1, 3, 62, 28, 2, 1, 1, 3, 62, 20, 1, 1, 1, 3, 62, 22, - 0, 1, 1, 3, 102, 28, 2, 1, 1, 3, 102, 20, 1, 1, 1, 3, 102, 30, - 0, 1, 1, 3, 110, 36, 2, 1, 1, 3, 110, 20, 1, 1, 1, 3, 110, 30, - 0, 1, 1, 3, 118, 36, 2, 1, 1, 3, 118, 20, 1, 1, 1, 3, 118, 30, - 0, 1, 1, 3, 126, 36, 2, 1, 1, 3, 126, 20, 1, 1, 1, 3, 
126, 30, - 0, 1, 1, 3, 134, 36, 2, 1, 1, 3, 134, 20, 1, 1, 1, 3, 134, 30, - 0, 1, 1, 3, 142, 30, 2, 1, 1, 3, 142, 63, 1, 1, 1, 3, 142, 63, - 0, 1, 1, 3, 151, 36, 2, 1, 1, 3, 151, 63, 1, 1, 1, 3, 151, 63, - 0, 1, 1, 3, 159, 36, 2, 1, 1, 3, 159, 63, 1, 1, 1, 3, 159, 63, - 0, 1, 2, 4, 42, 26, 2, 1, 2, 4, 42, 30, 1, 1, 2, 4, 42, 28, - 0, 1, 2, 4, 58, 26, 2, 1, 2, 4, 58, 30, 1, 1, 2, 4, 58, 28, - 0, 1, 2, 4, 106, 26, 2, 1, 2, 4, 106, 30, 1, 1, 2, 4, 106, 30, - 0, 1, 2, 4, 122, 36, 2, 1, 2, 4, 122, 30, 1, 1, 2, 4, 122, 30, - 0, 1, 2, 4, 138, 36, 2, 1, 2, 4, 138, 63, 1, 1, 2, 4, 138, 63, - 0, 1, 2, 4, 155, 36, 2, 1, 2, 4, 155, 63, 1, 1, 2, 4, 155, 63, - 0, 1, 2, 5, 42, 24, 2, 1, 2, 5, 42, 20, 1, 1, 2, 5, 42, 22, - 0, 1, 2, 5, 58, 24, 2, 1, 2, 5, 58, 20, 1, 1, 2, 5, 58, 22, - 0, 1, 2, 5, 106, 26, 2, 1, 2, 5, 106, 20, 1, 1, 2, 5, 106, 30, - 0, 1, 2, 5, 122, 36, 2, 1, 2, 5, 122, 20, 1, 1, 2, 5, 122, 30, - 0, 1, 2, 5, 138, 36, 2, 1, 2, 5, 138, 63, 1, 1, 2, 5, 138, 63, - 0, 1, 2, 5, 155, 36, 2, 1, 2, 5, 155, 63, 1, 1, 2, 5, 155, 63 +static const struct rtw_txpwr_lmt_cfg_pair rtw8822b_txpwr_lmt_type2[] = { + { 0, 0, 0, 0, 1, 32, }, + { 2, 0, 0, 0, 1, 28, }, + { 1, 0, 0, 0, 1, 30, }, + { 0, 0, 0, 0, 2, 32, }, + { 2, 0, 0, 0, 2, 28, }, + { 1, 0, 0, 0, 2, 30, }, + { 0, 0, 0, 0, 3, 32, }, + { 2, 0, 0, 0, 3, 28, }, + { 1, 0, 0, 0, 3, 30, }, + { 0, 0, 0, 0, 4, 32, }, + { 2, 0, 0, 0, 4, 28, }, + { 1, 0, 0, 0, 4, 30, }, + { 0, 0, 0, 0, 5, 32, }, + { 2, 0, 0, 0, 5, 28, }, + { 1, 0, 0, 0, 5, 30, }, + { 0, 0, 0, 0, 6, 32, }, + { 2, 0, 0, 0, 6, 28, }, + { 1, 0, 0, 0, 6, 30, }, + { 0, 0, 0, 0, 7, 32, }, + { 2, 0, 0, 0, 7, 28, }, + { 1, 0, 0, 0, 7, 30, }, + { 0, 0, 0, 0, 8, 32, }, + { 2, 0, 0, 0, 8, 28, }, + { 1, 0, 0, 0, 8, 30, }, + { 0, 0, 0, 0, 9, 32, }, + { 2, 0, 0, 0, 9, 28, }, + { 1, 0, 0, 0, 9, 30, }, + { 0, 0, 0, 0, 10, 32, }, + { 2, 0, 0, 0, 10, 28, }, + { 1, 0, 0, 0, 10, 30, }, + { 0, 0, 0, 0, 11, 32, }, + { 2, 0, 0, 0, 11, 28, }, + { 1, 0, 0, 0, 11, 30, }, + { 0, 0, 0, 0, 12, 26, }, + { 2, 0, 0, 0, 12, 28, }, + { 1, 0, 0, 0, 12, 30, }, + { 0, 0, 0, 0, 13, 20, }, + { 2, 0, 0, 0, 13, 28, }, + { 1, 0, 0, 0, 13, 28, }, + { 0, 0, 0, 0, 14, 63, }, + { 2, 0, 0, 0, 14, 63, }, + { 1, 0, 0, 0, 14, 32, }, + { 0, 0, 0, 1, 1, 26, }, + { 2, 0, 0, 1, 1, 30, }, + { 1, 0, 0, 1, 1, 34, }, + { 0, 0, 0, 1, 2, 30, }, + { 2, 0, 0, 1, 2, 30, }, + { 1, 0, 0, 1, 2, 34, }, + { 0, 0, 0, 1, 3, 32, }, + { 2, 0, 0, 1, 3, 30, }, + { 1, 0, 0, 1, 3, 34, }, + { 0, 0, 0, 1, 4, 34, }, + { 2, 0, 0, 1, 4, 30, }, + { 1, 0, 0, 1, 4, 34, }, + { 0, 0, 0, 1, 5, 34, }, + { 2, 0, 0, 1, 5, 30, }, + { 1, 0, 0, 1, 5, 34, }, + { 0, 0, 0, 1, 6, 34, }, + { 2, 0, 0, 1, 6, 30, }, + { 1, 0, 0, 1, 6, 34, }, + { 0, 0, 0, 1, 7, 34, }, + { 2, 0, 0, 1, 7, 30, }, + { 1, 0, 0, 1, 7, 34, }, + { 0, 0, 0, 1, 8, 34, }, + { 2, 0, 0, 1, 8, 30, }, + { 1, 0, 0, 1, 8, 34, }, + { 0, 0, 0, 1, 9, 32, }, + { 2, 0, 0, 1, 9, 30, }, + { 1, 0, 0, 1, 9, 34, }, + { 0, 0, 0, 1, 10, 30, }, + { 2, 0, 0, 1, 10, 30, }, + { 1, 0, 0, 1, 10, 34, }, + { 0, 0, 0, 1, 11, 28, }, + { 2, 0, 0, 1, 11, 30, }, + { 1, 0, 0, 1, 11, 34, }, + { 0, 0, 0, 1, 12, 22, }, + { 2, 0, 0, 1, 12, 30, }, + { 1, 0, 0, 1, 12, 34, }, + { 0, 0, 0, 1, 13, 14, }, + { 2, 0, 0, 1, 13, 30, }, + { 1, 0, 0, 1, 13, 34, }, + { 0, 0, 0, 1, 14, 63, }, + { 2, 0, 0, 1, 14, 63, }, + { 1, 0, 0, 1, 14, 63, }, + { 0, 0, 0, 2, 1, 26, }, + { 2, 0, 0, 2, 1, 30, }, + { 1, 0, 0, 2, 1, 34, }, + { 0, 0, 0, 2, 2, 30, }, + { 2, 0, 0, 2, 2, 30, }, + { 1, 0, 0, 2, 2, 34, }, + { 0, 0, 0, 2, 3, 32, }, + { 2, 0, 0, 2, 3, 30, }, + { 1, 0, 0, 2, 3, 
34, }, + { 0, 0, 0, 2, 4, 34, }, + { 2, 0, 0, 2, 4, 30, }, + { 1, 0, 0, 2, 4, 34, }, + { 0, 0, 0, 2, 5, 34, }, + { 2, 0, 0, 2, 5, 30, }, + { 1, 0, 0, 2, 5, 34, }, + { 0, 0, 0, 2, 6, 34, }, + { 2, 0, 0, 2, 6, 30, }, + { 1, 0, 0, 2, 6, 34, }, + { 0, 0, 0, 2, 7, 34, }, + { 2, 0, 0, 2, 7, 30, }, + { 1, 0, 0, 2, 7, 34, }, + { 0, 0, 0, 2, 8, 34, }, + { 2, 0, 0, 2, 8, 30, }, + { 1, 0, 0, 2, 8, 34, }, + { 0, 0, 0, 2, 9, 32, }, + { 2, 0, 0, 2, 9, 30, }, + { 1, 0, 0, 2, 9, 34, }, + { 0, 0, 0, 2, 10, 30, }, + { 2, 0, 0, 2, 10, 30, }, + { 1, 0, 0, 2, 10, 34, }, + { 0, 0, 0, 2, 11, 26, }, + { 2, 0, 0, 2, 11, 30, }, + { 1, 0, 0, 2, 11, 34, }, + { 0, 0, 0, 2, 12, 20, }, + { 2, 0, 0, 2, 12, 30, }, + { 1, 0, 0, 2, 12, 34, }, + { 0, 0, 0, 2, 13, 14, }, + { 2, 0, 0, 2, 13, 30, }, + { 1, 0, 0, 2, 13, 34, }, + { 0, 0, 0, 2, 14, 63, }, + { 2, 0, 0, 2, 14, 63, }, + { 1, 0, 0, 2, 14, 63, }, + { 0, 0, 0, 3, 1, 26, }, + { 2, 0, 0, 3, 1, 18, }, + { 1, 0, 0, 3, 1, 30, }, + { 0, 0, 0, 3, 2, 28, }, + { 2, 0, 0, 3, 2, 18, }, + { 1, 0, 0, 3, 2, 30, }, + { 0, 0, 0, 3, 3, 30, }, + { 2, 0, 0, 3, 3, 18, }, + { 1, 0, 0, 3, 3, 30, }, + { 0, 0, 0, 3, 4, 30, }, + { 2, 0, 0, 3, 4, 18, }, + { 1, 0, 0, 3, 4, 30, }, + { 0, 0, 0, 3, 5, 32, }, + { 2, 0, 0, 3, 5, 18, }, + { 1, 0, 0, 3, 5, 30, }, + { 0, 0, 0, 3, 6, 32, }, + { 2, 0, 0, 3, 6, 18, }, + { 1, 0, 0, 3, 6, 30, }, + { 0, 0, 0, 3, 7, 32, }, + { 2, 0, 0, 3, 7, 18, }, + { 1, 0, 0, 3, 7, 30, }, + { 0, 0, 0, 3, 8, 30, }, + { 2, 0, 0, 3, 8, 18, }, + { 1, 0, 0, 3, 8, 30, }, + { 0, 0, 0, 3, 9, 30, }, + { 2, 0, 0, 3, 9, 18, }, + { 1, 0, 0, 3, 9, 30, }, + { 0, 0, 0, 3, 10, 28, }, + { 2, 0, 0, 3, 10, 18, }, + { 1, 0, 0, 3, 10, 30, }, + { 0, 0, 0, 3, 11, 26, }, + { 2, 0, 0, 3, 11, 18, }, + { 1, 0, 0, 3, 11, 30, }, + { 0, 0, 0, 3, 12, 20, }, + { 2, 0, 0, 3, 12, 18, }, + { 1, 0, 0, 3, 12, 30, }, + { 0, 0, 0, 3, 13, 14, }, + { 2, 0, 0, 3, 13, 18, }, + { 1, 0, 0, 3, 13, 30, }, + { 0, 0, 0, 3, 14, 63, }, + { 2, 0, 0, 3, 14, 63, }, + { 1, 0, 0, 3, 14, 63, }, + { 0, 0, 1, 2, 1, 63, }, + { 2, 0, 1, 2, 1, 63, }, + { 1, 0, 1, 2, 1, 63, }, + { 0, 0, 1, 2, 2, 63, }, + { 2, 0, 1, 2, 2, 63, }, + { 1, 0, 1, 2, 2, 63, }, + { 0, 0, 1, 2, 3, 26, }, + { 2, 0, 1, 2, 3, 30, }, + { 1, 0, 1, 2, 3, 34, }, + { 0, 0, 1, 2, 4, 26, }, + { 2, 0, 1, 2, 4, 30, }, + { 1, 0, 1, 2, 4, 34, }, + { 0, 0, 1, 2, 5, 30, }, + { 2, 0, 1, 2, 5, 30, }, + { 1, 0, 1, 2, 5, 34, }, + { 0, 0, 1, 2, 6, 32, }, + { 2, 0, 1, 2, 6, 30, }, + { 1, 0, 1, 2, 6, 34, }, + { 0, 0, 1, 2, 7, 30, }, + { 2, 0, 1, 2, 7, 30, }, + { 1, 0, 1, 2, 7, 34, }, + { 0, 0, 1, 2, 8, 26, }, + { 2, 0, 1, 2, 8, 30, }, + { 1, 0, 1, 2, 8, 34, }, + { 0, 0, 1, 2, 9, 26, }, + { 2, 0, 1, 2, 9, 30, }, + { 1, 0, 1, 2, 9, 34, }, + { 0, 0, 1, 2, 10, 20, }, + { 2, 0, 1, 2, 10, 30, }, + { 1, 0, 1, 2, 10, 34, }, + { 0, 0, 1, 2, 11, 14, }, + { 2, 0, 1, 2, 11, 30, }, + { 1, 0, 1, 2, 11, 34, }, + { 0, 0, 1, 2, 12, 63, }, + { 2, 0, 1, 2, 12, 63, }, + { 1, 0, 1, 2, 12, 63, }, + { 0, 0, 1, 2, 13, 63, }, + { 2, 0, 1, 2, 13, 63, }, + { 1, 0, 1, 2, 13, 63, }, + { 0, 0, 1, 2, 14, 63, }, + { 2, 0, 1, 2, 14, 63, }, + { 1, 0, 1, 2, 14, 63, }, + { 0, 0, 1, 3, 1, 63, }, + { 2, 0, 1, 3, 1, 63, }, + { 1, 0, 1, 3, 1, 63, }, + { 0, 0, 1, 3, 2, 63, }, + { 2, 0, 1, 3, 2, 63, }, + { 1, 0, 1, 3, 2, 63, }, + { 0, 0, 1, 3, 3, 24, }, + { 2, 0, 1, 3, 3, 18, }, + { 1, 0, 1, 3, 3, 30, }, + { 0, 0, 1, 3, 4, 24, }, + { 2, 0, 1, 3, 4, 18, }, + { 1, 0, 1, 3, 4, 30, }, + { 0, 0, 1, 3, 5, 26, }, + { 2, 0, 1, 3, 5, 18, }, + { 1, 0, 1, 3, 5, 30, }, + { 0, 0, 1, 3, 6, 28, }, + { 2, 0, 1, 3, 6, 18, }, + { 1, 0, 1, 3, 6, 
30, }, + { 0, 0, 1, 3, 7, 26, }, + { 2, 0, 1, 3, 7, 18, }, + { 1, 0, 1, 3, 7, 30, }, + { 0, 0, 1, 3, 8, 26, }, + { 2, 0, 1, 3, 8, 18, }, + { 1, 0, 1, 3, 8, 30, }, + { 0, 0, 1, 3, 9, 26, }, + { 2, 0, 1, 3, 9, 18, }, + { 1, 0, 1, 3, 9, 30, }, + { 0, 0, 1, 3, 10, 20, }, + { 2, 0, 1, 3, 10, 18, }, + { 1, 0, 1, 3, 10, 30, }, + { 0, 0, 1, 3, 11, 14, }, + { 2, 0, 1, 3, 11, 18, }, + { 1, 0, 1, 3, 11, 30, }, + { 0, 0, 1, 3, 12, 63, }, + { 2, 0, 1, 3, 12, 63, }, + { 1, 0, 1, 3, 12, 63, }, + { 0, 0, 1, 3, 13, 63, }, + { 2, 0, 1, 3, 13, 63, }, + { 1, 0, 1, 3, 13, 63, }, + { 0, 0, 1, 3, 14, 63, }, + { 2, 0, 1, 3, 14, 63, }, + { 1, 0, 1, 3, 14, 63, }, + { 0, 1, 0, 1, 36, 36, }, + { 2, 1, 0, 1, 36, 32, }, + { 1, 1, 0, 1, 36, 30, }, + { 0, 1, 0, 1, 40, 38, }, + { 2, 1, 0, 1, 40, 32, }, + { 1, 1, 0, 1, 40, 30, }, + { 0, 1, 0, 1, 44, 38, }, + { 2, 1, 0, 1, 44, 32, }, + { 1, 1, 0, 1, 44, 30, }, + { 0, 1, 0, 1, 48, 38, }, + { 2, 1, 0, 1, 48, 32, }, + { 1, 1, 0, 1, 48, 30, }, + { 0, 1, 0, 1, 52, 38, }, + { 2, 1, 0, 1, 52, 32, }, + { 1, 1, 0, 1, 52, 28, }, + { 0, 1, 0, 1, 56, 38, }, + { 2, 1, 0, 1, 56, 32, }, + { 1, 1, 0, 1, 56, 28, }, + { 0, 1, 0, 1, 60, 38, }, + { 2, 1, 0, 1, 60, 32, }, + { 1, 1, 0, 1, 60, 28, }, + { 0, 1, 0, 1, 64, 34, }, + { 2, 1, 0, 1, 64, 32, }, + { 1, 1, 0, 1, 64, 28, }, + { 0, 1, 0, 1, 100, 32, }, + { 2, 1, 0, 1, 100, 32, }, + { 1, 1, 0, 1, 100, 32, }, + { 0, 1, 0, 1, 104, 38, }, + { 2, 1, 0, 1, 104, 32, }, + { 1, 1, 0, 1, 104, 32, }, + { 0, 1, 0, 1, 108, 38, }, + { 2, 1, 0, 1, 108, 32, }, + { 1, 1, 0, 1, 108, 32, }, + { 0, 1, 0, 1, 112, 38, }, + { 2, 1, 0, 1, 112, 32, }, + { 1, 1, 0, 1, 112, 32, }, + { 0, 1, 0, 1, 116, 38, }, + { 2, 1, 0, 1, 116, 32, }, + { 1, 1, 0, 1, 116, 32, }, + { 0, 1, 0, 1, 120, 38, }, + { 2, 1, 0, 1, 120, 32, }, + { 1, 1, 0, 1, 120, 32, }, + { 0, 1, 0, 1, 124, 38, }, + { 2, 1, 0, 1, 124, 32, }, + { 1, 1, 0, 1, 124, 32, }, + { 0, 1, 0, 1, 128, 38, }, + { 2, 1, 0, 1, 128, 32, }, + { 1, 1, 0, 1, 128, 32, }, + { 0, 1, 0, 1, 132, 38, }, + { 2, 1, 0, 1, 132, 32, }, + { 1, 1, 0, 1, 132, 32, }, + { 0, 1, 0, 1, 136, 38, }, + { 2, 1, 0, 1, 136, 32, }, + { 1, 1, 0, 1, 136, 32, }, + { 0, 1, 0, 1, 140, 34, }, + { 2, 1, 0, 1, 140, 32, }, + { 1, 1, 0, 1, 140, 32, }, + { 0, 1, 0, 1, 144, 34, }, + { 2, 1, 0, 1, 144, 32, }, + { 1, 1, 0, 1, 144, 63, }, + { 0, 1, 0, 1, 149, 38, }, + { 2, 1, 0, 1, 149, 63, }, + { 1, 1, 0, 1, 149, 63, }, + { 0, 1, 0, 1, 153, 38, }, + { 2, 1, 0, 1, 153, 63, }, + { 1, 1, 0, 1, 153, 63, }, + { 0, 1, 0, 1, 157, 38, }, + { 2, 1, 0, 1, 157, 63, }, + { 1, 1, 0, 1, 157, 63, }, + { 0, 1, 0, 1, 161, 38, }, + { 2, 1, 0, 1, 161, 63, }, + { 1, 1, 0, 1, 161, 63, }, + { 0, 1, 0, 1, 165, 38, }, + { 2, 1, 0, 1, 165, 63, }, + { 1, 1, 0, 1, 165, 63, }, + { 0, 1, 0, 2, 36, 36, }, + { 2, 1, 0, 2, 36, 32, }, + { 1, 1, 0, 2, 36, 28, }, + { 0, 1, 0, 2, 40, 38, }, + { 2, 1, 0, 2, 40, 32, }, + { 1, 1, 0, 2, 40, 28, }, + { 0, 1, 0, 2, 44, 38, }, + { 2, 1, 0, 2, 44, 32, }, + { 1, 1, 0, 2, 44, 28, }, + { 0, 1, 0, 2, 48, 38, }, + { 2, 1, 0, 2, 48, 32, }, + { 1, 1, 0, 2, 48, 28, }, + { 0, 1, 0, 2, 52, 38, }, + { 2, 1, 0, 2, 52, 32, }, + { 1, 1, 0, 2, 52, 28, }, + { 0, 1, 0, 2, 56, 38, }, + { 2, 1, 0, 2, 56, 32, }, + { 1, 1, 0, 2, 56, 28, }, + { 0, 1, 0, 2, 60, 38, }, + { 2, 1, 0, 2, 60, 32, }, + { 1, 1, 0, 2, 60, 28, }, + { 0, 1, 0, 2, 64, 34, }, + { 2, 1, 0, 2, 64, 32, }, + { 1, 1, 0, 2, 64, 28, }, + { 0, 1, 0, 2, 100, 32, }, + { 2, 1, 0, 2, 100, 32, }, + { 1, 1, 0, 2, 100, 32, }, + { 0, 1, 0, 2, 104, 38, }, + { 2, 1, 0, 2, 104, 32, }, + { 1, 1, 0, 2, 104, 32, }, + { 0, 1, 0, 2, 
108, 38, }, + { 2, 1, 0, 2, 108, 32, }, + { 1, 1, 0, 2, 108, 32, }, + { 0, 1, 0, 2, 112, 38, }, + { 2, 1, 0, 2, 112, 32, }, + { 1, 1, 0, 2, 112, 32, }, + { 0, 1, 0, 2, 116, 38, }, + { 2, 1, 0, 2, 116, 32, }, + { 1, 1, 0, 2, 116, 32, }, + { 0, 1, 0, 2, 120, 38, }, + { 2, 1, 0, 2, 120, 32, }, + { 1, 1, 0, 2, 120, 32, }, + { 0, 1, 0, 2, 124, 38, }, + { 2, 1, 0, 2, 124, 32, }, + { 1, 1, 0, 2, 124, 32, }, + { 0, 1, 0, 2, 128, 38, }, + { 2, 1, 0, 2, 128, 32, }, + { 1, 1, 0, 2, 128, 32, }, + { 0, 1, 0, 2, 132, 38, }, + { 2, 1, 0, 2, 132, 32, }, + { 1, 1, 0, 2, 132, 32, }, + { 0, 1, 0, 2, 136, 38, }, + { 2, 1, 0, 2, 136, 32, }, + { 1, 1, 0, 2, 136, 32, }, + { 0, 1, 0, 2, 140, 32, }, + { 2, 1, 0, 2, 140, 32, }, + { 1, 1, 0, 2, 140, 32, }, + { 0, 1, 0, 2, 144, 26, }, + { 2, 1, 0, 2, 144, 63, }, + { 1, 1, 0, 2, 144, 63, }, + { 0, 1, 0, 2, 149, 38, }, + { 2, 1, 0, 2, 149, 63, }, + { 1, 1, 0, 2, 149, 63, }, + { 0, 1, 0, 2, 153, 38, }, + { 2, 1, 0, 2, 153, 63, }, + { 1, 1, 0, 2, 153, 63, }, + { 0, 1, 0, 2, 157, 38, }, + { 2, 1, 0, 2, 157, 63, }, + { 1, 1, 0, 2, 157, 63, }, + { 0, 1, 0, 2, 161, 38, }, + { 2, 1, 0, 2, 161, 63, }, + { 1, 1, 0, 2, 161, 63, }, + { 0, 1, 0, 2, 165, 38, }, + { 2, 1, 0, 2, 165, 63, }, + { 1, 1, 0, 2, 165, 63, }, + { 0, 1, 0, 3, 36, 34, }, + { 2, 1, 0, 3, 36, 20, }, + { 1, 1, 0, 3, 36, 22, }, + { 0, 1, 0, 3, 40, 36, }, + { 2, 1, 0, 3, 40, 20, }, + { 1, 1, 0, 3, 40, 22, }, + { 0, 1, 0, 3, 44, 36, }, + { 2, 1, 0, 3, 44, 20, }, + { 1, 1, 0, 3, 44, 22, }, + { 0, 1, 0, 3, 48, 36, }, + { 2, 1, 0, 3, 48, 20, }, + { 1, 1, 0, 3, 48, 22, }, + { 0, 1, 0, 3, 52, 36, }, + { 2, 1, 0, 3, 52, 20, }, + { 1, 1, 0, 3, 52, 22, }, + { 0, 1, 0, 3, 56, 36, }, + { 2, 1, 0, 3, 56, 20, }, + { 1, 1, 0, 3, 56, 22, }, + { 0, 1, 0, 3, 60, 36, }, + { 2, 1, 0, 3, 60, 20, }, + { 1, 1, 0, 3, 60, 22, }, + { 0, 1, 0, 3, 64, 34, }, + { 2, 1, 0, 3, 64, 20, }, + { 1, 1, 0, 3, 64, 22, }, + { 0, 1, 0, 3, 100, 32, }, + { 2, 1, 0, 3, 100, 20, }, + { 1, 1, 0, 3, 100, 30, }, + { 0, 1, 0, 3, 104, 36, }, + { 2, 1, 0, 3, 104, 20, }, + { 1, 1, 0, 3, 104, 30, }, + { 0, 1, 0, 3, 108, 38, }, + { 2, 1, 0, 3, 108, 20, }, + { 1, 1, 0, 3, 108, 30, }, + { 0, 1, 0, 3, 112, 38, }, + { 2, 1, 0, 3, 112, 20, }, + { 1, 1, 0, 3, 112, 30, }, + { 0, 1, 0, 3, 116, 38, }, + { 2, 1, 0, 3, 116, 20, }, + { 1, 1, 0, 3, 116, 30, }, + { 0, 1, 0, 3, 120, 38, }, + { 2, 1, 0, 3, 120, 20, }, + { 1, 1, 0, 3, 120, 30, }, + { 0, 1, 0, 3, 124, 38, }, + { 2, 1, 0, 3, 124, 20, }, + { 1, 1, 0, 3, 124, 30, }, + { 0, 1, 0, 3, 128, 38, }, + { 2, 1, 0, 3, 128, 20, }, + { 1, 1, 0, 3, 128, 30, }, + { 0, 1, 0, 3, 132, 38, }, + { 2, 1, 0, 3, 132, 20, }, + { 1, 1, 0, 3, 132, 30, }, + { 0, 1, 0, 3, 136, 36, }, + { 2, 1, 0, 3, 136, 20, }, + { 1, 1, 0, 3, 136, 30, }, + { 0, 1, 0, 3, 140, 32, }, + { 2, 1, 0, 3, 140, 20, }, + { 1, 1, 0, 3, 140, 30, }, + { 0, 1, 0, 3, 144, 26, }, + { 2, 1, 0, 3, 144, 63, }, + { 1, 1, 0, 3, 144, 63, }, + { 0, 1, 0, 3, 149, 38, }, + { 2, 1, 0, 3, 149, 63, }, + { 1, 1, 0, 3, 149, 63, }, + { 0, 1, 0, 3, 153, 38, }, + { 2, 1, 0, 3, 153, 63, }, + { 1, 1, 0, 3, 153, 63, }, + { 0, 1, 0, 3, 157, 38, }, + { 2, 1, 0, 3, 157, 63, }, + { 1, 1, 0, 3, 157, 63, }, + { 0, 1, 0, 3, 161, 38, }, + { 2, 1, 0, 3, 161, 63, }, + { 1, 1, 0, 3, 161, 63, }, + { 0, 1, 0, 3, 165, 38, }, + { 2, 1, 0, 3, 165, 63, }, + { 1, 1, 0, 3, 165, 63, }, + { 0, 1, 1, 2, 38, 28, }, + { 2, 1, 1, 2, 38, 30, }, + { 1, 1, 1, 2, 38, 30, }, + { 0, 1, 1, 2, 46, 36, }, + { 2, 1, 1, 2, 46, 30, }, + { 1, 1, 1, 2, 46, 30, }, + { 0, 1, 1, 2, 54, 36, }, + { 2, 1, 1, 2, 54, 30, }, + { 1, 1, 1, 2, 
54, 30, }, + { 0, 1, 1, 2, 62, 30, }, + { 2, 1, 1, 2, 62, 30, }, + { 1, 1, 1, 2, 62, 30, }, + { 0, 1, 1, 2, 102, 30, }, + { 2, 1, 1, 2, 102, 30, }, + { 1, 1, 1, 2, 102, 30, }, + { 0, 1, 1, 2, 110, 36, }, + { 2, 1, 1, 2, 110, 30, }, + { 1, 1, 1, 2, 110, 30, }, + { 0, 1, 1, 2, 118, 36, }, + { 2, 1, 1, 2, 118, 30, }, + { 1, 1, 1, 2, 118, 30, }, + { 0, 1, 1, 2, 126, 36, }, + { 2, 1, 1, 2, 126, 30, }, + { 1, 1, 1, 2, 126, 30, }, + { 0, 1, 1, 2, 134, 36, }, + { 2, 1, 1, 2, 134, 30, }, + { 1, 1, 1, 2, 134, 30, }, + { 0, 1, 1, 2, 142, 30, }, + { 2, 1, 1, 2, 142, 63, }, + { 1, 1, 1, 2, 142, 63, }, + { 0, 1, 1, 2, 151, 36, }, + { 2, 1, 1, 2, 151, 63, }, + { 1, 1, 1, 2, 151, 63, }, + { 0, 1, 1, 2, 159, 36, }, + { 2, 1, 1, 2, 159, 63, }, + { 1, 1, 1, 2, 159, 63, }, + { 0, 1, 1, 3, 38, 26, }, + { 2, 1, 1, 3, 38, 20, }, + { 1, 1, 1, 3, 38, 22, }, + { 0, 1, 1, 3, 46, 36, }, + { 2, 1, 1, 3, 46, 20, }, + { 1, 1, 1, 3, 46, 22, }, + { 0, 1, 1, 3, 54, 36, }, + { 2, 1, 1, 3, 54, 20, }, + { 1, 1, 1, 3, 54, 22, }, + { 0, 1, 1, 3, 62, 28, }, + { 2, 1, 1, 3, 62, 20, }, + { 1, 1, 1, 3, 62, 22, }, + { 0, 1, 1, 3, 102, 28, }, + { 2, 1, 1, 3, 102, 20, }, + { 1, 1, 1, 3, 102, 30, }, + { 0, 1, 1, 3, 110, 36, }, + { 2, 1, 1, 3, 110, 20, }, + { 1, 1, 1, 3, 110, 30, }, + { 0, 1, 1, 3, 118, 36, }, + { 2, 1, 1, 3, 118, 20, }, + { 1, 1, 1, 3, 118, 30, }, + { 0, 1, 1, 3, 126, 36, }, + { 2, 1, 1, 3, 126, 20, }, + { 1, 1, 1, 3, 126, 30, }, + { 0, 1, 1, 3, 134, 36, }, + { 2, 1, 1, 3, 134, 20, }, + { 1, 1, 1, 3, 134, 30, }, + { 0, 1, 1, 3, 142, 30, }, + { 2, 1, 1, 3, 142, 63, }, + { 1, 1, 1, 3, 142, 63, }, + { 0, 1, 1, 3, 151, 36, }, + { 2, 1, 1, 3, 151, 63, }, + { 1, 1, 1, 3, 151, 63, }, + { 0, 1, 1, 3, 159, 36, }, + { 2, 1, 1, 3, 159, 63, }, + { 1, 1, 1, 3, 159, 63, }, + { 0, 1, 2, 4, 42, 26, }, + { 2, 1, 2, 4, 42, 30, }, + { 1, 1, 2, 4, 42, 28, }, + { 0, 1, 2, 4, 58, 26, }, + { 2, 1, 2, 4, 58, 30, }, + { 1, 1, 2, 4, 58, 28, }, + { 0, 1, 2, 4, 106, 26, }, + { 2, 1, 2, 4, 106, 30, }, + { 1, 1, 2, 4, 106, 30, }, + { 0, 1, 2, 4, 122, 36, }, + { 2, 1, 2, 4, 122, 30, }, + { 1, 1, 2, 4, 122, 30, }, + { 0, 1, 2, 4, 138, 36, }, + { 2, 1, 2, 4, 138, 63, }, + { 1, 1, 2, 4, 138, 63, }, + { 0, 1, 2, 4, 155, 36, }, + { 2, 1, 2, 4, 155, 63, }, + { 1, 1, 2, 4, 155, 63, }, + { 0, 1, 2, 5, 42, 24, }, + { 2, 1, 2, 5, 42, 20, }, + { 1, 1, 2, 5, 42, 22, }, + { 0, 1, 2, 5, 58, 24, }, + { 2, 1, 2, 5, 58, 20, }, + { 1, 1, 2, 5, 58, 22, }, + { 0, 1, 2, 5, 106, 26, }, + { 2, 1, 2, 5, 106, 20, }, + { 1, 1, 2, 5, 106, 30, }, + { 0, 1, 2, 5, 122, 36, }, + { 2, 1, 2, 5, 122, 20, }, + { 1, 1, 2, 5, 122, 30, }, + { 0, 1, 2, 5, 138, 36, }, + { 2, 1, 2, 5, 138, 63, }, + { 1, 1, 2, 5, 138, 63, }, + { 0, 1, 2, 5, 155, 36, }, + { 2, 1, 2, 5, 155, 63, }, + { 1, 1, 2, 5, 155, 63 }, }; RTW_DECL_TABLE_TXPWR_LMT(rtw8822b_txpwr_lmt_type2); -static const u8 rtw8822b_txpwr_lmt_type5[] = { - 0, 0, 0, 0, 1, 32, 2, 0, 0, 0, 1, 28, 1, 0, 0, 0, 1, 30, - 0, 0, 0, 0, 2, 32, 2, 0, 0, 0, 2, 28, 1, 0, 0, 0, 2, 30, - 0, 0, 0, 0, 3, 32, 2, 0, 0, 0, 3, 28, 1, 0, 0, 0, 3, 30, - 0, 0, 0, 0, 4, 32, 2, 0, 0, 0, 4, 28, 1, 0, 0, 0, 4, 30, - 0, 0, 0, 0, 5, 32, 2, 0, 0, 0, 5, 28, 1, 0, 0, 0, 5, 30, - 0, 0, 0, 0, 6, 32, 2, 0, 0, 0, 6, 28, 1, 0, 0, 0, 6, 30, - 0, 0, 0, 0, 7, 32, 2, 0, 0, 0, 7, 28, 1, 0, 0, 0, 7, 30, - 0, 0, 0, 0, 8, 32, 2, 0, 0, 0, 8, 28, 1, 0, 0, 0, 8, 30, - 0, 0, 0, 0, 9, 32, 2, 0, 0, 0, 9, 28, 1, 0, 0, 0, 9, 30, - 0, 0, 0, 0, 10, 32, 2, 0, 0, 0, 10, 28, 1, 0, 0, 0, 10, 30, - 0, 0, 0, 0, 11, 32, 2, 0, 0, 0, 11, 28, 1, 0, 0, 0, 11, 30, - 0, 0, 0, 0, 12, 26, 2, 0, 0, 0, 12, 28, 
1, 0, 0, 0, 12, 30, - 0, 0, 0, 0, 13, 20, 2, 0, 0, 0, 13, 28, 1, 0, 0, 0, 13, 28, - 0, 0, 0, 0, 14, 63, 2, 0, 0, 0, 14, 63, 1, 0, 0, 0, 14, 32, - 0, 0, 0, 1, 1, 26, 2, 0, 0, 1, 1, 30, 1, 0, 0, 1, 1, 34, - 0, 0, 0, 1, 2, 30, 2, 0, 0, 1, 2, 30, 1, 0, 0, 1, 2, 34, - 0, 0, 0, 1, 3, 32, 2, 0, 0, 1, 3, 30, 1, 0, 0, 1, 3, 34, - 0, 0, 0, 1, 4, 34, 2, 0, 0, 1, 4, 30, 1, 0, 0, 1, 4, 34, - 0, 0, 0, 1, 5, 34, 2, 0, 0, 1, 5, 30, 1, 0, 0, 1, 5, 34, - 0, 0, 0, 1, 6, 34, 2, 0, 0, 1, 6, 30, 1, 0, 0, 1, 6, 34, - 0, 0, 0, 1, 7, 34, 2, 0, 0, 1, 7, 30, 1, 0, 0, 1, 7, 34, - 0, 0, 0, 1, 8, 34, 2, 0, 0, 1, 8, 30, 1, 0, 0, 1, 8, 34, - 0, 0, 0, 1, 9, 32, 2, 0, 0, 1, 9, 30, 1, 0, 0, 1, 9, 34, - 0, 0, 0, 1, 10, 30, 2, 0, 0, 1, 10, 30, 1, 0, 0, 1, 10, 34, - 0, 0, 0, 1, 11, 28, 2, 0, 0, 1, 11, 30, 1, 0, 0, 1, 11, 34, - 0, 0, 0, 1, 12, 22, 2, 0, 0, 1, 12, 30, 1, 0, 0, 1, 12, 34, - 0, 0, 0, 1, 13, 14, 2, 0, 0, 1, 13, 30, 1, 0, 0, 1, 13, 34, - 0, 0, 0, 1, 14, 63, 2, 0, 0, 1, 14, 63, 1, 0, 0, 1, 14, 63, - 0, 0, 0, 2, 1, 26, 2, 0, 0, 2, 1, 30, 1, 0, 0, 2, 1, 34, - 0, 0, 0, 2, 2, 30, 2, 0, 0, 2, 2, 30, 1, 0, 0, 2, 2, 34, - 0, 0, 0, 2, 3, 32, 2, 0, 0, 2, 3, 30, 1, 0, 0, 2, 3, 34, - 0, 0, 0, 2, 4, 34, 2, 0, 0, 2, 4, 30, 1, 0, 0, 2, 4, 34, - 0, 0, 0, 2, 5, 34, 2, 0, 0, 2, 5, 30, 1, 0, 0, 2, 5, 34, - 0, 0, 0, 2, 6, 34, 2, 0, 0, 2, 6, 30, 1, 0, 0, 2, 6, 34, - 0, 0, 0, 2, 7, 34, 2, 0, 0, 2, 7, 30, 1, 0, 0, 2, 7, 34, - 0, 0, 0, 2, 8, 34, 2, 0, 0, 2, 8, 30, 1, 0, 0, 2, 8, 34, - 0, 0, 0, 2, 9, 32, 2, 0, 0, 2, 9, 30, 1, 0, 0, 2, 9, 34, - 0, 0, 0, 2, 10, 30, 2, 0, 0, 2, 10, 30, 1, 0, 0, 2, 10, 34, - 0, 0, 0, 2, 11, 26, 2, 0, 0, 2, 11, 30, 1, 0, 0, 2, 11, 34, - 0, 0, 0, 2, 12, 20, 2, 0, 0, 2, 12, 30, 1, 0, 0, 2, 12, 34, - 0, 0, 0, 2, 13, 14, 2, 0, 0, 2, 13, 30, 1, 0, 0, 2, 13, 34, - 0, 0, 0, 2, 14, 63, 2, 0, 0, 2, 14, 63, 1, 0, 0, 2, 14, 63, - 0, 0, 0, 3, 1, 26, 2, 0, 0, 3, 1, 18, 1, 0, 0, 3, 1, 30, - 0, 0, 0, 3, 2, 28, 2, 0, 0, 3, 2, 18, 1, 0, 0, 3, 2, 30, - 0, 0, 0, 3, 3, 30, 2, 0, 0, 3, 3, 18, 1, 0, 0, 3, 3, 30, - 0, 0, 0, 3, 4, 30, 2, 0, 0, 3, 4, 18, 1, 0, 0, 3, 4, 30, - 0, 0, 0, 3, 5, 32, 2, 0, 0, 3, 5, 18, 1, 0, 0, 3, 5, 30, - 0, 0, 0, 3, 6, 32, 2, 0, 0, 3, 6, 18, 1, 0, 0, 3, 6, 30, - 0, 0, 0, 3, 7, 32, 2, 0, 0, 3, 7, 18, 1, 0, 0, 3, 7, 30, - 0, 0, 0, 3, 8, 30, 2, 0, 0, 3, 8, 18, 1, 0, 0, 3, 8, 30, - 0, 0, 0, 3, 9, 30, 2, 0, 0, 3, 9, 18, 1, 0, 0, 3, 9, 30, - 0, 0, 0, 3, 10, 28, 2, 0, 0, 3, 10, 18, 1, 0, 0, 3, 10, 30, - 0, 0, 0, 3, 11, 26, 2, 0, 0, 3, 11, 18, 1, 0, 0, 3, 11, 30, - 0, 0, 0, 3, 12, 20, 2, 0, 0, 3, 12, 18, 1, 0, 0, 3, 12, 30, - 0, 0, 0, 3, 13, 14, 2, 0, 0, 3, 13, 18, 1, 0, 0, 3, 13, 30, - 0, 0, 0, 3, 14, 63, 2, 0, 0, 3, 14, 63, 1, 0, 0, 3, 14, 63, - 0, 0, 1, 2, 1, 63, 2, 0, 1, 2, 1, 63, 1, 0, 1, 2, 1, 63, - 0, 0, 1, 2, 2, 63, 2, 0, 1, 2, 2, 63, 1, 0, 1, 2, 2, 63, - 0, 0, 1, 2, 3, 26, 2, 0, 1, 2, 3, 30, 1, 0, 1, 2, 3, 34, - 0, 0, 1, 2, 4, 26, 2, 0, 1, 2, 4, 30, 1, 0, 1, 2, 4, 34, - 0, 0, 1, 2, 5, 30, 2, 0, 1, 2, 5, 30, 1, 0, 1, 2, 5, 34, - 0, 0, 1, 2, 6, 32, 2, 0, 1, 2, 6, 30, 1, 0, 1, 2, 6, 34, - 0, 0, 1, 2, 7, 30, 2, 0, 1, 2, 7, 30, 1, 0, 1, 2, 7, 34, - 0, 0, 1, 2, 8, 26, 2, 0, 1, 2, 8, 30, 1, 0, 1, 2, 8, 34, - 0, 0, 1, 2, 9, 26, 2, 0, 1, 2, 9, 30, 1, 0, 1, 2, 9, 34, - 0, 0, 1, 2, 10, 20, 2, 0, 1, 2, 10, 30, 1, 0, 1, 2, 10, 34, - 0, 0, 1, 2, 11, 14, 2, 0, 1, 2, 11, 30, 1, 0, 1, 2, 11, 34, - 0, 0, 1, 2, 12, 63, 2, 0, 1, 2, 12, 63, 1, 0, 1, 2, 12, 63, - 0, 0, 1, 2, 13, 63, 2, 0, 1, 2, 13, 63, 1, 0, 1, 2, 13, 63, - 0, 0, 1, 2, 14, 63, 2, 0, 1, 2, 14, 63, 1, 0, 1, 2, 14, 63, - 0, 0, 1, 3, 1, 63, 2, 0, 1, 3, 1, 63, 1, 0, 
1, 3, 1, 63, - 0, 0, 1, 3, 2, 63, 2, 0, 1, 3, 2, 63, 1, 0, 1, 3, 2, 63, - 0, 0, 1, 3, 3, 24, 2, 0, 1, 3, 3, 18, 1, 0, 1, 3, 3, 30, - 0, 0, 1, 3, 4, 24, 2, 0, 1, 3, 4, 18, 1, 0, 1, 3, 4, 30, - 0, 0, 1, 3, 5, 26, 2, 0, 1, 3, 5, 18, 1, 0, 1, 3, 5, 30, - 0, 0, 1, 3, 6, 28, 2, 0, 1, 3, 6, 18, 1, 0, 1, 3, 6, 30, - 0, 0, 1, 3, 7, 26, 2, 0, 1, 3, 7, 18, 1, 0, 1, 3, 7, 30, - 0, 0, 1, 3, 8, 26, 2, 0, 1, 3, 8, 18, 1, 0, 1, 3, 8, 30, - 0, 0, 1, 3, 9, 26, 2, 0, 1, 3, 9, 18, 1, 0, 1, 3, 9, 30, - 0, 0, 1, 3, 10, 20, 2, 0, 1, 3, 10, 18, 1, 0, 1, 3, 10, 30, - 0, 0, 1, 3, 11, 14, 2, 0, 1, 3, 11, 18, 1, 0, 1, 3, 11, 30, - 0, 0, 1, 3, 12, 63, 2, 0, 1, 3, 12, 63, 1, 0, 1, 3, 12, 63, - 0, 0, 1, 3, 13, 63, 2, 0, 1, 3, 13, 63, 1, 0, 1, 3, 13, 63, - 0, 0, 1, 3, 14, 63, 2, 0, 1, 3, 14, 63, 1, 0, 1, 3, 14, 63, - 0, 1, 0, 1, 36, 30, 2, 1, 0, 1, 36, 32, 1, 1, 0, 1, 36, 30, - 0, 1, 0, 1, 40, 32, 2, 1, 0, 1, 40, 32, 1, 1, 0, 1, 40, 30, - 0, 1, 0, 1, 44, 32, 2, 1, 0, 1, 44, 32, 1, 1, 0, 1, 44, 30, - 0, 1, 0, 1, 48, 32, 2, 1, 0, 1, 48, 32, 1, 1, 0, 1, 48, 30, - 0, 1, 0, 1, 52, 32, 2, 1, 0, 1, 52, 32, 1, 1, 0, 1, 52, 28, - 0, 1, 0, 1, 56, 32, 2, 1, 0, 1, 56, 32, 1, 1, 0, 1, 56, 28, - 0, 1, 0, 1, 60, 32, 2, 1, 0, 1, 60, 32, 1, 1, 0, 1, 60, 28, - 0, 1, 0, 1, 64, 28, 2, 1, 0, 1, 64, 32, 1, 1, 0, 1, 64, 28, - 0, 1, 0, 1, 100, 26, 2, 1, 0, 1, 100, 32, 1, 1, 0, 1, 100, 32, - 0, 1, 0, 1, 104, 32, 2, 1, 0, 1, 104, 32, 1, 1, 0, 1, 104, 32, - 0, 1, 0, 1, 108, 32, 2, 1, 0, 1, 108, 32, 1, 1, 0, 1, 108, 32, - 0, 1, 0, 1, 112, 32, 2, 1, 0, 1, 112, 32, 1, 1, 0, 1, 112, 32, - 0, 1, 0, 1, 116, 32, 2, 1, 0, 1, 116, 32, 1, 1, 0, 1, 116, 32, - 0, 1, 0, 1, 120, 32, 2, 1, 0, 1, 120, 32, 1, 1, 0, 1, 120, 32, - 0, 1, 0, 1, 124, 32, 2, 1, 0, 1, 124, 32, 1, 1, 0, 1, 124, 32, - 0, 1, 0, 1, 128, 32, 2, 1, 0, 1, 128, 32, 1, 1, 0, 1, 128, 32, - 0, 1, 0, 1, 132, 32, 2, 1, 0, 1, 132, 32, 1, 1, 0, 1, 132, 32, - 0, 1, 0, 1, 136, 32, 2, 1, 0, 1, 136, 32, 1, 1, 0, 1, 136, 32, - 0, 1, 0, 1, 140, 28, 2, 1, 0, 1, 140, 32, 1, 1, 0, 1, 140, 32, - 0, 1, 0, 1, 144, 28, 2, 1, 0, 1, 144, 63, 1, 1, 0, 1, 144, 63, - 0, 1, 0, 1, 149, 32, 2, 1, 0, 1, 149, 63, 1, 1, 0, 1, 149, 63, - 0, 1, 0, 1, 153, 32, 2, 1, 0, 1, 153, 63, 1, 1, 0, 1, 153, 63, - 0, 1, 0, 1, 157, 32, 2, 1, 0, 1, 157, 63, 1, 1, 0, 1, 157, 63, - 0, 1, 0, 1, 161, 32, 2, 1, 0, 1, 161, 63, 1, 1, 0, 1, 161, 63, - 0, 1, 0, 1, 165, 32, 2, 1, 0, 1, 165, 63, 1, 1, 0, 1, 165, 63, - 0, 1, 0, 2, 36, 30, 2, 1, 0, 2, 36, 32, 1, 1, 0, 2, 36, 28, - 0, 1, 0, 2, 40, 32, 2, 1, 0, 2, 40, 32, 1, 1, 0, 2, 40, 28, - 0, 1, 0, 2, 44, 32, 2, 1, 0, 2, 44, 32, 1, 1, 0, 2, 44, 28, - 0, 1, 0, 2, 48, 32, 2, 1, 0, 2, 48, 32, 1, 1, 0, 2, 48, 28, - 0, 1, 0, 2, 52, 32, 2, 1, 0, 2, 52, 32, 1, 1, 0, 2, 52, 28, - 0, 1, 0, 2, 56, 32, 2, 1, 0, 2, 56, 32, 1, 1, 0, 2, 56, 28, - 0, 1, 0, 2, 60, 32, 2, 1, 0, 2, 60, 32, 1, 1, 0, 2, 60, 28, - 0, 1, 0, 2, 64, 28, 2, 1, 0, 2, 64, 32, 1, 1, 0, 2, 64, 28, - 0, 1, 0, 2, 100, 26, 2, 1, 0, 2, 100, 32, 1, 1, 0, 2, 100, 32, - 0, 1, 0, 2, 104, 32, 2, 1, 0, 2, 104, 32, 1, 1, 0, 2, 104, 32, - 0, 1, 0, 2, 108, 32, 2, 1, 0, 2, 108, 32, 1, 1, 0, 2, 108, 32, - 0, 1, 0, 2, 112, 32, 2, 1, 0, 2, 112, 32, 1, 1, 0, 2, 112, 32, - 0, 1, 0, 2, 116, 32, 2, 1, 0, 2, 116, 32, 1, 1, 0, 2, 116, 32, - 0, 1, 0, 2, 120, 32, 2, 1, 0, 2, 120, 32, 1, 1, 0, 2, 120, 32, - 0, 1, 0, 2, 124, 32, 2, 1, 0, 2, 124, 32, 1, 1, 0, 2, 124, 32, - 0, 1, 0, 2, 128, 32, 2, 1, 0, 2, 128, 32, 1, 1, 0, 2, 128, 32, - 0, 1, 0, 2, 132, 32, 2, 1, 0, 2, 132, 32, 1, 1, 0, 2, 132, 32, - 0, 1, 0, 2, 136, 32, 2, 1, 0, 2, 136, 32, 1, 1, 0, 2, 136, 32, - 0, 1, 0, 
2, 140, 26, 2, 1, 0, 2, 140, 32, 1, 1, 0, 2, 140, 32, - 0, 1, 0, 2, 144, 26, 2, 1, 0, 2, 144, 63, 1, 1, 0, 2, 144, 63, - 0, 1, 0, 2, 149, 32, 2, 1, 0, 2, 149, 63, 1, 1, 0, 2, 149, 63, - 0, 1, 0, 2, 153, 32, 2, 1, 0, 2, 153, 63, 1, 1, 0, 2, 153, 63, - 0, 1, 0, 2, 157, 32, 2, 1, 0, 2, 157, 63, 1, 1, 0, 2, 157, 63, - 0, 1, 0, 2, 161, 32, 2, 1, 0, 2, 161, 63, 1, 1, 0, 2, 161, 63, - 0, 1, 0, 2, 165, 32, 2, 1, 0, 2, 165, 63, 1, 1, 0, 2, 165, 63, - 0, 1, 0, 3, 36, 28, 2, 1, 0, 3, 36, 20, 1, 1, 0, 3, 36, 22, - 0, 1, 0, 3, 40, 30, 2, 1, 0, 3, 40, 20, 1, 1, 0, 3, 40, 22, - 0, 1, 0, 3, 44, 30, 2, 1, 0, 3, 44, 20, 1, 1, 0, 3, 44, 22, - 0, 1, 0, 3, 48, 30, 2, 1, 0, 3, 48, 20, 1, 1, 0, 3, 48, 22, - 0, 1, 0, 3, 52, 30, 2, 1, 0, 3, 52, 20, 1, 1, 0, 3, 52, 22, - 0, 1, 0, 3, 56, 30, 2, 1, 0, 3, 56, 20, 1, 1, 0, 3, 56, 22, - 0, 1, 0, 3, 60, 30, 2, 1, 0, 3, 60, 20, 1, 1, 0, 3, 60, 22, - 0, 1, 0, 3, 64, 28, 2, 1, 0, 3, 64, 20, 1, 1, 0, 3, 64, 22, - 0, 1, 0, 3, 100, 26, 2, 1, 0, 3, 100, 20, 1, 1, 0, 3, 100, 30, - 0, 1, 0, 3, 104, 30, 2, 1, 0, 3, 104, 20, 1, 1, 0, 3, 104, 30, - 0, 1, 0, 3, 108, 32, 2, 1, 0, 3, 108, 20, 1, 1, 0, 3, 108, 30, - 0, 1, 0, 3, 112, 32, 2, 1, 0, 3, 112, 20, 1, 1, 0, 3, 112, 30, - 0, 1, 0, 3, 116, 32, 2, 1, 0, 3, 116, 20, 1, 1, 0, 3, 116, 30, - 0, 1, 0, 3, 120, 32, 2, 1, 0, 3, 120, 20, 1, 1, 0, 3, 120, 30, - 0, 1, 0, 3, 124, 32, 2, 1, 0, 3, 124, 20, 1, 1, 0, 3, 124, 30, - 0, 1, 0, 3, 128, 32, 2, 1, 0, 3, 128, 20, 1, 1, 0, 3, 128, 30, - 0, 1, 0, 3, 132, 32, 2, 1, 0, 3, 132, 20, 1, 1, 0, 3, 132, 30, - 0, 1, 0, 3, 136, 30, 2, 1, 0, 3, 136, 20, 1, 1, 0, 3, 136, 30, - 0, 1, 0, 3, 140, 26, 2, 1, 0, 3, 140, 20, 1, 1, 0, 3, 140, 30, - 0, 1, 0, 3, 144, 26, 2, 1, 0, 3, 144, 63, 1, 1, 0, 3, 144, 63, - 0, 1, 0, 3, 149, 32, 2, 1, 0, 3, 149, 63, 1, 1, 0, 3, 149, 63, - 0, 1, 0, 3, 153, 32, 2, 1, 0, 3, 153, 63, 1, 1, 0, 3, 153, 63, - 0, 1, 0, 3, 157, 32, 2, 1, 0, 3, 157, 63, 1, 1, 0, 3, 157, 63, - 0, 1, 0, 3, 161, 32, 2, 1, 0, 3, 161, 63, 1, 1, 0, 3, 161, 63, - 0, 1, 0, 3, 165, 32, 2, 1, 0, 3, 165, 63, 1, 1, 0, 3, 165, 63, - 0, 1, 1, 2, 38, 22, 2, 1, 1, 2, 38, 30, 1, 1, 1, 2, 38, 30, - 0, 1, 1, 2, 46, 30, 2, 1, 1, 2, 46, 30, 1, 1, 1, 2, 46, 30, - 0, 1, 1, 2, 54, 30, 2, 1, 1, 2, 54, 30, 1, 1, 1, 2, 54, 30, - 0, 1, 1, 2, 62, 24, 2, 1, 1, 2, 62, 30, 1, 1, 1, 2, 62, 30, - 0, 1, 1, 2, 102, 24, 2, 1, 1, 2, 102, 30, 1, 1, 1, 2, 102, 30, - 0, 1, 1, 2, 110, 30, 2, 1, 1, 2, 110, 30, 1, 1, 1, 2, 110, 30, - 0, 1, 1, 2, 118, 30, 2, 1, 1, 2, 118, 30, 1, 1, 1, 2, 118, 30, - 0, 1, 1, 2, 126, 30, 2, 1, 1, 2, 126, 30, 1, 1, 1, 2, 126, 30, - 0, 1, 1, 2, 134, 30, 2, 1, 1, 2, 134, 30, 1, 1, 1, 2, 134, 30, - 0, 1, 1, 2, 142, 30, 2, 1, 1, 2, 142, 63, 1, 1, 1, 2, 142, 63, - 0, 1, 1, 2, 151, 30, 2, 1, 1, 2, 151, 63, 1, 1, 1, 2, 151, 63, - 0, 1, 1, 2, 159, 30, 2, 1, 1, 2, 159, 63, 1, 1, 1, 2, 159, 63, - 0, 1, 1, 3, 38, 20, 2, 1, 1, 3, 38, 20, 1, 1, 1, 3, 38, 22, - 0, 1, 1, 3, 46, 30, 2, 1, 1, 3, 46, 20, 1, 1, 1, 3, 46, 22, - 0, 1, 1, 3, 54, 30, 2, 1, 1, 3, 54, 20, 1, 1, 1, 3, 54, 22, - 0, 1, 1, 3, 62, 22, 2, 1, 1, 3, 62, 20, 1, 1, 1, 3, 62, 22, - 0, 1, 1, 3, 102, 22, 2, 1, 1, 3, 102, 20, 1, 1, 1, 3, 102, 30, - 0, 1, 1, 3, 110, 30, 2, 1, 1, 3, 110, 20, 1, 1, 1, 3, 110, 30, - 0, 1, 1, 3, 118, 30, 2, 1, 1, 3, 118, 20, 1, 1, 1, 3, 118, 30, - 0, 1, 1, 3, 126, 30, 2, 1, 1, 3, 126, 20, 1, 1, 1, 3, 126, 30, - 0, 1, 1, 3, 134, 30, 2, 1, 1, 3, 134, 20, 1, 1, 1, 3, 134, 30, - 0, 1, 1, 3, 142, 30, 2, 1, 1, 3, 142, 63, 1, 1, 1, 3, 142, 63, - 0, 1, 1, 3, 151, 30, 2, 1, 1, 3, 151, 63, 1, 1, 1, 3, 151, 63, - 0, 1, 1, 3, 159, 30, 2, 1, 1, 3, 
159, 63, 1, 1, 1, 3, 159, 63, - 0, 1, 2, 4, 42, 20, 2, 1, 2, 4, 42, 30, 1, 1, 2, 4, 42, 28, - 0, 1, 2, 4, 58, 20, 2, 1, 2, 4, 58, 30, 1, 1, 2, 4, 58, 28, - 0, 1, 2, 4, 106, 20, 2, 1, 2, 4, 106, 30, 1, 1, 2, 4, 106, 30, - 0, 1, 2, 4, 122, 30, 2, 1, 2, 4, 122, 30, 1, 1, 2, 4, 122, 30, - 0, 1, 2, 4, 138, 30, 2, 1, 2, 4, 138, 63, 1, 1, 2, 4, 138, 63, - 0, 1, 2, 4, 155, 30, 2, 1, 2, 4, 155, 63, 1, 1, 2, 4, 155, 63, - 0, 1, 2, 5, 42, 18, 2, 1, 2, 5, 42, 20, 1, 1, 2, 5, 42, 22, - 0, 1, 2, 5, 58, 18, 2, 1, 2, 5, 58, 20, 1, 1, 2, 5, 58, 22, - 0, 1, 2, 5, 106, 20, 2, 1, 2, 5, 106, 20, 1, 1, 2, 5, 106, 30, - 0, 1, 2, 5, 122, 30, 2, 1, 2, 5, 122, 20, 1, 1, 2, 5, 122, 30, - 0, 1, 2, 5, 138, 30, 2, 1, 2, 5, 138, 63, 1, 1, 2, 5, 138, 63, - 0, 1, 2, 5, 155, 30, 2, 1, 2, 5, 155, 63, 1, 1, 2, 5, 155, 63, +static const struct rtw_txpwr_lmt_cfg_pair rtw8822b_txpwr_lmt_type5[] = { + { 0, 0, 0, 0, 1, 32, }, + { 2, 0, 0, 0, 1, 28, }, + { 1, 0, 0, 0, 1, 30, }, + { 0, 0, 0, 0, 2, 32, }, + { 2, 0, 0, 0, 2, 28, }, + { 1, 0, 0, 0, 2, 30, }, + { 0, 0, 0, 0, 3, 32, }, + { 2, 0, 0, 0, 3, 28, }, + { 1, 0, 0, 0, 3, 30, }, + { 0, 0, 0, 0, 4, 32, }, + { 2, 0, 0, 0, 4, 28, }, + { 1, 0, 0, 0, 4, 30, }, + { 0, 0, 0, 0, 5, 32, }, + { 2, 0, 0, 0, 5, 28, }, + { 1, 0, 0, 0, 5, 30, }, + { 0, 0, 0, 0, 6, 32, }, + { 2, 0, 0, 0, 6, 28, }, + { 1, 0, 0, 0, 6, 30, }, + { 0, 0, 0, 0, 7, 32, }, + { 2, 0, 0, 0, 7, 28, }, + { 1, 0, 0, 0, 7, 30, }, + { 0, 0, 0, 0, 8, 32, }, + { 2, 0, 0, 0, 8, 28, }, + { 1, 0, 0, 0, 8, 30, }, + { 0, 0, 0, 0, 9, 32, }, + { 2, 0, 0, 0, 9, 28, }, + { 1, 0, 0, 0, 9, 30, }, + { 0, 0, 0, 0, 10, 32, }, + { 2, 0, 0, 0, 10, 28, }, + { 1, 0, 0, 0, 10, 30, }, + { 0, 0, 0, 0, 11, 32, }, + { 2, 0, 0, 0, 11, 28, }, + { 1, 0, 0, 0, 11, 30, }, + { 0, 0, 0, 0, 12, 26, }, + { 2, 0, 0, 0, 12, 28, }, + { 1, 0, 0, 0, 12, 30, }, + { 0, 0, 0, 0, 13, 20, }, + { 2, 0, 0, 0, 13, 28, }, + { 1, 0, 0, 0, 13, 28, }, + { 0, 0, 0, 0, 14, 63, }, + { 2, 0, 0, 0, 14, 63, }, + { 1, 0, 0, 0, 14, 32, }, + { 0, 0, 0, 1, 1, 26, }, + { 2, 0, 0, 1, 1, 30, }, + { 1, 0, 0, 1, 1, 34, }, + { 0, 0, 0, 1, 2, 30, }, + { 2, 0, 0, 1, 2, 30, }, + { 1, 0, 0, 1, 2, 34, }, + { 0, 0, 0, 1, 3, 32, }, + { 2, 0, 0, 1, 3, 30, }, + { 1, 0, 0, 1, 3, 34, }, + { 0, 0, 0, 1, 4, 34, }, + { 2, 0, 0, 1, 4, 30, }, + { 1, 0, 0, 1, 4, 34, }, + { 0, 0, 0, 1, 5, 34, }, + { 2, 0, 0, 1, 5, 30, }, + { 1, 0, 0, 1, 5, 34, }, + { 0, 0, 0, 1, 6, 34, }, + { 2, 0, 0, 1, 6, 30, }, + { 1, 0, 0, 1, 6, 34, }, + { 0, 0, 0, 1, 7, 34, }, + { 2, 0, 0, 1, 7, 30, }, + { 1, 0, 0, 1, 7, 34, }, + { 0, 0, 0, 1, 8, 34, }, + { 2, 0, 0, 1, 8, 30, }, + { 1, 0, 0, 1, 8, 34, }, + { 0, 0, 0, 1, 9, 32, }, + { 2, 0, 0, 1, 9, 30, }, + { 1, 0, 0, 1, 9, 34, }, + { 0, 0, 0, 1, 10, 30, }, + { 2, 0, 0, 1, 10, 30, }, + { 1, 0, 0, 1, 10, 34, }, + { 0, 0, 0, 1, 11, 28, }, + { 2, 0, 0, 1, 11, 30, }, + { 1, 0, 0, 1, 11, 34, }, + { 0, 0, 0, 1, 12, 22, }, + { 2, 0, 0, 1, 12, 30, }, + { 1, 0, 0, 1, 12, 34, }, + { 0, 0, 0, 1, 13, 14, }, + { 2, 0, 0, 1, 13, 30, }, + { 1, 0, 0, 1, 13, 34, }, + { 0, 0, 0, 1, 14, 63, }, + { 2, 0, 0, 1, 14, 63, }, + { 1, 0, 0, 1, 14, 63, }, + { 0, 0, 0, 2, 1, 26, }, + { 2, 0, 0, 2, 1, 30, }, + { 1, 0, 0, 2, 1, 34, }, + { 0, 0, 0, 2, 2, 30, }, + { 2, 0, 0, 2, 2, 30, }, + { 1, 0, 0, 2, 2, 34, }, + { 0, 0, 0, 2, 3, 32, }, + { 2, 0, 0, 2, 3, 30, }, + { 1, 0, 0, 2, 3, 34, }, + { 0, 0, 0, 2, 4, 34, }, + { 2, 0, 0, 2, 4, 30, }, + { 1, 0, 0, 2, 4, 34, }, + { 0, 0, 0, 2, 5, 34, }, + { 2, 0, 0, 2, 5, 30, }, + { 1, 0, 0, 2, 5, 34, }, + { 0, 0, 0, 2, 6, 34, }, + { 2, 0, 0, 2, 6, 30, }, + { 1, 0, 0, 2, 6, 34, }, 
+ { 0, 0, 0, 2, 7, 34, }, + { 2, 0, 0, 2, 7, 30, }, + { 1, 0, 0, 2, 7, 34, }, + { 0, 0, 0, 2, 8, 34, }, + { 2, 0, 0, 2, 8, 30, }, + { 1, 0, 0, 2, 8, 34, }, + { 0, 0, 0, 2, 9, 32, }, + { 2, 0, 0, 2, 9, 30, }, + { 1, 0, 0, 2, 9, 34, }, + { 0, 0, 0, 2, 10, 30, }, + { 2, 0, 0, 2, 10, 30, }, + { 1, 0, 0, 2, 10, 34, }, + { 0, 0, 0, 2, 11, 26, }, + { 2, 0, 0, 2, 11, 30, }, + { 1, 0, 0, 2, 11, 34, }, + { 0, 0, 0, 2, 12, 20, }, + { 2, 0, 0, 2, 12, 30, }, + { 1, 0, 0, 2, 12, 34, }, + { 0, 0, 0, 2, 13, 14, }, + { 2, 0, 0, 2, 13, 30, }, + { 1, 0, 0, 2, 13, 34, }, + { 0, 0, 0, 2, 14, 63, }, + { 2, 0, 0, 2, 14, 63, }, + { 1, 0, 0, 2, 14, 63, }, + { 0, 0, 0, 3, 1, 26, }, + { 2, 0, 0, 3, 1, 18, }, + { 1, 0, 0, 3, 1, 30, }, + { 0, 0, 0, 3, 2, 28, }, + { 2, 0, 0, 3, 2, 18, }, + { 1, 0, 0, 3, 2, 30, }, + { 0, 0, 0, 3, 3, 30, }, + { 2, 0, 0, 3, 3, 18, }, + { 1, 0, 0, 3, 3, 30, }, + { 0, 0, 0, 3, 4, 30, }, + { 2, 0, 0, 3, 4, 18, }, + { 1, 0, 0, 3, 4, 30, }, + { 0, 0, 0, 3, 5, 32, }, + { 2, 0, 0, 3, 5, 18, }, + { 1, 0, 0, 3, 5, 30, }, + { 0, 0, 0, 3, 6, 32, }, + { 2, 0, 0, 3, 6, 18, }, + { 1, 0, 0, 3, 6, 30, }, + { 0, 0, 0, 3, 7, 32, }, + { 2, 0, 0, 3, 7, 18, }, + { 1, 0, 0, 3, 7, 30, }, + { 0, 0, 0, 3, 8, 30, }, + { 2, 0, 0, 3, 8, 18, }, + { 1, 0, 0, 3, 8, 30, }, + { 0, 0, 0, 3, 9, 30, }, + { 2, 0, 0, 3, 9, 18, }, + { 1, 0, 0, 3, 9, 30, }, + { 0, 0, 0, 3, 10, 28, }, + { 2, 0, 0, 3, 10, 18, }, + { 1, 0, 0, 3, 10, 30, }, + { 0, 0, 0, 3, 11, 26, }, + { 2, 0, 0, 3, 11, 18, }, + { 1, 0, 0, 3, 11, 30, }, + { 0, 0, 0, 3, 12, 20, }, + { 2, 0, 0, 3, 12, 18, }, + { 1, 0, 0, 3, 12, 30, }, + { 0, 0, 0, 3, 13, 14, }, + { 2, 0, 0, 3, 13, 18, }, + { 1, 0, 0, 3, 13, 30, }, + { 0, 0, 0, 3, 14, 63, }, + { 2, 0, 0, 3, 14, 63, }, + { 1, 0, 0, 3, 14, 63, }, + { 0, 0, 1, 2, 1, 63, }, + { 2, 0, 1, 2, 1, 63, }, + { 1, 0, 1, 2, 1, 63, }, + { 0, 0, 1, 2, 2, 63, }, + { 2, 0, 1, 2, 2, 63, }, + { 1, 0, 1, 2, 2, 63, }, + { 0, 0, 1, 2, 3, 26, }, + { 2, 0, 1, 2, 3, 30, }, + { 1, 0, 1, 2, 3, 34, }, + { 0, 0, 1, 2, 4, 26, }, + { 2, 0, 1, 2, 4, 30, }, + { 1, 0, 1, 2, 4, 34, }, + { 0, 0, 1, 2, 5, 30, }, + { 2, 0, 1, 2, 5, 30, }, + { 1, 0, 1, 2, 5, 34, }, + { 0, 0, 1, 2, 6, 32, }, + { 2, 0, 1, 2, 6, 30, }, + { 1, 0, 1, 2, 6, 34, }, + { 0, 0, 1, 2, 7, 30, }, + { 2, 0, 1, 2, 7, 30, }, + { 1, 0, 1, 2, 7, 34, }, + { 0, 0, 1, 2, 8, 26, }, + { 2, 0, 1, 2, 8, 30, }, + { 1, 0, 1, 2, 8, 34, }, + { 0, 0, 1, 2, 9, 26, }, + { 2, 0, 1, 2, 9, 30, }, + { 1, 0, 1, 2, 9, 34, }, + { 0, 0, 1, 2, 10, 20, }, + { 2, 0, 1, 2, 10, 30, }, + { 1, 0, 1, 2, 10, 34, }, + { 0, 0, 1, 2, 11, 14, }, + { 2, 0, 1, 2, 11, 30, }, + { 1, 0, 1, 2, 11, 34, }, + { 0, 0, 1, 2, 12, 63, }, + { 2, 0, 1, 2, 12, 63, }, + { 1, 0, 1, 2, 12, 63, }, + { 0, 0, 1, 2, 13, 63, }, + { 2, 0, 1, 2, 13, 63, }, + { 1, 0, 1, 2, 13, 63, }, + { 0, 0, 1, 2, 14, 63, }, + { 2, 0, 1, 2, 14, 63, }, + { 1, 0, 1, 2, 14, 63, }, + { 0, 0, 1, 3, 1, 63, }, + { 2, 0, 1, 3, 1, 63, }, + { 1, 0, 1, 3, 1, 63, }, + { 0, 0, 1, 3, 2, 63, }, + { 2, 0, 1, 3, 2, 63, }, + { 1, 0, 1, 3, 2, 63, }, + { 0, 0, 1, 3, 3, 24, }, + { 2, 0, 1, 3, 3, 18, }, + { 1, 0, 1, 3, 3, 30, }, + { 0, 0, 1, 3, 4, 24, }, + { 2, 0, 1, 3, 4, 18, }, + { 1, 0, 1, 3, 4, 30, }, + { 0, 0, 1, 3, 5, 26, }, + { 2, 0, 1, 3, 5, 18, }, + { 1, 0, 1, 3, 5, 30, }, + { 0, 0, 1, 3, 6, 28, }, + { 2, 0, 1, 3, 6, 18, }, + { 1, 0, 1, 3, 6, 30, }, + { 0, 0, 1, 3, 7, 26, }, + { 2, 0, 1, 3, 7, 18, }, + { 1, 0, 1, 3, 7, 30, }, + { 0, 0, 1, 3, 8, 26, }, + { 2, 0, 1, 3, 8, 18, }, + { 1, 0, 1, 3, 8, 30, }, + { 0, 0, 1, 3, 9, 26, }, + { 2, 0, 1, 3, 9, 18, }, + { 1, 0, 1, 3, 9, 30, }, 
+ { 0, 0, 1, 3, 10, 20, }, + { 2, 0, 1, 3, 10, 18, }, + { 1, 0, 1, 3, 10, 30, }, + { 0, 0, 1, 3, 11, 14, }, + { 2, 0, 1, 3, 11, 18, }, + { 1, 0, 1, 3, 11, 30, }, + { 0, 0, 1, 3, 12, 63, }, + { 2, 0, 1, 3, 12, 63, }, + { 1, 0, 1, 3, 12, 63, }, + { 0, 0, 1, 3, 13, 63, }, + { 2, 0, 1, 3, 13, 63, }, + { 1, 0, 1, 3, 13, 63, }, + { 0, 0, 1, 3, 14, 63, }, + { 2, 0, 1, 3, 14, 63, }, + { 1, 0, 1, 3, 14, 63, }, + { 0, 1, 0, 1, 36, 30, }, + { 2, 1, 0, 1, 36, 32, }, + { 1, 1, 0, 1, 36, 30, }, + { 0, 1, 0, 1, 40, 32, }, + { 2, 1, 0, 1, 40, 32, }, + { 1, 1, 0, 1, 40, 30, }, + { 0, 1, 0, 1, 44, 32, }, + { 2, 1, 0, 1, 44, 32, }, + { 1, 1, 0, 1, 44, 30, }, + { 0, 1, 0, 1, 48, 32, }, + { 2, 1, 0, 1, 48, 32, }, + { 1, 1, 0, 1, 48, 30, }, + { 0, 1, 0, 1, 52, 32, }, + { 2, 1, 0, 1, 52, 32, }, + { 1, 1, 0, 1, 52, 28, }, + { 0, 1, 0, 1, 56, 32, }, + { 2, 1, 0, 1, 56, 32, }, + { 1, 1, 0, 1, 56, 28, }, + { 0, 1, 0, 1, 60, 32, }, + { 2, 1, 0, 1, 60, 32, }, + { 1, 1, 0, 1, 60, 28, }, + { 0, 1, 0, 1, 64, 28, }, + { 2, 1, 0, 1, 64, 32, }, + { 1, 1, 0, 1, 64, 28, }, + { 0, 1, 0, 1, 100, 26, }, + { 2, 1, 0, 1, 100, 32, }, + { 1, 1, 0, 1, 100, 32, }, + { 0, 1, 0, 1, 104, 32, }, + { 2, 1, 0, 1, 104, 32, }, + { 1, 1, 0, 1, 104, 32, }, + { 0, 1, 0, 1, 108, 32, }, + { 2, 1, 0, 1, 108, 32, }, + { 1, 1, 0, 1, 108, 32, }, + { 0, 1, 0, 1, 112, 32, }, + { 2, 1, 0, 1, 112, 32, }, + { 1, 1, 0, 1, 112, 32, }, + { 0, 1, 0, 1, 116, 32, }, + { 2, 1, 0, 1, 116, 32, }, + { 1, 1, 0, 1, 116, 32, }, + { 0, 1, 0, 1, 120, 32, }, + { 2, 1, 0, 1, 120, 32, }, + { 1, 1, 0, 1, 120, 32, }, + { 0, 1, 0, 1, 124, 32, }, + { 2, 1, 0, 1, 124, 32, }, + { 1, 1, 0, 1, 124, 32, }, + { 0, 1, 0, 1, 128, 32, }, + { 2, 1, 0, 1, 128, 32, }, + { 1, 1, 0, 1, 128, 32, }, + { 0, 1, 0, 1, 132, 32, }, + { 2, 1, 0, 1, 132, 32, }, + { 1, 1, 0, 1, 132, 32, }, + { 0, 1, 0, 1, 136, 32, }, + { 2, 1, 0, 1, 136, 32, }, + { 1, 1, 0, 1, 136, 32, }, + { 0, 1, 0, 1, 140, 28, }, + { 2, 1, 0, 1, 140, 32, }, + { 1, 1, 0, 1, 140, 32, }, + { 0, 1, 0, 1, 144, 28, }, + { 2, 1, 0, 1, 144, 63, }, + { 1, 1, 0, 1, 144, 63, }, + { 0, 1, 0, 1, 149, 32, }, + { 2, 1, 0, 1, 149, 63, }, + { 1, 1, 0, 1, 149, 63, }, + { 0, 1, 0, 1, 153, 32, }, + { 2, 1, 0, 1, 153, 63, }, + { 1, 1, 0, 1, 153, 63, }, + { 0, 1, 0, 1, 157, 32, }, + { 2, 1, 0, 1, 157, 63, }, + { 1, 1, 0, 1, 157, 63, }, + { 0, 1, 0, 1, 161, 32, }, + { 2, 1, 0, 1, 161, 63, }, + { 1, 1, 0, 1, 161, 63, }, + { 0, 1, 0, 1, 165, 32, }, + { 2, 1, 0, 1, 165, 63, }, + { 1, 1, 0, 1, 165, 63, }, + { 0, 1, 0, 2, 36, 30, }, + { 2, 1, 0, 2, 36, 32, }, + { 1, 1, 0, 2, 36, 28, }, + { 0, 1, 0, 2, 40, 32, }, + { 2, 1, 0, 2, 40, 32, }, + { 1, 1, 0, 2, 40, 28, }, + { 0, 1, 0, 2, 44, 32, }, + { 2, 1, 0, 2, 44, 32, }, + { 1, 1, 0, 2, 44, 28, }, + { 0, 1, 0, 2, 48, 32, }, + { 2, 1, 0, 2, 48, 32, }, + { 1, 1, 0, 2, 48, 28, }, + { 0, 1, 0, 2, 52, 32, }, + { 2, 1, 0, 2, 52, 32, }, + { 1, 1, 0, 2, 52, 28, }, + { 0, 1, 0, 2, 56, 32, }, + { 2, 1, 0, 2, 56, 32, }, + { 1, 1, 0, 2, 56, 28, }, + { 0, 1, 0, 2, 60, 32, }, + { 2, 1, 0, 2, 60, 32, }, + { 1, 1, 0, 2, 60, 28, }, + { 0, 1, 0, 2, 64, 28, }, + { 2, 1, 0, 2, 64, 32, }, + { 1, 1, 0, 2, 64, 28, }, + { 0, 1, 0, 2, 100, 26, }, + { 2, 1, 0, 2, 100, 32, }, + { 1, 1, 0, 2, 100, 32, }, + { 0, 1, 0, 2, 104, 32, }, + { 2, 1, 0, 2, 104, 32, }, + { 1, 1, 0, 2, 104, 32, }, + { 0, 1, 0, 2, 108, 32, }, + { 2, 1, 0, 2, 108, 32, }, + { 1, 1, 0, 2, 108, 32, }, + { 0, 1, 0, 2, 112, 32, }, + { 2, 1, 0, 2, 112, 32, }, + { 1, 1, 0, 2, 112, 32, }, + { 0, 1, 0, 2, 116, 32, }, + { 2, 1, 0, 2, 116, 32, }, + { 1, 1, 0, 2, 116, 32, }, + { 
0, 1, 0, 2, 120, 32, }, + { 2, 1, 0, 2, 120, 32, }, + { 1, 1, 0, 2, 120, 32, }, + { 0, 1, 0, 2, 124, 32, }, + { 2, 1, 0, 2, 124, 32, }, + { 1, 1, 0, 2, 124, 32, }, + { 0, 1, 0, 2, 128, 32, }, + { 2, 1, 0, 2, 128, 32, }, + { 1, 1, 0, 2, 128, 32, }, + { 0, 1, 0, 2, 132, 32, }, + { 2, 1, 0, 2, 132, 32, }, + { 1, 1, 0, 2, 132, 32, }, + { 0, 1, 0, 2, 136, 32, }, + { 2, 1, 0, 2, 136, 32, }, + { 1, 1, 0, 2, 136, 32, }, + { 0, 1, 0, 2, 140, 26, }, + { 2, 1, 0, 2, 140, 32, }, + { 1, 1, 0, 2, 140, 32, }, + { 0, 1, 0, 2, 144, 26, }, + { 2, 1, 0, 2, 144, 63, }, + { 1, 1, 0, 2, 144, 63, }, + { 0, 1, 0, 2, 149, 32, }, + { 2, 1, 0, 2, 149, 63, }, + { 1, 1, 0, 2, 149, 63, }, + { 0, 1, 0, 2, 153, 32, }, + { 2, 1, 0, 2, 153, 63, }, + { 1, 1, 0, 2, 153, 63, }, + { 0, 1, 0, 2, 157, 32, }, + { 2, 1, 0, 2, 157, 63, }, + { 1, 1, 0, 2, 157, 63, }, + { 0, 1, 0, 2, 161, 32, }, + { 2, 1, 0, 2, 161, 63, }, + { 1, 1, 0, 2, 161, 63, }, + { 0, 1, 0, 2, 165, 32, }, + { 2, 1, 0, 2, 165, 63, }, + { 1, 1, 0, 2, 165, 63, }, + { 0, 1, 0, 3, 36, 28, }, + { 2, 1, 0, 3, 36, 20, }, + { 1, 1, 0, 3, 36, 22, }, + { 0, 1, 0, 3, 40, 30, }, + { 2, 1, 0, 3, 40, 20, }, + { 1, 1, 0, 3, 40, 22, }, + { 0, 1, 0, 3, 44, 30, }, + { 2, 1, 0, 3, 44, 20, }, + { 1, 1, 0, 3, 44, 22, }, + { 0, 1, 0, 3, 48, 30, }, + { 2, 1, 0, 3, 48, 20, }, + { 1, 1, 0, 3, 48, 22, }, + { 0, 1, 0, 3, 52, 30, }, + { 2, 1, 0, 3, 52, 20, }, + { 1, 1, 0, 3, 52, 22, }, + { 0, 1, 0, 3, 56, 30, }, + { 2, 1, 0, 3, 56, 20, }, + { 1, 1, 0, 3, 56, 22, }, + { 0, 1, 0, 3, 60, 30, }, + { 2, 1, 0, 3, 60, 20, }, + { 1, 1, 0, 3, 60, 22, }, + { 0, 1, 0, 3, 64, 28, }, + { 2, 1, 0, 3, 64, 20, }, + { 1, 1, 0, 3, 64, 22, }, + { 0, 1, 0, 3, 100, 26, }, + { 2, 1, 0, 3, 100, 20, }, + { 1, 1, 0, 3, 100, 30, }, + { 0, 1, 0, 3, 104, 30, }, + { 2, 1, 0, 3, 104, 20, }, + { 1, 1, 0, 3, 104, 30, }, + { 0, 1, 0, 3, 108, 32, }, + { 2, 1, 0, 3, 108, 20, }, + { 1, 1, 0, 3, 108, 30, }, + { 0, 1, 0, 3, 112, 32, }, + { 2, 1, 0, 3, 112, 20, }, + { 1, 1, 0, 3, 112, 30, }, + { 0, 1, 0, 3, 116, 32, }, + { 2, 1, 0, 3, 116, 20, }, + { 1, 1, 0, 3, 116, 30, }, + { 0, 1, 0, 3, 120, 32, }, + { 2, 1, 0, 3, 120, 20, }, + { 1, 1, 0, 3, 120, 30, }, + { 0, 1, 0, 3, 124, 32, }, + { 2, 1, 0, 3, 124, 20, }, + { 1, 1, 0, 3, 124, 30, }, + { 0, 1, 0, 3, 128, 32, }, + { 2, 1, 0, 3, 128, 20, }, + { 1, 1, 0, 3, 128, 30, }, + { 0, 1, 0, 3, 132, 32, }, + { 2, 1, 0, 3, 132, 20, }, + { 1, 1, 0, 3, 132, 30, }, + { 0, 1, 0, 3, 136, 30, }, + { 2, 1, 0, 3, 136, 20, }, + { 1, 1, 0, 3, 136, 30, }, + { 0, 1, 0, 3, 140, 26, }, + { 2, 1, 0, 3, 140, 20, }, + { 1, 1, 0, 3, 140, 30, }, + { 0, 1, 0, 3, 144, 26, }, + { 2, 1, 0, 3, 144, 63, }, + { 1, 1, 0, 3, 144, 63, }, + { 0, 1, 0, 3, 149, 32, }, + { 2, 1, 0, 3, 149, 63, }, + { 1, 1, 0, 3, 149, 63, }, + { 0, 1, 0, 3, 153, 32, }, + { 2, 1, 0, 3, 153, 63, }, + { 1, 1, 0, 3, 153, 63, }, + { 0, 1, 0, 3, 157, 32, }, + { 2, 1, 0, 3, 157, 63, }, + { 1, 1, 0, 3, 157, 63, }, + { 0, 1, 0, 3, 161, 32, }, + { 2, 1, 0, 3, 161, 63, }, + { 1, 1, 0, 3, 161, 63, }, + { 0, 1, 0, 3, 165, 32, }, + { 2, 1, 0, 3, 165, 63, }, + { 1, 1, 0, 3, 165, 63, }, + { 0, 1, 1, 2, 38, 22, }, + { 2, 1, 1, 2, 38, 30, }, + { 1, 1, 1, 2, 38, 30, }, + { 0, 1, 1, 2, 46, 30, }, + { 2, 1, 1, 2, 46, 30, }, + { 1, 1, 1, 2, 46, 30, }, + { 0, 1, 1, 2, 54, 30, }, + { 2, 1, 1, 2, 54, 30, }, + { 1, 1, 1, 2, 54, 30, }, + { 0, 1, 1, 2, 62, 24, }, + { 2, 1, 1, 2, 62, 30, }, + { 1, 1, 1, 2, 62, 30, }, + { 0, 1, 1, 2, 102, 24, }, + { 2, 1, 1, 2, 102, 30, }, + { 1, 1, 1, 2, 102, 30, }, + { 0, 1, 1, 2, 110, 30, }, + { 2, 1, 1, 2, 110, 30, }, + { 1, 1, 
1, 2, 110, 30, }, + { 0, 1, 1, 2, 118, 30, }, + { 2, 1, 1, 2, 118, 30, }, + { 1, 1, 1, 2, 118, 30, }, + { 0, 1, 1, 2, 126, 30, }, + { 2, 1, 1, 2, 126, 30, }, + { 1, 1, 1, 2, 126, 30, }, + { 0, 1, 1, 2, 134, 30, }, + { 2, 1, 1, 2, 134, 30, }, + { 1, 1, 1, 2, 134, 30, }, + { 0, 1, 1, 2, 142, 30, }, + { 2, 1, 1, 2, 142, 63, }, + { 1, 1, 1, 2, 142, 63, }, + { 0, 1, 1, 2, 151, 30, }, + { 2, 1, 1, 2, 151, 63, }, + { 1, 1, 1, 2, 151, 63, }, + { 0, 1, 1, 2, 159, 30, }, + { 2, 1, 1, 2, 159, 63, }, + { 1, 1, 1, 2, 159, 63, }, + { 0, 1, 1, 3, 38, 20, }, + { 2, 1, 1, 3, 38, 20, }, + { 1, 1, 1, 3, 38, 22, }, + { 0, 1, 1, 3, 46, 30, }, + { 2, 1, 1, 3, 46, 20, }, + { 1, 1, 1, 3, 46, 22, }, + { 0, 1, 1, 3, 54, 30, }, + { 2, 1, 1, 3, 54, 20, }, + { 1, 1, 1, 3, 54, 22, }, + { 0, 1, 1, 3, 62, 22, }, + { 2, 1, 1, 3, 62, 20, }, + { 1, 1, 1, 3, 62, 22, }, + { 0, 1, 1, 3, 102, 22, }, + { 2, 1, 1, 3, 102, 20, }, + { 1, 1, 1, 3, 102, 30, }, + { 0, 1, 1, 3, 110, 30, }, + { 2, 1, 1, 3, 110, 20, }, + { 1, 1, 1, 3, 110, 30, }, + { 0, 1, 1, 3, 118, 30, }, + { 2, 1, 1, 3, 118, 20, }, + { 1, 1, 1, 3, 118, 30, }, + { 0, 1, 1, 3, 126, 30, }, + { 2, 1, 1, 3, 126, 20, }, + { 1, 1, 1, 3, 126, 30, }, + { 0, 1, 1, 3, 134, 30, }, + { 2, 1, 1, 3, 134, 20, }, + { 1, 1, 1, 3, 134, 30, }, + { 0, 1, 1, 3, 142, 30, }, + { 2, 1, 1, 3, 142, 63, }, + { 1, 1, 1, 3, 142, 63, }, + { 0, 1, 1, 3, 151, 30, }, + { 2, 1, 1, 3, 151, 63, }, + { 1, 1, 1, 3, 151, 63, }, + { 0, 1, 1, 3, 159, 30, }, + { 2, 1, 1, 3, 159, 63, }, + { 1, 1, 1, 3, 159, 63, }, + { 0, 1, 2, 4, 42, 20, }, + { 2, 1, 2, 4, 42, 30, }, + { 1, 1, 2, 4, 42, 28, }, + { 0, 1, 2, 4, 58, 20, }, + { 2, 1, 2, 4, 58, 30, }, + { 1, 1, 2, 4, 58, 28, }, + { 0, 1, 2, 4, 106, 20, }, + { 2, 1, 2, 4, 106, 30, }, + { 1, 1, 2, 4, 106, 30, }, + { 0, 1, 2, 4, 122, 30, }, + { 2, 1, 2, 4, 122, 30, }, + { 1, 1, 2, 4, 122, 30, }, + { 0, 1, 2, 4, 138, 30, }, + { 2, 1, 2, 4, 138, 63, }, + { 1, 1, 2, 4, 138, 63, }, + { 0, 1, 2, 4, 155, 30, }, + { 2, 1, 2, 4, 155, 63, }, + { 1, 1, 2, 4, 155, 63, }, + { 0, 1, 2, 5, 42, 18, }, + { 2, 1, 2, 5, 42, 20, }, + { 1, 1, 2, 5, 42, 22, }, + { 0, 1, 2, 5, 58, 18, }, + { 2, 1, 2, 5, 58, 20, }, + { 1, 1, 2, 5, 58, 22, }, + { 0, 1, 2, 5, 106, 20, }, + { 2, 1, 2, 5, 106, 20, }, + { 1, 1, 2, 5, 106, 30, }, + { 0, 1, 2, 5, 122, 30, }, + { 2, 1, 2, 5, 122, 20, }, + { 1, 1, 2, 5, 122, 30, }, + { 0, 1, 2, 5, 138, 30, }, + { 2, 1, 2, 5, 138, 63, }, + { 1, 1, 2, 5, 138, 63, }, + { 0, 1, 2, 5, 155, 30, }, + { 2, 1, 2, 5, 155, 63, }, + { 1, 1, 2, 5, 155, 63, }, }; RTW_DECL_TABLE_TXPWR_LMT(rtw8822b_txpwr_lmt_type5); diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c index f6214ff20337..c2f6cd76a658 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c @@ -3,6 +3,7 @@ */ #include "main.h" +#include "coex.h" #include "fw.h" #include "tx.h" #include "rx.h" @@ -12,6 +13,7 @@ #include "mac.h" #include "reg.h" #include "debug.h" +#include "util.h" static void rtw8822c_config_trx_mode(struct rtw_dev *rtwdev, u8 tx_path, u8 rx_path, bool is_tx2_path); @@ -31,6 +33,7 @@ static int rtw8822c_read_efuse(struct rtw_dev *rtwdev, u8 *log_map) map = (struct rtw8822c_efuse *)log_map; efuse->rfe_option = map->rfe_option; + efuse->rf_board_option = map->rf_board_option; efuse->crystal_cap = map->xtal_k; efuse->channel_plan = map->channel_plan; efuse->country_code[0] = map->country_code[0]; @@ -1015,6 +1018,9 @@ static void rtw8822c_phy_set_param(struct rtw_dev *rtwdev) BIT_RF_EN | 
BIT_RF_RSTB | BIT_RF_SDM_RSTB); rtw_write32_set(rtwdev, REG_WLRF1, BIT_WLRF1_BBRF_EN); + /* disable low rate DPD */ + rtw_write32_mask(rtwdev, REG_DIS_DPD, DIS_DPD_MASK, DIS_DPD_RATEALL); + /* pre init before header files config */ rtw8822c_header_file_init(rtwdev, true); @@ -1041,12 +1047,6 @@ static void rtw8822c_phy_set_param(struct rtw_dev *rtwdev) dm_info->cck_gi_l_bnd = ((cck_gi_l_bnd_msb << 4) | (cck_gi_l_bnd_lsb)); rtw8822c_rf_init(rtwdev); - /* wifi path controller */ - rtw_write32_mask(rtwdev, 0x70, 0xff000000, 0x0e); - rtw_write32_mask(rtwdev, 0x1704, 0xffffffff, 0x7700); - rtw_write32_mask(rtwdev, 0x1700, 0xffffffff, 0xc00f0038); - rtw_write32_mask(rtwdev, 0x6c0, 0xffffffff, 0xaaaaaaaa); - rtw_write32_mask(rtwdev, 0x6c4, 0xffffffff, 0xaaaaaaaa); } #define WLAN_TXQ_RPT_EN 0x1F @@ -1118,6 +1118,7 @@ static void rtw8822c_phy_set_param(struct rtw_dev *rtwdev) #define WLAN_MAC_OPT_NORM_FUNC1 0x98 #define WLAN_MAC_OPT_LB_FUNC1 0x80 #define WLAN_MAC_OPT_FUNC2 0x30810041 +#define WLAN_MAC_INT_MIG_CFG 0x33330000 #define WLAN_SIFS_CFG (WLAN_SIFS_CCK_CONT_TX | \ (WLAN_SIFS_OFDM_CONT_TX << BIT_SHIFT_SIFS_OFDM_CTX) | \ @@ -1255,6 +1256,9 @@ static int rtw8822c_mac_init(struct rtw_dev *rtwdev) value16 = BIT_SET_RXPSF_ERRTHR(value16, 0x07); rtw_write16(rtwdev, REG_RXPSF_CTRL, value16); + /* Interrupt migration configuration */ + rtw_write32(rtwdev, REG_INT_MIG, WLAN_MAC_INT_MIG_CFG); + return 0; } @@ -1624,16 +1628,16 @@ static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status, else if (gain_a > u_bnd) rx_power[RF_PATH_A] -= (gain_a - u_bnd) << 1; if (gain_b < l_bnd) - rx_power[RF_PATH_A] += (l_bnd - gain_b) << 1; + rx_power[RF_PATH_B] += (l_bnd - gain_b) << 1; else if (gain_b > u_bnd) - rx_power[RF_PATH_A] -= (gain_b - u_bnd) << 1; + rx_power[RF_PATH_B] -= (gain_b - u_bnd) << 1; rx_power[RF_PATH_A] -= 110; rx_power[RF_PATH_B] -= 110; - pkt_stat->rx_power[RF_PATH_A] = max3(rx_power[RF_PATH_A], - rx_power[RF_PATH_B], - min_rx_power); + pkt_stat->rx_power[RF_PATH_A] = rx_power[RF_PATH_A]; + pkt_stat->rx_power[RF_PATH_B] = rx_power[RF_PATH_B]; + pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 1); pkt_stat->bw = RTW_CHANNEL_WIDTH_20; pkt_stat->signal_power = max(pkt_stat->rx_power[RF_PATH_A], @@ -1817,6 +1821,7 @@ static void rtw8822c_false_alarm_statistics(struct rtw_dev *rtwdev) struct rtw_dm_info *dm_info = &rtwdev->dm_info; u32 cck_enable; u32 cck_fa_cnt; + u32 crc32_cnt; u32 ofdm_fa_cnt; u32 ofdm_fa_cnt1, ofdm_fa_cnt2, ofdm_fa_cnt3, ofdm_fa_cnt4, ofdm_fa_cnt5; u16 parity_fail, rate_illegal, crc8_fail, mcs_fail, sb_search_fail, @@ -1848,6 +1853,19 @@ static void rtw8822c_false_alarm_statistics(struct rtw_dev *rtwdev) dm_info->total_fa_cnt = ofdm_fa_cnt; dm_info->total_fa_cnt += cck_enable ? 
cck_fa_cnt : 0; + crc32_cnt = rtw_read32(rtwdev, 0x2c04); + dm_info->cck_ok_cnt = crc32_cnt & 0xffff; + dm_info->cck_err_cnt = (crc32_cnt & 0xffff0000) >> 16; + crc32_cnt = rtw_read32(rtwdev, 0x2c14); + dm_info->ofdm_ok_cnt = crc32_cnt & 0xffff; + dm_info->ofdm_err_cnt = (crc32_cnt & 0xffff0000) >> 16; + crc32_cnt = rtw_read32(rtwdev, 0x2c10); + dm_info->ht_ok_cnt = crc32_cnt & 0xffff; + dm_info->ht_err_cnt = (crc32_cnt & 0xffff0000) >> 16; + crc32_cnt = rtw_read32(rtwdev, 0x2c0c); + dm_info->vht_ok_cnt = crc32_cnt & 0xffff; + dm_info->vht_err_cnt = (crc32_cnt & 0xffff0000) >> 16; + + rtw_write32_mask(rtwdev, REG_CCANRX, BIT_CCK_FA_RST, 0); + rtw_write32_mask(rtwdev, REG_CCANRX, BIT_CCK_FA_RST, 2); + rtw_write32_mask(rtwdev, REG_CCANRX, BIT_OFDM_FA_RST, 0); @@ -1862,6 +1880,1315 @@ static void rtw8822c_false_alarm_statistics(struct rtw_dev *rtwdev) static void rtw8822c_do_iqk(struct rtw_dev *rtwdev) { + struct rtw_iqk_para para = {0}; + u8 iqk_chk; + int counter; + + para.clear = 1; + rtw_fw_do_iqk(rtwdev, &para); + + for (counter = 0; counter < 300; counter++) { + iqk_chk = rtw_read8(rtwdev, REG_RPT_CIP); + if (iqk_chk == 0xaa) + break; + msleep(20); + } + rtw_write8(rtwdev, REG_IQKSTAT, 0x0); + + rtw_dbg(rtwdev, RTW_DBG_RFK, "iqk counter=%d\n", counter); +} + +/* for coex */ +static void rtw8822c_coex_cfg_init(struct rtw_dev *rtwdev) +{ + /* enable TBTT interrupt */ + rtw_write8_set(rtwdev, REG_BCN_CTRL, BIT_EN_BCN_FUNCTION); + + /* BT report packet sample rate */ + /* 0x790[5:0]=0x5 */ + rtw_write8_set(rtwdev, REG_BT_TDMA_TIME, 0x05); + + /* enable BT counter statistics */ + rtw_write8(rtwdev, REG_BT_STAT_CTRL, 0x1); + + /* enable PTA (3-wire function from BT side) */ + rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_BT_PTA_EN); + rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_BT_AOD_GPIO3); + + /* enable PTA (tx/rx signal from WiFi side) */ + rtw_write8_set(rtwdev, REG_QUEUE_CTRL, BIT_PTA_WL_TX_EN); + /* wl tx signal to PTA is not gated by EDCCA */ + rtw_write8_clr(rtwdev, REG_QUEUE_CTRL, BIT_PTA_EDCCA_EN); + /* GNT_BT=1 while select both */ + rtw_write8_set(rtwdev, REG_BT_COEX_V2, BIT_GNT_BT_POLARITY); + /* BT_CCA = ~GNT_WL_BB (not OR'ed with GNT_BT_BB, LTE_Rx) */ + rtw_write8_clr(rtwdev, REG_DUMMY_PAGE4_V1, BIT_BTCCA_CTRL); + + /* to avoid RF parameter error */ + rtw_write_rf(rtwdev, RF_PATH_B, 0x1, 0xfffff, 0x40000); +} + +static void rtw8822c_coex_cfg_gnt_fix(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_efuse *efuse = &rtwdev->efuse; + u32 rf_0x1; + + if (coex_stat->gnt_workaround_state == coex_stat->wl_coex_mode) + return; + + coex_stat->gnt_workaround_state = coex_stat->wl_coex_mode; + + if ((coex_stat->kt_ver == 0 && coex->under_5g) || coex->freerun) + rf_0x1 = 0x40021; + else + rf_0x1 = 0x40000; + + /* BT at S1 for Shared-Ant */ + if (efuse->share_ant) + rf_0x1 |= BIT(13); + + rtw_write_rf(rtwdev, RF_PATH_B, 0x1, 0xfffff, rf_0x1); + + /* WL-S0 2G RF TRX cannot be masked by GNT_BT + * enable "WLS0 BB change RF mode if GNT_BT = 1" for shared-antenna type + * disable:0x1860[3] = 1, enable:0x1860[3] = 0 + * + * enable "DAC off if GNT_WL = 0" for non-shared-antenna + * disable 0x1c30[22] = 0, + * enable: 0x1c30[22] = 1, 0x1c38[12] = 0, 0x1c38[28] = 1 + * + * disable WL-S1 BB change RF mode if GNT_BT + * since RF TRx mask can do it + */ + rtw_write8_mask(rtwdev, 0x1c32, BIT(6), 1); + rtw_write8_mask(rtwdev, 0x1c39, BIT(4), 0); + rtw_write8_mask(rtwdev, 0x1c3b, BIT(4), 1); + rtw_write8_mask(rtwdev, 0x4160, BIT(3), 1); +
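+ /* Editor's note, added in editing and not part of this patch: the + * masked byte writes above expand to a read-modify-write on the MAC + * register. A minimal sketch of the idea, assuming the kernel's usual + * __ffs() semantics (write8_mask_sketch is a hypothetical name, not + * the driver's actual implementation): + * + * static void write8_mask_sketch(struct rtw_dev *rtwdev, u32 addr, + * u8 mask, u8 data) + * { + * u8 val = rtw_read8(rtwdev, addr); + * + * val &= ~mask; + * val |= (data << __ffs(mask)) & mask; + * rtw_write8(rtwdev, addr, val); + * } + * + * e.g. rtw_write8_mask(rtwdev, 0x1c32, BIT(6), 1) sets only bit 6 of + * register 0x1c32 and leaves the rest of the byte untouched. + */ + +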
/* disable WL-S0 BB change RF mode if wifi is at 5G, + * or antenna path is separated + */ + if (coex_stat->wl_coex_mode == COEX_WLINK_5G || + coex->under_5g || !efuse->share_ant) { + if (coex_stat->kt_ver >= 3) { + rtw_write8_mask(rtwdev, 0x1860, BIT(3), 0); + rtw_write8_mask(rtwdev, 0x1ca7, BIT(3), 1); + } else { + rtw_write8_mask(rtwdev, 0x1860, BIT(3), 1); + } + } else { + /* shared-antenna */ + rtw_write8_mask(rtwdev, 0x1860, BIT(3), 0); + if (coex_stat->kt_ver >= 3) + rtw_write8_mask(rtwdev, 0x1ca7, BIT(3), 0); + } +} + +static void rtw8822c_coex_cfg_gnt_debug(struct rtw_dev *rtwdev) +{ + rtw_write8_mask(rtwdev, 0x66, BIT(4), 0); + rtw_write8_mask(rtwdev, 0x67, BIT(0), 0); + rtw_write8_mask(rtwdev, 0x42, BIT(3), 0); + rtw_write8_mask(rtwdev, 0x65, BIT(7), 0); + rtw_write8_mask(rtwdev, 0x73, BIT(3), 0); +} + +static void rtw8822c_coex_cfg_rfe_type(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_rfe *coex_rfe = &coex->rfe; + struct rtw_efuse *efuse = &rtwdev->efuse; + + coex_rfe->rfe_module_type = rtwdev->efuse.rfe_option; + coex_rfe->ant_switch_polarity = 0; + coex_rfe->ant_switch_exist = false; + coex_rfe->ant_switch_with_bt = false; + coex_rfe->ant_switch_diversity = false; + + if (efuse->share_ant) + coex_rfe->wlg_at_btg = true; + else + coex_rfe->wlg_at_btg = false; + + /* disable LTE coex in wifi side */ + rtw_coex_write_indirect_reg(rtwdev, 0x38, BIT_LTE_COEX_EN, 0x0); + rtw_coex_write_indirect_reg(rtwdev, 0xa0, MASKLWORD, 0xffff); + rtw_coex_write_indirect_reg(rtwdev, 0xa4, MASKLWORD, 0xffff); +} + +static void rtw8822c_coex_cfg_wl_tx_power(struct rtw_dev *rtwdev, u8 wl_pwr) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_dm *coex_dm = &coex->dm; + + if (wl_pwr == coex_dm->cur_wl_pwr_lvl) + return; + + coex_dm->cur_wl_pwr_lvl = wl_pwr; +} + +static void rtw8822c_coex_cfg_wl_rx_gain(struct rtw_dev *rtwdev, bool low_gain) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_dm *coex_dm = &coex->dm; + + if (low_gain == coex_dm->cur_wl_rx_low_gain_en) + return; + + coex_dm->cur_wl_rx_low_gain_en = low_gain; + + if (coex_dm->cur_wl_rx_low_gain_en) { + /* set Rx filter corner RCK offset */ + rtw_write_rf(rtwdev, RF_PATH_A, 0xde, 0xfffff, 0x22); + rtw_write_rf(rtwdev, RF_PATH_A, 0x1d, 0xfffff, 0x36); + rtw_write_rf(rtwdev, RF_PATH_B, 0xde, 0xfffff, 0x22); + rtw_write_rf(rtwdev, RF_PATH_B, 0x1d, 0xfffff, 0x36); + } else { + /* set Rx filter corner RCK offset */ + rtw_write_rf(rtwdev, RF_PATH_A, 0xde, 0xfffff, 0x20); + rtw_write_rf(rtwdev, RF_PATH_A, 0x1d, 0xfffff, 0x0); + rtw_write_rf(rtwdev, RF_PATH_B, 0x1d, 0xfffff, 0x0); + } +} + +struct dpk_cfg_pair { + u32 addr; + u32 bitmask; + u32 data; +}; + +void rtw8822c_parse_tbl_dpk(struct rtw_dev *rtwdev, + const struct rtw_table *tbl) +{ + const struct dpk_cfg_pair *p = tbl->data; + const struct dpk_cfg_pair *end = p + tbl->size / 3; + + BUILD_BUG_ON(sizeof(struct dpk_cfg_pair) != sizeof(u32) * 3); + + for (; p < end; p++) + rtw_write32_mask(rtwdev, p->addr, p->bitmask, p->data); +} + +static void rtw8822c_dpk_set_gnt_wl(struct rtw_dev *rtwdev, bool is_before_k) +{ + struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info; + + if (is_before_k) { + dpk_info->gnt_control = rtw_read32(rtwdev, 0x70); + dpk_info->gnt_value = rtw_coex_read_indirect_reg(rtwdev, 0x38); + rtw_write32_mask(rtwdev, 0x70, BIT(26), 0x1); + rtw_coex_write_indirect_reg(rtwdev, 0x38, MASKBYTE1, 0x77); + } else { + rtw_coex_write_indirect_reg(rtwdev, 0x38, MASKDWORD, + dpk_info->gnt_value); + rtw_write32(rtwdev, 0x70,
dpk_info->gnt_control); + } +} + +static void +rtw8822c_dpk_restore_registers(struct rtw_dev *rtwdev, u32 reg_num, + struct rtw_backup_info *bckp) +{ + rtw_restore_reg(rtwdev, bckp, reg_num); + rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SUBPAGE, 0xc); + rtw_write32_mask(rtwdev, REG_RXSRAM_CTL, BIT_DPD_CLK, 0x4); +} + +static void +rtw8822c_dpk_backup_registers(struct rtw_dev *rtwdev, u32 *reg, + u32 reg_num, struct rtw_backup_info *bckp) +{ + u32 i; + + for (i = 0; i < reg_num; i++) { + bckp[i].len = 4; + bckp[i].reg = reg[i]; + bckp[i].val = rtw_read32(rtwdev, reg[i]); + } +} + +static void rtw8822c_dpk_backup_rf_registers(struct rtw_dev *rtwdev, + u32 *rf_reg, + u32 rf_reg_bak[][2]) +{ + u32 i; + + for (i = 0; i < DPK_RF_REG_NUM; i++) { + rf_reg_bak[i][RF_PATH_A] = rtw_read_rf(rtwdev, RF_PATH_A, + rf_reg[i], RFREG_MASK); + rf_reg_bak[i][RF_PATH_B] = rtw_read_rf(rtwdev, RF_PATH_B, + rf_reg[i], RFREG_MASK); + } +} + +static void rtw8822c_dpk_reload_rf_registers(struct rtw_dev *rtwdev, + u32 *rf_reg, + u32 rf_reg_bak[][2]) +{ + u32 i; + + for (i = 0; i < DPK_RF_REG_NUM; i++) { + rtw_write_rf(rtwdev, RF_PATH_A, rf_reg[i], RFREG_MASK, + rf_reg_bak[i][RF_PATH_A]); + rtw_write_rf(rtwdev, RF_PATH_B, rf_reg[i], RFREG_MASK, + rf_reg_bak[i][RF_PATH_B]); + } +} + +static void rtw8822c_dpk_information(struct rtw_dev *rtwdev) +{ + struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info; + u32 reg; + u8 band_shift; + + reg = rtw_read_rf(rtwdev, RF_PATH_A, 0x18, RFREG_MASK); + + band_shift = FIELD_GET(BIT(16), reg); + dpk_info->dpk_band = 1 << band_shift; + dpk_info->dpk_ch = FIELD_GET(0xff, reg); + dpk_info->dpk_bw = FIELD_GET(0x3000, reg); +} + +static void rtw8822c_dpk_rxbb_dc_cal(struct rtw_dev *rtwdev, u8 path) +{ + rtw_write_rf(rtwdev, path, 0x92, RFREG_MASK, 0x84800); + udelay(5); + rtw_write_rf(rtwdev, path, 0x92, RFREG_MASK, 0x84801); + usleep_range(600, 610); + rtw_write_rf(rtwdev, path, 0x92, RFREG_MASK, 0x84800); +} + +static u8 rtw8822c_dpk_dc_corr_check(struct rtw_dev *rtwdev, u8 path) +{ + u16 dc_i, dc_q; + u8 corr_val, corr_idx; + + rtw_write32(rtwdev, REG_RXSRAM_CTL, 0x000900f0); + dc_i = (u16)rtw_read32_mask(rtwdev, REG_STAT_RPT, GENMASK(27, 16)); + dc_q = (u16)rtw_read32_mask(rtwdev, REG_STAT_RPT, GENMASK(11, 0)); + + if (dc_i & BIT(11)) + dc_i = 0x1000 - dc_i; + if (dc_q & BIT(11)) + dc_q = 0x1000 - dc_q; + + rtw_write32(rtwdev, REG_RXSRAM_CTL, 0x000000f0); + corr_idx = (u8)rtw_read32_mask(rtwdev, REG_STAT_RPT, GENMASK(7, 0)); + corr_val = (u8)rtw_read32_mask(rtwdev, REG_STAT_RPT, GENMASK(15, 8)); + + if (dc_i > 200 || dc_q > 200 || corr_idx < 40 || corr_idx > 65) + return 1; + else + return 0; + +} + +static void rtw8822c_dpk_tx_pause(struct rtw_dev *rtwdev) +{ + u8 reg_a, reg_b; + u16 count = 0; + + rtw_write8(rtwdev, 0x522, 0xff); + rtw_write32_mask(rtwdev, 0x1e70, 0xf, 0x2); + + do { + reg_a = (u8)rtw_read_rf(rtwdev, RF_PATH_A, 0x00, 0xf0000); + reg_b = (u8)rtw_read_rf(rtwdev, RF_PATH_B, 0x00, 0xf0000); + udelay(2); + count++; + } while ((reg_a == 2 || reg_b == 2) && count < 2500); +} + +static void rtw8822c_dpk_mac_bb_setting(struct rtw_dev *rtwdev) +{ + rtw8822c_dpk_tx_pause(rtwdev); + rtw_load_table(rtwdev, &rtw8822c_dpk_mac_bb_tbl); +} + +static void rtw8822c_dpk_afe_setting(struct rtw_dev *rtwdev, bool is_do_dpk) +{ + if (is_do_dpk) + rtw_load_table(rtwdev, &rtw8822c_dpk_afe_is_dpk_tbl); + else + rtw_load_table(rtwdev, &rtw8822c_dpk_afe_no_dpk_tbl); +} + +static void rtw8822c_dpk_pre_setting(struct rtw_dev *rtwdev) +{ + u8 path; + + for (path = 0; path < 
rtwdev->hal.rf_path_num; path++) { + rtw_write_rf(rtwdev, path, RF_RXAGC_OFFSET, RFREG_MASK, 0x0); + rtw_write32(rtwdev, REG_NCTL0, 0x8 | (path << 1)); + if (rtwdev->dm_info.dpk_info.dpk_band == RTW_BAND_2G) + rtw_write32(rtwdev, REG_DPD_LUT3, 0x1f100000); + else + rtw_write32(rtwdev, REG_DPD_LUT3, 0x1f0d0000); + rtw_write32_mask(rtwdev, REG_DPD_LUT0, BIT_GLOSS_DB, 0x4); + rtw_write32_mask(rtwdev, REG_IQK_CTL1, BIT_TX_CFIR, 0x3); + } + rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SUBPAGE, 0xc); + rtw_write32(rtwdev, REG_DPD_CTL11, 0x3b23170b); + rtw_write32(rtwdev, REG_DPD_CTL12, 0x775f5347); +} + +static u32 rtw8822c_dpk_rf_setting(struct rtw_dev *rtwdev, u8 path) +{ + u32 ori_txbb; + + rtw_write_rf(rtwdev, path, RF_MODE_TRXAGC, RFREG_MASK, 0x50017); + ori_txbb = rtw_read_rf(rtwdev, path, RF_TX_GAIN, RFREG_MASK); + + rtw_write_rf(rtwdev, path, RF_DEBUG, BIT_DE_TX_GAIN, 0x1); + rtw_write_rf(rtwdev, path, RF_DEBUG, BIT_DE_PWR_TRIM, 0x1); + rtw_write_rf(rtwdev, path, RF_TX_GAIN_OFFSET, BIT_TX_OFFSET_VAL, 0x0); + rtw_write_rf(rtwdev, path, RF_TX_GAIN, RFREG_MASK, ori_txbb); + + if (rtwdev->dm_info.dpk_info.dpk_band == RTW_BAND_2G) { + rtw_write_rf(rtwdev, path, RF_TX_GAIN_OFFSET, BIT_LB_ATT, 0x1); + rtw_write_rf(rtwdev, path, RF_RXG_GAIN, BIT_RXG_GAIN, 0x0); + } else { + rtw_write_rf(rtwdev, path, RF_TXA_LB_SW, BIT_TXA_LB_ATT, 0x0); + rtw_write_rf(rtwdev, path, RF_TXA_LB_SW, BIT_LB_ATT, 0x6); + rtw_write_rf(rtwdev, path, RF_TXA_LB_SW, BIT_LB_SW, 0x1); + rtw_write_rf(rtwdev, path, RF_RXA_MIX_GAIN, BIT_RXA_MIX_GAIN, 0); + } + + rtw_write_rf(rtwdev, path, RF_MODE_TRXAGC, BIT_RXAGC, 0xf); + rtw_write_rf(rtwdev, path, RF_DEBUG, BIT_DE_TRXBW, 0x1); + rtw_write_rf(rtwdev, path, RF_BW_TRXBB, BIT_BW_RXBB, 0x0); + + if (rtwdev->dm_info.dpk_info.dpk_bw == DPK_CHANNEL_WIDTH_80) + rtw_write_rf(rtwdev, path, RF_BW_TRXBB, BIT_BW_TXBB, 0x2); + else + rtw_write_rf(rtwdev, path, RF_BW_TRXBB, BIT_BW_TXBB, 0x1); + + rtw_write_rf(rtwdev, path, RF_EXT_TIA_BW, BIT(1), 0x1); + + usleep_range(100, 110); + + return ori_txbb & 0x1f; +} + +static u16 rtw8822c_dpk_get_cmd(struct rtw_dev *rtwdev, u8 action, u8 path) +{ + u16 cmd; + u8 bw = rtwdev->dm_info.dpk_info.dpk_bw == DPK_CHANNEL_WIDTH_80 ? 
2 : 0; + + switch (action) { + case RTW_DPK_GAIN_LOSS: + cmd = 0x14 + path; + break; + case RTW_DPK_DO_DPK: + cmd = 0x16 + path + bw; + break; + case RTW_DPK_DPK_ON: + cmd = 0x1a + path; + break; + case RTW_DPK_DAGC: + cmd = 0x1c + path + bw; + break; + default: + return 0; + } + + return (cmd << 8) | 0x48; +} + +static u8 rtw8822c_dpk_one_shot(struct rtw_dev *rtwdev, u8 path, u8 action) +{ + u16 dpk_cmd; + u8 result = 0; + + rtw8822c_dpk_set_gnt_wl(rtwdev, true); + + if (action == RTW_DPK_CAL_PWR) { + rtw_write32_mask(rtwdev, REG_DPD_CTL0, BIT(12), 0x1); + rtw_write32_mask(rtwdev, REG_DPD_CTL0, BIT(12), 0x0); + rtw_write32_mask(rtwdev, REG_RXSRAM_CTL, BIT_RPT_SEL, 0x0); + msleep(10); + if (!check_hw_ready(rtwdev, REG_STAT_RPT, BIT(31), 0x1)) { + result = 1; + rtw_dbg(rtwdev, RTW_DBG_RFK, "[DPK] one-shot over 20ms\n"); + } + } else { + rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SUBPAGE, + 0x8 | (path << 1)); + rtw_write32_mask(rtwdev, REG_R_CONFIG, BIT_IQ_SWITCH, 0x9); + + dpk_cmd = rtw8822c_dpk_get_cmd(rtwdev, action, path); + rtw_write32(rtwdev, REG_NCTL0, dpk_cmd); + rtw_write32(rtwdev, REG_NCTL0, dpk_cmd + 1); + msleep(10); + if (!check_hw_ready(rtwdev, 0x2d9c, 0xff, 0x55)) { + result = 1; + rtw_dbg(rtwdev, RTW_DBG_RFK, "[DPK] one-shot over 20ms\n"); + } + rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SUBPAGE, + 0x8 | (path << 1)); + rtw_write32_mask(rtwdev, REG_R_CONFIG, BIT_IQ_SWITCH, 0x0); + } + + rtw8822c_dpk_set_gnt_wl(rtwdev, false); + + rtw_write8(rtwdev, 0x1b10, 0x0); + + return result; +} + +static u16 rtw8822c_dpk_dgain_read(struct rtw_dev *rtwdev, u8 path) +{ + u16 dgain; + + rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SUBPAGE, 0xc); + rtw_write32_mask(rtwdev, REG_RXSRAM_CTL, 0x00ff0000, 0x0); + + dgain = (u16)rtw_read32_mask(rtwdev, REG_STAT_RPT, GENMASK(27, 16)); + + return dgain; +} + +static u8 rtw8822c_dpk_thermal_read(struct rtw_dev *rtwdev, u8 path) +{ + rtw_write_rf(rtwdev, path, RF_T_METER, BIT(19), 0x1); + rtw_write_rf(rtwdev, path, RF_T_METER, BIT(19), 0x0); + rtw_write_rf(rtwdev, path, RF_T_METER, BIT(19), 0x1); + udelay(15); + + return (u8)rtw_read_rf(rtwdev, path, RF_T_METER, 0x0007e); +} + +static u32 rtw8822c_dpk_pas_read(struct rtw_dev *rtwdev, u8 path) +{ + u32 i_val, q_val; + + rtw_write32(rtwdev, REG_NCTL0, 0x8 | (path << 1)); + rtw_write32_mask(rtwdev, 0x1b48, BIT(14), 0x0); + rtw_write32(rtwdev, REG_RXSRAM_CTL, 0x00060001); + rtw_write32(rtwdev, 0x1b4c, 0x00000000); + rtw_write32(rtwdev, 0x1b4c, 0x00080000); + + q_val = rtw_read32_mask(rtwdev, REG_STAT_RPT, MASKHWORD); + i_val = rtw_read32_mask(rtwdev, REG_STAT_RPT, MASKLWORD); + + if (i_val & BIT(15)) + i_val = 0x10000 - i_val; + if (q_val & BIT(15)) + q_val = 0x10000 - q_val; + + rtw_write32(rtwdev, 0x1b4c, 0x00000000); + + return i_val * i_val + q_val * q_val; +} + +static u32 rtw8822c_psd_log2base(u32 val) +{ + u32 tmp, val_integerd_b, tindex; + u32 result, val_fractiond_b; + u32 table_fraction[21] = {0, 432, 332, 274, 232, 200, 174, + 151, 132, 115, 100, 86, 74, 62, 51, + 42, 32, 23, 15, 7, 0}; + + if (val == 0) + return 0; + + val_integerd_b = __fls(val) + 1; + + tmp = (val * 100) / (1 << val_integerd_b); + tindex = tmp / 5; + + if (tindex >= ARRAY_SIZE(table_fraction)) + tindex = ARRAY_SIZE(table_fraction) - 1; + + val_fractiond_b = table_fraction[tindex]; + + result = val_integerd_b * 100 - val_fractiond_b; + + return result; +} + +static u8 rtw8822c_dpk_gainloss_result(struct rtw_dev *rtwdev, u8 path) +{ + u8 result; + + rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SUBPAGE, 0x8 | (path << 1)); + 
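/* Editor's note, added in editing and not part of the original patch: + * rtw8822c_psd_log2base() above returns approximately 100 * log2(val): + * __fls() supplies the integer part and table_fraction[] a fractional + * correction. Worked example: val = 48 gives __fls(48) = 5, so + * val_integerd_b = 6; tmp = (48 * 100) / 64 = 75; tindex = 75 / 5 = 15; + * table_fraction[15] = 42; result = 600 - 42 = 558, against the exact + * 100 * log2(48) = 558.5. rtw8822c_dpk_agc_loss_chk() below uses this + * scaling to convert the measured PA-sample power into a dB-style + * gain-loss figure. + */ + 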
rtw_write32_mask(rtwdev, 0x1b48, BIT(14), 0x1); + rtw_write32(rtwdev, REG_RXSRAM_CTL, 0x00060000); + + result = (u8)rtw_read32_mask(rtwdev, REG_STAT_RPT, 0x000000f0); + + rtw_write32_mask(rtwdev, 0x1b48, BIT(14), 0x0); + + return result; +} + +static u8 rtw8822c_dpk_agc_gain_chk(struct rtw_dev *rtwdev, u8 path, + u8 limited_pga) +{ + u8 result = 0; + u16 dgain; + + rtw8822c_dpk_one_shot(rtwdev, path, RTW_DPK_DAGC); + dgain = rtw8822c_dpk_dgain_read(rtwdev, path); + + if (dgain > 1535 && !limited_pga) + return RTW_DPK_GAIN_LESS; + else if (dgain < 768 && !limited_pga) + return RTW_DPK_GAIN_LARGE; + else + return result; +} + +static u8 rtw8822c_dpk_agc_loss_chk(struct rtw_dev *rtwdev, u8 path) +{ + u32 loss, loss_db; + + loss = rtw8822c_dpk_pas_read(rtwdev, path); + if (loss < 0x4000000) + return RTW_DPK_GL_LESS; + loss_db = 3 * rtw8822c_psd_log2base(loss >> 13) - 3870; + + if (loss_db > 1000) + return RTW_DPK_GL_LARGE; + else if (loss_db < 250) + return RTW_DPK_GL_LESS; + else + return RTW_DPK_AGC_OUT; +} + +struct rtw8822c_dpk_data { + u8 txbb; + u8 pga; + u8 limited_pga; + u8 agc_cnt; + bool loss_only; + bool gain_only; + u8 path; +}; + +static u8 rtw8822c_gain_check_state(struct rtw_dev *rtwdev, + struct rtw8822c_dpk_data *data) +{ + u8 state; + + data->txbb = (u8)rtw_read_rf(rtwdev, data->path, RF_TX_GAIN, + BIT_GAIN_TXBB); + data->pga = (u8)rtw_read_rf(rtwdev, data->path, RF_MODE_TRXAGC, + BIT_RXAGC); + + if (data->loss_only) { + state = RTW_DPK_LOSS_CHECK; + goto check_end; + } + + state = rtw8822c_dpk_agc_gain_chk(rtwdev, data->path, + data->limited_pga); + if (state == RTW_DPK_GAIN_CHECK && data->gain_only) + state = RTW_DPK_AGC_OUT; + else if (state == RTW_DPK_GAIN_CHECK) + state = RTW_DPK_LOSS_CHECK; + +check_end: + data->agc_cnt++; + if (data->agc_cnt >= 6) + state = RTW_DPK_AGC_OUT; + + return state; +} + +static u8 rtw8822c_gain_large_state(struct rtw_dev *rtwdev, + struct rtw8822c_dpk_data *data) +{ + u8 pga = data->pga; + + if (pga > 0xe) + rtw_write_rf(rtwdev, data->path, RF_MODE_TRXAGC, BIT_RXAGC, 0xc); + else if (pga > 0xb && pga < 0xf) + rtw_write_rf(rtwdev, data->path, RF_MODE_TRXAGC, BIT_RXAGC, 0x0); + else if (pga < 0xc) + data->limited_pga = 1; + + return RTW_DPK_GAIN_CHECK; +} + +static u8 rtw8822c_gain_less_state(struct rtw_dev *rtwdev, + struct rtw8822c_dpk_data *data) +{ + u8 pga = data->pga; + + if (pga < 0xc) + rtw_write_rf(rtwdev, data->path, RF_MODE_TRXAGC, BIT_RXAGC, 0xc); + else if (pga > 0xb && pga < 0xf) + rtw_write_rf(rtwdev, data->path, RF_MODE_TRXAGC, BIT_RXAGC, 0xf); + else if (pga > 0xe) + data->limited_pga = 1; + + return RTW_DPK_GAIN_CHECK; +} + +static u8 rtw8822c_gl_state(struct rtw_dev *rtwdev, + struct rtw8822c_dpk_data *data, u8 is_large) +{ + u8 txbb_bound[] = {0x1f, 0}; + + if (data->txbb == txbb_bound[is_large]) + return RTW_DPK_AGC_OUT; + + if (is_large == 1) + data->txbb -= 2; + else + data->txbb += 3; + + rtw_write_rf(rtwdev, data->path, RF_TX_GAIN, BIT_GAIN_TXBB, data->txbb); + data->limited_pga = 0; + + return RTW_DPK_GAIN_CHECK; +} + +static u8 rtw8822c_gl_large_state(struct rtw_dev *rtwdev, + struct rtw8822c_dpk_data *data) +{ + return rtw8822c_gl_state(rtwdev, data, 1); +} + +static u8 rtw8822c_gl_less_state(struct rtw_dev *rtwdev, + struct rtw8822c_dpk_data *data) +{ + return rtw8822c_gl_state(rtwdev, data, 0); +} + +static u8 rtw8822c_loss_check_state(struct rtw_dev *rtwdev, + struct rtw8822c_dpk_data *data) +{ + u8 path = data->path; + u8 state; + + rtw8822c_dpk_one_shot(rtwdev, path, RTW_DPK_GAIN_LOSS); + state = 
rtw8822c_dpk_agc_loss_chk(rtwdev, path); + + return state; +} + +static u8 (*dpk_state[])(struct rtw_dev *rtwdev, + struct rtw8822c_dpk_data *data) = { + rtw8822c_gain_check_state, rtw8822c_gain_large_state, + rtw8822c_gain_less_state, rtw8822c_gl_large_state, + rtw8822c_gl_less_state, rtw8822c_loss_check_state }; + +static u8 rtw8822c_dpk_pas_agc(struct rtw_dev *rtwdev, u8 path, + bool gain_only, bool loss_only) +{ + struct rtw8822c_dpk_data data = {0}; + u8 (*func)(struct rtw_dev *rtwdev, struct rtw8822c_dpk_data *data); + u8 state = RTW_DPK_GAIN_CHECK; + + data.loss_only = loss_only; + data.gain_only = gain_only; + data.path = path; + + for (;;) { + func = dpk_state[state]; + state = func(rtwdev, &data); + if (state == RTW_DPK_AGC_OUT) + break; + } + + return data.txbb; +} + +static bool rtw8822c_dpk_coef_iq_check(struct rtw_dev *rtwdev, + u16 coef_i, u16 coef_q) +{ + if (coef_i == 0x1000 || coef_i == 0x0fff || + coef_q == 0x1000 || coef_q == 0x0fff) + return 1; + else + return 0; +} + +static u32 rtw8822c_dpk_coef_transfer(struct rtw_dev *rtwdev) +{ + u32 reg = 0; + u16 coef_i = 0, coef_q = 0; + + reg = rtw_read32(rtwdev, REG_STAT_RPT); + + coef_i = (u16)rtw_read32_mask(rtwdev, REG_STAT_RPT, MASKHWORD) & 0x1fff; + coef_q = (u16)rtw_read32_mask(rtwdev, REG_STAT_RPT, MASKLWORD) & 0x1fff; + + coef_q = ((0x2000 - coef_q) & 0x1fff) - 1; + + reg = (coef_i << 16) | coef_q; + + return reg; +} + +static const u32 rtw8822c_dpk_get_coef_tbl[] = { + 0x000400f0, 0x040400f0, 0x080400f0, 0x010400f0, 0x050400f0, + 0x090400f0, 0x020400f0, 0x060400f0, 0x0a0400f0, 0x030400f0, + 0x070400f0, 0x0b0400f0, 0x0c0400f0, 0x100400f0, 0x0d0400f0, + 0x110400f0, 0x0e0400f0, 0x120400f0, 0x0f0400f0, 0x130400f0, +}; + +static void rtw8822c_dpk_coef_tbl_apply(struct rtw_dev *rtwdev, u8 path) +{ + struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info; + int i; + + for (i = 0; i < 20; i++) { + rtw_write32(rtwdev, REG_RXSRAM_CTL, + rtw8822c_dpk_get_coef_tbl[i]); + dpk_info->coef[path][i] = rtw8822c_dpk_coef_transfer(rtwdev); + } +} + +static void rtw8822c_dpk_get_coef(struct rtw_dev *rtwdev, u8 path) +{ + rtw_write32(rtwdev, REG_NCTL0, 0x0000000c); + + if (path == RF_PATH_A) { + rtw_write32_mask(rtwdev, REG_DPD_CTL0, BIT(24), 0x0); + rtw_write32(rtwdev, REG_DPD_CTL0_S0, 0x30000080); + } else if (path == RF_PATH_B) { + rtw_write32_mask(rtwdev, REG_DPD_CTL0, BIT(24), 0x1); + rtw_write32(rtwdev, REG_DPD_CTL0_S1, 0x30000080); + } + + rtw8822c_dpk_coef_tbl_apply(rtwdev, path); +} + +static u8 rtw8822c_dpk_coef_read(struct rtw_dev *rtwdev, u8 path) +{ + struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info; + u8 addr, result = 1; + u16 coef_i, coef_q; + + for (addr = 0; addr < 20; addr++) { + coef_i = FIELD_GET(0x1fff0000, dpk_info->coef[path][addr]); + coef_q = FIELD_GET(0x1fff, dpk_info->coef[path][addr]); + + if (rtw8822c_dpk_coef_iq_check(rtwdev, coef_i, coef_q)) { + result = 0; + break; + } + } + return result; +} + +static void rtw8822c_dpk_coef_write(struct rtw_dev *rtwdev, u8 path, u8 result) +{ + struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info; + u16 reg[DPK_RF_PATH_NUM] = {0x1b0c, 0x1b64}; + u32 coef; + u8 addr; + + rtw_write32(rtwdev, REG_NCTL0, 0x0000000c); + rtw_write32(rtwdev, REG_RXSRAM_CTL, 0x000000f0); + + for (addr = 0; addr < 20; addr++) { + if (result == 0) { + if (addr == 3) + coef = 0x04001fff; + else + coef = 0x00001fff; + } else { + coef = dpk_info->coef[path][addr]; + } + rtw_write32(rtwdev, reg[path] + addr * 4, coef); + } +} + +static void rtw8822c_dpk_fill_result(struct rtw_dev 
*rtwdev, u32 dpk_txagc, + u8 path, u8 result) +{ + struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info; + + rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SUBPAGE, 0x8 | (path << 1)); + + if (result) + rtw_write8(rtwdev, REG_DPD_AGC, (u8)(dpk_txagc - 6)); + else + rtw_write8(rtwdev, REG_DPD_AGC, 0x00); + + dpk_info->result[path] = result; + dpk_info->dpk_txagc[path] = rtw_read8(rtwdev, REG_DPD_AGC); + + rtw8822c_dpk_coef_write(rtwdev, path, result); +} + +static u32 rtw8822c_dpk_gainloss(struct rtw_dev *rtwdev, u8 path) +{ + struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info; + u8 tx_agc, tx_bb, ori_txbb, ori_txagc, tx_agc_search, t1, t2; + + ori_txbb = rtw8822c_dpk_rf_setting(rtwdev, path); + ori_txagc = (u8)rtw_read_rf(rtwdev, path, RF_MODE_TRXAGC, BIT_TXAGC); + + rtw8822c_dpk_rxbb_dc_cal(rtwdev, path); + rtw8822c_dpk_one_shot(rtwdev, path, RTW_DPK_DAGC); + rtw8822c_dpk_dgain_read(rtwdev, path); + + if (rtw8822c_dpk_dc_corr_check(rtwdev, path)) { + rtw8822c_dpk_rxbb_dc_cal(rtwdev, path); + rtw8822c_dpk_one_shot(rtwdev, path, RTW_DPK_DAGC); + rtw8822c_dpk_dc_corr_check(rtwdev, path); + } + + t1 = rtw8822c_dpk_thermal_read(rtwdev, path); + tx_bb = rtw8822c_dpk_pas_agc(rtwdev, path, false, true); + tx_agc_search = rtw8822c_dpk_gainloss_result(rtwdev, path); + + if (tx_bb < tx_agc_search) + tx_bb = 0; + else + tx_bb = tx_bb - tx_agc_search; + + rtw_write_rf(rtwdev, path, RF_TX_GAIN, BIT_GAIN_TXBB, tx_bb); + + tx_agc = ori_txagc - (ori_txbb - tx_bb); + + t2 = rtw8822c_dpk_thermal_read(rtwdev, path); + + dpk_info->thermal_dpk_delta[path] = abs(t2 - t1); + + return tx_agc; +} + +static u8 rtw8822c_dpk_by_path(struct rtw_dev *rtwdev, u32 tx_agc, u8 path) +{ + u8 result; + + result = rtw8822c_dpk_one_shot(rtwdev, path, RTW_DPK_DO_DPK); + + rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SUBPAGE, 0x8 | (path << 1)); + + result = result | (u8)rtw_read32_mask(rtwdev, REG_DPD_CTL1_S0, BIT(26)); + + rtw_write_rf(rtwdev, path, RF_MODE_TRXAGC, RFREG_MASK, 0x33e14); + + rtw8822c_dpk_get_coef(rtwdev, path); + + return result; +} + +static void rtw8822c_dpk_cal_gs(struct rtw_dev *rtwdev, u8 path) +{ + struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info; + u32 tmp_gs = 0; + + rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SUBPAGE, 0x8 | (path << 1)); + rtw_write32_mask(rtwdev, REG_IQK_CTL1, BIT_BYPASS_DPD, 0x0); + rtw_write32_mask(rtwdev, REG_IQK_CTL1, BIT_TX_CFIR, 0x0); + rtw_write32_mask(rtwdev, REG_R_CONFIG, BIT_IQ_SWITCH, 0x9); + rtw_write32_mask(rtwdev, REG_R_CONFIG, BIT_INNER_LB, 0x1); + rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SUBPAGE, 0xc); + rtw_write32_mask(rtwdev, REG_RXSRAM_CTL, BIT_DPD_CLK, 0xf); + + if (path == RF_PATH_A) { + rtw_write32_mask(rtwdev, REG_DPD_CTL0_S0, BIT_GS_PWSF, + 0x1066680); + rtw_write32_mask(rtwdev, REG_DPD_CTL1_S0, BIT_DPD_EN, 0x1); + } else { + rtw_write32_mask(rtwdev, REG_DPD_CTL0_S1, BIT_GS_PWSF, + 0x1066680); + rtw_write32_mask(rtwdev, REG_DPD_CTL1_S1, BIT_DPD_EN, 0x1); + } + + if (dpk_info->dpk_bw == DPK_CHANNEL_WIDTH_80) { + rtw_write32(rtwdev, REG_DPD_CTL16, 0x80001310); + rtw_write32(rtwdev, REG_DPD_CTL16, 0x00001310); + rtw_write32(rtwdev, REG_DPD_CTL16, 0x810000db); + rtw_write32(rtwdev, REG_DPD_CTL16, 0x010000db); + rtw_write32(rtwdev, REG_DPD_CTL16, 0x0000b428); + rtw_write32(rtwdev, REG_DPD_CTL15, + 0x05020000 | (BIT(path) << 28)); + } else { + rtw_write32(rtwdev, REG_DPD_CTL16, 0x8200190c); + rtw_write32(rtwdev, REG_DPD_CTL16, 0x0200190c); + rtw_write32(rtwdev, REG_DPD_CTL16, 0x8301ee14); + rtw_write32(rtwdev, REG_DPD_CTL16, 0x0301ee14); + rtw_write32(rtwdev, 
REG_DPD_CTL16, 0x0000b428); + rtw_write32(rtwdev, REG_DPD_CTL15, + 0x05020008 | (BIT(path) << 28)); + } + + rtw_write32_mask(rtwdev, REG_DPD_CTL0, MASKBYTE3, 0x8 | path); + + rtw8822c_dpk_one_shot(rtwdev, path, RTW_DPK_CAL_PWR); + + rtw_write32_mask(rtwdev, REG_DPD_CTL15, MASKBYTE3, 0x0); + rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SUBPAGE, 0x8 | (path << 1)); + rtw_write32_mask(rtwdev, REG_R_CONFIG, BIT_IQ_SWITCH, 0x0); + rtw_write32_mask(rtwdev, REG_R_CONFIG, BIT_INNER_LB, 0x0); + rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SUBPAGE, 0xc); + + if (path == RF_PATH_A) + rtw_write32_mask(rtwdev, REG_DPD_CTL0_S0, BIT_GS_PWSF, 0x5b); + else + rtw_write32_mask(rtwdev, REG_DPD_CTL0_S1, BIT_GS_PWSF, 0x5b); + + rtw_write32_mask(rtwdev, REG_RXSRAM_CTL, BIT_RPT_SEL, 0x0); + + tmp_gs = (u16)rtw_read32_mask(rtwdev, REG_STAT_RPT, BIT_RPT_DGAIN); + tmp_gs = (tmp_gs * 910) >> 10; + tmp_gs = DIV_ROUND_CLOSEST(tmp_gs, 10); + + if (path == RF_PATH_A) + rtw_write32_mask(rtwdev, REG_DPD_CTL0_S0, BIT_GS_PWSF, tmp_gs); + else + rtw_write32_mask(rtwdev, REG_DPD_CTL0_S1, BIT_GS_PWSF, tmp_gs); + + dpk_info->dpk_gs[path] = tmp_gs; +} + +void rtw8822c_dpk_cal_coef1(struct rtw_dev *rtwdev) +{ + struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info; + u32 offset[DPK_RF_PATH_NUM] = {0, 0x58}; + u32 i_scaling; + u8 path; + + rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SUBPAGE, 0x0000000c); + rtw_write32(rtwdev, REG_RXSRAM_CTL, 0x000000f0); + rtw_write32(rtwdev, REG_NCTL0, 0x00001148); + rtw_write32(rtwdev, REG_NCTL0, 0x00001149); + + check_hw_ready(rtwdev, 0x2d9c, MASKBYTE0, 0x55); + + rtw_write8(rtwdev, 0x1b10, 0x0); + rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SUBPAGE, 0x0000000c); + + for (path = 0; path < rtwdev->hal.rf_path_num; path++) { + i_scaling = 0x16c00 / dpk_info->dpk_gs[path]; + + rtw_write32_mask(rtwdev, 0x1b18 + offset[path], MASKHWORD, + i_scaling); + rtw_write32_mask(rtwdev, REG_DPD_CTL0_S0 + offset[path], + GENMASK(31, 28), 0x9); + rtw_write32_mask(rtwdev, REG_DPD_CTL0_S0 + offset[path], + GENMASK(31, 28), 0x1); + rtw_write32_mask(rtwdev, REG_DPD_CTL0_S0 + offset[path], + GENMASK(31, 28), 0x0); + rtw_write32_mask(rtwdev, REG_DPD_CTL1_S0 + offset[path], + BIT(14), 0x0); + } +} + +static void rtw8822c_dpk_on(struct rtw_dev *rtwdev, u8 path) +{ + struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info; + + rtw8822c_dpk_one_shot(rtwdev, path, RTW_DPK_DPK_ON); + + rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SUBPAGE, 0x8 | (path << 1)); + rtw_write32_mask(rtwdev, REG_IQK_CTL1, BIT_TX_CFIR, 0x0); + + if (test_bit(path, dpk_info->dpk_path_ok)) + rtw8822c_dpk_cal_gs(rtwdev, path); +} + +static bool rtw8822c_dpk_check_pass(struct rtw_dev *rtwdev, bool is_fail, + u32 dpk_txagc, u8 path) +{ + bool result; + + if (!is_fail) { + if (rtw8822c_dpk_coef_read(rtwdev, path)) + result = true; + else + result = false; + } else { + result = false; + } + + rtw8822c_dpk_fill_result(rtwdev, dpk_txagc, path, result); + + return result; +} + +static void rtw8822c_dpk_result_reset(struct rtw_dev *rtwdev) +{ + struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info; + u8 path; + + for (path = 0; path < rtwdev->hal.rf_path_num; path++) { + clear_bit(path, dpk_info->dpk_path_ok); + rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SUBPAGE, + 0x8 | (path << 1)); + rtw_write32_mask(rtwdev, 0x1b58, 0x0000007f, 0x0); + + dpk_info->dpk_txagc[path] = 0; + dpk_info->result[path] = 0; + dpk_info->dpk_gs[path] = 0x5b; + dpk_info->pre_pwsf[path] = 0; + dpk_info->thermal_dpk[path] = rtw8822c_dpk_thermal_read(rtwdev, + path); + } +} + +static void 
rtw8822c_dpk_calibrate(struct rtw_dev *rtwdev, u8 path) +{ + struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info; + u32 dpk_txagc; + u8 dpk_fail; + + rtw_dbg(rtwdev, RTW_DBG_RFK, "[DPK] s%d dpk start\n", path); + + dpk_txagc = rtw8822c_dpk_gainloss(rtwdev, path); + + dpk_fail = rtw8822c_dpk_by_path(rtwdev, dpk_txagc, path); + + if (!rtw8822c_dpk_check_pass(rtwdev, dpk_fail, dpk_txagc, path)) + rtw_err(rtwdev, "failed to do dpk calibration\n"); + + rtw_dbg(rtwdev, RTW_DBG_RFK, "[DPK] s%d dpk finish\n", path); + + if (dpk_info->result[path]) + set_bit(path, dpk_info->dpk_path_ok); +} + +static void rtw8822c_dpk_path_select(struct rtw_dev *rtwdev) +{ + rtw8822c_dpk_calibrate(rtwdev, RF_PATH_A); + rtw8822c_dpk_calibrate(rtwdev, RF_PATH_B); + rtw8822c_dpk_on(rtwdev, RF_PATH_A); + rtw8822c_dpk_on(rtwdev, RF_PATH_B); + rtw8822c_dpk_cal_coef1(rtwdev); +} + +static void rtw8822c_dpk_enable_disable(struct rtw_dev *rtwdev) +{ + struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info; + u32 mask = BIT(15) | BIT(14); + + rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SUBPAGE, 0xc); + + rtw_write32_mask(rtwdev, REG_DPD_CTL1_S0, BIT_DPD_EN, + dpk_info->is_dpk_pwr_on); + rtw_write32_mask(rtwdev, REG_DPD_CTL1_S1, BIT_DPD_EN, + dpk_info->is_dpk_pwr_on); + + if (test_bit(RF_PATH_A, dpk_info->dpk_path_ok)) { + rtw_write32_mask(rtwdev, REG_DPD_CTL1_S0, mask, 0x0); + rtw_write8(rtwdev, REG_DPD_CTL0_S0, dpk_info->dpk_gs[RF_PATH_A]); + } + if (test_bit(RF_PATH_B, dpk_info->dpk_path_ok)) { + rtw_write32_mask(rtwdev, REG_DPD_CTL1_S1, mask, 0x0); + rtw_write8(rtwdev, REG_DPD_CTL0_S1, dpk_info->dpk_gs[RF_PATH_B]); + } +} + +static void rtw8822c_dpk_reload_data(struct rtw_dev *rtwdev) +{ + struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info; + u8 path; + + if (!test_bit(RF_PATH_A, dpk_info->dpk_path_ok) && + !test_bit(RF_PATH_B, dpk_info->dpk_path_ok) && + dpk_info->dpk_ch == 0) + return; + + for (path = 0; path < rtwdev->hal.rf_path_num; path++) { + rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SUBPAGE, + 0x8 | (path << 1)); + if (dpk_info->dpk_band == RTW_BAND_2G) + rtw_write32(rtwdev, REG_DPD_LUT3, 0x1f100000); + else + rtw_write32(rtwdev, REG_DPD_LUT3, 0x1f0d0000); + + rtw_write8(rtwdev, REG_DPD_AGC, dpk_info->dpk_txagc[path]); + + rtw8822c_dpk_coef_write(rtwdev, path, + test_bit(path, dpk_info->dpk_path_ok)); + + rtw8822c_dpk_one_shot(rtwdev, path, RTW_DPK_DPK_ON); + + rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SUBPAGE, 0xc); + + if (path == RF_PATH_A) + rtw_write32_mask(rtwdev, REG_DPD_CTL0_S0, BIT_GS_PWSF, + dpk_info->dpk_gs[path]); + else + rtw_write32_mask(rtwdev, REG_DPD_CTL0_S1, BIT_GS_PWSF, + dpk_info->dpk_gs[path]); + } + rtw8822c_dpk_cal_coef1(rtwdev); +} + +static bool rtw8822c_dpk_reload(struct rtw_dev *rtwdev) +{ + struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info; + u8 channel; + + dpk_info->is_reload = false; + + channel = (u8)(rtw_read_rf(rtwdev, RF_PATH_A, 0x18, RFREG_MASK) & 0xff); + + if (channel == dpk_info->dpk_ch) { + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[DPK] DPK reload for CH%d!!\n", dpk_info->dpk_ch); + rtw8822c_dpk_reload_data(rtwdev); + dpk_info->is_reload = true; + } + + return dpk_info->is_reload; +} + +static void rtw8822c_do_dpk(struct rtw_dev *rtwdev) +{ + struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info; + struct rtw_backup_info bckp[DPK_BB_REG_NUM]; + u32 rf_reg_backup[DPK_RF_REG_NUM][DPK_RF_PATH_NUM]; + u32 bb_reg[DPK_BB_REG_NUM] = { + 0x520, 0x820, 0x824, 0x1c3c, 0x1d58, 0x1864, + 0x4164, 0x180c, 0x410c, 0x186c, 0x416c, + 0x1a14, 0x1e70, 0x80c, 0x1d70, 0x1e7c, 0x18a4, 
0x41a4}; + u32 rf_reg[DPK_RF_REG_NUM] = { + 0x0, 0x1a, 0x55, 0x63, 0x87, 0x8f, 0xde}; + u8 path; + + if (!dpk_info->is_dpk_pwr_on) { + rtw_dbg(rtwdev, RTW_DBG_RFK, "[DPK] Skip DPK due to DPD PWR off\n"); + return; + } else if (rtw8822c_dpk_reload(rtwdev)) { + return; + } + + for (path = RF_PATH_A; path < DPK_RF_PATH_NUM; path++) + ewma_thermal_init(&dpk_info->avg_thermal[path]); + + rtw8822c_dpk_information(rtwdev); + + rtw8822c_dpk_backup_registers(rtwdev, bb_reg, DPK_BB_REG_NUM, bckp); + rtw8822c_dpk_backup_rf_registers(rtwdev, rf_reg, rf_reg_backup); + + rtw8822c_dpk_mac_bb_setting(rtwdev); + rtw8822c_dpk_afe_setting(rtwdev, true); + rtw8822c_dpk_pre_setting(rtwdev); + rtw8822c_dpk_result_reset(rtwdev); + rtw8822c_dpk_path_select(rtwdev); + rtw8822c_dpk_afe_setting(rtwdev, false); + rtw8822c_dpk_enable_disable(rtwdev); + + rtw8822c_dpk_reload_rf_registers(rtwdev, rf_reg, rf_reg_backup); + for (path = 0; path < rtwdev->hal.rf_path_num; path++) + rtw8822c_dpk_rxbb_dc_cal(rtwdev, path); + rtw8822c_dpk_restore_registers(rtwdev, DPK_BB_REG_NUM, bckp); +} + +static void rtw8822c_phy_calibration(struct rtw_dev *rtwdev) +{ + rtw8822c_do_iqk(rtwdev); + rtw8822c_do_dpk(rtwdev); +} + +void rtw8822c_dpk_track(struct rtw_dev *rtwdev) +{ + struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info; + u8 path; + u8 thermal_value[DPK_RF_PATH_NUM] = {0}; + s8 offset[DPK_RF_PATH_NUM], delta_dpk[DPK_RF_PATH_NUM]; + + if (dpk_info->thermal_dpk[0] == 0 && dpk_info->thermal_dpk[1] == 0) + return; + + for (path = 0; path < DPK_RF_PATH_NUM; path++) { + thermal_value[path] = rtw8822c_dpk_thermal_read(rtwdev, path); + ewma_thermal_add(&dpk_info->avg_thermal[path], + thermal_value[path]); + thermal_value[path] = + ewma_thermal_read(&dpk_info->avg_thermal[path]); + delta_dpk[path] = dpk_info->thermal_dpk[path] - + thermal_value[path]; + offset[path] = delta_dpk[path] - + dpk_info->thermal_dpk_delta[path]; + offset[path] &= 0x7f; + + if (offset[path] != dpk_info->pre_pwsf[path]) { + rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SUBPAGE, + 0x8 | (path << 1)); + rtw_write32_mask(rtwdev, 0x1b58, GENMASK(6, 0), + offset[path]); + dpk_info->pre_pwsf[path] = offset[path]; + } + } +} + +static const struct rtw_phy_cck_pd_reg +rtw8822c_cck_pd_reg[RTW_CHANNEL_WIDTH_40 + 1][RTW_RF_PATH_MAX] = { + { + {0x1ac8, 0x00ff, 0x1ad0, 0x01f}, + {0x1ac8, 0xff00, 0x1ad0, 0x3e0} + }, + { + {0x1acc, 0x00ff, 0x1ad0, 0x01F00000}, + {0x1acc, 0xff00, 0x1ad0, 0x3E000000} + }, +}; + +#define RTW_CCK_PD_MAX 255 +#define RTW_CCK_CS_MAX 31 +#define RTW_CCK_CS_ERR1 27 +#define RTW_CCK_CS_ERR2 29 +static void +rtw8822c_phy_cck_pd_set_reg(struct rtw_dev *rtwdev, + s8 pd_diff, s8 cs_diff, u8 bw, u8 nrx) +{ + u32 pd, cs; + + if (WARN_ON(bw > RTW_CHANNEL_WIDTH_40 || nrx >= RTW_RF_PATH_MAX)) + return; + + pd = rtw_read32_mask(rtwdev, + rtw8822c_cck_pd_reg[bw][nrx].reg_pd, + rtw8822c_cck_pd_reg[bw][nrx].mask_pd); + cs = rtw_read32_mask(rtwdev, + rtw8822c_cck_pd_reg[bw][nrx].reg_cs, + rtw8822c_cck_pd_reg[bw][nrx].mask_cs); + pd += pd_diff; + cs += cs_diff; + if (pd > RTW_CCK_PD_MAX) + pd = RTW_CCK_PD_MAX; + if (cs == RTW_CCK_CS_ERR1 || cs == RTW_CCK_CS_ERR2) + cs++; + else if (cs > RTW_CCK_CS_MAX) + cs = RTW_CCK_CS_MAX; + rtw_write32_mask(rtwdev, + rtw8822c_cck_pd_reg[bw][nrx].reg_pd, + rtw8822c_cck_pd_reg[bw][nrx].mask_pd, + pd); + rtw_write32_mask(rtwdev, + rtw8822c_cck_pd_reg[bw][nrx].reg_cs, + rtw8822c_cck_pd_reg[bw][nrx].mask_cs, + cs); +} + +static void rtw8822c_phy_cck_pd_set(struct rtw_dev *rtwdev, u8 new_lvl) +{ + struct rtw_dm_info *dm_info = 
&rtwdev->dm_info; + s8 pd_lvl[4] = {2, 4, 6, 8}; + s8 cs_lvl[4] = {2, 2, 2, 4}; + u8 cur_lvl; + u8 nrx, bw; + + nrx = (u8)rtw_read32_mask(rtwdev, 0x1a2c, 0x60000); + bw = (u8)rtw_read32_mask(rtwdev, 0x9b0, 0xc); + + if (dm_info->cck_pd_lv[bw][nrx] == new_lvl) + return; + + cur_lvl = dm_info->cck_pd_lv[bw][nrx]; + + /* update cck pd info */ + dm_info->cck_fa_avg = CCK_FA_AVG_RESET; + + rtw8822c_phy_cck_pd_set_reg(rtwdev, + pd_lvl[new_lvl] - pd_lvl[cur_lvl], + cs_lvl[new_lvl] - cs_lvl[cur_lvl], + bw, nrx); + dm_info->cck_pd_lv[bw][nrx] = new_lvl; } static struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822c[] = { @@ -2110,6 +3437,16 @@ static struct rtw_pwr_seq_cmd trans_cardemu_to_carddis_8822c[] = { RTW_PWR_INTF_ALL_MSK, RTW_PWR_ADDR_MAC, RTW_PWR_CMD_WRITE, BIT(1), 0}, + {0x0092, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_PCI_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0x20}, + {0x0093, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_PCI_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0x04}, {0x0005, RTW_PWR_CUT_ALL_MSK, RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK, @@ -2231,9 +3568,163 @@ static struct rtw_chip_ops rtw8822c_ops = { .set_tx_power_index = rtw8822c_set_tx_power_index, .cfg_ldo25 = rtw8822c_cfg_ldo25, .false_alarm_statistics = rtw8822c_false_alarm_statistics, - .do_iqk = rtw8822c_do_iqk, + .dpk_track = rtw8822c_dpk_track, + .phy_calibration = rtw8822c_phy_calibration, + .cck_pd_set = rtw8822c_phy_cck_pd_set, + + .coex_set_init = rtw8822c_coex_cfg_init, + .coex_set_ant_switch = NULL, + .coex_set_gnt_fix = rtw8822c_coex_cfg_gnt_fix, + .coex_set_gnt_debug = rtw8822c_coex_cfg_gnt_debug, + .coex_set_rfe_type = rtw8822c_coex_cfg_rfe_type, + .coex_set_wl_tx_power = rtw8822c_coex_cfg_wl_tx_power, + .coex_set_wl_rx_gain = rtw8822c_coex_cfg_wl_rx_gain, +}; + +/* Shared-Antenna Coex Table */ +static const struct coex_table_para table_sant_8822c[] = { + {0xffffffff, 0xffffffff}, /* case-0 */ + {0x55555555, 0x55555555}, + {0x66555555, 0x66555555}, + {0xaaaaaaaa, 0xaaaaaaaa}, + {0x5a5a5a5a, 0x5a5a5a5a}, + {0xfafafafa, 0xfafafafa}, /* case-5 */ + {0x6a5a6a5a, 0xaaaaaaaa}, + {0x6a5a56aa, 0x6a5a56aa}, + {0x6a5a5a5a, 0x6a5a5a5a}, + {0x66555555, 0x5a5a5a5a}, + {0x66555555, 0x6a5a5a5a}, /* case-10 */ + {0x66555555, 0xfafafafa}, + {0x66555555, 0x6a5a5aaa}, + {0x66555555, 0x5aaa5aaa}, + {0x66555555, 0xaaaa5aaa}, + {0x66555555, 0xaaaaaaaa}, /* case-15 */ + {0xffff55ff, 0xfafafafa}, + {0xffff55ff, 0x6afa5afa}, + {0xaaffffaa, 0xfafafafa}, + {0xaa5555aa, 0x5a5a5a5a}, + {0xaa5555aa, 0x6a5a5a5a}, /* case-20 */ + {0xaa5555aa, 0xaaaaaaaa}, + {0xffffffff, 0x5a5a5a5a}, + {0xffffffff, 0x6a5a5a5a}, + {0xffffffff, 0x55555555}, + {0xffffffff, 0x6a5a5aaa}, /* case-25 */ + {0x55555555, 0x5a5a5a5a}, + {0x55555555, 0xaaaaaaaa}, + {0x55555555, 0x6a5a6a5a}, + {0x66556655, 0x66556655} +}; + +/* Non-Shared-Antenna Coex Table */ +static const struct coex_table_para table_nsant_8822c[] = { + {0xffffffff, 0xffffffff}, /* case-100 */ + {0x55555555, 0x55555555}, + {0x66555555, 0x66555555}, + {0xaaaaaaaa, 0xaaaaaaaa}, + {0x5a5a5a5a, 0x5a5a5a5a}, + {0xfafafafa, 0xfafafafa}, /* case-105 */ + {0x5afa5afa, 0x5afa5afa}, + {0x55555555, 0xfafafafa}, + {0x66555555, 0xfafafafa}, + {0x66555555, 0x5a5a5a5a}, + {0x66555555, 0x6a5a5a5a}, /* case-110 */ + {0x66555555, 0xaaaaaaaa}, + {0xffff55ff, 0xfafafafa}, + {0xffff55ff, 0x5afa5afa}, + {0xffff55ff, 0xaaaaaaaa}, + {0xaaffffaa, 0xfafafafa}, /* case-115 */ + {0xaaffffaa, 0x5afa5afa}, + {0xaaffffaa, 0xaaaaaaaa}, + {0xffffffff, 0xfafafafa}, + {0xffffffff, 0x5afa5afa}, + {0xffffffff, 
0xaaaaaaaa},/* case-120 */ + {0x55ff55ff, 0x5afa5afa}, + {0x55ff55ff, 0xaaaaaaaa}, + {0x55ff55ff, 0x55ff55ff} +}; + +/* Shared-Antenna TDMA */ +static const struct coex_tdma_para tdma_sant_8822c[] = { + { {0x00, 0x00, 0x00, 0x00, 0x00} }, /* case-0 */ + { {0x61, 0x45, 0x03, 0x11, 0x11} }, + { {0x61, 0x3a, 0x03, 0x11, 0x11} }, + { {0x61, 0x30, 0x03, 0x11, 0x11} }, + { {0x61, 0x20, 0x03, 0x11, 0x11} }, + { {0x61, 0x10, 0x03, 0x11, 0x11} }, /* case-5 */ + { {0x61, 0x45, 0x03, 0x11, 0x10} }, + { {0x61, 0x3a, 0x03, 0x11, 0x10} }, + { {0x61, 0x30, 0x03, 0x11, 0x10} }, + { {0x61, 0x20, 0x03, 0x11, 0x10} }, + { {0x61, 0x10, 0x03, 0x11, 0x10} }, /* case-10 */ + { {0x61, 0x08, 0x03, 0x11, 0x14} }, + { {0x61, 0x08, 0x03, 0x10, 0x14} }, + { {0x51, 0x08, 0x03, 0x10, 0x54} }, + { {0x51, 0x08, 0x03, 0x10, 0x55} }, + { {0x51, 0x08, 0x07, 0x10, 0x54} }, /* case-15 */ + { {0x51, 0x45, 0x03, 0x10, 0x10} }, + { {0x51, 0x3a, 0x03, 0x10, 0x50} }, + { {0x51, 0x30, 0x03, 0x10, 0x50} }, + { {0x51, 0x20, 0x03, 0x10, 0x50} }, + { {0x51, 0x10, 0x03, 0x10, 0x50} }, /* case-20 */ + { {0x51, 0x4a, 0x03, 0x10, 0x50} }, + { {0x51, 0x0c, 0x03, 0x10, 0x54} }, + { {0x55, 0x08, 0x03, 0x10, 0x54} }, + { {0x65, 0x10, 0x03, 0x11, 0x11} }, + { {0x51, 0x10, 0x03, 0x10, 0x51} }, /* case-25 */ + { {0x51, 0x08, 0x03, 0x10, 0x50} } }; +/* Non-Shared-Antenna TDMA */ +static const struct coex_tdma_para tdma_nsant_8822c[] = { + { {0x00, 0x00, 0x00, 0x00, 0x00} }, /* case-100 */ + { {0x61, 0x45, 0x03, 0x11, 0x11} }, + { {0x61, 0x3a, 0x03, 0x11, 0x11} }, + { {0x61, 0x30, 0x03, 0x11, 0x11} }, + { {0x61, 0x20, 0x03, 0x11, 0x11} }, + { {0x61, 0x10, 0x03, 0x11, 0x11} }, /* case-105 */ + { {0x61, 0x45, 0x03, 0x11, 0x10} }, + { {0x61, 0x3a, 0x03, 0x11, 0x10} }, + { {0x61, 0x30, 0x03, 0x11, 0x10} }, + { {0x61, 0x20, 0x03, 0x11, 0x10} }, + { {0x61, 0x10, 0x03, 0x11, 0x10} }, /* case-110 */ + { {0x61, 0x08, 0x03, 0x11, 0x14} }, + { {0x61, 0x08, 0x03, 0x10, 0x14} }, + { {0x51, 0x08, 0x03, 0x10, 0x54} }, + { {0x51, 0x08, 0x03, 0x10, 0x55} }, + { {0x51, 0x08, 0x07, 0x10, 0x54} }, /* case-115 */ + { {0x51, 0x45, 0x03, 0x10, 0x50} }, + { {0x51, 0x3a, 0x03, 0x10, 0x50} }, + { {0x51, 0x30, 0x03, 0x10, 0x50} }, + { {0x51, 0x20, 0x03, 0x10, 0x50} }, + { {0x51, 0x10, 0x03, 0x10, 0x50} } /* case-120 */ +}; + +/* rssi in percentage % (dbm = % - 100) */ +static const u8 wl_rssi_step_8822c[] = {60, 50, 44, 30}; +static const u8 bt_rssi_step_8822c[] = {8, 15, 20, 25}; +static const struct coex_5g_afh_map afh_5g_8822c[] = { {0, 0, 0} }; + +/* wl_tx_dec_power, bt_tx_dec_power, wl_rx_gain, bt_rx_lna_constrain */ +static const struct coex_rf_para rf_para_tx_8822c[] = { + {0, 0, false, 7}, /* for normal */ + {0, 16, false, 7}, /* for WL-CPT */ + {8, 17, true, 4}, + {7, 18, true, 4}, + {6, 19, true, 4}, + {5, 20, true, 4} +}; + +static const struct coex_rf_para rf_para_rx_8822c[] = { + {0, 0, false, 7}, /* for normal */ + {0, 16, false, 7}, /* for WL-CPT */ + {3, 24, true, 5}, + {2, 26, true, 5}, + {1, 27, true, 5}, + {0, 28, true, 5} +}; + +static_assert(ARRAY_SIZE(rf_para_tx_8822c) == ARRAY_SIZE(rf_para_rx_8822c)); + struct rtw_chip_info rtw8822c_hw_spec = { .ops = &rtw8822c_ops, .id = RTW_CHIP_TYPE_8822C, @@ -2272,6 +3763,34 @@ struct rtw_chip_info rtw8822c_hw_spec = { .rf_tbl = {&rtw8822c_rf_a_tbl, &rtw8822c_rf_b_tbl}, .rfe_defs = rtw8822c_rfe_defs, .rfe_defs_size = ARRAY_SIZE(rtw8822c_rfe_defs), + .en_dis_dpd = true, + .dpd_ratemask = DIS_DPD_RATEALL, + + .coex_para_ver = 0x19062706, + .bt_desired_ver = 0x6, + .scbd_support = true, + .new_scbd10_def = true, + 
.pstdma_type = COEX_PSTDMA_FORCE_LPSOFF, + .bt_rssi_type = COEX_BTRSSI_DBM, + .ant_isolation = 15, + .rssi_tolerance = 2, + .wl_rssi_step = wl_rssi_step_8822c, + .bt_rssi_step = bt_rssi_step_8822c, + .table_sant_num = ARRAY_SIZE(table_sant_8822c), + .table_sant = table_sant_8822c, + .table_nsant_num = ARRAY_SIZE(table_nsant_8822c), + .table_nsant = table_nsant_8822c, + .tdma_sant_num = ARRAY_SIZE(tdma_sant_8822c), + .tdma_sant = tdma_sant_8822c, + .tdma_nsant_num = ARRAY_SIZE(tdma_nsant_8822c), + .tdma_nsant = tdma_nsant_8822c, + .wl_rf_para_num = ARRAY_SIZE(rf_para_tx_8822c), + .wl_rf_para_tx = rf_para_tx_8822c, + .wl_rf_para_rx = rf_para_rx_8822c, + .bt_afh_span_bw20 = 0x24, + .bt_afh_span_bw40 = 0x36, + .afh_5g_num = ARRAY_SIZE(afh_5g_8822c), + .afh_5g = afh_5g_8822c, }; EXPORT_SYMBOL(rtw8822c_hw_spec); diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.h b/drivers/net/wireless/realtek/rtw88/rtw8822c.h index 5ee1de41504d..438db74d8e7a 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822c.h +++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.h @@ -96,6 +96,35 @@ struct rtw8822c_efuse { }; }; +enum rtw8822c_dpk_agc_phase { + RTW_DPK_GAIN_CHECK, + RTW_DPK_GAIN_LARGE, + RTW_DPK_GAIN_LESS, + RTW_DPK_GL_LARGE, + RTW_DPK_GL_LESS, + RTW_DPK_LOSS_CHECK, + RTW_DPK_AGC_OUT, +}; + +enum rtw8822c_dpk_one_shot_action { + RTW_DPK_CAL_PWR, + RTW_DPK_GAIN_LOSS, + RTW_DPK_DO_DPK, + RTW_DPK_DPK_ON, + RTW_DPK_DAGC, + RTW_DPK_ACTION_MAX +}; + +void rtw8822c_parse_tbl_dpk(struct rtw_dev *rtwdev, + const struct rtw_table *tbl); + +#define RTW_DECL_TABLE_DPK(name) \ +const struct rtw_table name ## _tbl = { \ + .data = name, \ + .size = ARRAY_SIZE(name), \ + .parse = rtw8822c_parse_tbl_dpk, \ +} + #define DACK_PATH_8822C 2 #define DACK_REG_8822C 16 #define DACK_RF_8822C 1 @@ -176,6 +205,7 @@ struct rtw8822c_efuse { #define REG_TXF7 0x1ab0 #define REG_CCK_SOURCE 0x1abc #define BIT_NBI_EN BIT(30) +#define REG_IQKSTAT 0x1b10 #define REG_TXANT 0x1c28 #define REG_ENCCK 0x1c3c #define BIT_CCK_BLK_EN BIT(1) @@ -197,6 +227,7 @@ struct rtw8822c_efuse { #define REG_OFDM_FACNT3 0x2d0c #define REG_OFDM_FACNT4 0x2d10 #define REG_OFDM_FACNT5 0x2d20 +#define REG_RPT_CIP 0x2d9c #define REG_OFDM_TXCNT 0x2de0 #define REG_ORITXCODE2 0x4100 #define REG_3WIRE2 0x410c @@ -206,4 +237,59 @@ struct rtw8822c_efuse { #define REG_DCKB_Q_0 0x41d8 #define REG_DCKB_Q_1 0x41dc +#define RF_MODE_TRXAGC 0x00 +#define RF_RXAGC_OFFSET 0x19 +#define RF_BW_TRXBB 0x1a +#define RF_TX_GAIN_OFFSET 0x55 +#define RF_TX_GAIN 0x56 +#define RF_TXA_LB_SW 0x63 +#define RF_RXG_GAIN 0x87 +#define RF_RXA_MIX_GAIN 0x8a +#define RF_EXT_TIA_BW 0x8f +#define RF_DEBUG 0xde + +#define REG_NCTL0 0x1b00 +#define REG_DPD_CTL0_S0 0x1b04 +#define REG_DPD_CTL1_S0 0x1b08 +#define REG_IQK_CTL1 0x1b20 +#define REG_DPD_LUT0 0x1b44 +#define REG_DPD_CTL0_S1 0x1b5c +#define REG_DPD_LUT3 0x1b60 +#define REG_DPD_CTL1_S1 0x1b60 +#define REG_DPD_AGC 0x1b67 +#define REG_DPD_CTL0 0x1bb4 +#define REG_R_CONFIG 0x1bcc +#define REG_RXSRAM_CTL 0x1bd4 +#define REG_DPD_CTL11 0x1be4 +#define REG_DPD_CTL12 0x1be8 +#define REG_DPD_CTL15 0x1bf4 +#define REG_DPD_CTL16 0x1bf8 +#define REG_STAT_RPT 0x1bfc + +#define BIT_EXT_TIA_BW BIT(1) +#define BIT_DE_TRXBW BIT(2) +#define BIT_DE_TX_GAIN BIT(16) +#define BIT_RXG_GAIN BIT(18) +#define BIT_DE_PWR_TRIM BIT(19) +#define BIT_INNER_LB BIT(21) +#define BIT_BYPASS_DPD BIT(25) +#define BIT_DPD_EN BIT(31) +#define BIT_SUBPAGE GENMASK(3, 0) +#define BIT_TXAGC GENMASK(4, 0) +#define BIT_GAIN_TXBB GENMASK(4, 0) +#define BIT_LB_ATT GENMASK(4, 2) +#define 
BIT_RXA_MIX_GAIN GENMASK(4, 3) +#define BIT_IQ_SWITCH GENMASK(5, 0) +#define BIT_DPD_CLK GENMASK(7, 4) +#define BIT_RXAGC GENMASK(9, 5) +#define BIT_BW_RXBB GENMASK(11, 10) +#define BIT_LB_SW GENMASK(13, 12) +#define BIT_BW_TXBB GENMASK(14, 12) +#define BIT_GLOSS_DB GENMASK(14, 12) +#define BIT_TXA_LB_ATT GENMASK(15, 14) +#define BIT_TX_OFFSET_VAL GENMASK(18, 14) +#define BIT_RPT_SEL GENMASK(20, 16) +#define BIT_GS_PWSF GENMASK(27, 0) +#define BIT_RPT_DGAIN GENMASK(27, 16) +#define BIT_TX_CFIR GENMASK(31, 30) #endif diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c index 18e609a69829..e2dd4c766077 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c @@ -4,6 +4,7 @@ #include "main.h" #include "phy.h" +#include "rtw8822c.h" #include "rtw8822c_table.h" static const u32 rtw8822c_mac[] = { @@ -13,69 +14,69 @@ RTW_DECL_TABLE_PHY_COND(rtw8822c_mac, rtw_phy_cfg_mac); static const u32 rtw8822c_agc[] = { 0x1D90, 0x300001FF, - 0x1D90, 0x300101FF, - 0x1D90, 0x300201FE, - 0x1D90, 0x300301FD, - 0x1D90, 0x300401FC, - 0x1D90, 0x300501FB, - 0x1D90, 0x300601FA, - 0x1D90, 0x300701F9, - 0x1D90, 0x300801F8, - 0x1D90, 0x300901F7, - 0x1D90, 0x300A01F6, - 0x1D90, 0x300B01F5, - 0x1D90, 0x300C01F4, - 0x1D90, 0x300D01F3, - 0x1D90, 0x300E01F2, - 0x1D90, 0x300F01F1, - 0x1D90, 0x301001F0, - 0x1D90, 0x301101EF, - 0x1D90, 0x301201EE, - 0x1D90, 0x301301ED, - 0x1D90, 0x301401EC, - 0x1D90, 0x301501EB, - 0x1D90, 0x30160192, - 0x1D90, 0x30170191, - 0x1D90, 0x30180190, - 0x1D90, 0x3019018F, - 0x1D90, 0x301A018E, - 0x1D90, 0x301B018D, - 0x1D90, 0x301C018C, - 0x1D90, 0x301D018B, - 0x1D90, 0x301E018A, - 0x1D90, 0x301F0189, - 0x1D90, 0x30200188, - 0x1D90, 0x30210187, - 0x1D90, 0x30220186, - 0x1D90, 0x30230185, - 0x1D90, 0x3024014B, - 0x1D90, 0x3025014A, - 0x1D90, 0x30260149, - 0x1D90, 0x30270148, - 0x1D90, 0x30280147, - 0x1D90, 0x30290146, - 0x1D90, 0x302A0145, - 0x1D90, 0x302B0144, - 0x1D90, 0x302C0143, - 0x1D90, 0x302D0142, - 0x1D90, 0x302E00C8, - 0x1D90, 0x302F00C7, - 0x1D90, 0x303000C6, - 0x1D90, 0x303100C5, - 0x1D90, 0x303200C4, - 0x1D90, 0x30330088, - 0x1D90, 0x30340087, - 0x1D90, 0x30350086, + 0x1D90, 0x300101FE, + 0x1D90, 0x300201FD, + 0x1D90, 0x300301FC, + 0x1D90, 0x300401FB, + 0x1D90, 0x300501FA, + 0x1D90, 0x300601F9, + 0x1D90, 0x300701F8, + 0x1D90, 0x300801F7, + 0x1D90, 0x300901F6, + 0x1D90, 0x300A01F5, + 0x1D90, 0x300B01F4, + 0x1D90, 0x300C01F3, + 0x1D90, 0x300D01F2, + 0x1D90, 0x300E01F1, + 0x1D90, 0x300F01F0, + 0x1D90, 0x301001EF, + 0x1D90, 0x301101EE, + 0x1D90, 0x301201ED, + 0x1D90, 0x301301EC, + 0x1D90, 0x301401EB, + 0x1D90, 0x301501EA, + 0x1D90, 0x301601E9, + 0x1D90, 0x301701E8, + 0x1D90, 0x301801E7, + 0x1D90, 0x301901E5, + 0x1D90, 0x301A01E4, + 0x1D90, 0x301B01C5, + 0x1D90, 0x301C01C4, + 0x1D90, 0x301D01C3, + 0x1D90, 0x301E01C2, + 0x1D90, 0x301F0188, + 0x1D90, 0x30200187, + 0x1D90, 0x30210186, + 0x1D90, 0x30220184, + 0x1D90, 0x30230183, + 0x1D90, 0x30240182, + 0x1D90, 0x30250181, + 0x1D90, 0x30260148, + 0x1D90, 0x30270147, + 0x1D90, 0x30280146, + 0x1D90, 0x30290144, + 0x1D90, 0x302A0143, + 0x1D90, 0x302B0142, + 0x1D90, 0x302C0141, + 0x1D90, 0x302D00C8, + 0x1D90, 0x302E00C7, + 0x1D90, 0x302F00C6, + 0x1D90, 0x303000C5, + 0x1D90, 0x303100C4, + 0x1D90, 0x303200C3, + 0x1D90, 0x30330048, + 0x1D90, 0x30340047, + 0x1D90, 0x30350046, 0x1D90, 0x30360045, - 0x1D90, 0x30370044, - 0x1D90, 0x30380043, + 0x1D90, 0x30370025, + 0x1D90, 0x30380024, 0x1D90, 0x30390023, 0x1D90, 0x303A0022, 0x1D90, 
0x303B0021, 0x1D90, 0x303C0020, - 0x1D90, 0x303D0002, - 0x1D90, 0x303E0001, - 0x1D90, 0x303F0000, + 0x1D90, 0x303D0003, + 0x1D90, 0x303E0002, + 0x1D90, 0x303F0001, 0x1D90, 0x304000FF, 0x1D90, 0x304100FF, 0x1D90, 0x304200FF, @@ -418,48 +419,48 @@ static const u32 rtw8822c_agc[] = { 0x1D90, 0x319301EB, 0x1D90, 0x319401EA, 0x1D90, 0x319501E9, - 0x1D90, 0x3196018F, - 0x1D90, 0x3197018E, - 0x1D90, 0x3198018D, - 0x1D90, 0x3199018C, - 0x1D90, 0x319A018B, - 0x1D90, 0x319B018A, - 0x1D90, 0x319C0189, - 0x1D90, 0x319D0188, - 0x1D90, 0x319E0187, - 0x1D90, 0x319F0186, - 0x1D90, 0x31A00185, - 0x1D90, 0x31A10184, - 0x1D90, 0x31A20183, - 0x1D90, 0x31A30182, - 0x1D90, 0x31A40149, - 0x1D90, 0x31A50148, - 0x1D90, 0x31A60147, - 0x1D90, 0x31A70146, - 0x1D90, 0x31A80145, - 0x1D90, 0x31A90144, - 0x1D90, 0x31AA0143, - 0x1D90, 0x31AB0142, - 0x1D90, 0x31AC0141, - 0x1D90, 0x31AD0140, - 0x1D90, 0x31AE00C7, - 0x1D90, 0x31AF00C6, - 0x1D90, 0x31B000C5, - 0x1D90, 0x31B100C4, - 0x1D90, 0x31B200C3, - 0x1D90, 0x31B30088, - 0x1D90, 0x31B40087, - 0x1D90, 0x31B50086, - 0x1D90, 0x31B60045, - 0x1D90, 0x31B70044, - 0x1D90, 0x31B80043, + 0x1D90, 0x319601E7, + 0x1D90, 0x319701E6, + 0x1D90, 0x319801E5, + 0x1D90, 0x319901E4, + 0x1D90, 0x319A01A8, + 0x1D90, 0x319B01A7, + 0x1D90, 0x319C01A6, + 0x1D90, 0x319D01A5, + 0x1D90, 0x319E0185, + 0x1D90, 0x319F0184, + 0x1D90, 0x31A00183, + 0x1D90, 0x31A10182, + 0x1D90, 0x31A20149, + 0x1D90, 0x31A30148, + 0x1D90, 0x31A40147, + 0x1D90, 0x31A50145, + 0x1D90, 0x31A60144, + 0x1D90, 0x31A70143, + 0x1D90, 0x31A80142, + 0x1D90, 0x31A900E6, + 0x1D90, 0x31AA00E5, + 0x1D90, 0x31AB00C9, + 0x1D90, 0x31AC00C8, + 0x1D90, 0x31AD00C7, + 0x1D90, 0x31AE00C6, + 0x1D90, 0x31AF00C5, + 0x1D90, 0x31B000C4, + 0x1D90, 0x31B100C3, + 0x1D90, 0x31B20088, + 0x1D90, 0x31B30087, + 0x1D90, 0x31B40086, + 0x1D90, 0x31B50085, + 0x1D90, 0x31B60026, + 0x1D90, 0x31B70025, + 0x1D90, 0x31B80024, 0x1D90, 0x31B90023, 0x1D90, 0x31BA0022, 0x1D90, 0x31BB0021, 0x1D90, 0x31BC0020, - 0x1D90, 0x31BD0002, - 0x1D90, 0x31BE0001, - 0x1D90, 0x31BF0000, + 0x1D90, 0x31BD0003, + 0x1D90, 0x31BE0002, + 0x1D90, 0x31BF0001, 0x1D70, 0x22222222, 0x1D70, 0x20202020, }; @@ -478,7 +479,7 @@ static const u32 rtw8822c_bb[] = { 0x814, 0x00904080, 0x818, 0xC30056F1, 0x81C, 0x00050000, - 0x820, 0x11111133, + 0x820, 0x11111111, 0x824, 0xC3C3CCC4, 0x828, 0x30FB186C, 0x82C, 0x185D6556, @@ -604,7 +605,7 @@ static const u32 rtw8822c_bb[] = { 0xA14, 0x00000000, 0xA18, 0x00000000, 0xA1C, 0x00000000, - 0xA20, 0xEB31B333, + 0xA20, 0xCB31B333, 0xA24, 0x00275485, 0xA28, 0x00166366, 0xA2C, 0x00275485, @@ -722,7 +723,7 @@ static const u32 rtw8822c_bb[] = { 0xBF0, 0x00000000, 0xBF4, 0x00000000, 0xBF8, 0x00000000, - 0xC00, 0x1C8BA0D6, + 0xC00, 0x0C8BA0D6, 0xC04, 0x00000001, 0xC08, 0x00000000, 0xC0C, 0x02F1D8B7, @@ -774,8 +775,8 @@ static const u32 rtw8822c_bb[] = { 0xCC4, 0x00200400, 0xCC8, 0x0B200400, 0xCCC, 0x00600400, - 0xCD0, 0x00000092, - 0xCD4, 0x22220000, + 0xCD0, 0x22220092, + 0xCD4, 0x22220707, 0xCD8, 0x22222222, 0xCDC, 0x22222222, 0xCE0, 0x22222222, @@ -990,7 +991,7 @@ static const u32 rtw8822c_bb[] = { 0x1C34, 0xE4E42000, 0x1C38, 0xFFA1005E, 0x1C40, 0x8F588837, - 0x1C44, 0x04400300, + 0x1C44, 0x04400700, 0x1C48, 0x00000000, 0x1C4C, 0x00000200, 0x1C50, 0x8E588837, @@ -1108,7 +1109,7 @@ static const u32 rtw8822c_bb[] = { 0x1E20, 0x00000000, 0x1E24, 0x80003000, 0x1E28, 0x000CC0C3, - 0x1E2C, 0xE4E40404, + 0x1E2C, 0xE4E40000, 0x1E30, 0xE4E4E4E4, 0x1E34, 0xF3001234, 0x1E38, 0x00000000, @@ -1124,7 +1125,7 @@ static const u32 rtw8822c_bb[] = { 0x1E60, 0x00000000, 0x1E64, 
0xF3A00001, 0x1E68, 0x0028846E, - 0x1E6C, 0x40274906, + 0x1E6C, 0x40374906, 0x1E70, 0x00001000, 0x1E74, 0x00000000, 0x1E78, 0x00000000, @@ -1485,11 +1486,11 @@ static const u32 rtw8822c_bb[] = { 0x1AD0, 0xA33529AD, 0x1AD4, 0x0D8D8452, 0x1AD8, 0x08024024, - 0x1ADC, 0x000DB001, + 0x1ADC, 0x000D0001, 0x1AE0, 0x00600391, 0x1AE4, 0x08000080, - 0x1AE8, 0x00000002, - 0x1AEC, 0x00000000, + 0x1AE8, 0xC2100002, + 0x1AEC, 0x000000F6, 0x1AF0, 0x00000000, 0x1AF4, 0x00000000, 0x1AF8, 0x00000000, @@ -1756,6 +1757,7 @@ static const u32 rtw8822c_bb[] = { 0x1D94, 0x40FF0000, 0xC0C, 0x02F1D8B7, 0x1EE8, 0x00000000, + }; RTW_DECL_TABLE_PHY_COND(rtw8822c_bb, rtw_phy_cfg_bb); @@ -1828,6 +1830,10 @@ static const u32 rtw8822c_rf_a[] = { 0x08E, 0x000A5540, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x08E, 0x000A5540, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x08E, 0x000A5540, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x08E, 0x000A5540, 0xA0000000, 0x00000000, 0x08E, 0x000A5540, 0xB0000000, 0x00000000, @@ -1846,6 +1852,10 @@ static const u32 rtw8822c_rf_a[] = { 0x085, 0x0006A06C, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x085, 0x0006A06C, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x085, 0x0006A06C, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x085, 0x0006A06C, 0xA0000000, 0x00000000, 0x085, 0x0006A06C, 0xB0000000, 0x00000000, @@ -1903,6 +1913,24 @@ static const u32 rtw8822c_rf_a[] = { 0x033, 0x00000002, 0x03F, 0x0000002A, 0x0EE, 0x00000000, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x0EE, 0x00000010, + 0x033, 0x00000001, + 0x03F, 0x0000002A, + 0x033, 0x00000001, + 0x03F, 0x0000002A, + 0x033, 0x00000002, + 0x03F, 0x0000002A, + 0x0EE, 0x00000000, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x0EE, 0x00000010, + 0x033, 0x00000001, + 0x03F, 0x0000002A, + 0x033, 0x00000001, + 0x03F, 0x0000002A, + 0x033, 0x00000002, + 0x03F, 0x0000002A, + 0x0EE, 0x00000000, 0xA0000000, 0x00000000, 0x0EE, 0x00000010, 0x033, 0x00000001, @@ -2069,6 +2097,58 @@ static const u32 rtw8822c_rf_a[] = { 0x03F, 0x00000180, 0x033, 0x00000004, 0x03F, 0x00000040, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00010000, + 0x033, 0x0000000F, + 0x03F, 0x000773C0, + 0x033, 0x0000000E, + 0x03F, 0x000FF3C0, + 0x033, 0x0000000D, + 0x03F, 0x000773E8, + 0x033, 0x0000000C, + 0x03F, 0x000FF3E8, + 0x033, 0x0000000B, + 0x03F, 0x00000287, + 0x033, 0x0000000A, + 0x03F, 0x000002A8, + 0x033, 0x00000009, + 0x03F, 0x00000207, + 0x033, 0x00000008, + 0x03F, 0x000FF280, + 0x033, 0x00000007, + 0x03F, 0x00000200, + 0x033, 0x00000006, + 0x03F, 0x000001C0, + 0x033, 0x00000005, + 0x03F, 0x00000180, + 0x033, 0x00000004, + 0x03F, 0x00000040, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00010000, + 0x033, 0x0000000F, + 0x03F, 0x000773C0, + 0x033, 0x0000000E, + 0x03F, 0x000FF3C0, + 0x033, 0x0000000D, + 0x03F, 0x000773E8, + 0x033, 0x0000000C, + 0x03F, 0x000FF3E8, + 0x033, 0x0000000B, + 0x03F, 0x00000287, + 0x033, 0x0000000A, + 0x03F, 0x000002A8, + 0x033, 0x00000009, + 0x03F, 0x00000207, + 0x033, 0x00000008, + 0x03F, 0x000FF280, + 0x033, 0x00000007, + 0x03F, 0x00000200, + 0x033, 0x00000006, + 0x03F, 0x000001C0, + 0x033, 0x00000005, + 0x03F, 0x00000180, + 0x033, 0x00000004, + 0x03F, 0x00000040, 0xA0000000, 0x00000000, 0x0EF, 0x00010000, 0x033, 0x0000000F, @@ -2248,6 +2328,56 @@ static const u32 rtw8822c_rf_a[] = { 0x03F, 0x00000180, 0x033, 0x00000014, 0x03F, 0x00000040, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x0000001F, + 0x03F, 0x000773C0, + 0x033, 0x0000001E, + 0x03F, 
0x000FF3C0, + 0x033, 0x0000001D, + 0x03F, 0x000773E8, + 0x033, 0x0000001C, + 0x03F, 0x000FF3E8, + 0x033, 0x0000001B, + 0x03F, 0x00000287, + 0x033, 0x0000001A, + 0x03F, 0x000002A8, + 0x033, 0x00000019, + 0x03F, 0x00000207, + 0x033, 0x00000018, + 0x03F, 0x000FF280, + 0x033, 0x00000017, + 0x03F, 0x00000200, + 0x033, 0x00000016, + 0x03F, 0x000001C0, + 0x033, 0x00000015, + 0x03F, 0x00000180, + 0x033, 0x00000014, + 0x03F, 0x00000040, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x0000001F, + 0x03F, 0x000773C0, + 0x033, 0x0000001E, + 0x03F, 0x000FF3C0, + 0x033, 0x0000001D, + 0x03F, 0x000773E8, + 0x033, 0x0000001C, + 0x03F, 0x000FF3E8, + 0x033, 0x0000001B, + 0x03F, 0x00000287, + 0x033, 0x0000001A, + 0x03F, 0x000002A8, + 0x033, 0x00000019, + 0x03F, 0x00000207, + 0x033, 0x00000018, + 0x03F, 0x000FF280, + 0x033, 0x00000017, + 0x03F, 0x00000200, + 0x033, 0x00000016, + 0x03F, 0x000001C0, + 0x033, 0x00000015, + 0x03F, 0x00000180, + 0x033, 0x00000014, + 0x03F, 0x00000040, 0xA0000000, 0x00000000, 0x033, 0x0000001F, 0x03F, 0x000773E8, @@ -2426,6 +2556,56 @@ static const u32 rtw8822c_rf_a[] = { 0x03F, 0x00000180, 0x033, 0x00000024, 0x03F, 0x00000040, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x0000002F, + 0x03F, 0x000773C0, + 0x033, 0x0000002E, + 0x03F, 0x000FF3C0, + 0x033, 0x0000002D, + 0x03F, 0x000773E8, + 0x033, 0x0000002C, + 0x03F, 0x000FF3E8, + 0x033, 0x0000002B, + 0x03F, 0x00000287, + 0x033, 0x0000002A, + 0x03F, 0x000002A8, + 0x033, 0x00000029, + 0x03F, 0x00000207, + 0x033, 0x00000028, + 0x03F, 0x000FF280, + 0x033, 0x00000027, + 0x03F, 0x00000200, + 0x033, 0x00000026, + 0x03F, 0x000001C0, + 0x033, 0x00000025, + 0x03F, 0x00000180, + 0x033, 0x00000024, + 0x03F, 0x00000040, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x0000002F, + 0x03F, 0x000773C0, + 0x033, 0x0000002E, + 0x03F, 0x000FF3C0, + 0x033, 0x0000002D, + 0x03F, 0x000773E8, + 0x033, 0x0000002C, + 0x03F, 0x000FF3E8, + 0x033, 0x0000002B, + 0x03F, 0x00000287, + 0x033, 0x0000002A, + 0x03F, 0x000002A8, + 0x033, 0x00000029, + 0x03F, 0x00000207, + 0x033, 0x00000028, + 0x03F, 0x000FF280, + 0x033, 0x00000027, + 0x03F, 0x00000200, + 0x033, 0x00000026, + 0x03F, 0x000001C0, + 0x033, 0x00000025, + 0x03F, 0x00000180, + 0x033, 0x00000024, + 0x03F, 0x00000040, 0xA0000000, 0x00000000, 0x033, 0x0000002F, 0x03F, 0x000773E8, @@ -2604,6 +2784,56 @@ static const u32 rtw8822c_rf_a[] = { 0x03F, 0x00000180, 0x033, 0x00000034, 0x03F, 0x00000040, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x0000003F, + 0x03F, 0x000773C0, + 0x033, 0x0000003E, + 0x03F, 0x000FF3C0, + 0x033, 0x0000003D, + 0x03F, 0x000773E8, + 0x033, 0x0000003C, + 0x03F, 0x000FF3E8, + 0x033, 0x0000003B, + 0x03F, 0x00000287, + 0x033, 0x0000003A, + 0x03F, 0x000002A8, + 0x033, 0x00000039, + 0x03F, 0x00000207, + 0x033, 0x00000038, + 0x03F, 0x000FF280, + 0x033, 0x00000037, + 0x03F, 0x00000200, + 0x033, 0x00000036, + 0x03F, 0x000001C0, + 0x033, 0x00000035, + 0x03F, 0x00000180, + 0x033, 0x00000034, + 0x03F, 0x00000040, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x0000003F, + 0x03F, 0x000773C0, + 0x033, 0x0000003E, + 0x03F, 0x000FF3C0, + 0x033, 0x0000003D, + 0x03F, 0x000773E8, + 0x033, 0x0000003C, + 0x03F, 0x000FF3E8, + 0x033, 0x0000003B, + 0x03F, 0x00000287, + 0x033, 0x0000003A, + 0x03F, 0x000002A8, + 0x033, 0x00000039, + 0x03F, 0x00000207, + 0x033, 0x00000038, + 0x03F, 0x000FF280, + 0x033, 0x00000037, + 0x03F, 0x00000200, + 0x033, 0x00000036, + 0x03F, 0x000001C0, + 0x033, 0x00000035, + 0x03F, 0x00000180, + 0x033, 0x00000034, + 0x03F, 
0x00000040, 0xA0000000, 0x00000000, 0x033, 0x0000003F, 0x03F, 0x000773E8, @@ -2782,6 +3012,56 @@ static const u32 rtw8822c_rf_a[] = { 0x03F, 0x00000180, 0x033, 0x00000044, 0x03F, 0x00000040, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x0000004F, + 0x03F, 0x000773C0, + 0x033, 0x0000004E, + 0x03F, 0x000FF3C0, + 0x033, 0x0000004D, + 0x03F, 0x000773E8, + 0x033, 0x0000004C, + 0x03F, 0x000FF3E8, + 0x033, 0x0000004B, + 0x03F, 0x00000287, + 0x033, 0x0000004A, + 0x03F, 0x000002A8, + 0x033, 0x00000049, + 0x03F, 0x00000207, + 0x033, 0x00000048, + 0x03F, 0x000FF280, + 0x033, 0x00000047, + 0x03F, 0x00000200, + 0x033, 0x00000046, + 0x03F, 0x000001C0, + 0x033, 0x00000045, + 0x03F, 0x00000180, + 0x033, 0x00000044, + 0x03F, 0x00000040, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x0000004F, + 0x03F, 0x000773C0, + 0x033, 0x0000004E, + 0x03F, 0x000FF3C0, + 0x033, 0x0000004D, + 0x03F, 0x000773E8, + 0x033, 0x0000004C, + 0x03F, 0x000FF3E8, + 0x033, 0x0000004B, + 0x03F, 0x00000287, + 0x033, 0x0000004A, + 0x03F, 0x000002A8, + 0x033, 0x00000049, + 0x03F, 0x00000207, + 0x033, 0x00000048, + 0x03F, 0x000FF280, + 0x033, 0x00000047, + 0x03F, 0x00000200, + 0x033, 0x00000046, + 0x03F, 0x000001C0, + 0x033, 0x00000045, + 0x03F, 0x00000180, + 0x033, 0x00000044, + 0x03F, 0x00000040, 0xA0000000, 0x00000000, 0x033, 0x0000004F, 0x03F, 0x000773E8, @@ -2960,6 +3240,56 @@ static const u32 rtw8822c_rf_a[] = { 0x03F, 0x00000180, 0x033, 0x00000054, 0x03F, 0x00000040, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x0000005F, + 0x03F, 0x000773C0, + 0x033, 0x0000005E, + 0x03F, 0x000FF3C0, + 0x033, 0x0000005D, + 0x03F, 0x000773E8, + 0x033, 0x0000005C, + 0x03F, 0x000FF3E8, + 0x033, 0x0000005B, + 0x03F, 0x00000287, + 0x033, 0x0000005A, + 0x03F, 0x000002A8, + 0x033, 0x00000059, + 0x03F, 0x00000207, + 0x033, 0x00000058, + 0x03F, 0x000FF280, + 0x033, 0x00000057, + 0x03F, 0x00000200, + 0x033, 0x00000056, + 0x03F, 0x000001C0, + 0x033, 0x00000055, + 0x03F, 0x00000180, + 0x033, 0x00000054, + 0x03F, 0x00000040, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x0000005F, + 0x03F, 0x000773C0, + 0x033, 0x0000005E, + 0x03F, 0x000FF3C0, + 0x033, 0x0000005D, + 0x03F, 0x000773E8, + 0x033, 0x0000005C, + 0x03F, 0x000FF3E8, + 0x033, 0x0000005B, + 0x03F, 0x00000287, + 0x033, 0x0000005A, + 0x03F, 0x000002A8, + 0x033, 0x00000059, + 0x03F, 0x00000207, + 0x033, 0x00000058, + 0x03F, 0x000FF280, + 0x033, 0x00000057, + 0x03F, 0x00000200, + 0x033, 0x00000056, + 0x03F, 0x000001C0, + 0x033, 0x00000055, + 0x03F, 0x00000180, + 0x033, 0x00000054, + 0x03F, 0x00000040, 0xA0000000, 0x00000000, 0x033, 0x0000005F, 0x03F, 0x000773E8, @@ -3000,6 +3330,10 @@ static const u32 rtw8822c_rf_a[] = { 0x0EF, 0x00000000, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x0EF, 0x00000000, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00000000, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00000000, 0xA0000000, 0x00000000, 0x0EF, 0x00000000, 0xB0000000, 0x00000000, @@ -3899,6 +4233,300 @@ static const u32 rtw8822c_rf_a[] = { 0x03E, 0x00000000, 0x03F, 0x0002C010, 0x0EF, 0x00000000, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00020000, + 0x033, 0x00000000, + 0x03E, 0x00001C86, + 0x03F, 0x00020000, + 0x033, 0x00000001, + 0x03E, 0x00001C02, + 0x03F, 0x00020000, + 0x033, 0x00000002, + 0x03E, 0x00000F02, + 0x03F, 0x00020000, + 0x033, 0x00000003, + 0x03E, 0x00000F00, + 0x03F, 0x00020000, + 0x033, 0x00000004, + 0x03E, 0x00000086, + 0x03F, 0x00020000, + 0x033, 0x00000005, + 0x03E, 0x00000002, + 
0x03F, 0x00020000, + 0x033, 0x00000006, + 0x03E, 0x00000000, + 0x03F, 0x00020000, + 0x033, 0x00000007, + 0x03E, 0x00000000, + 0x03F, 0x0002F81C, + 0x033, 0x00000008, + 0x03E, 0x00001C86, + 0x03F, 0x00020000, + 0x033, 0x00000009, + 0x03E, 0x00001C02, + 0x03F, 0x00020000, + 0x033, 0x0000000A, + 0x03E, 0x00000F02, + 0x03F, 0x00020000, + 0x033, 0x0000000B, + 0x03E, 0x00000F00, + 0x03F, 0x00020000, + 0x033, 0x0000000C, + 0x03E, 0x00000086, + 0x03F, 0x00020000, + 0x033, 0x0000000D, + 0x03E, 0x00000002, + 0x03F, 0x00020000, + 0x033, 0x0000000E, + 0x03E, 0x00000000, + 0x03F, 0x00020000, + 0x033, 0x0000000F, + 0x03E, 0x00000000, + 0x03F, 0x0002F81C, + 0x033, 0x00000010, + 0x03E, 0x00001C86, + 0x03F, 0x00020000, + 0x033, 0x00000011, + 0x03E, 0x00001C02, + 0x03F, 0x00020000, + 0x033, 0x00000012, + 0x03E, 0x00000F02, + 0x03F, 0x00020000, + 0x033, 0x00000013, + 0x03E, 0x00000F00, + 0x03F, 0x00020000, + 0x033, 0x00000014, + 0x03E, 0x00000086, + 0x03F, 0x00020000, + 0x033, 0x00000015, + 0x03E, 0x00000002, + 0x03F, 0x00020000, + 0x033, 0x00000016, + 0x03E, 0x00000000, + 0x03F, 0x00020000, + 0x033, 0x00000017, + 0x03E, 0x00000000, + 0x03F, 0x0002C010, + 0x033, 0x00000018, + 0x03E, 0x00001C86, + 0x03F, 0x00020000, + 0x033, 0x00000019, + 0x03E, 0x00001C02, + 0x03F, 0x00020000, + 0x033, 0x0000001A, + 0x03E, 0x00000F02, + 0x03F, 0x00020000, + 0x033, 0x0000001B, + 0x03E, 0x00000F00, + 0x03F, 0x00020000, + 0x033, 0x0000001C, + 0x03E, 0x00000086, + 0x03F, 0x00020000, + 0x033, 0x0000001D, + 0x03E, 0x00000002, + 0x03F, 0x00020000, + 0x033, 0x0000001E, + 0x03E, 0x00000000, + 0x03F, 0x00020000, + 0x033, 0x0000001F, + 0x03E, 0x00000000, + 0x03F, 0x0002C010, + 0x033, 0x00000020, + 0x03E, 0x00001C86, + 0x03F, 0x00020000, + 0x033, 0x00000021, + 0x03E, 0x00001C02, + 0x03F, 0x00020000, + 0x033, 0x00000022, + 0x03E, 0x00000F02, + 0x03F, 0x00020000, + 0x033, 0x00000023, + 0x03E, 0x00000F00, + 0x03F, 0x00020000, + 0x033, 0x00000024, + 0x03E, 0x00000086, + 0x03F, 0x00020000, + 0x033, 0x00000025, + 0x03E, 0x00000002, + 0x03F, 0x00020000, + 0x033, 0x00000026, + 0x03E, 0x00000000, + 0x03F, 0x00020000, + 0x033, 0x00000027, + 0x03E, 0x00000000, + 0x03F, 0x0002C010, + 0x033, 0x00000028, + 0x03E, 0x00001C86, + 0x03F, 0x00020000, + 0x033, 0x00000029, + 0x03E, 0x00001C02, + 0x03F, 0x00020000, + 0x033, 0x0000002A, + 0x03E, 0x00000F02, + 0x03F, 0x00020000, + 0x033, 0x0000002B, + 0x03E, 0x00000F00, + 0x03F, 0x00020000, + 0x033, 0x0000002C, + 0x03E, 0x00000086, + 0x03F, 0x00020000, + 0x033, 0x0000002D, + 0x03E, 0x00000002, + 0x03F, 0x00020000, + 0x033, 0x0000002E, + 0x03E, 0x00000000, + 0x03F, 0x00020000, + 0x033, 0x0000002F, + 0x03E, 0x00000000, + 0x03F, 0x0002C010, + 0x0EF, 0x00000000, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00020000, + 0x033, 0x00000000, + 0x03E, 0x00001C86, + 0x03F, 0x00020000, + 0x033, 0x00000001, + 0x03E, 0x00001C02, + 0x03F, 0x00020000, + 0x033, 0x00000002, + 0x03E, 0x00000F02, + 0x03F, 0x00020000, + 0x033, 0x00000003, + 0x03E, 0x00000F00, + 0x03F, 0x00020000, + 0x033, 0x00000004, + 0x03E, 0x00000086, + 0x03F, 0x00020000, + 0x033, 0x00000005, + 0x03E, 0x00000002, + 0x03F, 0x00020000, + 0x033, 0x00000006, + 0x03E, 0x00000000, + 0x03F, 0x00020000, + 0x033, 0x00000007, + 0x03E, 0x00000000, + 0x03F, 0x0002F81C, + 0x033, 0x00000008, + 0x03E, 0x00001C86, + 0x03F, 0x00020000, + 0x033, 0x00000009, + 0x03E, 0x00001C02, + 0x03F, 0x00020000, + 0x033, 0x0000000A, + 0x03E, 0x00000F02, + 0x03F, 0x00020000, + 0x033, 0x0000000B, + 0x03E, 0x00000F00, + 0x03F, 0x00020000, + 0x033, 0x0000000C, + 0x03E, 0x00000086, 
+ 0x03F, 0x00020000, + 0x033, 0x0000000D, + 0x03E, 0x00000002, + 0x03F, 0x00020000, + 0x033, 0x0000000E, + 0x03E, 0x00000000, + 0x03F, 0x00020000, + 0x033, 0x0000000F, + 0x03E, 0x00000000, + 0x03F, 0x0002F81C, + 0x033, 0x00000010, + 0x03E, 0x00001C86, + 0x03F, 0x00020000, + 0x033, 0x00000011, + 0x03E, 0x00001C02, + 0x03F, 0x00020000, + 0x033, 0x00000012, + 0x03E, 0x00000F02, + 0x03F, 0x00020000, + 0x033, 0x00000013, + 0x03E, 0x00000F00, + 0x03F, 0x00020000, + 0x033, 0x00000014, + 0x03E, 0x00000086, + 0x03F, 0x00020000, + 0x033, 0x00000015, + 0x03E, 0x00000002, + 0x03F, 0x00020000, + 0x033, 0x00000016, + 0x03E, 0x00000000, + 0x03F, 0x00020000, + 0x033, 0x00000017, + 0x03E, 0x00000000, + 0x03F, 0x0002C010, + 0x033, 0x00000018, + 0x03E, 0x00001C86, + 0x03F, 0x00020000, + 0x033, 0x00000019, + 0x03E, 0x00001C02, + 0x03F, 0x00020000, + 0x033, 0x0000001A, + 0x03E, 0x00000F02, + 0x03F, 0x00020000, + 0x033, 0x0000001B, + 0x03E, 0x00000F00, + 0x03F, 0x00020000, + 0x033, 0x0000001C, + 0x03E, 0x00000086, + 0x03F, 0x00020000, + 0x033, 0x0000001D, + 0x03E, 0x00000002, + 0x03F, 0x00020000, + 0x033, 0x0000001E, + 0x03E, 0x00000000, + 0x03F, 0x00020000, + 0x033, 0x0000001F, + 0x03E, 0x00000000, + 0x03F, 0x0002C010, + 0x033, 0x00000020, + 0x03E, 0x00001C86, + 0x03F, 0x00020000, + 0x033, 0x00000021, + 0x03E, 0x00001C02, + 0x03F, 0x00020000, + 0x033, 0x00000022, + 0x03E, 0x00000F02, + 0x03F, 0x00020000, + 0x033, 0x00000023, + 0x03E, 0x00000F00, + 0x03F, 0x00020000, + 0x033, 0x00000024, + 0x03E, 0x00000086, + 0x03F, 0x00020000, + 0x033, 0x00000025, + 0x03E, 0x00000002, + 0x03F, 0x00020000, + 0x033, 0x00000026, + 0x03E, 0x00000000, + 0x03F, 0x00020000, + 0x033, 0x00000027, + 0x03E, 0x00000000, + 0x03F, 0x0002C010, + 0x033, 0x00000028, + 0x03E, 0x00001C86, + 0x03F, 0x00020000, + 0x033, 0x00000029, + 0x03E, 0x00001C02, + 0x03F, 0x00020000, + 0x033, 0x0000002A, + 0x03E, 0x00000F02, + 0x03F, 0x00020000, + 0x033, 0x0000002B, + 0x03E, 0x00000F00, + 0x03F, 0x00020000, + 0x033, 0x0000002C, + 0x03E, 0x00000086, + 0x03F, 0x00020000, + 0x033, 0x0000002D, + 0x03E, 0x00000002, + 0x03F, 0x00020000, + 0x033, 0x0000002E, + 0x03E, 0x00000000, + 0x03F, 0x00020000, + 0x033, 0x0000002F, + 0x03E, 0x00000000, + 0x03F, 0x0002C010, + 0x0EF, 0x00000000, 0xA0000000, 0x00000000, 0x0EF, 0x00020000, 0x033, 0x00000000, @@ -4063,6 +4691,10 @@ static const u32 rtw8822c_rf_a[] = { 0x063, 0x00000002, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x063, 0x00000002, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x00000002, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x00000002, 0xA0000000, 0x00000000, 0x063, 0x00000C02, 0xB0000000, 0x00000000, @@ -4176,59 +4808,113 @@ static const u32 rtw8822c_rf_a[] = { 0x030, 0x00018207, 0x030, 0x00019237, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x030, 0x00000237, - 0x030, 0x00001237, - 0x030, 0x00002237, - 0x030, 0x00003237, - 0x030, 0x00004207, - 0x030, 0x00005237, - 0x030, 0x00006237, - 0x030, 0x00007237, - 0x030, 0x00008207, - 0x030, 0x00009237, - 0x030, 0x0000A237, - 0x030, 0x0000B237, - 0x030, 0x0000C237, - 0x030, 0x0000D237, - 0x030, 0x0000E207, - 0x030, 0x0000F237, - 0x030, 0x00010237, - 0x030, 0x00011237, - 0x030, 0x00012207, - 0x030, 0x00013237, - 0x030, 0x00014237, - 0x030, 0x00015237, - 0x030, 0x00016207, - 0x030, 0x00017237, - 0x030, 0x00018207, - 0x030, 0x00019237, + 0x030, 0x00000238, + 0x030, 0x00001238, + 0x030, 0x00002238, + 0x030, 0x00003238, + 0x030, 0x00004228, + 0x030, 0x00005238, + 0x030, 0x00006238, + 0x030, 0x00007238, + 0x030, 0x00008228, + 
0x030, 0x00009238, + 0x030, 0x0000A238, + 0x030, 0x0000B238, + 0x030, 0x0000C238, + 0x030, 0x0000D238, + 0x030, 0x0000E228, + 0x030, 0x0000F238, + 0x030, 0x00010238, + 0x030, 0x00011238, + 0x030, 0x00012228, + 0x030, 0x00013238, + 0x030, 0x00014238, + 0x030, 0x00015238, + 0x030, 0x00016228, + 0x030, 0x00017238, + 0x030, 0x00018228, + 0x030, 0x00019238, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x030, 0x00000237, - 0x030, 0x00001237, - 0x030, 0x00002237, - 0x030, 0x00003237, - 0x030, 0x00004207, - 0x030, 0x00005237, - 0x030, 0x00006237, - 0x030, 0x00007237, - 0x030, 0x00008207, - 0x030, 0x00009237, - 0x030, 0x0000A237, - 0x030, 0x0000B237, - 0x030, 0x0000C237, - 0x030, 0x0000D237, - 0x030, 0x0000E207, - 0x030, 0x0000F237, - 0x030, 0x00010237, - 0x030, 0x00011237, - 0x030, 0x00012207, - 0x030, 0x00013237, - 0x030, 0x00014237, - 0x030, 0x00015237, - 0x030, 0x00016207, - 0x030, 0x00017237, - 0x030, 0x00018207, - 0x030, 0x00019237, + 0x030, 0x00000238, + 0x030, 0x00001238, + 0x030, 0x00002238, + 0x030, 0x00003238, + 0x030, 0x00004228, + 0x030, 0x00005238, + 0x030, 0x00006238, + 0x030, 0x00007238, + 0x030, 0x00008228, + 0x030, 0x00009238, + 0x030, 0x0000A238, + 0x030, 0x0000B238, + 0x030, 0x0000C238, + 0x030, 0x0000D238, + 0x030, 0x0000E228, + 0x030, 0x0000F238, + 0x030, 0x00010238, + 0x030, 0x00011238, + 0x030, 0x00012228, + 0x030, 0x00013238, + 0x030, 0x00014238, + 0x030, 0x00015238, + 0x030, 0x00016228, + 0x030, 0x00017238, + 0x030, 0x00018228, + 0x030, 0x00019238, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x030, 0x00000239, + 0x030, 0x00001239, + 0x030, 0x00002239, + 0x030, 0x00003239, + 0x030, 0x00004239, + 0x030, 0x00005239, + 0x030, 0x00006239, + 0x030, 0x00007239, + 0x030, 0x00008239, + 0x030, 0x00009239, + 0x030, 0x0000A239, + 0x030, 0x0000B239, + 0x030, 0x0000C239, + 0x030, 0x0000D239, + 0x030, 0x0000E209, + 0x030, 0x0000F239, + 0x030, 0x00010239, + 0x030, 0x00011239, + 0x030, 0x00012209, + 0x030, 0x00013239, + 0x030, 0x00014239, + 0x030, 0x00015239, + 0x030, 0x00016209, + 0x030, 0x00017239, + 0x030, 0x00018209, + 0x030, 0x00019239, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x030, 0x00000239, + 0x030, 0x00001239, + 0x030, 0x00002239, + 0x030, 0x00003239, + 0x030, 0x00004239, + 0x030, 0x00005239, + 0x030, 0x00006239, + 0x030, 0x00007239, + 0x030, 0x00008239, + 0x030, 0x00009239, + 0x030, 0x0000A239, + 0x030, 0x0000B239, + 0x030, 0x0000C239, + 0x030, 0x0000D239, + 0x030, 0x0000E209, + 0x030, 0x0000F239, + 0x030, 0x00010239, + 0x030, 0x00011239, + 0x030, 0x00012209, + 0x030, 0x00013239, + 0x030, 0x00014239, + 0x030, 0x00015239, + 0x030, 0x00016209, + 0x030, 0x00017239, + 0x030, 0x00018209, + 0x030, 0x00019239, 0xA0000000, 0x00000000, 0x030, 0x00000233, 0x030, 0x00001233, @@ -4337,6 +5023,32 @@ static const u32 rtw8822c_rf_a[] = { 0x030, 0x00009334, 0x030, 0x0000A334, 0x030, 0x0000B334, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x030, 0x00000334, + 0x030, 0x00001334, + 0x030, 0x00002334, + 0x030, 0x00003334, + 0x030, 0x00004334, + 0x030, 0x00005334, + 0x030, 0x00006334, + 0x030, 0x00007334, + 0x030, 0x00008334, + 0x030, 0x00009334, + 0x030, 0x0000A334, + 0x030, 0x0000B334, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x030, 0x00000334, + 0x030, 0x00001334, + 0x030, 0x00002334, + 0x030, 0x00003334, + 0x030, 0x00004334, + 0x030, 0x00005334, + 0x030, 0x00006334, + 0x030, 0x00007334, + 0x030, 0x00008334, + 0x030, 0x00009334, + 0x030, 0x0000A334, + 0x030, 0x0000B334, 0xA0000000, 0x00000000, 0x030, 0x00000232, 0x030, 0x00001232, @@ -4444,6 +5156,10 @@ 
static const u32 rtw8822c_rf_a[] = { 0x052, 0x000902CA, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x052, 0x000902CA, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x052, 0x000902CA, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x052, 0x000902CA, 0xA0000000, 0x00000000, 0x052, 0x000942CA, 0xB0000000, 0x00000000, @@ -4462,9 +5178,13 @@ static const u32 rtw8822c_rf_a[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00010E46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x00028246, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x00028246, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00028246, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00028246, 0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -4479,9 +5199,13 @@ static const u32 rtw8822c_rf_a[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00010E46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x00028246, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x00028246, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00028246, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00028246, 0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -4499,6 +5223,10 @@ static const u32 rtw8822c_rf_a[] = { 0x03F, 0x00030246, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00030246, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00030246, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00030246, 0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -4513,9 +5241,13 @@ static const u32 rtw8822c_rf_a[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00010E46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x00028246, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x00028246, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00028246, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00028246, 0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -4530,9 +5262,13 @@ static const u32 rtw8822c_rf_a[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00010E46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x00028246, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x00028246, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00028246, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00028246, 0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -4550,6 +5286,10 @@ static const u32 rtw8822c_rf_a[] = { 0x03F, 0x00030246, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00030246, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00030246, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00030246, 0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -4564,9 +5304,13 @@ static const u32 rtw8822c_rf_a[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00010E46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x00028246, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x00028246, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00028246, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00028246, 
0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -4581,9 +5325,13 @@ static const u32 rtw8822c_rf_a[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00010E46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x00028246, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x00028246, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00028246, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00028246, 0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -4601,6 +5349,10 @@ static const u32 rtw8822c_rf_a[] = { 0x03F, 0x00030246, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00030246, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00030246, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00030246, 0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -4615,9 +5367,13 @@ static const u32 rtw8822c_rf_a[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00010E46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x00028246, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x00028246, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00028246, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00028246, 0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -4632,9 +5388,13 @@ static const u32 rtw8822c_rf_a[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00010E46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x00028246, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x00028246, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00028246, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00028246, 0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -4652,6 +5412,10 @@ static const u32 rtw8822c_rf_a[] = { 0x03F, 0x00030246, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00030246, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00030246, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00030246, 0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -4666,9 +5430,13 @@ static const u32 rtw8822c_rf_a[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00010E46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x00028246, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x00028246, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00028246, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00028246, 0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -4683,9 +5451,13 @@ static const u32 rtw8822c_rf_a[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00010E46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x00028246, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x00028246, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00028246, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00028246, 0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -4703,6 +5475,10 @@ static const u32 rtw8822c_rf_a[] = { 0x03F, 0x00030246, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00030246, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 
0x03F, 0x00030246, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00030246, 0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -4717,9 +5493,13 @@ static const u32 rtw8822c_rf_a[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0000EA46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x00028246, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x00028246, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00028246, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00028246, 0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -4734,9 +5514,13 @@ static const u32 rtw8822c_rf_a[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0000EA46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x00028246, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x00028246, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00028246, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00028246, 0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -4754,6 +5538,10 @@ static const u32 rtw8822c_rf_a[] = { 0x03F, 0x00030246, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00030246, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00030246, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00030246, 0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -4768,9 +5556,13 @@ static const u32 rtw8822c_rf_a[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0000EA46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00021E46, + 0x03F, 0x00025E46, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00021E46, + 0x03F, 0x00025E46, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00025E46, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00025E46, 0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -4785,9 +5577,13 @@ static const u32 rtw8822c_rf_a[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0000EA46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00021E46, + 0x03F, 0x00025E46, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00021E46, + 0x03F, 0x00025E46, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00025E46, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00025E46, 0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -4805,6 +5601,10 @@ static const u32 rtw8822c_rf_a[] = { 0x03F, 0x00031E46, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00031E46, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00031E46, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00031E46, 0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -4819,9 +5619,13 @@ static const u32 rtw8822c_rf_a[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0000EA46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00021E46, + 0x03F, 0x00025E46, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00021E46, + 0x03F, 0x00025E46, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00025E46, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00025E46, 0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -4836,9 +5640,13 @@ static const u32 rtw8822c_rf_a[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 
0x03F, 0x0000EA46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00021E46, + 0x03F, 0x00025E46, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00021E46, + 0x03F, 0x00025E46, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00025E46, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00025E46, 0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -4856,6 +5664,10 @@ static const u32 rtw8822c_rf_a[] = { 0x03F, 0x00031E46, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00031E46, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00031E46, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00031E46, 0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -4870,9 +5682,13 @@ static const u32 rtw8822c_rf_a[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0000EA46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00021E46, + 0x03F, 0x00025E46, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00021E46, + 0x03F, 0x00025E46, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00025E46, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00025E46, 0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -4887,9 +5703,13 @@ static const u32 rtw8822c_rf_a[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0000EA46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00021E46, + 0x03F, 0x00025E46, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00021E46, + 0x03F, 0x00025E46, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00025E46, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00025E46, 0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -4907,6 +5727,10 @@ static const u32 rtw8822c_rf_a[] = { 0x03F, 0x00031E46, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00031E46, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00031E46, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00031E46, 0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -4921,9 +5745,13 @@ static const u32 rtw8822c_rf_a[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0000EA46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00021E46, + 0x03F, 0x00025E46, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00021E46, + 0x03F, 0x00025E46, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00025E46, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00025E46, 0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -4938,9 +5766,13 @@ static const u32 rtw8822c_rf_a[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0000EA46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00021E46, + 0x03F, 0x00025E46, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00021E46, + 0x03F, 0x00025E46, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00025E46, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00025E46, 0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -4958,6 +5790,10 @@ static const u32 rtw8822c_rf_a[] = { 0x03F, 0x00031E46, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00031E46, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00031E46, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00031E46, 0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -4972,9 
+5808,13 @@ static const u32 rtw8822c_rf_a[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0000EA46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00021E46, + 0x03F, 0x00025E46, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00021E46, + 0x03F, 0x00025E46, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00025E46, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00025E46, 0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -4989,9 +5829,13 @@ static const u32 rtw8822c_rf_a[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0000EA46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00021E46, + 0x03F, 0x00025E46, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00021E46, + 0x03F, 0x00025E46, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00025E46, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00025E46, 0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -5009,6 +5853,10 @@ static const u32 rtw8822c_rf_a[] = { 0x03F, 0x00031E46, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00031E46, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00031E46, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00031E46, 0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -5023,9 +5871,13 @@ static const u32 rtw8822c_rf_a[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0000EA46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00021E46, + 0x03F, 0x00025E46, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00021E46, + 0x03F, 0x00025E46, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00025E46, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00025E46, 0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -5040,9 +5892,13 @@ static const u32 rtw8822c_rf_a[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0000EA46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00021E46, + 0x03F, 0x00025E46, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00021E46, + 0x03F, 0x00025E46, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00025E46, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00025E46, 0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -5060,6 +5916,10 @@ static const u32 rtw8822c_rf_a[] = { 0x03F, 0x00031E46, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00031E46, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00031E46, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00031E46, 0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -5074,9 +5934,13 @@ static const u32 rtw8822c_rf_a[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0000EA46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00021E46, + 0x03F, 0x00025E46, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00021E46, + 0x03F, 0x00025E46, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00025E46, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00025E46, 0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -5091,9 +5955,13 @@ static const u32 rtw8822c_rf_a[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0000EA46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00021E46, + 0x03F, 0x00025E46, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 
0x00021E46, + 0x03F, 0x00025E46, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00025E46, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00025E46, 0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -5111,6 +5979,10 @@ static const u32 rtw8822c_rf_a[] = { 0x03F, 0x00031E46, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00031E46, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00031E46, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00031E46, 0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -5125,9 +5997,13 @@ static const u32 rtw8822c_rf_a[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0000EA46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00021E46, + 0x03F, 0x00025E46, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00021E46, + 0x03F, 0x00025E46, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00025E46, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00025E46, 0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -5142,9 +6018,13 @@ static const u32 rtw8822c_rf_a[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0000EA46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00021E46, + 0x03F, 0x00025E46, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00021E46, + 0x03F, 0x00025E46, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00025E46, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00025E46, 0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -5162,6 +6042,10 @@ static const u32 rtw8822c_rf_a[] = { 0x03F, 0x00031E46, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00031E46, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00031E46, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00031E46, 0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -5179,6 +6063,10 @@ static const u32 rtw8822c_rf_a[] = { 0x03F, 0x00021E46, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00021E46, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00021E46, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00021E46, 0xA0000000, 0x00000000, 0x03F, 0x00002A46, 0xB0000000, 0x00000000, @@ -5278,17 +6166,17 @@ static const u32 rtw8822c_rf_a[] = { 0x03F, 0x00000DF7, 0x93000001, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000060, - 0x03F, 0x00000468, + 0x03F, 0x00000467, 0x033, 0x00000061, - 0x03F, 0x00000868, + 0x03F, 0x00000867, 0x033, 0x00000062, - 0x03F, 0x00000909, + 0x03F, 0x00000908, 0x033, 0x00000063, - 0x03F, 0x00000D0A, + 0x03F, 0x00000D09, 0x033, 0x00000064, - 0x03F, 0x00000D4A, + 0x03F, 0x00000D49, 0x033, 0x00000065, - 0x03F, 0x00000D8B, + 0x03F, 0x00000D8A, 0x033, 0x00000066, 0x03F, 0x00000DEB, 0x033, 0x00000067, @@ -5301,17 +6189,63 @@ static const u32 rtw8822c_rf_a[] = { 0x03F, 0x00000DF7, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000060, - 0x03F, 0x00000468, + 0x03F, 0x00000467, 0x033, 0x00000061, - 0x03F, 0x00000868, + 0x03F, 0x00000867, 0x033, 0x00000062, - 0x03F, 0x00000909, + 0x03F, 0x00000908, 0x033, 0x00000063, - 0x03F, 0x00000D0A, + 0x03F, 0x00000D09, 0x033, 0x00000064, - 0x03F, 0x00000D4A, + 0x03F, 0x00000D49, 0x033, 0x00000065, - 0x03F, 0x00000D8B, + 0x03F, 0x00000D8A, + 0x033, 0x00000066, + 0x03F, 0x00000DEB, + 0x033, 0x00000067, + 0x03F, 0x00000DEE, + 0x033, 0x00000068, + 0x03F, 0x00000DF1, + 0x033, 0x00000069, + 0x03F, 0x00000DF4, + 
0x033, 0x0000006A, + 0x03F, 0x00000DF7, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x00000060, + 0x03F, 0x00000467, + 0x033, 0x00000061, + 0x03F, 0x00000867, + 0x033, 0x00000062, + 0x03F, 0x00000908, + 0x033, 0x00000063, + 0x03F, 0x00000D09, + 0x033, 0x00000064, + 0x03F, 0x00000D49, + 0x033, 0x00000065, + 0x03F, 0x00000D8A, + 0x033, 0x00000066, + 0x03F, 0x00000DEB, + 0x033, 0x00000067, + 0x03F, 0x00000DEE, + 0x033, 0x00000068, + 0x03F, 0x00000DF1, + 0x033, 0x00000069, + 0x03F, 0x00000DF4, + 0x033, 0x0000006A, + 0x03F, 0x00000DF7, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x00000060, + 0x03F, 0x00000467, + 0x033, 0x00000061, + 0x03F, 0x00000867, + 0x033, 0x00000062, + 0x03F, 0x00000908, + 0x033, 0x00000063, + 0x03F, 0x00000D09, + 0x033, 0x00000064, + 0x03F, 0x00000D49, + 0x033, 0x00000065, + 0x03F, 0x00000D8A, 0x033, 0x00000066, 0x03F, 0x00000DEB, 0x033, 0x00000067, @@ -5440,17 +6374,17 @@ static const u32 rtw8822c_rf_a[] = { 0x03F, 0x00000DF7, 0x93000001, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000020, - 0x03F, 0x00000468, + 0x03F, 0x00000467, 0x033, 0x00000021, - 0x03F, 0x00000868, + 0x03F, 0x00000867, 0x033, 0x00000022, - 0x03F, 0x00000909, + 0x03F, 0x00000908, 0x033, 0x00000023, - 0x03F, 0x00000D0A, + 0x03F, 0x00000D09, 0x033, 0x00000024, - 0x03F, 0x00000D4A, + 0x03F, 0x00000D49, 0x033, 0x00000025, - 0x03F, 0x00000D8B, + 0x03F, 0x00000D8A, 0x033, 0x00000026, 0x03F, 0x00000DEB, 0x033, 0x00000027, @@ -5463,17 +6397,63 @@ static const u32 rtw8822c_rf_a[] = { 0x03F, 0x00000DF7, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000020, - 0x03F, 0x00000468, + 0x03F, 0x00000467, 0x033, 0x00000021, - 0x03F, 0x00000868, + 0x03F, 0x00000867, 0x033, 0x00000022, - 0x03F, 0x00000909, + 0x03F, 0x00000908, 0x033, 0x00000023, - 0x03F, 0x00000D0A, + 0x03F, 0x00000D09, 0x033, 0x00000024, - 0x03F, 0x00000D4A, + 0x03F, 0x00000D49, 0x033, 0x00000025, - 0x03F, 0x00000D8B, + 0x03F, 0x00000D8A, + 0x033, 0x00000026, + 0x03F, 0x00000DEB, + 0x033, 0x00000027, + 0x03F, 0x00000DEE, + 0x033, 0x00000028, + 0x03F, 0x00000DF1, + 0x033, 0x00000029, + 0x03F, 0x00000DF4, + 0x033, 0x0000002A, + 0x03F, 0x00000DF7, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x00000020, + 0x03F, 0x00000467, + 0x033, 0x00000021, + 0x03F, 0x00000867, + 0x033, 0x00000022, + 0x03F, 0x00000908, + 0x033, 0x00000023, + 0x03F, 0x00000D09, + 0x033, 0x00000024, + 0x03F, 0x00000D49, + 0x033, 0x00000025, + 0x03F, 0x00000D8A, + 0x033, 0x00000026, + 0x03F, 0x00000DEB, + 0x033, 0x00000027, + 0x03F, 0x00000DEE, + 0x033, 0x00000028, + 0x03F, 0x00000DF1, + 0x033, 0x00000029, + 0x03F, 0x00000DF4, + 0x033, 0x0000002A, + 0x03F, 0x00000DF7, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x00000020, + 0x03F, 0x00000467, + 0x033, 0x00000021, + 0x03F, 0x00000867, + 0x033, 0x00000022, + 0x03F, 0x00000908, + 0x033, 0x00000023, + 0x03F, 0x00000D09, + 0x033, 0x00000024, + 0x03F, 0x00000D49, + 0x033, 0x00000025, + 0x03F, 0x00000D8A, 0x033, 0x00000026, 0x03F, 0x00000DEB, 0x033, 0x00000027, @@ -5526,15 +6506,25 @@ static const u32 rtw8822c_rf_a[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x0B3, 0x0007C760, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x0B3, 0x0007C760, + 0x0B3, 0x000FC760, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x0B3, 0x0007C760, + 0x0B3, 0x000FC760, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x0B3, 0x000FC760, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x0B3, 0x000FC760, 0xA0000000, 0x00000000, 0x0B3, 0x0007C760, 0xB0000000, 0x00000000, 
0x0B4, 0x00099D40, 0x0B5, 0x0004103F, + 0x83000003, 0x00000000, 0x40000000, 0x00000000, + 0x0B6, 0x000387F8, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x0B6, 0x000387F8, + 0xA0000000, 0x00000000, 0x0B6, 0x000187F8, + 0xB0000000, 0x00000000, 0x0B7, 0x00030018, 0x0BC, 0x00000008, 0x0D3, 0x00000542, @@ -5555,9 +6545,13 @@ static const u32 rtw8822c_rf_a[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x0B3, 0x0007C700, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x0B3, 0x0007C700, + 0x0B3, 0x000FC760, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x0B3, 0x0007C700, + 0x0B3, 0x000FC760, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x0B3, 0x000FC760, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x0B3, 0x000FC760, 0xA0000000, 0x00000000, 0x0B3, 0x0007C700, 0xB0000000, 0x00000000, @@ -5573,9 +6567,13 @@ static const u32 rtw8822c_rf_a[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x0B3, 0x0007C760, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x0B3, 0x0007C760, + 0x0B3, 0x000FC760, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x0B3, 0x0007C760, + 0x0B3, 0x000FC760, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x0B3, 0x000FC760, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x0B3, 0x000FC760, 0xA0000000, 0x00000000, 0x0B3, 0x0007C760, 0xB0000000, 0x00000000, @@ -5584,10 +6582,14 @@ static const u32 rtw8822c_rf_a[] = { 0x0CD, 0x00089600, 0x018, 0x00013108, 0x0FE, 0x00000000, + 0x0FE, 0x00000000, 0x0B8, 0x000C0440, 0x0BA, 0x000E840D, 0x0FE, 0x00000000, + 0x0FE, 0x00000000, 0x018, 0x00013124, + 0x0FE, 0x00000000, + 0x0FE, 0x00000000, 0x059, 0x000A0000, 0x05A, 0x00060000, 0x05B, 0x00014000, @@ -5595,6 +6597,18 @@ static const u32 rtw8822c_rf_a[] = { 0x033, 0x00000001, 0x03F, 0x0000000F, 0x0ED, 0x00000000, + 0x83000001, 0x00000000, 0x40000000, 0x00000000, + 0x0DD, 0x00000540, + 0x93000002, 0x00000000, 0x40000000, 0x00000000, + 0x0DD, 0x00000540, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x0DD, 0x00000540, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x0DD, 0x00000540, + 0xA0000000, 0x00000000, + 0x0DD, 0x00000500, + 0xB0000000, 0x00000000, + 0x0BC, 0x00000004, 0x0EE, 0x00000002, 0x033, 0x00000017, 0x03F, 0x0000003F, @@ -5672,9 +6686,24 @@ static const u32 rtw8822c_rf_a[] = { 0x0FE, 0x00000000, 0x0FE, 0x00000000, 0x092, 0x00084800, - 0x08F, 0x0000182C, + 0x08F, 0x00001B4C, 0x088, 0x0004326B, 0x019, 0x00000005, + 0x0EF, 0x00080000, + 0x033, 0x00000004, + 0x03E, 0x00000003, + 0x03F, 0x000F60FF, + 0x0EF, 0x00000000, + 0x0EF, 0x00080000, + 0x033, 0x00000006, + 0x03E, 0x00000003, + 0x03F, 0x000760FF, + 0x0EF, 0x00000000, + 0x0EF, 0x00080000, + 0x033, 0x00000007, + 0x03E, 0x00000003, + 0x03F, 0x0007DEFF, + 0x0EF, 0x00000000, }; RTW_DECL_TABLE_RF_RADIO(rtw8822c_rf_a, A); @@ -5685,7 +6714,7 @@ static const u32 rtw8822c_rf_b[] = { 0x093, 0x0008483F, 0x0EF, 0x00080000, 0x033, 0x00000001, - 0x03F, 0x00091020, + 0x03F, 0x00091230, 0x0EF, 0x00000000, 0x0DE, 0x00000020, 0x81000001, 0x00000000, 0x40000000, 0x00000000, @@ -5700,6 +6729,10 @@ static const u32 rtw8822c_rf_b[] = { 0x08E, 0x000A5540, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x08E, 0x000A5540, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x08E, 0x000A5540, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x08E, 0x000A5540, 0xA0000000, 0x00000000, 0x08E, 0x000A5540, 0xB0000000, 0x00000000, @@ -5761,6 +6794,24 @@ static const u32 rtw8822c_rf_b[] = { 0x033, 0x00000002, 0x03F, 0x0000002A, 0x0EE, 0x00000000, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 
0x0EE, 0x00000010, + 0x033, 0x00000001, + 0x03F, 0x0000002A, + 0x033, 0x00000001, + 0x03F, 0x0000002A, + 0x033, 0x00000002, + 0x03F, 0x0000002A, + 0x0EE, 0x00000000, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x0EE, 0x00000010, + 0x033, 0x00000001, + 0x03F, 0x0000002A, + 0x033, 0x00000001, + 0x03F, 0x0000002A, + 0x033, 0x00000002, + 0x03F, 0x0000002A, + 0x0EE, 0x00000000, 0xA0000000, 0x00000000, 0x0EE, 0x00000010, 0x033, 0x00000001, @@ -5927,6 +6978,58 @@ static const u32 rtw8822c_rf_b[] = { 0x03F, 0x00000180, 0x033, 0x00000004, 0x03F, 0x00000040, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00010000, + 0x033, 0x0000000F, + 0x03F, 0x000773C0, + 0x033, 0x0000000E, + 0x03F, 0x000FF3C0, + 0x033, 0x0000000D, + 0x03F, 0x000773E8, + 0x033, 0x0000000C, + 0x03F, 0x000FF3E8, + 0x033, 0x0000000B, + 0x03F, 0x00000287, + 0x033, 0x0000000A, + 0x03F, 0x000002A8, + 0x033, 0x00000009, + 0x03F, 0x00000207, + 0x033, 0x00000008, + 0x03F, 0x000FF280, + 0x033, 0x00000007, + 0x03F, 0x00000200, + 0x033, 0x00000006, + 0x03F, 0x000001C0, + 0x033, 0x00000005, + 0x03F, 0x00000180, + 0x033, 0x00000004, + 0x03F, 0x00000040, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00010000, + 0x033, 0x0000000F, + 0x03F, 0x000773C0, + 0x033, 0x0000000E, + 0x03F, 0x000FF3C0, + 0x033, 0x0000000D, + 0x03F, 0x000773E8, + 0x033, 0x0000000C, + 0x03F, 0x000FF3E8, + 0x033, 0x0000000B, + 0x03F, 0x00000287, + 0x033, 0x0000000A, + 0x03F, 0x000002A8, + 0x033, 0x00000009, + 0x03F, 0x00000207, + 0x033, 0x00000008, + 0x03F, 0x000FF280, + 0x033, 0x00000007, + 0x03F, 0x00000200, + 0x033, 0x00000006, + 0x03F, 0x000001C0, + 0x033, 0x00000005, + 0x03F, 0x00000180, + 0x033, 0x00000004, + 0x03F, 0x00000040, 0xA0000000, 0x00000000, 0x0EF, 0x00010000, 0x033, 0x0000000F, @@ -6106,6 +7209,56 @@ static const u32 rtw8822c_rf_b[] = { 0x03F, 0x00000180, 0x033, 0x00000014, 0x03F, 0x00000040, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x0000001F, + 0x03F, 0x000773C0, + 0x033, 0x0000001E, + 0x03F, 0x000FF3C0, + 0x033, 0x0000001D, + 0x03F, 0x000773E8, + 0x033, 0x0000001C, + 0x03F, 0x000FF3E8, + 0x033, 0x0000001B, + 0x03F, 0x00000287, + 0x033, 0x0000001A, + 0x03F, 0x000002A8, + 0x033, 0x00000019, + 0x03F, 0x00000207, + 0x033, 0x00000018, + 0x03F, 0x000FF280, + 0x033, 0x00000017, + 0x03F, 0x00000200, + 0x033, 0x00000016, + 0x03F, 0x000001C0, + 0x033, 0x00000015, + 0x03F, 0x00000180, + 0x033, 0x00000014, + 0x03F, 0x00000040, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x0000001F, + 0x03F, 0x000773C0, + 0x033, 0x0000001E, + 0x03F, 0x000FF3C0, + 0x033, 0x0000001D, + 0x03F, 0x000773E8, + 0x033, 0x0000001C, + 0x03F, 0x000FF3E8, + 0x033, 0x0000001B, + 0x03F, 0x00000287, + 0x033, 0x0000001A, + 0x03F, 0x000002A8, + 0x033, 0x00000019, + 0x03F, 0x00000207, + 0x033, 0x00000018, + 0x03F, 0x000FF280, + 0x033, 0x00000017, + 0x03F, 0x00000200, + 0x033, 0x00000016, + 0x03F, 0x000001C0, + 0x033, 0x00000015, + 0x03F, 0x00000180, + 0x033, 0x00000014, + 0x03F, 0x00000040, 0xA0000000, 0x00000000, 0x033, 0x0000001F, 0x03F, 0x000773E8, @@ -6284,6 +7437,56 @@ static const u32 rtw8822c_rf_b[] = { 0x03F, 0x00000180, 0x033, 0x00000024, 0x03F, 0x00000040, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x0000002F, + 0x03F, 0x000773C0, + 0x033, 0x0000002E, + 0x03F, 0x000FF3C0, + 0x033, 0x0000002D, + 0x03F, 0x000773E8, + 0x033, 0x0000002C, + 0x03F, 0x000FF3E8, + 0x033, 0x0000002B, + 0x03F, 0x00000287, + 0x033, 0x0000002A, + 0x03F, 0x000002A8, + 0x033, 0x00000029, + 0x03F, 0x00000207, + 0x033, 0x00000028, + 0x03F, 
0x000FF280, + 0x033, 0x00000027, + 0x03F, 0x00000200, + 0x033, 0x00000026, + 0x03F, 0x000001C0, + 0x033, 0x00000025, + 0x03F, 0x00000180, + 0x033, 0x00000024, + 0x03F, 0x00000040, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x0000002F, + 0x03F, 0x000773C0, + 0x033, 0x0000002E, + 0x03F, 0x000FF3C0, + 0x033, 0x0000002D, + 0x03F, 0x000773E8, + 0x033, 0x0000002C, + 0x03F, 0x000FF3E8, + 0x033, 0x0000002B, + 0x03F, 0x00000287, + 0x033, 0x0000002A, + 0x03F, 0x000002A8, + 0x033, 0x00000029, + 0x03F, 0x00000207, + 0x033, 0x00000028, + 0x03F, 0x000FF280, + 0x033, 0x00000027, + 0x03F, 0x00000200, + 0x033, 0x00000026, + 0x03F, 0x000001C0, + 0x033, 0x00000025, + 0x03F, 0x00000180, + 0x033, 0x00000024, + 0x03F, 0x00000040, 0xA0000000, 0x00000000, 0x033, 0x0000002F, 0x03F, 0x000773E8, @@ -6462,6 +7665,56 @@ static const u32 rtw8822c_rf_b[] = { 0x03F, 0x00000180, 0x033, 0x00000034, 0x03F, 0x00000040, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x0000003F, + 0x03F, 0x000773C0, + 0x033, 0x0000003E, + 0x03F, 0x000FF3C0, + 0x033, 0x0000003D, + 0x03F, 0x000773E8, + 0x033, 0x0000003C, + 0x03F, 0x000FF3E8, + 0x033, 0x0000003B, + 0x03F, 0x00000287, + 0x033, 0x0000003A, + 0x03F, 0x000002A8, + 0x033, 0x00000039, + 0x03F, 0x00000207, + 0x033, 0x00000038, + 0x03F, 0x000FF280, + 0x033, 0x00000037, + 0x03F, 0x00000200, + 0x033, 0x00000036, + 0x03F, 0x000001C0, + 0x033, 0x00000035, + 0x03F, 0x00000180, + 0x033, 0x00000034, + 0x03F, 0x00000040, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x0000003F, + 0x03F, 0x000773C0, + 0x033, 0x0000003E, + 0x03F, 0x000FF3C0, + 0x033, 0x0000003D, + 0x03F, 0x000773E8, + 0x033, 0x0000003C, + 0x03F, 0x000FF3E8, + 0x033, 0x0000003B, + 0x03F, 0x00000287, + 0x033, 0x0000003A, + 0x03F, 0x000002A8, + 0x033, 0x00000039, + 0x03F, 0x00000207, + 0x033, 0x00000038, + 0x03F, 0x000FF280, + 0x033, 0x00000037, + 0x03F, 0x00000200, + 0x033, 0x00000036, + 0x03F, 0x000001C0, + 0x033, 0x00000035, + 0x03F, 0x00000180, + 0x033, 0x00000034, + 0x03F, 0x00000040, 0xA0000000, 0x00000000, 0x033, 0x0000003F, 0x03F, 0x000773E8, @@ -6640,6 +7893,56 @@ static const u32 rtw8822c_rf_b[] = { 0x03F, 0x00000180, 0x033, 0x00000044, 0x03F, 0x00000040, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x0000004F, + 0x03F, 0x000773C0, + 0x033, 0x0000004E, + 0x03F, 0x000FF3C0, + 0x033, 0x0000004D, + 0x03F, 0x000773E8, + 0x033, 0x0000004C, + 0x03F, 0x000FF3E8, + 0x033, 0x0000004B, + 0x03F, 0x00000287, + 0x033, 0x0000004A, + 0x03F, 0x000002A8, + 0x033, 0x00000049, + 0x03F, 0x00000207, + 0x033, 0x00000048, + 0x03F, 0x000FF280, + 0x033, 0x00000047, + 0x03F, 0x00000200, + 0x033, 0x00000046, + 0x03F, 0x000001C0, + 0x033, 0x00000045, + 0x03F, 0x00000180, + 0x033, 0x00000044, + 0x03F, 0x00000040, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x0000004F, + 0x03F, 0x000773C0, + 0x033, 0x0000004E, + 0x03F, 0x000FF3C0, + 0x033, 0x0000004D, + 0x03F, 0x000773E8, + 0x033, 0x0000004C, + 0x03F, 0x000FF3E8, + 0x033, 0x0000004B, + 0x03F, 0x00000287, + 0x033, 0x0000004A, + 0x03F, 0x000002A8, + 0x033, 0x00000049, + 0x03F, 0x00000207, + 0x033, 0x00000048, + 0x03F, 0x000FF280, + 0x033, 0x00000047, + 0x03F, 0x00000200, + 0x033, 0x00000046, + 0x03F, 0x000001C0, + 0x033, 0x00000045, + 0x03F, 0x00000180, + 0x033, 0x00000044, + 0x03F, 0x00000040, 0xA0000000, 0x00000000, 0x033, 0x0000004F, 0x03F, 0x000773E8, @@ -6818,6 +8121,56 @@ static const u32 rtw8822c_rf_b[] = { 0x03F, 0x00000180, 0x033, 0x00000054, 0x03F, 0x00000040, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x033, 
0x0000005F, + 0x03F, 0x000773C0, + 0x033, 0x0000005E, + 0x03F, 0x000FF3C0, + 0x033, 0x0000005D, + 0x03F, 0x000773E8, + 0x033, 0x0000005C, + 0x03F, 0x000FF3E8, + 0x033, 0x0000005B, + 0x03F, 0x00000287, + 0x033, 0x0000005A, + 0x03F, 0x000002A8, + 0x033, 0x00000059, + 0x03F, 0x00000207, + 0x033, 0x00000058, + 0x03F, 0x000FF280, + 0x033, 0x00000057, + 0x03F, 0x00000200, + 0x033, 0x00000056, + 0x03F, 0x000001C0, + 0x033, 0x00000055, + 0x03F, 0x00000180, + 0x033, 0x00000054, + 0x03F, 0x00000040, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x0000005F, + 0x03F, 0x000773C0, + 0x033, 0x0000005E, + 0x03F, 0x000FF3C0, + 0x033, 0x0000005D, + 0x03F, 0x000773E8, + 0x033, 0x0000005C, + 0x03F, 0x000FF3E8, + 0x033, 0x0000005B, + 0x03F, 0x00000287, + 0x033, 0x0000005A, + 0x03F, 0x000002A8, + 0x033, 0x00000059, + 0x03F, 0x00000207, + 0x033, 0x00000058, + 0x03F, 0x000FF280, + 0x033, 0x00000057, + 0x03F, 0x00000200, + 0x033, 0x00000056, + 0x03F, 0x000001C0, + 0x033, 0x00000055, + 0x03F, 0x00000180, + 0x033, 0x00000054, + 0x03F, 0x00000040, 0xA0000000, 0x00000000, 0x033, 0x0000005F, 0x03F, 0x000773E8, @@ -6858,6 +8211,10 @@ static const u32 rtw8822c_rf_b[] = { 0x0EF, 0x00000000, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x0EF, 0x00000000, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00000000, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00000000, 0xA0000000, 0x00000000, 0x0EF, 0x00000000, 0xB0000000, 0x00000000, @@ -7757,6 +9114,300 @@ static const u32 rtw8822c_rf_b[] = { 0x03E, 0x00000000, 0x03F, 0x0002C010, 0x0EF, 0x00000000, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00020000, + 0x033, 0x00000000, + 0x03E, 0x00001C86, + 0x03F, 0x00020000, + 0x033, 0x00000001, + 0x03E, 0x00001C02, + 0x03F, 0x00020000, + 0x033, 0x00000002, + 0x03E, 0x00000F02, + 0x03F, 0x00020000, + 0x033, 0x00000003, + 0x03E, 0x00000F00, + 0x03F, 0x00020000, + 0x033, 0x00000004, + 0x03E, 0x00000086, + 0x03F, 0x00020000, + 0x033, 0x00000005, + 0x03E, 0x00000002, + 0x03F, 0x00020000, + 0x033, 0x00000006, + 0x03E, 0x00000000, + 0x03F, 0x00020000, + 0x033, 0x00000007, + 0x03E, 0x00000000, + 0x03F, 0x0002F81C, + 0x033, 0x00000008, + 0x03E, 0x00001C86, + 0x03F, 0x00020000, + 0x033, 0x00000009, + 0x03E, 0x00001C02, + 0x03F, 0x00020000, + 0x033, 0x0000000A, + 0x03E, 0x00000F02, + 0x03F, 0x00020000, + 0x033, 0x0000000B, + 0x03E, 0x00000F00, + 0x03F, 0x00020000, + 0x033, 0x0000000C, + 0x03E, 0x00000086, + 0x03F, 0x00020000, + 0x033, 0x0000000D, + 0x03E, 0x00000002, + 0x03F, 0x00020000, + 0x033, 0x0000000E, + 0x03E, 0x00000000, + 0x03F, 0x00020000, + 0x033, 0x0000000F, + 0x03E, 0x00000000, + 0x03F, 0x0002F81C, + 0x033, 0x00000010, + 0x03E, 0x00001C86, + 0x03F, 0x00020000, + 0x033, 0x00000011, + 0x03E, 0x00001C02, + 0x03F, 0x00020000, + 0x033, 0x00000012, + 0x03E, 0x00000F02, + 0x03F, 0x00020000, + 0x033, 0x00000013, + 0x03E, 0x00000F00, + 0x03F, 0x00020000, + 0x033, 0x00000014, + 0x03E, 0x00000086, + 0x03F, 0x00020000, + 0x033, 0x00000015, + 0x03E, 0x00000002, + 0x03F, 0x00020000, + 0x033, 0x00000016, + 0x03E, 0x00000000, + 0x03F, 0x00020000, + 0x033, 0x00000017, + 0x03E, 0x00000000, + 0x03F, 0x0002C010, + 0x033, 0x00000018, + 0x03E, 0x00001C86, + 0x03F, 0x00020000, + 0x033, 0x00000019, + 0x03E, 0x00001C02, + 0x03F, 0x00020000, + 0x033, 0x0000001A, + 0x03E, 0x00000F02, + 0x03F, 0x00020000, + 0x033, 0x0000001B, + 0x03E, 0x00000F00, + 0x03F, 0x00020000, + 0x033, 0x0000001C, + 0x03E, 0x00000086, + 0x03F, 0x00020000, + 0x033, 0x0000001D, + 0x03E, 0x00000002, + 0x03F, 0x00020000, + 0x033, 
0x0000001E, + 0x03E, 0x00000000, + 0x03F, 0x00020000, + 0x033, 0x0000001F, + 0x03E, 0x00000000, + 0x03F, 0x0002C010, + 0x033, 0x00000020, + 0x03E, 0x00001C86, + 0x03F, 0x00020000, + 0x033, 0x00000021, + 0x03E, 0x00001C02, + 0x03F, 0x00020000, + 0x033, 0x00000022, + 0x03E, 0x00000F02, + 0x03F, 0x00020000, + 0x033, 0x00000023, + 0x03E, 0x00000F00, + 0x03F, 0x00020000, + 0x033, 0x00000024, + 0x03E, 0x00000086, + 0x03F, 0x00020000, + 0x033, 0x00000025, + 0x03E, 0x00000002, + 0x03F, 0x00020000, + 0x033, 0x00000026, + 0x03E, 0x00000000, + 0x03F, 0x00020000, + 0x033, 0x00000027, + 0x03E, 0x00000000, + 0x03F, 0x0002C010, + 0x033, 0x00000028, + 0x03E, 0x00001C86, + 0x03F, 0x00020000, + 0x033, 0x00000029, + 0x03E, 0x00001C02, + 0x03F, 0x00020000, + 0x033, 0x0000002A, + 0x03E, 0x00000F02, + 0x03F, 0x00020000, + 0x033, 0x0000002B, + 0x03E, 0x00000F00, + 0x03F, 0x00020000, + 0x033, 0x0000002C, + 0x03E, 0x00000086, + 0x03F, 0x00020000, + 0x033, 0x0000002D, + 0x03E, 0x00000002, + 0x03F, 0x00020000, + 0x033, 0x0000002E, + 0x03E, 0x00000000, + 0x03F, 0x00020000, + 0x033, 0x0000002F, + 0x03E, 0x00000000, + 0x03F, 0x0002C010, + 0x0EF, 0x00000000, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00020000, + 0x033, 0x00000000, + 0x03E, 0x00001C86, + 0x03F, 0x00020000, + 0x033, 0x00000001, + 0x03E, 0x00001C02, + 0x03F, 0x00020000, + 0x033, 0x00000002, + 0x03E, 0x00000F02, + 0x03F, 0x00020000, + 0x033, 0x00000003, + 0x03E, 0x00000F00, + 0x03F, 0x00020000, + 0x033, 0x00000004, + 0x03E, 0x00000086, + 0x03F, 0x00020000, + 0x033, 0x00000005, + 0x03E, 0x00000002, + 0x03F, 0x00020000, + 0x033, 0x00000006, + 0x03E, 0x00000000, + 0x03F, 0x00020000, + 0x033, 0x00000007, + 0x03E, 0x00000000, + 0x03F, 0x0002F81C, + 0x033, 0x00000008, + 0x03E, 0x00001C86, + 0x03F, 0x00020000, + 0x033, 0x00000009, + 0x03E, 0x00001C02, + 0x03F, 0x00020000, + 0x033, 0x0000000A, + 0x03E, 0x00000F02, + 0x03F, 0x00020000, + 0x033, 0x0000000B, + 0x03E, 0x00000F00, + 0x03F, 0x00020000, + 0x033, 0x0000000C, + 0x03E, 0x00000086, + 0x03F, 0x00020000, + 0x033, 0x0000000D, + 0x03E, 0x00000002, + 0x03F, 0x00020000, + 0x033, 0x0000000E, + 0x03E, 0x00000000, + 0x03F, 0x00020000, + 0x033, 0x0000000F, + 0x03E, 0x00000000, + 0x03F, 0x0002F81C, + 0x033, 0x00000010, + 0x03E, 0x00001C86, + 0x03F, 0x00020000, + 0x033, 0x00000011, + 0x03E, 0x00001C02, + 0x03F, 0x00020000, + 0x033, 0x00000012, + 0x03E, 0x00000F02, + 0x03F, 0x00020000, + 0x033, 0x00000013, + 0x03E, 0x00000F00, + 0x03F, 0x00020000, + 0x033, 0x00000014, + 0x03E, 0x00000086, + 0x03F, 0x00020000, + 0x033, 0x00000015, + 0x03E, 0x00000002, + 0x03F, 0x00020000, + 0x033, 0x00000016, + 0x03E, 0x00000000, + 0x03F, 0x00020000, + 0x033, 0x00000017, + 0x03E, 0x00000000, + 0x03F, 0x0002C010, + 0x033, 0x00000018, + 0x03E, 0x00001C86, + 0x03F, 0x00020000, + 0x033, 0x00000019, + 0x03E, 0x00001C02, + 0x03F, 0x00020000, + 0x033, 0x0000001A, + 0x03E, 0x00000F02, + 0x03F, 0x00020000, + 0x033, 0x0000001B, + 0x03E, 0x00000F00, + 0x03F, 0x00020000, + 0x033, 0x0000001C, + 0x03E, 0x00000086, + 0x03F, 0x00020000, + 0x033, 0x0000001D, + 0x03E, 0x00000002, + 0x03F, 0x00020000, + 0x033, 0x0000001E, + 0x03E, 0x00000000, + 0x03F, 0x00020000, + 0x033, 0x0000001F, + 0x03E, 0x00000000, + 0x03F, 0x0002C010, + 0x033, 0x00000020, + 0x03E, 0x00001C86, + 0x03F, 0x00020000, + 0x033, 0x00000021, + 0x03E, 0x00001C02, + 0x03F, 0x00020000, + 0x033, 0x00000022, + 0x03E, 0x00000F02, + 0x03F, 0x00020000, + 0x033, 0x00000023, + 0x03E, 0x00000F00, + 0x03F, 0x00020000, + 0x033, 0x00000024, + 0x03E, 0x00000086, + 0x03F, 0x00020000, + 
0x033, 0x00000025, + 0x03E, 0x00000002, + 0x03F, 0x00020000, + 0x033, 0x00000026, + 0x03E, 0x00000000, + 0x03F, 0x00020000, + 0x033, 0x00000027, + 0x03E, 0x00000000, + 0x03F, 0x0002C010, + 0x033, 0x00000028, + 0x03E, 0x00001C86, + 0x03F, 0x00020000, + 0x033, 0x00000029, + 0x03E, 0x00001C02, + 0x03F, 0x00020000, + 0x033, 0x0000002A, + 0x03E, 0x00000F02, + 0x03F, 0x00020000, + 0x033, 0x0000002B, + 0x03E, 0x00000F00, + 0x03F, 0x00020000, + 0x033, 0x0000002C, + 0x03E, 0x00000086, + 0x03F, 0x00020000, + 0x033, 0x0000002D, + 0x03E, 0x00000002, + 0x03F, 0x00020000, + 0x033, 0x0000002E, + 0x03E, 0x00000000, + 0x03F, 0x00020000, + 0x033, 0x0000002F, + 0x03E, 0x00000000, + 0x03F, 0x0002C010, + 0x0EF, 0x00000000, 0xA0000000, 0x00000000, 0x0EF, 0x00020000, 0x033, 0x00000000, @@ -7921,6 +9572,10 @@ static const u32 rtw8822c_rf_b[] = { 0x063, 0x00000002, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x063, 0x00000002, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x00000002, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x00000002, 0xA0000000, 0x00000000, 0x063, 0x00000C02, 0xB0000000, 0x00000000, @@ -8034,59 +9689,113 @@ static const u32 rtw8822c_rf_b[] = { 0x030, 0x00018207, 0x030, 0x00019237, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x030, 0x00000237, - 0x030, 0x00001237, - 0x030, 0x00002237, - 0x030, 0x00003237, - 0x030, 0x00004207, - 0x030, 0x00005237, - 0x030, 0x00006237, - 0x030, 0x00007237, - 0x030, 0x00008207, - 0x030, 0x00009237, - 0x030, 0x0000A237, - 0x030, 0x0000B237, - 0x030, 0x0000C237, - 0x030, 0x0000D237, - 0x030, 0x0000E207, - 0x030, 0x0000F237, - 0x030, 0x00010237, - 0x030, 0x00011237, - 0x030, 0x00012207, - 0x030, 0x00013237, - 0x030, 0x00014237, - 0x030, 0x00015237, - 0x030, 0x00016207, - 0x030, 0x00017237, - 0x030, 0x00018207, - 0x030, 0x00019237, + 0x030, 0x00000238, + 0x030, 0x00001238, + 0x030, 0x00002238, + 0x030, 0x00003238, + 0x030, 0x00004228, + 0x030, 0x00005238, + 0x030, 0x00006238, + 0x030, 0x00007238, + 0x030, 0x00008228, + 0x030, 0x00009238, + 0x030, 0x0000A238, + 0x030, 0x0000B238, + 0x030, 0x0000C238, + 0x030, 0x0000D238, + 0x030, 0x0000E228, + 0x030, 0x0000F238, + 0x030, 0x00010238, + 0x030, 0x00011238, + 0x030, 0x00012228, + 0x030, 0x00013238, + 0x030, 0x00014238, + 0x030, 0x00015238, + 0x030, 0x00016228, + 0x030, 0x00017238, + 0x030, 0x00018228, + 0x030, 0x00019238, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x030, 0x00000237, - 0x030, 0x00001237, - 0x030, 0x00002237, - 0x030, 0x00003237, - 0x030, 0x00004207, - 0x030, 0x00005237, - 0x030, 0x00006237, - 0x030, 0x00007237, - 0x030, 0x00008207, - 0x030, 0x00009237, - 0x030, 0x0000A237, - 0x030, 0x0000B237, - 0x030, 0x0000C237, - 0x030, 0x0000D237, - 0x030, 0x0000E207, - 0x030, 0x0000F237, - 0x030, 0x00010237, - 0x030, 0x00011237, - 0x030, 0x00012207, - 0x030, 0x00013237, - 0x030, 0x00014237, - 0x030, 0x00015237, - 0x030, 0x00016207, - 0x030, 0x00017237, - 0x030, 0x00018207, - 0x030, 0x00019237, + 0x030, 0x00000238, + 0x030, 0x00001238, + 0x030, 0x00002238, + 0x030, 0x00003238, + 0x030, 0x00004228, + 0x030, 0x00005238, + 0x030, 0x00006238, + 0x030, 0x00007238, + 0x030, 0x00008228, + 0x030, 0x00009238, + 0x030, 0x0000A238, + 0x030, 0x0000B238, + 0x030, 0x0000C238, + 0x030, 0x0000D238, + 0x030, 0x0000E228, + 0x030, 0x0000F238, + 0x030, 0x00010238, + 0x030, 0x00011238, + 0x030, 0x00012228, + 0x030, 0x00013238, + 0x030, 0x00014238, + 0x030, 0x00015238, + 0x030, 0x00016228, + 0x030, 0x00017238, + 0x030, 0x00018228, + 0x030, 0x00019238, + 0x93000003, 0x00000000, 0x40000000, 
0x00000000, + 0x030, 0x00000239, + 0x030, 0x00001239, + 0x030, 0x00002239, + 0x030, 0x00003239, + 0x030, 0x00004239, + 0x030, 0x00005239, + 0x030, 0x00006239, + 0x030, 0x00007239, + 0x030, 0x00008239, + 0x030, 0x00009239, + 0x030, 0x0000A239, + 0x030, 0x0000B239, + 0x030, 0x0000C239, + 0x030, 0x0000D239, + 0x030, 0x0000E209, + 0x030, 0x0000F239, + 0x030, 0x00010239, + 0x030, 0x00011239, + 0x030, 0x00012209, + 0x030, 0x00013239, + 0x030, 0x00014239, + 0x030, 0x00015239, + 0x030, 0x00016209, + 0x030, 0x00017239, + 0x030, 0x00018209, + 0x030, 0x00019239, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x030, 0x00000239, + 0x030, 0x00001239, + 0x030, 0x00002239, + 0x030, 0x00003239, + 0x030, 0x00004239, + 0x030, 0x00005239, + 0x030, 0x00006239, + 0x030, 0x00007239, + 0x030, 0x00008239, + 0x030, 0x00009239, + 0x030, 0x0000A239, + 0x030, 0x0000B239, + 0x030, 0x0000C239, + 0x030, 0x0000D239, + 0x030, 0x0000E209, + 0x030, 0x0000F239, + 0x030, 0x00010239, + 0x030, 0x00011239, + 0x030, 0x00012209, + 0x030, 0x00013239, + 0x030, 0x00014239, + 0x030, 0x00015239, + 0x030, 0x00016209, + 0x030, 0x00017239, + 0x030, 0x00018209, + 0x030, 0x00019239, 0xA0000000, 0x00000000, 0x030, 0x00000233, 0x030, 0x00001233, @@ -8195,6 +9904,32 @@ static const u32 rtw8822c_rf_b[] = { 0x030, 0x00009334, 0x030, 0x0000A334, 0x030, 0x0000B334, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x030, 0x00000334, + 0x030, 0x00001334, + 0x030, 0x00002334, + 0x030, 0x00003334, + 0x030, 0x00004334, + 0x030, 0x00005334, + 0x030, 0x00006334, + 0x030, 0x00007334, + 0x030, 0x00008334, + 0x030, 0x00009334, + 0x030, 0x0000A334, + 0x030, 0x0000B334, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x030, 0x00000334, + 0x030, 0x00001334, + 0x030, 0x00002334, + 0x030, 0x00003334, + 0x030, 0x00004334, + 0x030, 0x00005334, + 0x030, 0x00006334, + 0x030, 0x00007334, + 0x030, 0x00008334, + 0x030, 0x00009334, + 0x030, 0x0000A334, + 0x030, 0x0000B334, 0xA0000000, 0x00000000, 0x030, 0x00000232, 0x030, 0x00001232, @@ -8302,6 +10037,10 @@ static const u32 rtw8822c_rf_b[] = { 0x052, 0x000902CA, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x052, 0x000902CA, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x052, 0x000902CA, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x052, 0x000902CA, 0xA0000000, 0x00000000, 0x052, 0x000942C0, 0xB0000000, 0x00000000, @@ -8310,7 +10049,17 @@ static const u32 rtw8822c_rf_b[] = { 0x057, 0x0004C80A, 0x0EF, 0x00000020, 0x033, 0x00000000, + 0x83000001, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000002, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0xA0000000, 0x00000000, 0x03E, 0x00000020, + 0xB0000000, 0x00000000, 0x81000001, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00024246, 0x91000002, 0x00000000, 0x40000000, 0x00000000, @@ -8320,14 +10069,28 @@ static const u32 rtw8822c_rf_b[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00024246, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x000241C6, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x000241C6, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000241C6, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000241C6, 0xA0000000, 0x00000000, 0x03F, 0x0000C246, 0xB0000000, 0x00000000, 0x033, 0x00000001, + 0x83000001, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 
0x93000002, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0xA0000000, 0x00000000, 0x03E, 0x00000020, + 0xB0000000, 0x00000000, 0x81000001, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00024246, 0x91000002, 0x00000000, 0x40000000, 0x00000000, @@ -8337,9 +10100,13 @@ static const u32 rtw8822c_rf_b[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00024246, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x000241C6, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x000241C6, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000241C6, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000241C6, 0xA0000000, 0x00000000, 0x03F, 0x0000C246, 0xB0000000, 0x00000000, @@ -8357,11 +10124,25 @@ static const u32 rtw8822c_rf_b[] = { 0x03F, 0x0002C246, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0002C246, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x0002C246, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x0002C246, 0xA0000000, 0x00000000, 0x03F, 0x0000C246, 0xB0000000, 0x00000000, 0x033, 0x00000003, + 0x83000001, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000002, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0xA0000000, 0x00000000, 0x03E, 0x00000020, + 0xB0000000, 0x00000000, 0x81000001, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00024246, 0x91000002, 0x00000000, 0x40000000, 0x00000000, @@ -8371,14 +10152,28 @@ static const u32 rtw8822c_rf_b[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00024246, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x000241C6, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x000241C6, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000241C6, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000241C6, 0xA0000000, 0x00000000, 0x03F, 0x0000C246, 0xB0000000, 0x00000000, 0x033, 0x00000004, + 0x83000001, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000002, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0xA0000000, 0x00000000, 0x03E, 0x00000020, + 0xB0000000, 0x00000000, 0x81000001, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00024246, 0x91000002, 0x00000000, 0x40000000, 0x00000000, @@ -8388,9 +10183,13 @@ static const u32 rtw8822c_rf_b[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00024246, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x000241C6, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x000241C6, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000241C6, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000241C6, 0xA0000000, 0x00000000, 0x03F, 0x0000C246, 0xB0000000, 0x00000000, @@ -8408,11 +10207,25 @@ static const u32 rtw8822c_rf_b[] = { 0x03F, 0x0002C246, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0002C246, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x0002C246, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 
0x03F, 0x0002C246, 0xA0000000, 0x00000000, 0x03F, 0x0000C246, 0xB0000000, 0x00000000, 0x033, 0x00000006, + 0x83000001, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000002, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0xA0000000, 0x00000000, 0x03E, 0x00000020, + 0xB0000000, 0x00000000, 0x81000001, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00024246, 0x91000002, 0x00000000, 0x40000000, 0x00000000, @@ -8422,14 +10235,28 @@ static const u32 rtw8822c_rf_b[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00024246, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x000241C6, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x000241C6, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000241C6, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000241C6, 0xA0000000, 0x00000000, 0x03F, 0x0000C246, 0xB0000000, 0x00000000, 0x033, 0x00000007, + 0x83000001, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000002, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0xA0000000, 0x00000000, 0x03E, 0x00000020, + 0xB0000000, 0x00000000, 0x81000001, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00024246, 0x91000002, 0x00000000, 0x40000000, 0x00000000, @@ -8439,9 +10266,13 @@ static const u32 rtw8822c_rf_b[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00024246, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x000241C6, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x000241C6, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000241C6, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000241C6, 0xA0000000, 0x00000000, 0x03F, 0x0000C246, 0xB0000000, 0x00000000, @@ -8459,11 +10290,25 @@ static const u32 rtw8822c_rf_b[] = { 0x03F, 0x0002C246, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0002C246, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x0002C246, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x0002C246, 0xA0000000, 0x00000000, 0x03F, 0x0000C246, 0xB0000000, 0x00000000, 0x033, 0x00000009, + 0x83000001, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000002, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0xA0000000, 0x00000000, 0x03E, 0x00000020, + 0xB0000000, 0x00000000, 0x81000001, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00024246, 0x91000002, 0x00000000, 0x40000000, 0x00000000, @@ -8473,14 +10318,28 @@ static const u32 rtw8822c_rf_b[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00024246, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x000241C6, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x000241C6, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000241C6, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000241C6, 0xA0000000, 0x00000000, 0x03F, 0x00008E46, 0xB0000000, 0x00000000, 0x033, 0x0000000A, + 0x83000001, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 
0x93000002, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0xA0000000, 0x00000000, 0x03E, 0x00000020, + 0xB0000000, 0x00000000, 0x81000001, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00024246, 0x91000002, 0x00000000, 0x40000000, 0x00000000, @@ -8490,9 +10349,13 @@ static const u32 rtw8822c_rf_b[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00024246, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x000241C6, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x000241C6, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000241C6, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000241C6, 0xA0000000, 0x00000000, 0x03F, 0x00008E46, 0xB0000000, 0x00000000, @@ -8510,11 +10373,25 @@ static const u32 rtw8822c_rf_b[] = { 0x03F, 0x0002C246, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0002C246, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x0002C246, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x0002C246, 0xA0000000, 0x00000000, 0x03F, 0x00008E46, 0xB0000000, 0x00000000, 0x033, 0x0000000C, + 0x83000001, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000002, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0xA0000000, 0x00000000, 0x03E, 0x00000020, + 0xB0000000, 0x00000000, 0x81000001, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00024246, 0x91000002, 0x00000000, 0x40000000, 0x00000000, @@ -8524,14 +10401,28 @@ static const u32 rtw8822c_rf_b[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00024246, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x000241C6, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x000241C6, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000241C6, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000241C6, 0xA0000000, 0x00000000, 0x03F, 0x00008E46, 0xB0000000, 0x00000000, 0x033, 0x0000000D, + 0x83000001, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000002, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0xA0000000, 0x00000000, 0x03E, 0x00000020, + 0xB0000000, 0x00000000, 0x81000001, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00024246, 0x91000002, 0x00000000, 0x40000000, 0x00000000, @@ -8541,9 +10432,13 @@ static const u32 rtw8822c_rf_b[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00024246, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x000241C6, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x000241C6, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000241C6, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000241C6, 0xA0000000, 0x00000000, 0x03F, 0x00008E46, 0xB0000000, 0x00000000, @@ -8561,11 +10456,25 @@ static const u32 rtw8822c_rf_b[] = { 0x03F, 0x0002C246, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0002C246, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x0002C246, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 
0x03F, 0x0002C246, 0xA0000000, 0x00000000, 0x03F, 0x00008E46, 0xB0000000, 0x00000000, 0x033, 0x0000000F, + 0x83000001, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000002, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0xA0000000, 0x00000000, 0x03E, 0x00000020, + 0xB0000000, 0x00000000, 0x81000001, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00024246, 0x91000002, 0x00000000, 0x40000000, 0x00000000, @@ -8575,14 +10484,28 @@ static const u32 rtw8822c_rf_b[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00024246, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x000241C6, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x000241C6, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000241C6, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000241C6, 0xA0000000, 0x00000000, 0x03F, 0x00008E46, 0xB0000000, 0x00000000, 0x033, 0x00000010, + 0x83000001, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000002, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0xA0000000, 0x00000000, 0x03E, 0x00000020, + 0xB0000000, 0x00000000, 0x81000001, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00024246, 0x91000002, 0x00000000, 0x40000000, 0x00000000, @@ -8592,9 +10515,13 @@ static const u32 rtw8822c_rf_b[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x00024246, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x000241C6, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x00024246, + 0x03F, 0x000241C6, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000241C6, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000241C6, 0xA0000000, 0x00000000, 0x03F, 0x00008E46, 0xB0000000, 0x00000000, @@ -8612,11 +10539,25 @@ static const u32 rtw8822c_rf_b[] = { 0x03F, 0x0002C246, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0002C246, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x0002C246, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x0002C246, 0xA0000000, 0x00000000, 0x03F, 0x00008E46, 0xB0000000, 0x00000000, 0x033, 0x00000012, + 0x83000001, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000002, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0xA0000000, 0x00000000, 0x03E, 0x00000020, + 0xB0000000, 0x00000000, 0x81000001, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0001CA46, 0x91000002, 0x00000000, 0x40000000, 0x00000000, @@ -8626,14 +10567,28 @@ static const u32 rtw8822c_rf_b[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0001CA46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x0001CA46, + 0x03F, 0x000209C6, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x0001CA46, + 0x03F, 0x000209C6, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000209C6, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000209C6, 0xA0000000, 0x00000000, 0x03F, 0x00008E46, 0xB0000000, 0x00000000, 0x033, 0x00000013, + 0x83000001, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 
0x93000002, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0xA0000000, 0x00000000, 0x03E, 0x00000020, + 0xB0000000, 0x00000000, 0x81000001, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0001CA46, 0x91000002, 0x00000000, 0x40000000, 0x00000000, @@ -8643,9 +10598,13 @@ static const u32 rtw8822c_rf_b[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0001CA46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x0001CA46, + 0x03F, 0x000209C6, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x0001CA46, + 0x03F, 0x000209C6, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000209C6, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000209C6, 0xA0000000, 0x00000000, 0x03F, 0x00008E46, 0xB0000000, 0x00000000, @@ -8663,11 +10622,25 @@ static const u32 rtw8822c_rf_b[] = { 0x03F, 0x0002CA46, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0002CA46, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x0002CA46, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x0002CA46, 0xA0000000, 0x00000000, 0x03F, 0x00008E46, 0xB0000000, 0x00000000, 0x033, 0x00000015, + 0x83000001, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000002, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0xA0000000, 0x00000000, 0x03E, 0x00000020, + 0xB0000000, 0x00000000, 0x81000001, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0001CA46, 0x91000002, 0x00000000, 0x40000000, 0x00000000, @@ -8677,14 +10650,28 @@ static const u32 rtw8822c_rf_b[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0001CA46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x0001CA46, + 0x03F, 0x000209C6, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x0001CA46, + 0x03F, 0x000209C6, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000209C6, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000209C6, 0xA0000000, 0x00000000, 0x03F, 0x00008E46, 0xB0000000, 0x00000000, 0x033, 0x00000016, + 0x83000001, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000002, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0xA0000000, 0x00000000, 0x03E, 0x00000020, + 0xB0000000, 0x00000000, 0x81000001, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0001CA46, 0x91000002, 0x00000000, 0x40000000, 0x00000000, @@ -8694,9 +10681,13 @@ static const u32 rtw8822c_rf_b[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0001CA46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x0001CA46, + 0x03F, 0x000209C6, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x0001CA46, + 0x03F, 0x000209C6, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000209C6, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000209C6, 0xA0000000, 0x00000000, 0x03F, 0x00008E46, 0xB0000000, 0x00000000, @@ -8714,11 +10705,25 @@ static const u32 rtw8822c_rf_b[] = { 0x03F, 0x0002CA46, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0002CA46, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x0002CA46, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 
0x03F, 0x0002CA46, 0xA0000000, 0x00000000, 0x03F, 0x00008E46, 0xB0000000, 0x00000000, 0x033, 0x00000018, + 0x83000001, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000002, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0xA0000000, 0x00000000, 0x03E, 0x00000020, + 0xB0000000, 0x00000000, 0x81000001, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0001CA46, 0x91000002, 0x00000000, 0x40000000, 0x00000000, @@ -8728,14 +10733,28 @@ static const u32 rtw8822c_rf_b[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0001CA46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x0001CA46, + 0x03F, 0x000209C6, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x0001CA46, + 0x03F, 0x000209C6, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000209C6, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000209C6, 0xA0000000, 0x00000000, 0x03F, 0x00008E46, 0xB0000000, 0x00000000, 0x033, 0x00000019, + 0x83000001, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000002, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0xA0000000, 0x00000000, 0x03E, 0x00000020, + 0xB0000000, 0x00000000, 0x81000001, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0001CA46, 0x91000002, 0x00000000, 0x40000000, 0x00000000, @@ -8745,9 +10764,13 @@ static const u32 rtw8822c_rf_b[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0001CA46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x0001CA46, + 0x03F, 0x000209C6, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x0001CA46, + 0x03F, 0x000209C6, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000209C6, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000209C6, 0xA0000000, 0x00000000, 0x03F, 0x00008E46, 0xB0000000, 0x00000000, @@ -8765,11 +10788,25 @@ static const u32 rtw8822c_rf_b[] = { 0x03F, 0x0002CA46, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0002CA46, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x0002CA46, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x0002CA46, 0xA0000000, 0x00000000, 0x03F, 0x00008E46, 0xB0000000, 0x00000000, 0x033, 0x0000001B, + 0x83000001, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000002, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0xA0000000, 0x00000000, 0x03E, 0x00000020, + 0xB0000000, 0x00000000, 0x81000001, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0001CA46, 0x91000002, 0x00000000, 0x40000000, 0x00000000, @@ -8779,14 +10816,28 @@ static const u32 rtw8822c_rf_b[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0001CA46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x0001CA46, + 0x03F, 0x000209C6, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x0001CA46, + 0x03F, 0x000209C6, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000209C6, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000209C6, 0xA0000000, 0x00000000, 0x03F, 0x00008E46, 0xB0000000, 0x00000000, 0x033, 0x0000001C, + 0x83000001, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 
0x93000002, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0xA0000000, 0x00000000, 0x03E, 0x00000020, + 0xB0000000, 0x00000000, 0x81000001, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0001CA46, 0x91000002, 0x00000000, 0x40000000, 0x00000000, @@ -8796,9 +10847,13 @@ static const u32 rtw8822c_rf_b[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0001CA46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x0001CA46, + 0x03F, 0x000209C6, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x0001CA46, + 0x03F, 0x000209C6, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000209C6, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000209C6, 0xA0000000, 0x00000000, 0x03F, 0x00008E46, 0xB0000000, 0x00000000, @@ -8816,11 +10871,25 @@ static const u32 rtw8822c_rf_b[] = { 0x03F, 0x0002CA46, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0002CA46, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x0002CA46, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x0002CA46, 0xA0000000, 0x00000000, 0x03F, 0x00008E46, 0xB0000000, 0x00000000, 0x033, 0x0000001E, + 0x83000001, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000002, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0xA0000000, 0x00000000, 0x03E, 0x00000020, + 0xB0000000, 0x00000000, 0x81000001, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0001CA46, 0x91000002, 0x00000000, 0x40000000, 0x00000000, @@ -8830,14 +10899,28 @@ static const u32 rtw8822c_rf_b[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0001CA46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x0001CA46, + 0x03F, 0x000209C6, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x0001CA46, + 0x03F, 0x000209C6, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000209C6, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000209C6, 0xA0000000, 0x00000000, 0x03F, 0x00008E46, 0xB0000000, 0x00000000, 0x033, 0x0000001F, + 0x83000001, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000002, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0xA0000000, 0x00000000, 0x03E, 0x00000020, + 0xB0000000, 0x00000000, 0x81000001, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0001CA46, 0x91000002, 0x00000000, 0x40000000, 0x00000000, @@ -8847,9 +10930,13 @@ static const u32 rtw8822c_rf_b[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0001CA46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x0001CA46, + 0x03F, 0x000209C6, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x0001CA46, + 0x03F, 0x000209C6, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000209C6, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000209C6, 0xA0000000, 0x00000000, 0x03F, 0x00008E46, 0xB0000000, 0x00000000, @@ -8867,11 +10954,25 @@ static const u32 rtw8822c_rf_b[] = { 0x03F, 0x0002CA46, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0002CA46, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x0002CA46, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 
0x03F, 0x0002CA46, 0xA0000000, 0x00000000, 0x03F, 0x00008E46, 0xB0000000, 0x00000000, 0x033, 0x00000021, + 0x83000001, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000002, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0xA0000000, 0x00000000, 0x03E, 0x00000020, + 0xB0000000, 0x00000000, 0x81000001, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0001CA46, 0x91000002, 0x00000000, 0x40000000, 0x00000000, @@ -8881,14 +10982,28 @@ static const u32 rtw8822c_rf_b[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0001CA46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x0001CA46, + 0x03F, 0x000209C6, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x0001CA46, + 0x03F, 0x000209C6, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000209C6, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000209C6, 0xA0000000, 0x00000000, 0x03F, 0x00008E46, 0xB0000000, 0x00000000, 0x033, 0x00000022, + 0x83000001, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000002, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0xA0000000, 0x00000000, 0x03E, 0x00000020, + 0xB0000000, 0x00000000, 0x81000001, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0001CA46, 0x91000002, 0x00000000, 0x40000000, 0x00000000, @@ -8898,14 +11013,28 @@ static const u32 rtw8822c_rf_b[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0001CA46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x0001CA46, + 0x03F, 0x000209C6, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x0001CA46, + 0x03F, 0x000209C6, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000209C6, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000209C6, 0xA0000000, 0x00000000, 0x03F, 0x00008E46, 0xB0000000, 0x00000000, 0x033, 0x00000023, + 0x83000001, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000020, + 0x93000002, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000020, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000020, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, 0x03E, 0x00000020, + 0xA0000000, 0x00000000, + 0x03E, 0x00000020, + 0xB0000000, 0x00000000, 0x81000001, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0001CA46, 0x91000002, 0x00000000, 0x40000000, 0x00000000, @@ -8918,11 +11047,25 @@ static const u32 rtw8822c_rf_b[] = { 0x03F, 0x0002CA46, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0002CA46, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x0002CA46, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x0002CA46, 0xA0000000, 0x00000000, 0x03F, 0x00008E46, 0xB0000000, 0x00000000, 0x033, 0x00000024, + 0x83000001, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000002, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0xA0000000, 0x00000000, 0x03E, 0x00000020, + 0xB0000000, 0x00000000, 0x81000001, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0001CA46, 0x91000002, 0x00000000, 0x40000000, 0x00000000, @@ -8932,14 +11075,28 @@ static const u32 rtw8822c_rf_b[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 
0x0001CA46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x0001CA46, + 0x03F, 0x000209C6, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x0001CA46, + 0x03F, 0x000209C6, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000209C6, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000209C6, 0xA0000000, 0x00000000, 0x03F, 0x00008E46, 0xB0000000, 0x00000000, 0x033, 0x00000025, + 0x83000001, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000002, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0xA0000000, 0x00000000, 0x03E, 0x00000020, + 0xB0000000, 0x00000000, 0x81000001, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0001CA46, 0x91000002, 0x00000000, 0x40000000, 0x00000000, @@ -8949,9 +11106,13 @@ static const u32 rtw8822c_rf_b[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0001CA46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x0001CA46, + 0x03F, 0x000209C6, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x0001CA46, + 0x03F, 0x000209C6, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000209C6, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000209C6, 0xA0000000, 0x00000000, 0x03F, 0x00008E46, 0xB0000000, 0x00000000, @@ -8969,11 +11130,25 @@ static const u32 rtw8822c_rf_b[] = { 0x03F, 0x0002CA46, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0002CA46, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x0002CA46, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x0002CA46, 0xA0000000, 0x00000000, 0x03F, 0x00008E46, 0xB0000000, 0x00000000, 0x033, 0x00000027, + 0x83000001, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000002, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0xA0000000, 0x00000000, 0x03E, 0x00000020, + 0xB0000000, 0x00000000, 0x81000001, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0001CA46, 0x91000002, 0x00000000, 0x40000000, 0x00000000, @@ -8983,14 +11158,28 @@ static const u32 rtw8822c_rf_b[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0001CA46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x0001CA46, + 0x03F, 0x000209C6, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x0001CA46, + 0x03F, 0x000209C6, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000209C6, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000209C6, 0xA0000000, 0x00000000, 0x03F, 0x00008E46, 0xB0000000, 0x00000000, 0x033, 0x00000028, + 0x83000001, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000002, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03E, 0x00000030, + 0xA0000000, 0x00000000, 0x03E, 0x00000020, + 0xB0000000, 0x00000000, 0x81000001, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0001CA46, 0x91000002, 0x00000000, 0x40000000, 0x00000000, @@ -9000,9 +11189,13 @@ static const u32 rtw8822c_rf_b[] = { 0x92000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0001CA46, 0x93000001, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x0001CA46, + 0x03F, 0x000209C6, 0x93000002, 0x00000000, 0x40000000, 0x00000000, - 0x03F, 0x0001CA46, + 
0x03F, 0x000209C6, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000209C6, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x000209C6, 0xA0000000, 0x00000000, 0x03F, 0x00008E46, 0xB0000000, 0x00000000, @@ -9020,6 +11213,10 @@ static const u32 rtw8822c_rf_b[] = { 0x03F, 0x0002CA46, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0002CA46, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x0002CA46, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x0002CA46, 0xA0000000, 0x00000000, 0x03F, 0x00008E46, 0xB0000000, 0x00000000, @@ -9037,6 +11234,10 @@ static const u32 rtw8822c_rf_b[] = { 0x03F, 0x0001CA46, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x03F, 0x0001CA46, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x0001CA46, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x0001CA46, 0xA0000000, 0x00000000, 0x03F, 0x00008E46, 0xB0000000, 0x00000000, @@ -9136,17 +11337,17 @@ static const u32 rtw8822c_rf_b[] = { 0x03F, 0x00000DF7, 0x93000001, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000060, - 0x03F, 0x00000468, + 0x03F, 0x00000467, 0x033, 0x00000061, - 0x03F, 0x00000868, + 0x03F, 0x00000867, 0x033, 0x00000062, - 0x03F, 0x00000909, + 0x03F, 0x00000908, 0x033, 0x00000063, - 0x03F, 0x00000D0A, + 0x03F, 0x00000D09, 0x033, 0x00000064, - 0x03F, 0x00000D4A, + 0x03F, 0x00000D49, 0x033, 0x00000065, - 0x03F, 0x00000D8B, + 0x03F, 0x00000D8A, 0x033, 0x00000066, 0x03F, 0x00000DEB, 0x033, 0x00000067, @@ -9159,17 +11360,63 @@ static const u32 rtw8822c_rf_b[] = { 0x03F, 0x00000DF7, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000060, - 0x03F, 0x00000468, + 0x03F, 0x00000467, 0x033, 0x00000061, - 0x03F, 0x00000868, + 0x03F, 0x00000867, 0x033, 0x00000062, - 0x03F, 0x00000909, + 0x03F, 0x00000908, 0x033, 0x00000063, - 0x03F, 0x00000D0A, + 0x03F, 0x00000D09, 0x033, 0x00000064, - 0x03F, 0x00000D4A, + 0x03F, 0x00000D49, 0x033, 0x00000065, - 0x03F, 0x00000D8B, + 0x03F, 0x00000D8A, + 0x033, 0x00000066, + 0x03F, 0x00000DEB, + 0x033, 0x00000067, + 0x03F, 0x00000DEE, + 0x033, 0x00000068, + 0x03F, 0x00000DF1, + 0x033, 0x00000069, + 0x03F, 0x00000DF4, + 0x033, 0x0000006A, + 0x03F, 0x00000DF7, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x00000060, + 0x03F, 0x00000467, + 0x033, 0x00000061, + 0x03F, 0x00000867, + 0x033, 0x00000062, + 0x03F, 0x00000908, + 0x033, 0x00000063, + 0x03F, 0x00000D09, + 0x033, 0x00000064, + 0x03F, 0x00000D49, + 0x033, 0x00000065, + 0x03F, 0x00000D8A, + 0x033, 0x00000066, + 0x03F, 0x00000DEB, + 0x033, 0x00000067, + 0x03F, 0x00000DEE, + 0x033, 0x00000068, + 0x03F, 0x00000DF1, + 0x033, 0x00000069, + 0x03F, 0x00000DF4, + 0x033, 0x0000006A, + 0x03F, 0x00000DF7, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x00000060, + 0x03F, 0x00000467, + 0x033, 0x00000061, + 0x03F, 0x00000867, + 0x033, 0x00000062, + 0x03F, 0x00000908, + 0x033, 0x00000063, + 0x03F, 0x00000D09, + 0x033, 0x00000064, + 0x03F, 0x00000D49, + 0x033, 0x00000065, + 0x03F, 0x00000D8A, 0x033, 0x00000066, 0x03F, 0x00000DEB, 0x033, 0x00000067, @@ -9298,17 +11545,17 @@ static const u32 rtw8822c_rf_b[] = { 0x03F, 0x00000DF7, 0x93000001, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000020, - 0x03F, 0x00000468, + 0x03F, 0x00000467, 0x033, 0x00000021, - 0x03F, 0x00000868, + 0x03F, 0x00000867, 0x033, 0x00000022, - 0x03F, 0x00000909, + 0x03F, 0x00000908, 0x033, 0x00000023, - 0x03F, 0x00000D0A, + 0x03F, 0x00000D09, 0x033, 0x00000024, - 0x03F, 0x00000D4A, + 0x03F, 0x00000D49, 0x033, 0x00000025, - 0x03F, 0x00000D8B, + 0x03F, 
0x00000D8A, 0x033, 0x00000026, 0x03F, 0x00000DEB, 0x033, 0x00000027, @@ -9321,17 +11568,63 @@ static const u32 rtw8822c_rf_b[] = { 0x03F, 0x00000DF7, 0x93000002, 0x00000000, 0x40000000, 0x00000000, 0x033, 0x00000020, - 0x03F, 0x00000468, + 0x03F, 0x00000467, 0x033, 0x00000021, - 0x03F, 0x00000868, + 0x03F, 0x00000867, 0x033, 0x00000022, - 0x03F, 0x00000909, + 0x03F, 0x00000908, 0x033, 0x00000023, - 0x03F, 0x00000D0A, + 0x03F, 0x00000D09, 0x033, 0x00000024, - 0x03F, 0x00000D4A, + 0x03F, 0x00000D49, 0x033, 0x00000025, - 0x03F, 0x00000D8B, + 0x03F, 0x00000D8A, + 0x033, 0x00000026, + 0x03F, 0x00000DEB, + 0x033, 0x00000027, + 0x03F, 0x00000DEE, + 0x033, 0x00000028, + 0x03F, 0x00000DF1, + 0x033, 0x00000029, + 0x03F, 0x00000DF4, + 0x033, 0x0000002A, + 0x03F, 0x00000DF7, + 0x93000003, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x00000020, + 0x03F, 0x00000467, + 0x033, 0x00000021, + 0x03F, 0x00000867, + 0x033, 0x00000022, + 0x03F, 0x00000908, + 0x033, 0x00000023, + 0x03F, 0x00000D09, + 0x033, 0x00000024, + 0x03F, 0x00000D49, + 0x033, 0x00000025, + 0x03F, 0x00000D8A, + 0x033, 0x00000026, + 0x03F, 0x00000DEB, + 0x033, 0x00000027, + 0x03F, 0x00000DEE, + 0x033, 0x00000028, + 0x03F, 0x00000DF1, + 0x033, 0x00000029, + 0x03F, 0x00000DF4, + 0x033, 0x0000002A, + 0x03F, 0x00000DF7, + 0x93000004, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x00000020, + 0x03F, 0x00000467, + 0x033, 0x00000021, + 0x03F, 0x00000867, + 0x033, 0x00000022, + 0x03F, 0x00000908, + 0x033, 0x00000023, + 0x03F, 0x00000D09, + 0x033, 0x00000024, + 0x03F, 0x00000D49, + 0x033, 0x00000025, + 0x03F, 0x00000D8A, 0x033, 0x00000026, 0x03F, 0x00000DEB, 0x033, 0x00000027, @@ -9396,896 +11689,1894 @@ static const u32 rtw8822c_rf_b[] = { 0x0FE, 0x00000000, 0x0FE, 0x00000000, 0x092, 0x00084800, - 0x08F, 0x0000182C, + 0x08F, 0x00001B4C, 0x088, 0x0004326B, 0x019, 0x00000005, + 0x0EF, 0x00080000, + 0x033, 0x00000004, + 0x03F, 0x000FD83F, + 0x0EF, 0x00000000, + 0x0EF, 0x00080000, + 0x033, 0x00000006, + 0x03F, 0x000DD83F, + 0x0EF, 0x00000000, + 0x0EF, 0x00080000, + 0x033, 0x00000007, + 0x03F, 0x000DF7BF, + 0x0EF, 0x00000000, + 0x0EF, 0x00040000, + 0x033, 0x00000006, + 0x03F, 0x00000002, + 0x033, 0x00000007, + 0x03F, 0x00000002, + 0x0EF, 0x00000000, }; RTW_DECL_TABLE_RF_RADIO(rtw8822c_rf_b, B); -static const u8 rtw8822c_txpwr_lmt_type0[] = { - 0, 0, 0, 0, 1, 72, 2, 0, 0, 0, 1, 60, - 0, 0, 0, 0, 2, 72, 2, 0, 0, 0, 2, 60, - 0, 0, 0, 0, 3, 76, 2, 0, 0, 0, 3, 60, - 0, 0, 0, 0, 4, 76, 2, 0, 0, 0, 4, 60, - 0, 0, 0, 0, 5, 76, 2, 0, 0, 0, 5, 60, - 0, 0, 0, 0, 6, 76, 2, 0, 0, 0, 6, 60, - 0, 0, 0, 0, 7, 76, 2, 0, 0, 0, 7, 60, - 0, 0, 0, 0, 8, 76, 2, 0, 0, 0, 8, 60, - 0, 0, 0, 0, 9, 76, 2, 0, 0, 0, 9, 60, - 0, 0, 0, 0, 10, 72, 2, 0, 0, 0, 10, 60, - 0, 0, 0, 0, 11, 72, 2, 0, 0, 0, 11, 60, - 0, 0, 0, 0, 12, 52, 2, 0, 0, 0, 12, 60, - 0, 0, 0, 0, 13, 48, 2, 0, 0, 0, 13, 60, - 0, 0, 0, 0, 14, 127, 2, 0, 0, 0, 14, 127, - 0, 0, 0, 1, 1, 52, 2, 0, 0, 1, 1, 60, - 0, 0, 0, 1, 2, 60, 2, 0, 0, 1, 2, 60, - 0, 0, 0, 1, 3, 64, 2, 0, 0, 1, 3, 60, - 0, 0, 0, 1, 4, 68, 2, 0, 0, 1, 4, 60, - 0, 0, 0, 1, 5, 76, 2, 0, 0, 1, 5, 60, - 0, 0, 0, 1, 6, 76, 2, 0, 0, 1, 6, 60, - 0, 0, 0, 1, 7, 76, 2, 0, 0, 1, 7, 60, - 0, 0, 0, 1, 8, 68, 2, 0, 0, 1, 8, 60, - 0, 0, 0, 1, 9, 64, 2, 0, 0, 1, 9, 60, - 0, 0, 0, 1, 10, 60, 2, 0, 0, 1, 10, 60, - 0, 0, 0, 1, 11, 52, 2, 0, 0, 1, 11, 60, - 0, 0, 0, 1, 12, 40, 2, 0, 0, 1, 12, 60, - 0, 0, 0, 1, 13, 28, 2, 0, 0, 1, 13, 60, - 0, 0, 0, 1, 14, 127, 2, 0, 0, 1, 14, 127, - 0, 0, 0, 2, 1, 52, 2, 0, 0, 2, 1, 60, - 0, 0, 0, 2, 2, 60, 2, 0, 0, 2, 2, 60, - 0, 0, 0, 
2, 3, 64, 2, 0, 0, 2, 3, 60, - 0, 0, 0, 2, 4, 68, 2, 0, 0, 2, 4, 60, - 0, 0, 0, 2, 5, 76, 2, 0, 0, 2, 5, 60, - 0, 0, 0, 2, 6, 76, 2, 0, 0, 2, 6, 60, - 0, 0, 0, 2, 7, 76, 2, 0, 0, 2, 7, 60, - 0, 0, 0, 2, 8, 68, 2, 0, 0, 2, 8, 60, - 0, 0, 0, 2, 9, 64, 2, 0, 0, 2, 9, 60, - 0, 0, 0, 2, 10, 60, 2, 0, 0, 2, 10, 60, - 0, 0, 0, 2, 11, 52, 2, 0, 0, 2, 11, 60, - 0, 0, 0, 2, 12, 40, 2, 0, 0, 2, 12, 60, - 0, 0, 0, 2, 13, 28, 2, 0, 0, 2, 13, 60, - 0, 0, 0, 2, 14, 127, 2, 0, 0, 2, 14, 127, - 0, 0, 0, 3, 1, 52, 2, 0, 0, 3, 1, 36, - 0, 0, 0, 3, 2, 60, 2, 0, 0, 3, 2, 36, - 0, 0, 0, 3, 3, 64, 2, 0, 0, 3, 3, 36, - 0, 0, 0, 3, 4, 68, 2, 0, 0, 3, 4, 36, - 0, 0, 0, 3, 5, 76, 2, 0, 0, 3, 5, 36, - 0, 0, 0, 3, 6, 76, 2, 0, 0, 3, 6, 36, - 0, 0, 0, 3, 7, 76, 2, 0, 0, 3, 7, 36, - 0, 0, 0, 3, 8, 68, 2, 0, 0, 3, 8, 36, - 0, 0, 0, 3, 9, 64, 2, 0, 0, 3, 9, 36, - 0, 0, 0, 3, 10, 60, 2, 0, 0, 3, 10, 36, - 0, 0, 0, 3, 11, 52, 2, 0, 0, 3, 11, 36, - 0, 0, 0, 3, 12, 40, 2, 0, 0, 3, 12, 36, - 0, 0, 0, 3, 13, 28, 2, 0, 0, 3, 13, 36, - 0, 0, 0, 3, 14, 127, 2, 0, 0, 3, 14, 127, - 0, 0, 1, 2, 1, 127, 2, 0, 1, 2, 1, 127, - 0, 0, 1, 2, 2, 127, 2, 0, 1, 2, 2, 127, - 0, 0, 1, 2, 3, 52, 2, 0, 1, 2, 3, 60, - 0, 0, 1, 2, 4, 52, 2, 0, 1, 2, 4, 60, - 0, 0, 1, 2, 5, 60, 2, 0, 1, 2, 5, 60, - 0, 0, 1, 2, 6, 64, 2, 0, 1, 2, 6, 60, - 0, 0, 1, 2, 7, 60, 2, 0, 1, 2, 7, 60, - 0, 0, 1, 2, 8, 52, 2, 0, 1, 2, 8, 60, - 0, 0, 1, 2, 9, 52, 2, 0, 1, 2, 9, 60, - 0, 0, 1, 2, 10, 40, 2, 0, 1, 2, 10, 60, - 0, 0, 1, 2, 11, 28, 2, 0, 1, 2, 11, 60, - 0, 0, 1, 2, 12, 127, 2, 0, 1, 2, 12, 127, - 0, 0, 1, 2, 13, 127, 2, 0, 1, 2, 13, 127, - 0, 0, 1, 2, 14, 127, 2, 0, 1, 2, 14, 127, - 0, 0, 1, 3, 1, 127, 2, 0, 1, 3, 1, 127, - 0, 0, 1, 3, 2, 127, 2, 0, 1, 3, 2, 127, - 0, 0, 1, 3, 3, 48, 2, 0, 1, 3, 3, 36, - 0, 0, 1, 3, 4, 48, 2, 0, 1, 3, 4, 36, - 0, 0, 1, 3, 5, 60, 2, 0, 1, 3, 5, 36, - 0, 0, 1, 3, 6, 64, 2, 0, 1, 3, 6, 36, - 0, 0, 1, 3, 7, 60, 2, 0, 1, 3, 7, 36, - 0, 0, 1, 3, 8, 52, 2, 0, 1, 3, 8, 36, - 0, 0, 1, 3, 9, 52, 2, 0, 1, 3, 9, 36, - 0, 0, 1, 3, 10, 40, 2, 0, 1, 3, 10, 36, - 0, 0, 1, 3, 11, 26, 2, 0, 1, 3, 11, 36, - 0, 0, 1, 3, 12, 127, 2, 0, 1, 3, 12, 127, - 0, 0, 1, 3, 13, 127, 2, 0, 1, 3, 13, 127, - 0, 0, 1, 3, 14, 127, 2, 0, 1, 3, 14, 127, - 0, 1, 0, 1, 36, 74, 2, 1, 0, 1, 36, 62, - 0, 1, 0, 1, 40, 76, 2, 1, 0, 1, 40, 62, - 0, 1, 0, 1, 44, 76, 2, 1, 0, 1, 44, 62, - 0, 1, 0, 1, 48, 76, 2, 1, 0, 1, 48, 62, - 0, 1, 0, 1, 52, 76, 2, 1, 0, 1, 52, 62, - 0, 1, 0, 1, 56, 76, 2, 1, 0, 1, 56, 62, - 0, 1, 0, 1, 60, 76, 2, 1, 0, 1, 60, 62, - 0, 1, 0, 1, 64, 74, 2, 1, 0, 1, 64, 62, - 0, 1, 0, 1, 100, 72, 2, 1, 0, 1, 100, 62, - 0, 1, 0, 1, 104, 76, 2, 1, 0, 1, 104, 62, - 0, 1, 0, 1, 108, 76, 2, 1, 0, 1, 108, 62, - 0, 1, 0, 1, 112, 76, 2, 1, 0, 1, 112, 62, - 0, 1, 0, 1, 116, 76, 2, 1, 0, 1, 116, 62, - 0, 1, 0, 1, 120, 76, 2, 1, 0, 1, 120, 62, - 0, 1, 0, 1, 124, 76, 2, 1, 0, 1, 124, 62, - 0, 1, 0, 1, 128, 76, 2, 1, 0, 1, 128, 62, - 0, 1, 0, 1, 132, 76, 2, 1, 0, 1, 132, 62, - 0, 1, 0, 1, 136, 76, 2, 1, 0, 1, 136, 62, - 0, 1, 0, 1, 140, 72, 2, 1, 0, 1, 140, 62, - 0, 1, 0, 1, 144, 76, 2, 1, 0, 1, 144, 127, - 0, 1, 0, 1, 149, 76, 2, 1, 0, 1, 149, -128, - 0, 1, 0, 1, 153, 76, 2, 1, 0, 1, 153, -128, - 0, 1, 0, 1, 157, 76, 2, 1, 0, 1, 157, -128, - 0, 1, 0, 1, 161, 76, 2, 1, 0, 1, 161, -128, - 0, 1, 0, 1, 165, 76, 2, 1, 0, 1, 165, -128, - 0, 1, 0, 2, 36, 72, 2, 1, 0, 2, 36, 62, - 0, 1, 0, 2, 40, 76, 2, 1, 0, 2, 40, 62, - 0, 1, 0, 2, 44, 76, 2, 1, 0, 2, 44, 62, - 0, 1, 0, 2, 48, 76, 2, 1, 0, 2, 48, 62, - 0, 1, 0, 2, 52, 76, 2, 1, 0, 2, 52, 62, - 0, 1, 0, 2, 56, 76, 2, 1, 0, 2, 
56, 62, - 0, 1, 0, 2, 60, 76, 2, 1, 0, 2, 60, 62, - 0, 1, 0, 2, 64, 74, 2, 1, 0, 2, 64, 62, - 0, 1, 0, 2, 100, 70, 2, 1, 0, 2, 100, 62, - 0, 1, 0, 2, 104, 76, 2, 1, 0, 2, 104, 62, - 0, 1, 0, 2, 108, 76, 2, 1, 0, 2, 108, 62, - 0, 1, 0, 2, 112, 76, 2, 1, 0, 2, 112, 62, - 0, 1, 0, 2, 116, 76, 2, 1, 0, 2, 116, 62, - 0, 1, 0, 2, 120, 76, 2, 1, 0, 2, 120, 62, - 0, 1, 0, 2, 124, 76, 2, 1, 0, 2, 124, 62, - 0, 1, 0, 2, 128, 76, 2, 1, 0, 2, 128, 62, - 0, 1, 0, 2, 132, 76, 2, 1, 0, 2, 132, 62, - 0, 1, 0, 2, 136, 76, 2, 1, 0, 2, 136, 62, - 0, 1, 0, 2, 140, 70, 2, 1, 0, 2, 140, 62, - 0, 1, 0, 2, 144, 76, 2, 1, 0, 2, 144, 127, - 0, 1, 0, 2, 149, 76, 2, 1, 0, 2, 149, -128, - 0, 1, 0, 2, 153, 76, 2, 1, 0, 2, 153, -128, - 0, 1, 0, 2, 157, 76, 2, 1, 0, 2, 157, -128, - 0, 1, 0, 2, 161, 76, 2, 1, 0, 2, 161, -128, - 0, 1, 0, 2, 165, 76, 2, 1, 0, 2, 165, -128, - 0, 1, 0, 3, 36, 68, 2, 1, 0, 3, 36, 38, - 0, 1, 0, 3, 40, 68, 2, 1, 0, 3, 40, 38, - 0, 1, 0, 3, 44, 68, 2, 1, 0, 3, 44, 38, - 0, 1, 0, 3, 48, 68, 2, 1, 0, 3, 48, 38, - 0, 1, 0, 3, 52, 68, 2, 1, 0, 3, 52, 38, - 0, 1, 0, 3, 56, 68, 2, 1, 0, 3, 56, 38, - 0, 1, 0, 3, 60, 66, 2, 1, 0, 3, 60, 38, - 0, 1, 0, 3, 64, 68, 2, 1, 0, 3, 64, 38, - 0, 1, 0, 3, 100, 60, 2, 1, 0, 3, 100, 38, - 0, 1, 0, 3, 104, 68, 2, 1, 0, 3, 104, 38, - 0, 1, 0, 3, 108, 68, 2, 1, 0, 3, 108, 38, - 0, 1, 0, 3, 112, 68, 2, 1, 0, 3, 112, 38, - 0, 1, 0, 3, 116, 68, 2, 1, 0, 3, 116, 38, - 0, 1, 0, 3, 120, 68, 2, 1, 0, 3, 120, 38, - 0, 1, 0, 3, 124, 68, 2, 1, 0, 3, 124, 38, - 0, 1, 0, 3, 128, 68, 2, 1, 0, 3, 128, 38, - 0, 1, 0, 3, 132, 68, 2, 1, 0, 3, 132, 38, - 0, 1, 0, 3, 136, 68, 2, 1, 0, 3, 136, 38, - 0, 1, 0, 3, 140, 60, 2, 1, 0, 3, 140, 38, - 0, 1, 0, 3, 144, 68, 2, 1, 0, 3, 144, 127, - 0, 1, 0, 3, 149, 76, 2, 1, 0, 3, 149, -128, - 0, 1, 0, 3, 153, 76, 2, 1, 0, 3, 153, -128, - 0, 1, 0, 3, 157, 76, 2, 1, 0, 3, 157, -128, - 0, 1, 0, 3, 161, 76, 2, 1, 0, 3, 161, -128, - 0, 1, 0, 3, 165, 76, 2, 1, 0, 3, 165, -128, - 0, 1, 1, 2, 38, 66, 2, 1, 1, 2, 38, 64, - 0, 1, 1, 2, 46, 72, 2, 1, 1, 2, 46, 64, - 0, 1, 1, 2, 54, 72, 2, 1, 1, 2, 54, 64, - 0, 1, 1, 2, 62, 64, 2, 1, 1, 2, 62, 64, - 0, 1, 1, 2, 102, 58, 2, 1, 1, 2, 102, 64, - 0, 1, 1, 2, 110, 72, 2, 1, 1, 2, 110, 64, - 0, 1, 1, 2, 118, 72, 2, 1, 1, 2, 118, 64, - 0, 1, 1, 2, 126, 72, 2, 1, 1, 2, 126, 64, - 0, 1, 1, 2, 134, 72, 2, 1, 1, 2, 134, 64, - 0, 1, 1, 2, 142, 72, 2, 1, 1, 2, 142, 127, - 0, 1, 1, 2, 151, 72, 2, 1, 1, 2, 151, -128, - 0, 1, 1, 2, 159, 72, 2, 1, 1, 2, 159, -128, - 0, 1, 1, 3, 38, 60, 2, 1, 1, 3, 38, 40, - 0, 1, 1, 3, 46, 68, 2, 1, 1, 3, 46, 40, - 0, 1, 1, 3, 54, 68, 2, 1, 1, 3, 54, 40, - 0, 1, 1, 3, 62, 58, 2, 1, 1, 3, 62, 40, - 0, 1, 1, 3, 102, 54, 2, 1, 1, 3, 102, 40, - 0, 1, 1, 3, 110, 68, 2, 1, 1, 3, 110, 40, - 0, 1, 1, 3, 118, 68, 2, 1, 1, 3, 118, 40, - 0, 1, 1, 3, 126, 68, 2, 1, 1, 3, 126, 40, - 0, 1, 1, 3, 134, 68, 2, 1, 1, 3, 134, 40, - 0, 1, 1, 3, 142, 68, 2, 1, 1, 3, 142, 127, - 0, 1, 1, 3, 151, 72, 2, 1, 1, 3, 151, -128, - 0, 1, 1, 3, 159, 72, 2, 1, 1, 3, 159, -128, - 0, 1, 2, 4, 42, 64, 2, 1, 2, 4, 42, 64, - 0, 1, 2, 4, 58, 62, 2, 1, 2, 4, 58, 64, - 0, 1, 2, 4, 106, 58, 2, 1, 2, 4, 106, 64, - 0, 1, 2, 4, 122, 72, 2, 1, 2, 4, 122, 64, - 0, 1, 2, 4, 138, 72, 2, 1, 2, 4, 138, 127, - 0, 1, 2, 4, 155, 72, 2, 1, 2, 4, 155, -128, - 0, 1, 2, 5, 42, 54, 2, 1, 2, 5, 42, 40, - 0, 1, 2, 5, 58, 52, 2, 1, 2, 5, 58, 40, - 0, 1, 2, 5, 106, 50, 2, 1, 2, 5, 106, 40, - 0, 1, 2, 5, 122, 66, 2, 1, 2, 5, 122, 40, - 0, 1, 2, 5, 138, 66, 2, 1, 2, 5, 138, 127, - 0, 1, 2, 5, 155, 62, 2, 1, 2, 5, 155, -128, - 1, 0, 0, 0, 1, 68, 3, 0, 0, 0, 
1, 72, - 4, 0, 0, 0, 1, 76, 5, 0, 0, 0, 1, 60, - 6, 0, 0, 0, 1, 72, 7, 0, 0, 0, 1, 60, - 8, 0, 0, 0, 1, 72, 1, 0, 0, 0, 2, 68, - 3, 0, 0, 0, 2, 72, 4, 0, 0, 0, 2, 76, - 5, 0, 0, 0, 2, 60, 6, 0, 0, 0, 2, 72, - 7, 0, 0, 0, 2, 60, 8, 0, 0, 0, 2, 72, - 1, 0, 0, 0, 3, 68, 3, 0, 0, 0, 3, 76, - 4, 0, 0, 0, 3, 76, 5, 0, 0, 0, 3, 60, - 6, 0, 0, 0, 3, 76, 7, 0, 0, 0, 3, 60, - 8, 0, 0, 0, 3, 76, 1, 0, 0, 0, 4, 68, - 3, 0, 0, 0, 4, 76, 4, 0, 0, 0, 4, 76, - 5, 0, 0, 0, 4, 60, 6, 0, 0, 0, 4, 76, - 7, 0, 0, 0, 4, 60, 8, 0, 0, 0, 4, 76, - 1, 0, 0, 0, 5, 68, 3, 0, 0, 0, 5, 76, - 4, 0, 0, 0, 5, 76, 5, 0, 0, 0, 5, 60, - 6, 0, 0, 0, 5, 76, 7, 0, 0, 0, 5, 60, - 8, 0, 0, 0, 5, 76, 1, 0, 0, 0, 6, 68, - 3, 0, 0, 0, 6, 76, 4, 0, 0, 0, 6, 76, - 5, 0, 0, 0, 6, 60, 6, 0, 0, 0, 6, 76, - 7, 0, 0, 0, 6, 60, 8, 0, 0, 0, 6, 76, - 1, 0, 0, 0, 7, 68, 3, 0, 0, 0, 7, 76, - 4, 0, 0, 0, 7, 76, 5, 0, 0, 0, 7, 60, - 6, 0, 0, 0, 7, 76, 7, 0, 0, 0, 7, 60, - 8, 0, 0, 0, 7, 76, 1, 0, 0, 0, 8, 68, - 3, 0, 0, 0, 8, 76, 4, 0, 0, 0, 8, 76, - 5, 0, 0, 0, 8, 60, 6, 0, 0, 0, 8, 76, - 7, 0, 0, 0, 8, 60, 8, 0, 0, 0, 8, 76, - 1, 0, 0, 0, 9, 68, 3, 0, 0, 0, 9, 76, - 4, 0, 0, 0, 9, 76, 5, 0, 0, 0, 9, 60, - 6, 0, 0, 0, 9, 76, 7, 0, 0, 0, 9, 60, - 8, 0, 0, 0, 9, 76, 1, 0, 0, 0, 10, 68, - 3, 0, 0, 0, 10, 72, 4, 0, 0, 0, 10, 76, - 5, 0, 0, 0, 10, 60, 6, 0, 0, 0, 10, 72, - 7, 0, 0, 0, 10, 60, 8, 0, 0, 0, 10, 72, - 1, 0, 0, 0, 11, 68, 3, 0, 0, 0, 11, 72, - 4, 0, 0, 0, 11, 76, 5, 0, 0, 0, 11, 60, - 6, 0, 0, 0, 11, 72, 7, 0, 0, 0, 11, 60, - 8, 0, 0, 0, 11, 72, 1, 0, 0, 0, 12, 68, - 3, 0, 0, 0, 12, 52, 4, 0, 0, 0, 12, 76, - 5, 0, 0, 0, 12, 60, 6, 0, 0, 0, 12, 52, - 7, 0, 0, 0, 12, 60, 8, 0, 0, 0, 12, 52, - 1, 0, 0, 0, 13, 68, 3, 0, 0, 0, 13, 48, - 4, 0, 0, 0, 13, 76, 5, 0, 0, 0, 13, 60, - 6, 0, 0, 0, 13, 48, 7, 0, 0, 0, 13, 60, - 8, 0, 0, 0, 13, 48, 1, 0, 0, 0, 14, 68, - 3, 0, 0, 0, 14, 127, 4, 0, 0, 0, 14, 127, - 5, 0, 0, 0, 14, 127, 6, 0, 0, 0, 14, 127, - 7, 0, 0, 0, 14, 127, 8, 0, 0, 0, 14, 127, - 1, 0, 0, 1, 1, 76, 3, 0, 0, 1, 1, 52, - 4, 0, 0, 1, 1, 76, 5, 0, 0, 1, 1, 60, - 6, 0, 0, 1, 1, 52, 7, 0, 0, 1, 1, 60, - 8, 0, 0, 1, 1, 52, 1, 0, 0, 1, 2, 76, - 3, 0, 0, 1, 2, 60, 4, 0, 0, 1, 2, 76, - 5, 0, 0, 1, 2, 60, 6, 0, 0, 1, 2, 60, - 7, 0, 0, 1, 2, 60, 8, 0, 0, 1, 2, 60, - 1, 0, 0, 1, 3, 76, 3, 0, 0, 1, 3, 64, - 4, 0, 0, 1, 3, 76, 5, 0, 0, 1, 3, 60, - 6, 0, 0, 1, 3, 64, 7, 0, 0, 1, 3, 60, - 8, 0, 0, 1, 3, 64, 1, 0, 0, 1, 4, 76, - 3, 0, 0, 1, 4, 68, 4, 0, 0, 1, 4, 76, - 5, 0, 0, 1, 4, 60, 6, 0, 0, 1, 4, 68, - 7, 0, 0, 1, 4, 60, 8, 0, 0, 1, 4, 68, - 1, 0, 0, 1, 5, 76, 3, 0, 0, 1, 5, 76, - 4, 0, 0, 1, 5, 76, 5, 0, 0, 1, 5, 60, - 6, 0, 0, 1, 5, 76, 7, 0, 0, 1, 5, 60, - 8, 0, 0, 1, 5, 76, 1, 0, 0, 1, 6, 76, - 3, 0, 0, 1, 6, 76, 4, 0, 0, 1, 6, 76, - 5, 0, 0, 1, 6, 60, 6, 0, 0, 1, 6, 76, - 7, 0, 0, 1, 6, 60, 8, 0, 0, 1, 6, 76, - 1, 0, 0, 1, 7, 76, 3, 0, 0, 1, 7, 76, - 4, 0, 0, 1, 7, 76, 5, 0, 0, 1, 7, 60, - 6, 0, 0, 1, 7, 76, 7, 0, 0, 1, 7, 60, - 8, 0, 0, 1, 7, 76, 1, 0, 0, 1, 8, 76, - 3, 0, 0, 1, 8, 68, 4, 0, 0, 1, 8, 76, - 5, 0, 0, 1, 8, 60, 6, 0, 0, 1, 8, 68, - 7, 0, 0, 1, 8, 60, 8, 0, 0, 1, 8, 68, - 1, 0, 0, 1, 9, 76, 3, 0, 0, 1, 9, 64, - 4, 0, 0, 1, 9, 76, 5, 0, 0, 1, 9, 60, - 6, 0, 0, 1, 9, 64, 7, 0, 0, 1, 9, 60, - 8, 0, 0, 1, 9, 64, 1, 0, 0, 1, 10, 76, - 3, 0, 0, 1, 10, 60, 4, 0, 0, 1, 10, 76, - 5, 0, 0, 1, 10, 60, 6, 0, 0, 1, 10, 60, - 7, 0, 0, 1, 10, 60, 8, 0, 0, 1, 10, 60, - 1, 0, 0, 1, 11, 76, 3, 0, 0, 1, 11, 52, - 4, 0, 0, 1, 11, 76, 5, 0, 0, 1, 11, 60, - 6, 0, 0, 1, 11, 52, 7, 0, 0, 1, 11, 60, - 8, 0, 0, 1, 11, 52, 1, 0, 0, 1, 12, 76, - 3, 0, 0, 
1, 12, 40, 4, 0, 0, 1, 12, 76, - 5, 0, 0, 1, 12, 60, 6, 0, 0, 1, 12, 40, - 7, 0, 0, 1, 12, 60, 8, 0, 0, 1, 12, 40, - 1, 0, 0, 1, 13, 76, 3, 0, 0, 1, 13, 28, - 4, 0, 0, 1, 13, 70, 5, 0, 0, 1, 13, 60, - 6, 0, 0, 1, 13, 28, 7, 0, 0, 1, 13, 60, - 8, 0, 0, 1, 13, 28, 1, 0, 0, 1, 14, 127, - 3, 0, 0, 1, 14, 127, 4, 0, 0, 1, 14, 127, - 5, 0, 0, 1, 14, 127, 6, 0, 0, 1, 14, 127, - 7, 0, 0, 1, 14, 127, 8, 0, 0, 1, 14, 127, - 1, 0, 0, 2, 1, 76, 3, 0, 0, 2, 1, 52, - 4, 0, 0, 2, 1, 76, 5, 0, 0, 2, 1, 60, - 6, 0, 0, 2, 1, 52, 7, 0, 0, 2, 1, 60, - 8, 0, 0, 2, 1, 52, 1, 0, 0, 2, 2, 76, - 3, 0, 0, 2, 2, 60, 4, 0, 0, 2, 2, 76, - 5, 0, 0, 2, 2, 60, 6, 0, 0, 2, 2, 60, - 7, 0, 0, 2, 2, 60, 8, 0, 0, 2, 2, 60, - 1, 0, 0, 2, 3, 76, 3, 0, 0, 2, 3, 64, - 4, 0, 0, 2, 3, 76, 5, 0, 0, 2, 3, 60, - 6, 0, 0, 2, 3, 64, 7, 0, 0, 2, 3, 60, - 8, 0, 0, 2, 3, 64, 1, 0, 0, 2, 4, 76, - 3, 0, 0, 2, 4, 68, 4, 0, 0, 2, 4, 76, - 5, 0, 0, 2, 4, 60, 6, 0, 0, 2, 4, 68, - 7, 0, 0, 2, 4, 60, 8, 0, 0, 2, 4, 68, - 1, 0, 0, 2, 5, 76, 3, 0, 0, 2, 5, 76, - 4, 0, 0, 2, 5, 76, 5, 0, 0, 2, 5, 60, - 6, 0, 0, 2, 5, 76, 7, 0, 0, 2, 5, 60, - 8, 0, 0, 2, 5, 76, 1, 0, 0, 2, 6, 76, - 3, 0, 0, 2, 6, 76, 4, 0, 0, 2, 6, 76, - 5, 0, 0, 2, 6, 60, 6, 0, 0, 2, 6, 76, - 7, 0, 0, 2, 6, 60, 8, 0, 0, 2, 6, 76, - 1, 0, 0, 2, 7, 76, 3, 0, 0, 2, 7, 76, - 4, 0, 0, 2, 7, 76, 5, 0, 0, 2, 7, 60, - 6, 0, 0, 2, 7, 76, 7, 0, 0, 2, 7, 60, - 8, 0, 0, 2, 7, 76, 1, 0, 0, 2, 8, 76, - 3, 0, 0, 2, 8, 68, 4, 0, 0, 2, 8, 76, - 5, 0, 0, 2, 8, 60, 6, 0, 0, 2, 8, 68, - 7, 0, 0, 2, 8, 60, 8, 0, 0, 2, 8, 68, - 1, 0, 0, 2, 9, 76, 3, 0, 0, 2, 9, 64, - 4, 0, 0, 2, 9, 76, 5, 0, 0, 2, 9, 60, - 6, 0, 0, 2, 9, 64, 7, 0, 0, 2, 9, 60, - 8, 0, 0, 2, 9, 64, 1, 0, 0, 2, 10, 76, - 3, 0, 0, 2, 10, 60, 4, 0, 0, 2, 10, 76, - 5, 0, 0, 2, 10, 60, 6, 0, 0, 2, 10, 60, - 7, 0, 0, 2, 10, 60, 8, 0, 0, 2, 10, 60, - 1, 0, 0, 2, 11, 76, 3, 0, 0, 2, 11, 52, - 4, 0, 0, 2, 11, 76, 5, 0, 0, 2, 11, 60, - 6, 0, 0, 2, 11, 52, 7, 0, 0, 2, 11, 60, - 8, 0, 0, 2, 11, 52, 1, 0, 0, 2, 12, 76, - 3, 0, 0, 2, 12, 40, 4, 0, 0, 2, 12, 76, - 5, 0, 0, 2, 12, 60, 6, 0, 0, 2, 12, 40, - 7, 0, 0, 2, 12, 60, 8, 0, 0, 2, 12, 40, - 1, 0, 0, 2, 13, 76, 3, 0, 0, 2, 13, 28, - 4, 0, 0, 2, 13, 72, 5, 0, 0, 2, 13, 60, - 6, 0, 0, 2, 13, 28, 7, 0, 0, 2, 13, 60, - 8, 0, 0, 2, 13, 28, 1, 0, 0, 2, 14, 127, - 3, 0, 0, 2, 14, 127, 4, 0, 0, 2, 14, 127, - 5, 0, 0, 2, 14, 127, 6, 0, 0, 2, 14, 127, - 7, 0, 0, 2, 14, 127, 8, 0, 0, 2, 14, 127, - 1, 0, 0, 3, 1, 66, 3, 0, 0, 3, 1, 52, - 4, 0, 0, 3, 1, 68, 5, 0, 0, 3, 1, 36, - 6, 0, 0, 3, 1, 52, 7, 0, 0, 3, 1, 36, - 8, 0, 0, 3, 1, 52, 1, 0, 0, 3, 2, 66, - 3, 0, 0, 3, 2, 60, 4, 0, 0, 3, 2, 70, - 5, 0, 0, 3, 2, 36, 6, 0, 0, 3, 2, 60, - 7, 0, 0, 3, 2, 36, 8, 0, 0, 3, 2, 60, - 1, 0, 0, 3, 3, 66, 3, 0, 0, 3, 3, 64, - 4, 0, 0, 3, 3, 70, 5, 0, 0, 3, 3, 36, - 6, 0, 0, 3, 3, 64, 7, 0, 0, 3, 3, 36, - 8, 0, 0, 3, 3, 64, 1, 0, 0, 3, 4, 66, - 3, 0, 0, 3, 4, 68, 4, 0, 0, 3, 4, 70, - 5, 0, 0, 3, 4, 36, 6, 0, 0, 3, 4, 68, - 7, 0, 0, 3, 4, 36, 8, 0, 0, 3, 4, 68, - 1, 0, 0, 3, 5, 66, 3, 0, 0, 3, 5, 76, - 4, 0, 0, 3, 5, 70, 5, 0, 0, 3, 5, 36, - 6, 0, 0, 3, 5, 76, 7, 0, 0, 3, 5, 36, - 8, 0, 0, 3, 5, 76, 1, 0, 0, 3, 6, 66, - 3, 0, 0, 3, 6, 76, 4, 0, 0, 3, 6, 70, - 5, 0, 0, 3, 6, 36, 6, 0, 0, 3, 6, 76, - 7, 0, 0, 3, 6, 36, 8, 0, 0, 3, 6, 76, - 1, 0, 0, 3, 7, 66, 3, 0, 0, 3, 7, 76, - 4, 0, 0, 3, 7, 70, 5, 0, 0, 3, 7, 36, - 6, 0, 0, 3, 7, 76, 7, 0, 0, 3, 7, 36, - 8, 0, 0, 3, 7, 76, 1, 0, 0, 3, 8, 66, - 3, 0, 0, 3, 8, 68, 4, 0, 0, 3, 8, 70, - 5, 0, 0, 3, 8, 36, 6, 0, 0, 3, 8, 68, - 7, 0, 0, 3, 8, 36, 8, 0, 0, 3, 8, 68, - 1, 0, 0, 3, 9, 
66, 3, 0, 0, 3, 9, 64, - 4, 0, 0, 3, 9, 70, 5, 0, 0, 3, 9, 36, - 6, 0, 0, 3, 9, 64, 7, 0, 0, 3, 9, 36, - 8, 0, 0, 3, 9, 64, 1, 0, 0, 3, 10, 66, - 3, 0, 0, 3, 10, 60, 4, 0, 0, 3, 10, 70, - 5, 0, 0, 3, 10, 36, 6, 0, 0, 3, 10, 60, - 7, 0, 0, 3, 10, 36, 8, 0, 0, 3, 10, 60, - 1, 0, 0, 3, 11, 66, 3, 0, 0, 3, 11, 52, - 4, 0, 0, 3, 11, 70, 5, 0, 0, 3, 11, 36, - 6, 0, 0, 3, 11, 52, 7, 0, 0, 3, 11, 36, - 8, 0, 0, 3, 11, 52, 1, 0, 0, 3, 12, 66, - 3, 0, 0, 3, 12, 40, 4, 0, 0, 3, 12, 70, - 5, 0, 0, 3, 12, 36, 6, 0, 0, 3, 12, 40, - 7, 0, 0, 3, 12, 36, 8, 0, 0, 3, 12, 40, - 1, 0, 0, 3, 13, 66, 3, 0, 0, 3, 13, 28, - 4, 0, 0, 3, 13, 62, 5, 0, 0, 3, 13, 36, - 6, 0, 0, 3, 13, 28, 7, 0, 0, 3, 13, 36, - 8, 0, 0, 3, 13, 28, 1, 0, 0, 3, 14, 127, - 3, 0, 0, 3, 14, 127, 4, 0, 0, 3, 14, 127, - 5, 0, 0, 3, 14, 127, 6, 0, 0, 3, 14, 127, - 7, 0, 0, 3, 14, 127, 8, 0, 0, 3, 14, 127, - 1, 0, 1, 2, 1, 127, 3, 0, 1, 2, 1, 127, - 4, 0, 1, 2, 1, 127, 5, 0, 1, 2, 1, 127, - 6, 0, 1, 2, 1, 127, 7, 0, 1, 2, 1, 127, - 8, 0, 1, 2, 1, 127, 1, 0, 1, 2, 2, 127, - 3, 0, 1, 2, 2, 127, 4, 0, 1, 2, 2, 127, - 5, 0, 1, 2, 2, 127, 6, 0, 1, 2, 2, 127, - 7, 0, 1, 2, 2, 127, 8, 0, 1, 2, 2, 127, - 1, 0, 1, 2, 3, 72, 3, 0, 1, 2, 3, 52, - 4, 0, 1, 2, 3, 72, 5, 0, 1, 2, 3, 60, - 6, 0, 1, 2, 3, 52, 7, 0, 1, 2, 3, 60, - 8, 0, 1, 2, 3, 52, 1, 0, 1, 2, 4, 72, - 3, 0, 1, 2, 4, 52, 4, 0, 1, 2, 4, 72, - 5, 0, 1, 2, 4, 60, 6, 0, 1, 2, 4, 52, - 7, 0, 1, 2, 4, 60, 8, 0, 1, 2, 4, 52, - 1, 0, 1, 2, 5, 72, 3, 0, 1, 2, 5, 60, - 4, 0, 1, 2, 5, 72, 5, 0, 1, 2, 5, 60, - 6, 0, 1, 2, 5, 60, 7, 0, 1, 2, 5, 60, - 8, 0, 1, 2, 5, 60, 1, 0, 1, 2, 6, 72, - 3, 0, 1, 2, 6, 64, 4, 0, 1, 2, 6, 72, - 5, 0, 1, 2, 6, 60, 6, 0, 1, 2, 6, 64, - 7, 0, 1, 2, 6, 60, 8, 0, 1, 2, 6, 64, - 1, 0, 1, 2, 7, 72, 3, 0, 1, 2, 7, 60, - 4, 0, 1, 2, 7, 72, 5, 0, 1, 2, 7, 60, - 6, 0, 1, 2, 7, 60, 7, 0, 1, 2, 7, 60, - 8, 0, 1, 2, 7, 60, 1, 0, 1, 2, 8, 72, - 3, 0, 1, 2, 8, 52, 4, 0, 1, 2, 8, 72, - 5, 0, 1, 2, 8, 60, 6, 0, 1, 2, 8, 52, - 7, 0, 1, 2, 8, 60, 8, 0, 1, 2, 8, 52, - 1, 0, 1, 2, 9, 72, 3, 0, 1, 2, 9, 52, - 4, 0, 1, 2, 9, 72, 5, 0, 1, 2, 9, 60, - 6, 0, 1, 2, 9, 52, 7, 0, 1, 2, 9, 60, - 8, 0, 1, 2, 9, 52, 1, 0, 1, 2, 10, 72, - 3, 0, 1, 2, 10, 40, 4, 0, 1, 2, 10, 72, - 5, 0, 1, 2, 10, 60, 6, 0, 1, 2, 10, 40, - 7, 0, 1, 2, 10, 60, 8, 0, 1, 2, 10, 40, - 1, 0, 1, 2, 11, 72, 3, 0, 1, 2, 11, 28, - 4, 0, 1, 2, 11, 70, 5, 0, 1, 2, 11, 60, - 6, 0, 1, 2, 11, 28, 7, 0, 1, 2, 11, 60, - 8, 0, 1, 2, 11, 28, 1, 0, 1, 2, 12, 127, - 3, 0, 1, 2, 12, 127, 4, 0, 1, 2, 12, 127, - 5, 0, 1, 2, 12, 127, 6, 0, 1, 2, 12, 127, - 7, 0, 1, 2, 12, 127, 8, 0, 1, 2, 12, 127, - 1, 0, 1, 2, 13, 127, 3, 0, 1, 2, 13, 127, - 4, 0, 1, 2, 13, 127, 5, 0, 1, 2, 13, 127, - 6, 0, 1, 2, 13, 127, 7, 0, 1, 2, 13, 127, - 8, 0, 1, 2, 13, 127, 1, 0, 1, 2, 14, 127, - 3, 0, 1, 2, 14, 127, 4, 0, 1, 2, 14, 127, - 5, 0, 1, 2, 14, 127, 6, 0, 1, 2, 14, 127, - 7, 0, 1, 2, 14, 127, 8, 0, 1, 2, 14, 127, - 1, 0, 1, 3, 1, 127, 3, 0, 1, 3, 1, 127, - 4, 0, 1, 3, 1, 127, 5, 0, 1, 3, 1, 127, - 6, 0, 1, 3, 1, 127, 7, 0, 1, 3, 1, 127, - 8, 0, 1, 3, 1, 127, 1, 0, 1, 3, 2, 127, - 3, 0, 1, 3, 2, 127, 4, 0, 1, 3, 2, 127, - 5, 0, 1, 3, 2, 127, 6, 0, 1, 3, 2, 127, - 7, 0, 1, 3, 2, 127, 8, 0, 1, 3, 2, 127, - 1, 0, 1, 3, 3, 66, 3, 0, 1, 3, 3, 48, - 4, 0, 1, 3, 3, 66, 5, 0, 1, 3, 3, 36, - 6, 0, 1, 3, 3, 48, 7, 0, 1, 3, 3, 36, - 8, 0, 1, 3, 3, 48, 1, 0, 1, 3, 4, 66, - 3, 0, 1, 3, 4, 48, 4, 0, 1, 3, 4, 70, - 5, 0, 1, 3, 4, 36, 6, 0, 1, 3, 4, 48, - 7, 0, 1, 3, 4, 36, 8, 0, 1, 3, 4, 48, - 1, 0, 1, 3, 5, 66, 3, 0, 1, 3, 5, 60, - 4, 0, 1, 3, 5, 70, 5, 0, 1, 3, 5, 36, - 6, 
0, 1, 3, 5, 60, 7, 0, 1, 3, 5, 36, - 8, 0, 1, 3, 5, 60, 1, 0, 1, 3, 6, 66, - 3, 0, 1, 3, 6, 64, 4, 0, 1, 3, 6, 70, - 5, 0, 1, 3, 6, 36, 6, 0, 1, 3, 6, 64, - 7, 0, 1, 3, 6, 36, 8, 0, 1, 3, 6, 64, - 1, 0, 1, 3, 7, 66, 3, 0, 1, 3, 7, 60, - 4, 0, 1, 3, 7, 70, 5, 0, 1, 3, 7, 36, - 6, 0, 1, 3, 7, 60, 7, 0, 1, 3, 7, 36, - 8, 0, 1, 3, 7, 60, 1, 0, 1, 3, 8, 66, - 3, 0, 1, 3, 8, 52, 4, 0, 1, 3, 8, 70, - 5, 0, 1, 3, 8, 36, 6, 0, 1, 3, 8, 52, - 7, 0, 1, 3, 8, 36, 8, 0, 1, 3, 8, 52, - 1, 0, 1, 3, 9, 66, 3, 0, 1, 3, 9, 52, - 4, 0, 1, 3, 9, 70, 5, 0, 1, 3, 9, 36, - 6, 0, 1, 3, 9, 52, 7, 0, 1, 3, 9, 36, - 8, 0, 1, 3, 9, 52, 1, 0, 1, 3, 10, 66, - 3, 0, 1, 3, 10, 40, 4, 0, 1, 3, 10, 70, - 5, 0, 1, 3, 10, 36, 6, 0, 1, 3, 10, 40, - 7, 0, 1, 3, 10, 36, 8, 0, 1, 3, 10, 40, - 1, 0, 1, 3, 11, 66, 3, 0, 1, 3, 11, 26, - 4, 0, 1, 3, 11, 66, 5, 0, 1, 3, 11, 36, - 6, 0, 1, 3, 11, 26, 7, 0, 1, 3, 11, 36, - 8, 0, 1, 3, 11, 26, 1, 0, 1, 3, 12, 127, - 3, 0, 1, 3, 12, 127, 4, 0, 1, 3, 12, 127, - 5, 0, 1, 3, 12, 127, 6, 0, 1, 3, 12, 127, - 7, 0, 1, 3, 12, 127, 8, 0, 1, 3, 12, 127, - 1, 0, 1, 3, 13, 127, 3, 0, 1, 3, 13, 127, - 4, 0, 1, 3, 13, 127, 5, 0, 1, 3, 13, 127, - 6, 0, 1, 3, 13, 127, 7, 0, 1, 3, 13, 127, - 8, 0, 1, 3, 13, 127, 1, 0, 1, 3, 14, 127, - 3, 0, 1, 3, 14, 127, 4, 0, 1, 3, 14, 127, - 5, 0, 1, 3, 14, 127, 6, 0, 1, 3, 14, 127, - 7, 0, 1, 3, 14, 127, 8, 0, 1, 3, 14, 127, - 1, 1, 0, 1, 36, 60, 3, 1, 0, 1, 36, 62, - 4, 1, 0, 1, 36, 76, 5, 1, 0, 1, 36, 62, - 6, 1, 0, 1, 36, 64, 7, 1, 0, 1, 36, 54, - 8, 1, 0, 1, 36, 62, 1, 1, 0, 1, 40, 62, - 3, 1, 0, 1, 40, 62, 4, 1, 0, 1, 40, 76, - 5, 1, 0, 1, 40, 62, 6, 1, 0, 1, 40, 64, - 7, 1, 0, 1, 40, 54, 8, 1, 0, 1, 40, 62, - 1, 1, 0, 1, 44, 62, 3, 1, 0, 1, 44, 62, - 4, 1, 0, 1, 44, 76, 5, 1, 0, 1, 44, 62, - 6, 1, 0, 1, 44, 64, 7, 1, 0, 1, 44, 54, - 8, 1, 0, 1, 44, 62, 1, 1, 0, 1, 48, 62, - 3, 1, 0, 1, 48, 62, 4, 1, 0, 1, 48, 76, - 5, 1, 0, 1, 48, 62, 6, 1, 0, 1, 48, 64, - 7, 1, 0, 1, 48, 54, 8, 1, 0, 1, 48, 62, - 1, 1, 0, 1, 52, 62, 3, 1, 0, 1, 52, 64, - 4, 1, 0, 1, 52, 76, 5, 1, 0, 1, 52, 62, - 6, 1, 0, 1, 52, 76, 7, 1, 0, 1, 52, 54, - 8, 1, 0, 1, 52, 76, 1, 1, 0, 1, 56, 62, - 3, 1, 0, 1, 56, 64, 4, 1, 0, 1, 56, 76, - 5, 1, 0, 1, 56, 62, 6, 1, 0, 1, 56, 76, - 7, 1, 0, 1, 56, 54, 8, 1, 0, 1, 56, 76, - 1, 1, 0, 1, 60, 62, 3, 1, 0, 1, 60, 64, - 4, 1, 0, 1, 60, 76, 5, 1, 0, 1, 60, 62, - 6, 1, 0, 1, 60, 76, 7, 1, 0, 1, 60, 54, - 8, 1, 0, 1, 60, 76, 1, 1, 0, 1, 64, 60, - 3, 1, 0, 1, 64, 64, 4, 1, 0, 1, 64, 76, - 5, 1, 0, 1, 64, 62, 6, 1, 0, 1, 64, 74, - 7, 1, 0, 1, 64, 54, 8, 1, 0, 1, 64, 74, - 1, 1, 0, 1, 100, 76, 3, 1, 0, 1, 100, 72, - 4, 1, 0, 1, 100, 76, 5, 1, 0, 1, 100, 62, - 6, 1, 0, 1, 100, 72, 7, 1, 0, 1, 100, 54, - 8, 1, 0, 1, 100, 72, 1, 1, 0, 1, 104, 76, - 3, 1, 0, 1, 104, 76, 4, 1, 0, 1, 104, 76, - 5, 1, 0, 1, 104, 62, 6, 1, 0, 1, 104, 76, - 7, 1, 0, 1, 104, 54, 8, 1, 0, 1, 104, 76, - 1, 1, 0, 1, 108, 76, 3, 1, 0, 1, 108, 76, - 4, 1, 0, 1, 108, 76, 5, 1, 0, 1, 108, 62, - 6, 1, 0, 1, 108, 76, 7, 1, 0, 1, 108, 54, - 8, 1, 0, 1, 108, 76, 1, 1, 0, 1, 112, 76, - 3, 1, 0, 1, 112, 76, 4, 1, 0, 1, 112, 76, - 5, 1, 0, 1, 112, 62, 6, 1, 0, 1, 112, 76, - 7, 1, 0, 1, 112, 54, 8, 1, 0, 1, 112, 76, - 1, 1, 0, 1, 116, 76, 3, 1, 0, 1, 116, 76, - 4, 1, 0, 1, 116, 76, 5, 1, 0, 1, 116, 62, - 6, 1, 0, 1, 116, 76, 7, 1, 0, 1, 116, 54, - 8, 1, 0, 1, 116, 76, 1, 1, 0, 1, 120, 76, - 3, 1, 0, 1, 120, 127, 4, 1, 0, 1, 120, 76, - 5, 1, 0, 1, 120, 127, 6, 1, 0, 1, 120, 76, - 7, 1, 0, 1, 120, 54, 8, 1, 0, 1, 120, 76, - 1, 1, 0, 1, 124, 76, 3, 1, 0, 1, 124, 127, - 4, 1, 0, 1, 124, 76, 5, 1, 0, 1, 
124, 127, - 6, 1, 0, 1, 124, 76, 7, 1, 0, 1, 124, 54, - 8, 1, 0, 1, 124, 76, 1, 1, 0, 1, 128, 76, - 3, 1, 0, 1, 128, 127, 4, 1, 0, 1, 128, 76, - 5, 1, 0, 1, 128, 127, 6, 1, 0, 1, 128, 76, - 7, 1, 0, 1, 128, 54, 8, 1, 0, 1, 128, 76, - 1, 1, 0, 1, 132, 76, 3, 1, 0, 1, 132, 76, - 4, 1, 0, 1, 132, 76, 5, 1, 0, 1, 132, 62, - 6, 1, 0, 1, 132, 76, 7, 1, 0, 1, 132, 54, - 8, 1, 0, 1, 132, 76, 1, 1, 0, 1, 136, 76, - 3, 1, 0, 1, 136, 76, 4, 1, 0, 1, 136, 76, - 5, 1, 0, 1, 136, 62, 6, 1, 0, 1, 136, 76, - 7, 1, 0, 1, 136, 127, 8, 1, 0, 1, 136, 76, - 1, 1, 0, 1, 140, 76, 3, 1, 0, 1, 140, 72, - 4, 1, 0, 1, 140, 76, 5, 1, 0, 1, 140, 62, - 6, 1, 0, 1, 140, 72, 7, 1, 0, 1, 140, 127, - 8, 1, 0, 1, 140, 72, 1, 1, 0, 1, 144, 127, - 3, 1, 0, 1, 144, 76, 4, 1, 0, 1, 144, 76, - 5, 1, 0, 1, 144, 127, 6, 1, 0, 1, 144, 76, - 7, 1, 0, 1, 144, 127, 8, 1, 0, 1, 144, 76, - 1, 1, 0, 1, 149, 127, 3, 1, 0, 1, 149, 76, - 4, 1, 0, 1, 149, 74, 5, 1, 0, 1, 149, 76, - 6, 1, 0, 1, 149, 76, 7, 1, 0, 1, 149, 54, - 8, 1, 0, 1, 149, 76, 1, 1, 0, 1, 153, 127, - 3, 1, 0, 1, 153, 76, 4, 1, 0, 1, 153, 74, - 5, 1, 0, 1, 153, 76, 6, 1, 0, 1, 153, 76, - 7, 1, 0, 1, 153, 54, 8, 1, 0, 1, 153, 76, - 1, 1, 0, 1, 157, 127, 3, 1, 0, 1, 157, 76, - 4, 1, 0, 1, 157, 74, 5, 1, 0, 1, 157, 76, - 6, 1, 0, 1, 157, 76, 7, 1, 0, 1, 157, 54, - 8, 1, 0, 1, 157, 76, 1, 1, 0, 1, 161, 127, - 3, 1, 0, 1, 161, 76, 4, 1, 0, 1, 161, 74, - 5, 1, 0, 1, 161, 76, 6, 1, 0, 1, 161, 76, - 7, 1, 0, 1, 161, 54, 8, 1, 0, 1, 161, 76, - 1, 1, 0, 1, 165, 127, 3, 1, 0, 1, 165, 76, - 4, 1, 0, 1, 165, 74, 5, 1, 0, 1, 165, 76, - 6, 1, 0, 1, 165, 76, 7, 1, 0, 1, 165, 54, - 8, 1, 0, 1, 165, 76, 1, 1, 0, 2, 36, 62, - 3, 1, 0, 2, 36, 62, 4, 1, 0, 2, 36, 76, - 5, 1, 0, 2, 36, 62, 6, 1, 0, 2, 36, 64, - 7, 1, 0, 2, 36, 54, 8, 1, 0, 2, 36, 62, - 1, 1, 0, 2, 40, 62, 3, 1, 0, 2, 40, 62, - 4, 1, 0, 2, 40, 76, 5, 1, 0, 2, 40, 62, - 6, 1, 0, 2, 40, 64, 7, 1, 0, 2, 40, 54, - 8, 1, 0, 2, 40, 62, 1, 1, 0, 2, 44, 62, - 3, 1, 0, 2, 44, 62, 4, 1, 0, 2, 44, 76, - 5, 1, 0, 2, 44, 62, 6, 1, 0, 2, 44, 64, - 7, 1, 0, 2, 44, 54, 8, 1, 0, 2, 44, 62, - 1, 1, 0, 2, 48, 62, 3, 1, 0, 2, 48, 62, - 4, 1, 0, 2, 48, 76, 5, 1, 0, 2, 48, 62, - 6, 1, 0, 2, 48, 64, 7, 1, 0, 2, 48, 54, - 8, 1, 0, 2, 48, 62, 1, 1, 0, 2, 52, 62, - 3, 1, 0, 2, 52, 64, 4, 1, 0, 2, 52, 76, - 5, 1, 0, 2, 52, 62, 6, 1, 0, 2, 52, 76, - 7, 1, 0, 2, 52, 54, 8, 1, 0, 2, 52, 76, - 1, 1, 0, 2, 56, 62, 3, 1, 0, 2, 56, 64, - 4, 1, 0, 2, 56, 76, 5, 1, 0, 2, 56, 62, - 6, 1, 0, 2, 56, 76, 7, 1, 0, 2, 56, 54, - 8, 1, 0, 2, 56, 76, 1, 1, 0, 2, 60, 62, - 3, 1, 0, 2, 60, 64, 4, 1, 0, 2, 60, 76, - 5, 1, 0, 2, 60, 62, 6, 1, 0, 2, 60, 76, - 7, 1, 0, 2, 60, 54, 8, 1, 0, 2, 60, 76, - 1, 1, 0, 2, 64, 60, 3, 1, 0, 2, 64, 64, - 4, 1, 0, 2, 64, 74, 5, 1, 0, 2, 64, 62, - 6, 1, 0, 2, 64, 74, 7, 1, 0, 2, 64, 54, - 8, 1, 0, 2, 64, 74, 1, 1, 0, 2, 100, 76, - 3, 1, 0, 2, 100, 70, 4, 1, 0, 2, 100, 76, - 5, 1, 0, 2, 100, 62, 6, 1, 0, 2, 100, 70, - 7, 1, 0, 2, 100, 54, 8, 1, 0, 2, 100, 70, - 1, 1, 0, 2, 104, 76, 3, 1, 0, 2, 104, 76, - 4, 1, 0, 2, 104, 76, 5, 1, 0, 2, 104, 62, - 6, 1, 0, 2, 104, 76, 7, 1, 0, 2, 104, 54, - 8, 1, 0, 2, 104, 76, 1, 1, 0, 2, 108, 76, - 3, 1, 0, 2, 108, 76, 4, 1, 0, 2, 108, 76, - 5, 1, 0, 2, 108, 62, 6, 1, 0, 2, 108, 76, - 7, 1, 0, 2, 108, 54, 8, 1, 0, 2, 108, 76, - 1, 1, 0, 2, 112, 76, 3, 1, 0, 2, 112, 76, - 4, 1, 0, 2, 112, 76, 5, 1, 0, 2, 112, 62, - 6, 1, 0, 2, 112, 76, 7, 1, 0, 2, 112, 54, - 8, 1, 0, 2, 112, 76, 1, 1, 0, 2, 116, 76, - 3, 1, 0, 2, 116, 76, 4, 1, 0, 2, 116, 76, - 5, 1, 0, 2, 116, 62, 6, 1, 0, 2, 116, 76, - 7, 1, 0, 2, 116, 54, 
8, 1, 0, 2, 116, 76, - 1, 1, 0, 2, 120, 76, 3, 1, 0, 2, 120, 127, - 4, 1, 0, 2, 120, 76, 5, 1, 0, 2, 120, 127, - 6, 1, 0, 2, 120, 76, 7, 1, 0, 2, 120, 54, - 8, 1, 0, 2, 120, 76, 1, 1, 0, 2, 124, 76, - 3, 1, 0, 2, 124, 127, 4, 1, 0, 2, 124, 76, - 5, 1, 0, 2, 124, 127, 6, 1, 0, 2, 124, 76, - 7, 1, 0, 2, 124, 54, 8, 1, 0, 2, 124, 76, - 1, 1, 0, 2, 128, 76, 3, 1, 0, 2, 128, 127, - 4, 1, 0, 2, 128, 76, 5, 1, 0, 2, 128, 127, - 6, 1, 0, 2, 128, 76, 7, 1, 0, 2, 128, 54, - 8, 1, 0, 2, 128, 76, 1, 1, 0, 2, 132, 76, - 3, 1, 0, 2, 132, 76, 4, 1, 0, 2, 132, 76, - 5, 1, 0, 2, 132, 62, 6, 1, 0, 2, 132, 76, - 7, 1, 0, 2, 132, 54, 8, 1, 0, 2, 132, 76, - 1, 1, 0, 2, 136, 76, 3, 1, 0, 2, 136, 76, - 4, 1, 0, 2, 136, 76, 5, 1, 0, 2, 136, 62, - 6, 1, 0, 2, 136, 76, 7, 1, 0, 2, 136, 127, - 8, 1, 0, 2, 136, 76, 1, 1, 0, 2, 140, 76, - 3, 1, 0, 2, 140, 70, 4, 1, 0, 2, 140, 76, - 5, 1, 0, 2, 140, 62, 6, 1, 0, 2, 140, 70, - 7, 1, 0, 2, 140, 127, 8, 1, 0, 2, 140, 70, - 1, 1, 0, 2, 144, 127, 3, 1, 0, 2, 144, 76, - 4, 1, 0, 2, 144, 76, 5, 1, 0, 2, 144, 127, - 6, 1, 0, 2, 144, 76, 7, 1, 0, 2, 144, 127, - 8, 1, 0, 2, 144, 76, 1, 1, 0, 2, 149, 127, - 3, 1, 0, 2, 149, 76, 4, 1, 0, 2, 149, 74, - 5, 1, 0, 2, 149, 76, 6, 1, 0, 2, 149, 76, - 7, 1, 0, 2, 149, 54, 8, 1, 0, 2, 149, 76, - 1, 1, 0, 2, 153, 127, 3, 1, 0, 2, 153, 76, - 4, 1, 0, 2, 153, 74, 5, 1, 0, 2, 153, 76, - 6, 1, 0, 2, 153, 76, 7, 1, 0, 2, 153, 54, - 8, 1, 0, 2, 153, 76, 1, 1, 0, 2, 157, 127, - 3, 1, 0, 2, 157, 76, 4, 1, 0, 2, 157, 74, - 5, 1, 0, 2, 157, 76, 6, 1, 0, 2, 157, 76, - 7, 1, 0, 2, 157, 54, 8, 1, 0, 2, 157, 76, - 1, 1, 0, 2, 161, 127, 3, 1, 0, 2, 161, 76, - 4, 1, 0, 2, 161, 74, 5, 1, 0, 2, 161, 76, - 6, 1, 0, 2, 161, 76, 7, 1, 0, 2, 161, 54, - 8, 1, 0, 2, 161, 76, 1, 1, 0, 2, 165, 127, - 3, 1, 0, 2, 165, 76, 4, 1, 0, 2, 165, 74, - 5, 1, 0, 2, 165, 76, 6, 1, 0, 2, 165, 76, - 7, 1, 0, 2, 165, 54, 8, 1, 0, 2, 165, 76, - 1, 1, 0, 3, 36, 50, 3, 1, 0, 3, 36, 38, - 4, 1, 0, 3, 36, 66, 5, 1, 0, 3, 36, 38, - 6, 1, 0, 3, 36, 52, 7, 1, 0, 3, 36, 30, - 8, 1, 0, 3, 36, 50, 1, 1, 0, 3, 40, 50, - 3, 1, 0, 3, 40, 38, 4, 1, 0, 3, 40, 66, - 5, 1, 0, 3, 40, 38, 6, 1, 0, 3, 40, 52, - 7, 1, 0, 3, 40, 30, 8, 1, 0, 3, 40, 50, - 1, 1, 0, 3, 44, 50, 3, 1, 0, 3, 44, 38, - 4, 1, 0, 3, 44, 66, 5, 1, 0, 3, 44, 38, - 6, 1, 0, 3, 44, 52, 7, 1, 0, 3, 44, 30, - 8, 1, 0, 3, 44, 50, 1, 1, 0, 3, 48, 50, - 3, 1, 0, 3, 48, 38, 4, 1, 0, 3, 48, 66, - 5, 1, 0, 3, 48, 38, 6, 1, 0, 3, 48, 52, - 7, 1, 0, 3, 48, 30, 8, 1, 0, 3, 48, 50, - 1, 1, 0, 3, 52, 50, 3, 1, 0, 3, 52, 40, - 4, 1, 0, 3, 52, 66, 5, 1, 0, 3, 52, 38, - 6, 1, 0, 3, 52, 68, 7, 1, 0, 3, 52, 30, - 8, 1, 0, 3, 52, 68, 1, 1, 0, 3, 56, 50, - 3, 1, 0, 3, 56, 40, 4, 1, 0, 3, 56, 66, - 5, 1, 0, 3, 56, 38, 6, 1, 0, 3, 56, 68, - 7, 1, 0, 3, 56, 30, 8, 1, 0, 3, 56, 68, - 1, 1, 0, 3, 60, 50, 3, 1, 0, 3, 60, 40, - 4, 1, 0, 3, 60, 66, 5, 1, 0, 3, 60, 38, - 6, 1, 0, 3, 60, 66, 7, 1, 0, 3, 60, 30, - 8, 1, 0, 3, 60, 66, 1, 1, 0, 3, 64, 50, - 3, 1, 0, 3, 64, 40, 4, 1, 0, 3, 64, 66, - 5, 1, 0, 3, 64, 38, 6, 1, 0, 3, 64, 68, - 7, 1, 0, 3, 64, 30, 8, 1, 0, 3, 64, 68, - 1, 1, 0, 3, 100, 70, 3, 1, 0, 3, 100, 60, - 4, 1, 0, 3, 100, 64, 5, 1, 0, 3, 100, 38, - 6, 1, 0, 3, 100, 60, 7, 1, 0, 3, 100, 30, - 8, 1, 0, 3, 100, 60, 1, 1, 0, 3, 104, 70, - 3, 1, 0, 3, 104, 68, 4, 1, 0, 3, 104, 64, - 5, 1, 0, 3, 104, 38, 6, 1, 0, 3, 104, 68, - 7, 1, 0, 3, 104, 30, 8, 1, 0, 3, 104, 68, - 1, 1, 0, 3, 108, 70, 3, 1, 0, 3, 108, 68, - 4, 1, 0, 3, 108, 64, 5, 1, 0, 3, 108, 38, - 6, 1, 0, 3, 108, 68, 7, 1, 0, 3, 108, 30, - 8, 1, 0, 3, 108, 68, 1, 1, 0, 3, 112, 70, - 3, 1, 
0, 3, 112, 68, 4, 1, 0, 3, 112, 64, - 5, 1, 0, 3, 112, 38, 6, 1, 0, 3, 112, 68, - 7, 1, 0, 3, 112, 30, 8, 1, 0, 3, 112, 68, - 1, 1, 0, 3, 116, 70, 3, 1, 0, 3, 116, 68, - 4, 1, 0, 3, 116, 64, 5, 1, 0, 3, 116, 38, - 6, 1, 0, 3, 116, 68, 7, 1, 0, 3, 116, 30, - 8, 1, 0, 3, 116, 68, 1, 1, 0, 3, 120, 70, - 3, 1, 0, 3, 120, 127, 4, 1, 0, 3, 120, 64, - 5, 1, 0, 3, 120, 127, 6, 1, 0, 3, 120, 68, - 7, 1, 0, 3, 120, 30, 8, 1, 0, 3, 120, 68, - 1, 1, 0, 3, 124, 70, 3, 1, 0, 3, 124, 127, - 4, 1, 0, 3, 124, 64, 5, 1, 0, 3, 124, 127, - 6, 1, 0, 3, 124, 68, 7, 1, 0, 3, 124, 30, - 8, 1, 0, 3, 124, 68, 1, 1, 0, 3, 128, 70, - 3, 1, 0, 3, 128, 127, 4, 1, 0, 3, 128, 64, - 5, 1, 0, 3, 128, 127, 6, 1, 0, 3, 128, 68, - 7, 1, 0, 3, 128, 30, 8, 1, 0, 3, 128, 68, - 1, 1, 0, 3, 132, 70, 3, 1, 0, 3, 132, 68, - 4, 1, 0, 3, 132, 64, 5, 1, 0, 3, 132, 38, - 6, 1, 0, 3, 132, 68, 7, 1, 0, 3, 132, 30, - 8, 1, 0, 3, 132, 68, 1, 1, 0, 3, 136, 70, - 3, 1, 0, 3, 136, 68, 4, 1, 0, 3, 136, 64, - 5, 1, 0, 3, 136, 38, 6, 1, 0, 3, 136, 68, - 7, 1, 0, 3, 136, 127, 8, 1, 0, 3, 136, 68, - 1, 1, 0, 3, 140, 70, 3, 1, 0, 3, 140, 60, - 4, 1, 0, 3, 140, 64, 5, 1, 0, 3, 140, 38, - 6, 1, 0, 3, 140, 60, 7, 1, 0, 3, 140, 127, - 8, 1, 0, 3, 140, 60, 1, 1, 0, 3, 144, 127, - 3, 1, 0, 3, 144, 68, 4, 1, 0, 3, 144, 64, - 5, 1, 0, 3, 144, 127, 6, 1, 0, 3, 144, 68, - 7, 1, 0, 3, 144, 127, 8, 1, 0, 3, 144, 68, - 1, 1, 0, 3, 149, 127, 3, 1, 0, 3, 149, 76, - 4, 1, 0, 3, 149, 60, 5, 1, 0, 3, 149, 76, - 6, 1, 0, 3, 149, 76, 7, 1, 0, 3, 149, 30, - 8, 1, 0, 3, 149, 72, 1, 1, 0, 3, 153, 127, - 3, 1, 0, 3, 153, 76, 4, 1, 0, 3, 153, 60, - 5, 1, 0, 3, 153, 76, 6, 1, 0, 3, 153, 76, - 7, 1, 0, 3, 153, 30, 8, 1, 0, 3, 153, 76, - 1, 1, 0, 3, 157, 127, 3, 1, 0, 3, 157, 76, - 4, 1, 0, 3, 157, 60, 5, 1, 0, 3, 157, 76, - 6, 1, 0, 3, 157, 76, 7, 1, 0, 3, 157, 30, - 8, 1, 0, 3, 157, 76, 1, 1, 0, 3, 161, 127, - 3, 1, 0, 3, 161, 76, 4, 1, 0, 3, 161, 60, - 5, 1, 0, 3, 161, 76, 6, 1, 0, 3, 161, 76, - 7, 1, 0, 3, 161, 30, 8, 1, 0, 3, 161, 76, - 1, 1, 0, 3, 165, 127, 3, 1, 0, 3, 165, 76, - 4, 1, 0, 3, 165, 60, 5, 1, 0, 3, 165, 76, - 6, 1, 0, 3, 165, 76, 7, 1, 0, 3, 165, 30, - 8, 1, 0, 3, 165, 76, 1, 1, 1, 2, 38, 62, - 3, 1, 1, 2, 38, 64, 4, 1, 1, 2, 38, 72, - 5, 1, 1, 2, 38, 64, 6, 1, 1, 2, 38, 64, - 7, 1, 1, 2, 38, 54, 8, 1, 1, 2, 38, 62, - 1, 1, 1, 2, 46, 62, 3, 1, 1, 2, 46, 64, - 4, 1, 1, 2, 46, 72, 5, 1, 1, 2, 46, 64, - 6, 1, 1, 2, 46, 64, 7, 1, 1, 2, 46, 54, - 8, 1, 1, 2, 46, 62, 1, 1, 1, 2, 54, 62, - 3, 1, 1, 2, 54, 64, 4, 1, 1, 2, 54, 72, - 5, 1, 1, 2, 54, 64, 6, 1, 1, 2, 54, 72, - 7, 1, 1, 2, 54, 54, 8, 1, 1, 2, 54, 72, - 1, 1, 1, 2, 62, 62, 3, 1, 1, 2, 62, 64, - 4, 1, 1, 2, 62, 70, 5, 1, 1, 2, 62, 64, - 6, 1, 1, 2, 62, 64, 7, 1, 1, 2, 62, 54, - 8, 1, 1, 2, 62, 64, 1, 1, 1, 2, 102, 72, - 3, 1, 1, 2, 102, 58, 4, 1, 1, 2, 102, 72, - 5, 1, 1, 2, 102, 64, 6, 1, 1, 2, 102, 58, - 7, 1, 1, 2, 102, 54, 8, 1, 1, 2, 102, 58, - 1, 1, 1, 2, 110, 72, 3, 1, 1, 2, 110, 72, - 4, 1, 1, 2, 110, 72, 5, 1, 1, 2, 110, 64, - 6, 1, 1, 2, 110, 72, 7, 1, 1, 2, 110, 54, - 8, 1, 1, 2, 110, 72, 1, 1, 1, 2, 118, 72, - 3, 1, 1, 2, 118, 127, 4, 1, 1, 2, 118, 72, - 5, 1, 1, 2, 118, 127, 6, 1, 1, 2, 118, 72, - 7, 1, 1, 2, 118, 54, 8, 1, 1, 2, 118, 72, - 1, 1, 1, 2, 126, 72, 3, 1, 1, 2, 126, 127, - 4, 1, 1, 2, 126, 72, 5, 1, 1, 2, 126, 127, - 6, 1, 1, 2, 126, 72, 7, 1, 1, 2, 126, 54, - 8, 1, 1, 2, 126, 72, 1, 1, 1, 2, 134, 72, - 3, 1, 1, 2, 134, 72, 4, 1, 1, 2, 134, 72, - 5, 1, 1, 2, 134, 64, 6, 1, 1, 2, 134, 72, - 7, 1, 1, 2, 134, 127, 8, 1, 1, 2, 134, 72, - 1, 1, 1, 2, 142, 127, 3, 1, 1, 2, 142, 72, - 4, 
1, 1, 2, 142, 72, 5, 1, 1, 2, 142, 127, - 6, 1, 1, 2, 142, 72, 7, 1, 1, 2, 142, 127, - 8, 1, 1, 2, 142, 72, 1, 1, 1, 2, 151, 127, - 3, 1, 1, 2, 151, 72, 4, 1, 1, 2, 151, 72, - 5, 1, 1, 2, 151, 72, 6, 1, 1, 2, 151, 72, - 7, 1, 1, 2, 151, 54, 8, 1, 1, 2, 151, 72, - 1, 1, 1, 2, 159, 127, 3, 1, 1, 2, 159, 72, - 4, 1, 1, 2, 159, 72, 5, 1, 1, 2, 159, 72, - 6, 1, 1, 2, 159, 72, 7, 1, 1, 2, 159, 54, - 8, 1, 1, 2, 159, 72, 1, 1, 1, 3, 38, 50, - 3, 1, 1, 3, 38, 40, 4, 1, 1, 3, 38, 62, - 5, 1, 1, 3, 38, 40, 6, 1, 1, 3, 38, 52, - 7, 1, 1, 3, 38, 30, 8, 1, 1, 3, 38, 50, - 1, 1, 1, 3, 46, 50, 3, 1, 1, 3, 46, 40, - 4, 1, 1, 3, 46, 62, 5, 1, 1, 3, 46, 40, - 6, 1, 1, 3, 46, 52, 7, 1, 1, 3, 46, 30, - 8, 1, 1, 3, 46, 50, 1, 1, 1, 3, 54, 50, - 3, 1, 1, 3, 54, 40, 4, 1, 1, 3, 54, 62, - 5, 1, 1, 3, 54, 40, 6, 1, 1, 3, 54, 68, - 7, 1, 1, 3, 54, 30, 8, 1, 1, 3, 54, 68, - 1, 1, 1, 3, 62, 48, 3, 1, 1, 3, 62, 40, - 4, 1, 1, 3, 62, 58, 5, 1, 1, 3, 62, 40, - 6, 1, 1, 3, 62, 58, 7, 1, 1, 3, 62, 30, - 8, 1, 1, 3, 62, 58, 1, 1, 1, 3, 102, 70, - 3, 1, 1, 3, 102, 54, 4, 1, 1, 3, 102, 64, - 5, 1, 1, 3, 102, 40, 6, 1, 1, 3, 102, 54, - 7, 1, 1, 3, 102, 30, 8, 1, 1, 3, 102, 54, - 1, 1, 1, 3, 110, 70, 3, 1, 1, 3, 110, 68, - 4, 1, 1, 3, 110, 64, 5, 1, 1, 3, 110, 40, - 6, 1, 1, 3, 110, 68, 7, 1, 1, 3, 110, 30, - 8, 1, 1, 3, 110, 68, 1, 1, 1, 3, 118, 70, - 3, 1, 1, 3, 118, 127, 4, 1, 1, 3, 118, 64, - 5, 1, 1, 3, 118, 127, 6, 1, 1, 3, 118, 68, - 7, 1, 1, 3, 118, 30, 8, 1, 1, 3, 118, 68, - 1, 1, 1, 3, 126, 70, 3, 1, 1, 3, 126, 127, - 4, 1, 1, 3, 126, 64, 5, 1, 1, 3, 126, 127, - 6, 1, 1, 3, 126, 68, 7, 1, 1, 3, 126, 30, - 8, 1, 1, 3, 126, 68, 1, 1, 1, 3, 134, 70, - 3, 1, 1, 3, 134, 68, 4, 1, 1, 3, 134, 64, - 5, 1, 1, 3, 134, 40, 6, 1, 1, 3, 134, 68, - 7, 1, 1, 3, 134, 127, 8, 1, 1, 3, 134, 68, - 1, 1, 1, 3, 142, 127, 3, 1, 1, 3, 142, 68, - 4, 1, 1, 3, 142, 64, 5, 1, 1, 3, 142, 127, - 6, 1, 1, 3, 142, 68, 7, 1, 1, 3, 142, 127, - 8, 1, 1, 3, 142, 68, 1, 1, 1, 3, 151, 127, - 3, 1, 1, 3, 151, 72, 4, 1, 1, 3, 151, 66, - 5, 1, 1, 3, 151, 72, 6, 1, 1, 3, 151, 72, - 7, 1, 1, 3, 151, 30, 8, 1, 1, 3, 151, 68, - 1, 1, 1, 3, 159, 127, 3, 1, 1, 3, 159, 72, - 4, 1, 1, 3, 159, 66, 5, 1, 1, 3, 159, 72, - 6, 1, 1, 3, 159, 72, 7, 1, 1, 3, 159, 30, - 8, 1, 1, 3, 159, 72, 1, 1, 2, 4, 42, 64, - 3, 1, 2, 4, 42, 64, 4, 1, 2, 4, 42, 68, - 5, 1, 2, 4, 42, 64, 6, 1, 2, 4, 42, 64, - 7, 1, 2, 4, 42, 54, 8, 1, 2, 4, 42, 62, - 1, 1, 2, 4, 58, 64, 3, 1, 2, 4, 58, 62, - 4, 1, 2, 4, 58, 64, 5, 1, 2, 4, 58, 64, - 6, 1, 2, 4, 58, 62, 7, 1, 2, 4, 58, 54, - 8, 1, 2, 4, 58, 62, 1, 1, 2, 4, 106, 72, - 3, 1, 2, 4, 106, 58, 4, 1, 2, 4, 106, 66, - 5, 1, 2, 4, 106, 64, 6, 1, 2, 4, 106, 58, - 7, 1, 2, 4, 106, 54, 8, 1, 2, 4, 106, 58, - 1, 1, 2, 4, 122, 72, 3, 1, 2, 4, 122, 127, - 4, 1, 2, 4, 122, 68, 5, 1, 2, 4, 122, 127, - 6, 1, 2, 4, 122, 72, 7, 1, 2, 4, 122, 54, - 8, 1, 2, 4, 122, 72, 1, 1, 2, 4, 138, 127, - 3, 1, 2, 4, 138, 72, 4, 1, 2, 4, 138, 68, - 5, 1, 2, 4, 138, 127, 6, 1, 2, 4, 138, 72, - 7, 1, 2, 4, 138, 127, 8, 1, 2, 4, 138, 72, - 1, 1, 2, 4, 155, 127, 3, 1, 2, 4, 155, 72, - 4, 1, 2, 4, 155, 68, 5, 1, 2, 4, 155, 72, - 6, 1, 2, 4, 155, 72, 7, 1, 2, 4, 155, 54, - 8, 1, 2, 4, 155, 68, 1, 1, 2, 5, 42, 50, - 3, 1, 2, 5, 42, 40, 4, 1, 2, 5, 42, 58, - 5, 1, 2, 5, 42, 40, 6, 1, 2, 5, 42, 52, - 7, 1, 2, 5, 42, 30, 8, 1, 2, 5, 42, 50, - 1, 1, 2, 5, 58, 50, 3, 1, 2, 5, 58, 40, - 4, 1, 2, 5, 58, 56, 5, 1, 2, 5, 58, 40, - 6, 1, 2, 5, 58, 52, 7, 1, 2, 5, 58, 30, - 8, 1, 2, 5, 58, 52, 1, 1, 2, 5, 106, 72, - 3, 1, 2, 5, 106, 50, 4, 1, 2, 5, 106, 56, - 5, 1, 2, 5, 106, 40, 6, 1, 2, 
5, 106, 50, - 7, 1, 2, 5, 106, 30, 8, 1, 2, 5, 106, 50, - 1, 1, 2, 5, 122, 72, 3, 1, 2, 5, 122, 127, - 4, 1, 2, 5, 122, 56, 5, 1, 2, 5, 122, 127, - 6, 1, 2, 5, 122, 66, 7, 1, 2, 5, 122, 30, - 8, 1, 2, 5, 122, 66, 1, 1, 2, 5, 138, 127, - 3, 1, 2, 5, 138, 66, 4, 1, 2, 5, 138, 58, - 5, 1, 2, 5, 138, 127, 6, 1, 2, 5, 138, 66, - 7, 1, 2, 5, 138, 127, 8, 1, 2, 5, 138, 66, - 1, 1, 2, 5, 155, 127, 3, 1, 2, 5, 155, 62, - 4, 1, 2, 5, 155, 58, 5, 1, 2, 5, 155, 72, - 6, 1, 2, 5, 155, 62, 7, 1, 2, 5, 155, 30, - 8, 1, 2, 5, 155, 62 +static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { + { 0, 0, 0, 0, 1, 72, }, + { 2, 0, 0, 0, 1, 60, }, + { 0, 0, 0, 0, 2, 72, }, + { 2, 0, 0, 0, 2, 60, }, + { 0, 0, 0, 0, 3, 76, }, + { 2, 0, 0, 0, 3, 60, }, + { 0, 0, 0, 0, 4, 76, }, + { 2, 0, 0, 0, 4, 60, }, + { 0, 0, 0, 0, 5, 76, }, + { 2, 0, 0, 0, 5, 60, }, + { 0, 0, 0, 0, 6, 76, }, + { 2, 0, 0, 0, 6, 60, }, + { 0, 0, 0, 0, 7, 76, }, + { 2, 0, 0, 0, 7, 60, }, + { 0, 0, 0, 0, 8, 76, }, + { 2, 0, 0, 0, 8, 60, }, + { 0, 0, 0, 0, 9, 76, }, + { 2, 0, 0, 0, 9, 60, }, + { 0, 0, 0, 0, 10, 72, }, + { 2, 0, 0, 0, 10, 60, }, + { 0, 0, 0, 0, 11, 72, }, + { 2, 0, 0, 0, 11, 60, }, + { 0, 0, 0, 0, 12, 52, }, + { 2, 0, 0, 0, 12, 60, }, + { 0, 0, 0, 0, 13, 48, }, + { 2, 0, 0, 0, 13, 60, }, + { 0, 0, 0, 0, 14, 127, }, + { 2, 0, 0, 0, 14, 127, }, + { 0, 0, 0, 1, 1, 52, }, + { 2, 0, 0, 1, 1, 60, }, + { 0, 0, 0, 1, 2, 60, }, + { 2, 0, 0, 1, 2, 60, }, + { 0, 0, 0, 1, 3, 64, }, + { 2, 0, 0, 1, 3, 60, }, + { 0, 0, 0, 1, 4, 68, }, + { 2, 0, 0, 1, 4, 60, }, + { 0, 0, 0, 1, 5, 76, }, + { 2, 0, 0, 1, 5, 60, }, + { 0, 0, 0, 1, 6, 76, }, + { 2, 0, 0, 1, 6, 60, }, + { 0, 0, 0, 1, 7, 76, }, + { 2, 0, 0, 1, 7, 60, }, + { 0, 0, 0, 1, 8, 68, }, + { 2, 0, 0, 1, 8, 60, }, + { 0, 0, 0, 1, 9, 64, }, + { 2, 0, 0, 1, 9, 60, }, + { 0, 0, 0, 1, 10, 60, }, + { 2, 0, 0, 1, 10, 60, }, + { 0, 0, 0, 1, 11, 52, }, + { 2, 0, 0, 1, 11, 60, }, + { 0, 0, 0, 1, 12, 40, }, + { 2, 0, 0, 1, 12, 60, }, + { 0, 0, 0, 1, 13, 28, }, + { 2, 0, 0, 1, 13, 60, }, + { 0, 0, 0, 1, 14, 127, }, + { 2, 0, 0, 1, 14, 127, }, + { 0, 0, 0, 2, 1, 52, }, + { 2, 0, 0, 2, 1, 60, }, + { 0, 0, 0, 2, 2, 60, }, + { 2, 0, 0, 2, 2, 60, }, + { 0, 0, 0, 2, 3, 64, }, + { 2, 0, 0, 2, 3, 60, }, + { 0, 0, 0, 2, 4, 68, }, + { 2, 0, 0, 2, 4, 60, }, + { 0, 0, 0, 2, 5, 76, }, + { 2, 0, 0, 2, 5, 60, }, + { 0, 0, 0, 2, 6, 76, }, + { 2, 0, 0, 2, 6, 60, }, + { 0, 0, 0, 2, 7, 76, }, + { 2, 0, 0, 2, 7, 60, }, + { 0, 0, 0, 2, 8, 68, }, + { 2, 0, 0, 2, 8, 60, }, + { 0, 0, 0, 2, 9, 64, }, + { 2, 0, 0, 2, 9, 60, }, + { 0, 0, 0, 2, 10, 60, }, + { 2, 0, 0, 2, 10, 60, }, + { 0, 0, 0, 2, 11, 52, }, + { 2, 0, 0, 2, 11, 60, }, + { 0, 0, 0, 2, 12, 40, }, + { 2, 0, 0, 2, 12, 60, }, + { 0, 0, 0, 2, 13, 28, }, + { 2, 0, 0, 2, 13, 60, }, + { 0, 0, 0, 2, 14, 127, }, + { 2, 0, 0, 2, 14, 127, }, + { 0, 0, 0, 3, 1, 52, }, + { 2, 0, 0, 3, 1, 36, }, + { 0, 0, 0, 3, 2, 60, }, + { 2, 0, 0, 3, 2, 36, }, + { 0, 0, 0, 3, 3, 64, }, + { 2, 0, 0, 3, 3, 36, }, + { 0, 0, 0, 3, 4, 68, }, + { 2, 0, 0, 3, 4, 36, }, + { 0, 0, 0, 3, 5, 76, }, + { 2, 0, 0, 3, 5, 36, }, + { 0, 0, 0, 3, 6, 76, }, + { 2, 0, 0, 3, 6, 36, }, + { 0, 0, 0, 3, 7, 76, }, + { 2, 0, 0, 3, 7, 36, }, + { 0, 0, 0, 3, 8, 68, }, + { 2, 0, 0, 3, 8, 36, }, + { 0, 0, 0, 3, 9, 64, }, + { 2, 0, 0, 3, 9, 36, }, + { 0, 0, 0, 3, 10, 60, }, + { 2, 0, 0, 3, 10, 36, }, + { 0, 0, 0, 3, 11, 52, }, + { 2, 0, 0, 3, 11, 36, }, + { 0, 0, 0, 3, 12, 40, }, + { 2, 0, 0, 3, 12, 36, }, + { 0, 0, 0, 3, 13, 28, }, + { 2, 0, 0, 3, 13, 36, }, + { 0, 0, 0, 3, 14, 127, }, + { 2, 0, 0, 3, 14, 
127, }, + { 0, 0, 1, 2, 1, 127, }, + { 2, 0, 1, 2, 1, 127, }, + { 0, 0, 1, 2, 2, 127, }, + { 2, 0, 1, 2, 2, 127, }, + { 0, 0, 1, 2, 3, 52, }, + { 2, 0, 1, 2, 3, 60, }, + { 0, 0, 1, 2, 4, 52, }, + { 2, 0, 1, 2, 4, 60, }, + { 0, 0, 1, 2, 5, 60, }, + { 2, 0, 1, 2, 5, 60, }, + { 0, 0, 1, 2, 6, 64, }, + { 2, 0, 1, 2, 6, 60, }, + { 0, 0, 1, 2, 7, 60, }, + { 2, 0, 1, 2, 7, 60, }, + { 0, 0, 1, 2, 8, 52, }, + { 2, 0, 1, 2, 8, 60, }, + { 0, 0, 1, 2, 9, 52, }, + { 2, 0, 1, 2, 9, 60, }, + { 0, 0, 1, 2, 10, 40, }, + { 2, 0, 1, 2, 10, 60, }, + { 0, 0, 1, 2, 11, 28, }, + { 2, 0, 1, 2, 11, 60, }, + { 0, 0, 1, 2, 12, 127, }, + { 2, 0, 1, 2, 12, 127, }, + { 0, 0, 1, 2, 13, 127, }, + { 2, 0, 1, 2, 13, 127, }, + { 0, 0, 1, 2, 14, 127, }, + { 2, 0, 1, 2, 14, 127, }, + { 0, 0, 1, 3, 1, 127, }, + { 2, 0, 1, 3, 1, 127, }, + { 0, 0, 1, 3, 2, 127, }, + { 2, 0, 1, 3, 2, 127, }, + { 0, 0, 1, 3, 3, 48, }, + { 2, 0, 1, 3, 3, 36, }, + { 0, 0, 1, 3, 4, 48, }, + { 2, 0, 1, 3, 4, 36, }, + { 0, 0, 1, 3, 5, 60, }, + { 2, 0, 1, 3, 5, 36, }, + { 0, 0, 1, 3, 6, 64, }, + { 2, 0, 1, 3, 6, 36, }, + { 0, 0, 1, 3, 7, 60, }, + { 2, 0, 1, 3, 7, 36, }, + { 0, 0, 1, 3, 8, 52, }, + { 2, 0, 1, 3, 8, 36, }, + { 0, 0, 1, 3, 9, 52, }, + { 2, 0, 1, 3, 9, 36, }, + { 0, 0, 1, 3, 10, 40, }, + { 2, 0, 1, 3, 10, 36, }, + { 0, 0, 1, 3, 11, 26, }, + { 2, 0, 1, 3, 11, 36, }, + { 0, 0, 1, 3, 12, 127, }, + { 2, 0, 1, 3, 12, 127, }, + { 0, 0, 1, 3, 13, 127, }, + { 2, 0, 1, 3, 13, 127, }, + { 0, 0, 1, 3, 14, 127, }, + { 2, 0, 1, 3, 14, 127, }, + { 0, 1, 0, 1, 36, 74, }, + { 2, 1, 0, 1, 36, 62, }, + { 0, 1, 0, 1, 40, 76, }, + { 2, 1, 0, 1, 40, 62, }, + { 0, 1, 0, 1, 44, 76, }, + { 2, 1, 0, 1, 44, 62, }, + { 0, 1, 0, 1, 48, 76, }, + { 2, 1, 0, 1, 48, 62, }, + { 0, 1, 0, 1, 52, 76, }, + { 2, 1, 0, 1, 52, 62, }, + { 0, 1, 0, 1, 56, 76, }, + { 2, 1, 0, 1, 56, 62, }, + { 0, 1, 0, 1, 60, 76, }, + { 2, 1, 0, 1, 60, 62, }, + { 0, 1, 0, 1, 64, 74, }, + { 2, 1, 0, 1, 64, 62, }, + { 0, 1, 0, 1, 100, 72, }, + { 2, 1, 0, 1, 100, 62, }, + { 0, 1, 0, 1, 104, 76, }, + { 2, 1, 0, 1, 104, 62, }, + { 0, 1, 0, 1, 108, 76, }, + { 2, 1, 0, 1, 108, 62, }, + { 0, 1, 0, 1, 112, 76, }, + { 2, 1, 0, 1, 112, 62, }, + { 0, 1, 0, 1, 116, 76, }, + { 2, 1, 0, 1, 116, 62, }, + { 0, 1, 0, 1, 120, 76, }, + { 2, 1, 0, 1, 120, 62, }, + { 0, 1, 0, 1, 124, 76, }, + { 2, 1, 0, 1, 124, 62, }, + { 0, 1, 0, 1, 128, 76, }, + { 2, 1, 0, 1, 128, 62, }, + { 0, 1, 0, 1, 132, 76, }, + { 2, 1, 0, 1, 132, 62, }, + { 0, 1, 0, 1, 136, 76, }, + { 2, 1, 0, 1, 136, 62, }, + { 0, 1, 0, 1, 140, 72, }, + { 2, 1, 0, 1, 140, 62, }, + { 0, 1, 0, 1, 144, 76, }, + { 2, 1, 0, 1, 144, 127, }, + { 0, 1, 0, 1, 149, 76, }, + { 2, 1, 0, 1, 149, -128, }, + { 0, 1, 0, 1, 153, 76, }, + { 2, 1, 0, 1, 153, -128, }, + { 0, 1, 0, 1, 157, 76, }, + { 2, 1, 0, 1, 157, -128, }, + { 0, 1, 0, 1, 161, 76, }, + { 2, 1, 0, 1, 161, -128, }, + { 0, 1, 0, 1, 165, 76, }, + { 2, 1, 0, 1, 165, -128, }, + { 0, 1, 0, 2, 36, 72, }, + { 2, 1, 0, 2, 36, 62, }, + { 0, 1, 0, 2, 40, 76, }, + { 2, 1, 0, 2, 40, 62, }, + { 0, 1, 0, 2, 44, 76, }, + { 2, 1, 0, 2, 44, 62, }, + { 0, 1, 0, 2, 48, 76, }, + { 2, 1, 0, 2, 48, 62, }, + { 0, 1, 0, 2, 52, 76, }, + { 2, 1, 0, 2, 52, 62, }, + { 0, 1, 0, 2, 56, 76, }, + { 2, 1, 0, 2, 56, 62, }, + { 0, 1, 0, 2, 60, 76, }, + { 2, 1, 0, 2, 60, 62, }, + { 0, 1, 0, 2, 64, 74, }, + { 2, 1, 0, 2, 64, 62, }, + { 0, 1, 0, 2, 100, 70, }, + { 2, 1, 0, 2, 100, 62, }, + { 0, 1, 0, 2, 104, 76, }, + { 2, 1, 0, 2, 104, 62, }, + { 0, 1, 0, 2, 108, 76, }, + { 2, 1, 0, 2, 108, 62, }, + { 0, 1, 0, 2, 112, 76, }, + { 2, 1, 0, 2, 112, 62, }, 
+ { 0, 1, 0, 2, 116, 76, }, + { 2, 1, 0, 2, 116, 62, }, + { 0, 1, 0, 2, 120, 76, }, + { 2, 1, 0, 2, 120, 62, }, + { 0, 1, 0, 2, 124, 76, }, + { 2, 1, 0, 2, 124, 62, }, + { 0, 1, 0, 2, 128, 76, }, + { 2, 1, 0, 2, 128, 62, }, + { 0, 1, 0, 2, 132, 76, }, + { 2, 1, 0, 2, 132, 62, }, + { 0, 1, 0, 2, 136, 76, }, + { 2, 1, 0, 2, 136, 62, }, + { 0, 1, 0, 2, 140, 70, }, + { 2, 1, 0, 2, 140, 62, }, + { 0, 1, 0, 2, 144, 76, }, + { 2, 1, 0, 2, 144, 127, }, + { 0, 1, 0, 2, 149, 76, }, + { 2, 1, 0, 2, 149, -128, }, + { 0, 1, 0, 2, 153, 76, }, + { 2, 1, 0, 2, 153, -128, }, + { 0, 1, 0, 2, 157, 76, }, + { 2, 1, 0, 2, 157, -128, }, + { 0, 1, 0, 2, 161, 76, }, + { 2, 1, 0, 2, 161, -128, }, + { 0, 1, 0, 2, 165, 76, }, + { 2, 1, 0, 2, 165, -128, }, + { 0, 1, 0, 3, 36, 68, }, + { 2, 1, 0, 3, 36, 38, }, + { 0, 1, 0, 3, 40, 68, }, + { 2, 1, 0, 3, 40, 38, }, + { 0, 1, 0, 3, 44, 68, }, + { 2, 1, 0, 3, 44, 38, }, + { 0, 1, 0, 3, 48, 68, }, + { 2, 1, 0, 3, 48, 38, }, + { 0, 1, 0, 3, 52, 68, }, + { 2, 1, 0, 3, 52, 38, }, + { 0, 1, 0, 3, 56, 68, }, + { 2, 1, 0, 3, 56, 38, }, + { 0, 1, 0, 3, 60, 66, }, + { 2, 1, 0, 3, 60, 38, }, + { 0, 1, 0, 3, 64, 68, }, + { 2, 1, 0, 3, 64, 38, }, + { 0, 1, 0, 3, 100, 60, }, + { 2, 1, 0, 3, 100, 38, }, + { 0, 1, 0, 3, 104, 68, }, + { 2, 1, 0, 3, 104, 38, }, + { 0, 1, 0, 3, 108, 68, }, + { 2, 1, 0, 3, 108, 38, }, + { 0, 1, 0, 3, 112, 68, }, + { 2, 1, 0, 3, 112, 38, }, + { 0, 1, 0, 3, 116, 68, }, + { 2, 1, 0, 3, 116, 38, }, + { 0, 1, 0, 3, 120, 68, }, + { 2, 1, 0, 3, 120, 38, }, + { 0, 1, 0, 3, 124, 68, }, + { 2, 1, 0, 3, 124, 38, }, + { 0, 1, 0, 3, 128, 68, }, + { 2, 1, 0, 3, 128, 38, }, + { 0, 1, 0, 3, 132, 68, }, + { 2, 1, 0, 3, 132, 38, }, + { 0, 1, 0, 3, 136, 68, }, + { 2, 1, 0, 3, 136, 38, }, + { 0, 1, 0, 3, 140, 60, }, + { 2, 1, 0, 3, 140, 38, }, + { 0, 1, 0, 3, 144, 68, }, + { 2, 1, 0, 3, 144, 127, }, + { 0, 1, 0, 3, 149, 76, }, + { 2, 1, 0, 3, 149, -128, }, + { 0, 1, 0, 3, 153, 76, }, + { 2, 1, 0, 3, 153, -128, }, + { 0, 1, 0, 3, 157, 76, }, + { 2, 1, 0, 3, 157, -128, }, + { 0, 1, 0, 3, 161, 76, }, + { 2, 1, 0, 3, 161, -128, }, + { 0, 1, 0, 3, 165, 76, }, + { 2, 1, 0, 3, 165, -128, }, + { 0, 1, 1, 2, 38, 66, }, + { 2, 1, 1, 2, 38, 64, }, + { 0, 1, 1, 2, 46, 72, }, + { 2, 1, 1, 2, 46, 64, }, + { 0, 1, 1, 2, 54, 72, }, + { 2, 1, 1, 2, 54, 64, }, + { 0, 1, 1, 2, 62, 64, }, + { 2, 1, 1, 2, 62, 64, }, + { 0, 1, 1, 2, 102, 58, }, + { 2, 1, 1, 2, 102, 64, }, + { 0, 1, 1, 2, 110, 72, }, + { 2, 1, 1, 2, 110, 64, }, + { 0, 1, 1, 2, 118, 72, }, + { 2, 1, 1, 2, 118, 64, }, + { 0, 1, 1, 2, 126, 72, }, + { 2, 1, 1, 2, 126, 64, }, + { 0, 1, 1, 2, 134, 72, }, + { 2, 1, 1, 2, 134, 64, }, + { 0, 1, 1, 2, 142, 72, }, + { 2, 1, 1, 2, 142, 127, }, + { 0, 1, 1, 2, 151, 72, }, + { 2, 1, 1, 2, 151, -128, }, + { 0, 1, 1, 2, 159, 72, }, + { 2, 1, 1, 2, 159, -128, }, + { 0, 1, 1, 3, 38, 60, }, + { 2, 1, 1, 3, 38, 40, }, + { 0, 1, 1, 3, 46, 68, }, + { 2, 1, 1, 3, 46, 40, }, + { 0, 1, 1, 3, 54, 68, }, + { 2, 1, 1, 3, 54, 40, }, + { 0, 1, 1, 3, 62, 58, }, + { 2, 1, 1, 3, 62, 40, }, + { 0, 1, 1, 3, 102, 54, }, + { 2, 1, 1, 3, 102, 40, }, + { 0, 1, 1, 3, 110, 68, }, + { 2, 1, 1, 3, 110, 40, }, + { 0, 1, 1, 3, 118, 68, }, + { 2, 1, 1, 3, 118, 40, }, + { 0, 1, 1, 3, 126, 68, }, + { 2, 1, 1, 3, 126, 40, }, + { 0, 1, 1, 3, 134, 68, }, + { 2, 1, 1, 3, 134, 40, }, + { 0, 1, 1, 3, 142, 68, }, + { 2, 1, 1, 3, 142, 127, }, + { 0, 1, 1, 3, 151, 72, }, + { 2, 1, 1, 3, 151, -128, }, + { 0, 1, 1, 3, 159, 72, }, + { 2, 1, 1, 3, 159, -128, }, + { 0, 1, 2, 4, 42, 64, }, + { 2, 1, 2, 4, 42, 64, }, + { 0, 1, 2, 4, 58, 62, }, + 
{ 2, 1, 2, 4, 58, 64, }, + { 0, 1, 2, 4, 106, 58, }, + { 2, 1, 2, 4, 106, 64, }, + { 0, 1, 2, 4, 122, 72, }, + { 2, 1, 2, 4, 122, 64, }, + { 0, 1, 2, 4, 138, 72, }, + { 2, 1, 2, 4, 138, 127, }, + { 0, 1, 2, 4, 155, 72, }, + { 2, 1, 2, 4, 155, -128, }, + { 0, 1, 2, 5, 42, 54, }, + { 2, 1, 2, 5, 42, 40, }, + { 0, 1, 2, 5, 58, 52, }, + { 2, 1, 2, 5, 58, 40, }, + { 0, 1, 2, 5, 106, 50, }, + { 2, 1, 2, 5, 106, 40, }, + { 0, 1, 2, 5, 122, 66, }, + { 2, 1, 2, 5, 122, 40, }, + { 0, 1, 2, 5, 138, 66, }, + { 2, 1, 2, 5, 138, 127, }, + { 0, 1, 2, 5, 155, 62, }, + { 2, 1, 2, 5, 155, -128, }, + { 1, 0, 0, 0, 1, 68, }, + { 3, 0, 0, 0, 1, 72, }, + { 4, 0, 0, 0, 1, 76, }, + { 5, 0, 0, 0, 1, 60, }, + { 6, 0, 0, 0, 1, 72, }, + { 7, 0, 0, 0, 1, 60, }, + { 8, 0, 0, 0, 1, 72, }, + { 1, 0, 0, 0, 2, 68, }, + { 3, 0, 0, 0, 2, 72, }, + { 4, 0, 0, 0, 2, 76, }, + { 5, 0, 0, 0, 2, 60, }, + { 6, 0, 0, 0, 2, 72, }, + { 7, 0, 0, 0, 2, 60, }, + { 8, 0, 0, 0, 2, 72, }, + { 1, 0, 0, 0, 3, 68, }, + { 3, 0, 0, 0, 3, 76, }, + { 4, 0, 0, 0, 3, 76, }, + { 5, 0, 0, 0, 3, 60, }, + { 6, 0, 0, 0, 3, 76, }, + { 7, 0, 0, 0, 3, 60, }, + { 8, 0, 0, 0, 3, 76, }, + { 1, 0, 0, 0, 4, 68, }, + { 3, 0, 0, 0, 4, 76, }, + { 4, 0, 0, 0, 4, 76, }, + { 5, 0, 0, 0, 4, 60, }, + { 6, 0, 0, 0, 4, 76, }, + { 7, 0, 0, 0, 4, 60, }, + { 8, 0, 0, 0, 4, 76, }, + { 1, 0, 0, 0, 5, 68, }, + { 3, 0, 0, 0, 5, 76, }, + { 4, 0, 0, 0, 5, 76, }, + { 5, 0, 0, 0, 5, 60, }, + { 6, 0, 0, 0, 5, 76, }, + { 7, 0, 0, 0, 5, 60, }, + { 8, 0, 0, 0, 5, 76, }, + { 1, 0, 0, 0, 6, 68, }, + { 3, 0, 0, 0, 6, 76, }, + { 4, 0, 0, 0, 6, 76, }, + { 5, 0, 0, 0, 6, 60, }, + { 6, 0, 0, 0, 6, 76, }, + { 7, 0, 0, 0, 6, 60, }, + { 8, 0, 0, 0, 6, 76, }, + { 1, 0, 0, 0, 7, 68, }, + { 3, 0, 0, 0, 7, 76, }, + { 4, 0, 0, 0, 7, 76, }, + { 5, 0, 0, 0, 7, 60, }, + { 6, 0, 0, 0, 7, 76, }, + { 7, 0, 0, 0, 7, 60, }, + { 8, 0, 0, 0, 7, 76, }, + { 1, 0, 0, 0, 8, 68, }, + { 3, 0, 0, 0, 8, 76, }, + { 4, 0, 0, 0, 8, 76, }, + { 5, 0, 0, 0, 8, 60, }, + { 6, 0, 0, 0, 8, 76, }, + { 7, 0, 0, 0, 8, 60, }, + { 8, 0, 0, 0, 8, 76, }, + { 1, 0, 0, 0, 9, 68, }, + { 3, 0, 0, 0, 9, 76, }, + { 4, 0, 0, 0, 9, 76, }, + { 5, 0, 0, 0, 9, 60, }, + { 6, 0, 0, 0, 9, 76, }, + { 7, 0, 0, 0, 9, 60, }, + { 8, 0, 0, 0, 9, 76, }, + { 1, 0, 0, 0, 10, 68, }, + { 3, 0, 0, 0, 10, 72, }, + { 4, 0, 0, 0, 10, 76, }, + { 5, 0, 0, 0, 10, 60, }, + { 6, 0, 0, 0, 10, 72, }, + { 7, 0, 0, 0, 10, 60, }, + { 8, 0, 0, 0, 10, 72, }, + { 1, 0, 0, 0, 11, 68, }, + { 3, 0, 0, 0, 11, 72, }, + { 4, 0, 0, 0, 11, 76, }, + { 5, 0, 0, 0, 11, 60, }, + { 6, 0, 0, 0, 11, 72, }, + { 7, 0, 0, 0, 11, 60, }, + { 8, 0, 0, 0, 11, 72, }, + { 1, 0, 0, 0, 12, 68, }, + { 3, 0, 0, 0, 12, 52, }, + { 4, 0, 0, 0, 12, 76, }, + { 5, 0, 0, 0, 12, 60, }, + { 6, 0, 0, 0, 12, 52, }, + { 7, 0, 0, 0, 12, 60, }, + { 8, 0, 0, 0, 12, 52, }, + { 1, 0, 0, 0, 13, 68, }, + { 3, 0, 0, 0, 13, 48, }, + { 4, 0, 0, 0, 13, 76, }, + { 5, 0, 0, 0, 13, 60, }, + { 6, 0, 0, 0, 13, 48, }, + { 7, 0, 0, 0, 13, 60, }, + { 8, 0, 0, 0, 13, 48, }, + { 1, 0, 0, 0, 14, 68, }, + { 3, 0, 0, 0, 14, 127, }, + { 4, 0, 0, 0, 14, 127, }, + { 5, 0, 0, 0, 14, 127, }, + { 6, 0, 0, 0, 14, 127, }, + { 7, 0, 0, 0, 14, 127, }, + { 8, 0, 0, 0, 14, 127, }, + { 1, 0, 0, 1, 1, 76, }, + { 3, 0, 0, 1, 1, 52, }, + { 4, 0, 0, 1, 1, 76, }, + { 5, 0, 0, 1, 1, 60, }, + { 6, 0, 0, 1, 1, 52, }, + { 7, 0, 0, 1, 1, 60, }, + { 8, 0, 0, 1, 1, 52, }, + { 1, 0, 0, 1, 2, 76, }, + { 3, 0, 0, 1, 2, 60, }, + { 4, 0, 0, 1, 2, 76, }, + { 5, 0, 0, 1, 2, 60, }, + { 6, 0, 0, 1, 2, 60, }, + { 7, 0, 0, 1, 2, 60, }, + { 8, 0, 0, 1, 2, 60, }, + { 1, 0, 0, 
1, 3, 76, }, + { 3, 0, 0, 1, 3, 64, }, + { 4, 0, 0, 1, 3, 76, }, + { 5, 0, 0, 1, 3, 60, }, + { 6, 0, 0, 1, 3, 64, }, + { 7, 0, 0, 1, 3, 60, }, + { 8, 0, 0, 1, 3, 64, }, + { 1, 0, 0, 1, 4, 76, }, + { 3, 0, 0, 1, 4, 68, }, + { 4, 0, 0, 1, 4, 76, }, + { 5, 0, 0, 1, 4, 60, }, + { 6, 0, 0, 1, 4, 68, }, + { 7, 0, 0, 1, 4, 60, }, + { 8, 0, 0, 1, 4, 68, }, + { 1, 0, 0, 1, 5, 76, }, + { 3, 0, 0, 1, 5, 76, }, + { 4, 0, 0, 1, 5, 76, }, + { 5, 0, 0, 1, 5, 60, }, + { 6, 0, 0, 1, 5, 76, }, + { 7, 0, 0, 1, 5, 60, }, + { 8, 0, 0, 1, 5, 76, }, + { 1, 0, 0, 1, 6, 76, }, + { 3, 0, 0, 1, 6, 76, }, + { 4, 0, 0, 1, 6, 76, }, + { 5, 0, 0, 1, 6, 60, }, + { 6, 0, 0, 1, 6, 76, }, + { 7, 0, 0, 1, 6, 60, }, + { 8, 0, 0, 1, 6, 76, }, + { 1, 0, 0, 1, 7, 76, }, + { 3, 0, 0, 1, 7, 76, }, + { 4, 0, 0, 1, 7, 76, }, + { 5, 0, 0, 1, 7, 60, }, + { 6, 0, 0, 1, 7, 76, }, + { 7, 0, 0, 1, 7, 60, }, + { 8, 0, 0, 1, 7, 76, }, + { 1, 0, 0, 1, 8, 76, }, + { 3, 0, 0, 1, 8, 68, }, + { 4, 0, 0, 1, 8, 76, }, + { 5, 0, 0, 1, 8, 60, }, + { 6, 0, 0, 1, 8, 68, }, + { 7, 0, 0, 1, 8, 60, }, + { 8, 0, 0, 1, 8, 68, }, + { 1, 0, 0, 1, 9, 76, }, + { 3, 0, 0, 1, 9, 64, }, + { 4, 0, 0, 1, 9, 76, }, + { 5, 0, 0, 1, 9, 60, }, + { 6, 0, 0, 1, 9, 64, }, + { 7, 0, 0, 1, 9, 60, }, + { 8, 0, 0, 1, 9, 64, }, + { 1, 0, 0, 1, 10, 76, }, + { 3, 0, 0, 1, 10, 60, }, + { 4, 0, 0, 1, 10, 76, }, + { 5, 0, 0, 1, 10, 60, }, + { 6, 0, 0, 1, 10, 60, }, + { 7, 0, 0, 1, 10, 60, }, + { 8, 0, 0, 1, 10, 60, }, + { 1, 0, 0, 1, 11, 76, }, + { 3, 0, 0, 1, 11, 52, }, + { 4, 0, 0, 1, 11, 76, }, + { 5, 0, 0, 1, 11, 60, }, + { 6, 0, 0, 1, 11, 52, }, + { 7, 0, 0, 1, 11, 60, }, + { 8, 0, 0, 1, 11, 52, }, + { 1, 0, 0, 1, 12, 76, }, + { 3, 0, 0, 1, 12, 40, }, + { 4, 0, 0, 1, 12, 76, }, + { 5, 0, 0, 1, 12, 60, }, + { 6, 0, 0, 1, 12, 40, }, + { 7, 0, 0, 1, 12, 60, }, + { 8, 0, 0, 1, 12, 40, }, + { 1, 0, 0, 1, 13, 76, }, + { 3, 0, 0, 1, 13, 28, }, + { 4, 0, 0, 1, 13, 70, }, + { 5, 0, 0, 1, 13, 60, }, + { 6, 0, 0, 1, 13, 28, }, + { 7, 0, 0, 1, 13, 60, }, + { 8, 0, 0, 1, 13, 28, }, + { 1, 0, 0, 1, 14, 127, }, + { 3, 0, 0, 1, 14, 127, }, + { 4, 0, 0, 1, 14, 127, }, + { 5, 0, 0, 1, 14, 127, }, + { 6, 0, 0, 1, 14, 127, }, + { 7, 0, 0, 1, 14, 127, }, + { 8, 0, 0, 1, 14, 127, }, + { 1, 0, 0, 2, 1, 76, }, + { 3, 0, 0, 2, 1, 52, }, + { 4, 0, 0, 2, 1, 76, }, + { 5, 0, 0, 2, 1, 60, }, + { 6, 0, 0, 2, 1, 52, }, + { 7, 0, 0, 2, 1, 60, }, + { 8, 0, 0, 2, 1, 52, }, + { 1, 0, 0, 2, 2, 76, }, + { 3, 0, 0, 2, 2, 60, }, + { 4, 0, 0, 2, 2, 76, }, + { 5, 0, 0, 2, 2, 60, }, + { 6, 0, 0, 2, 2, 60, }, + { 7, 0, 0, 2, 2, 60, }, + { 8, 0, 0, 2, 2, 60, }, + { 1, 0, 0, 2, 3, 76, }, + { 3, 0, 0, 2, 3, 64, }, + { 4, 0, 0, 2, 3, 76, }, + { 5, 0, 0, 2, 3, 60, }, + { 6, 0, 0, 2, 3, 64, }, + { 7, 0, 0, 2, 3, 60, }, + { 8, 0, 0, 2, 3, 64, }, + { 1, 0, 0, 2, 4, 76, }, + { 3, 0, 0, 2, 4, 68, }, + { 4, 0, 0, 2, 4, 76, }, + { 5, 0, 0, 2, 4, 60, }, + { 6, 0, 0, 2, 4, 68, }, + { 7, 0, 0, 2, 4, 60, }, + { 8, 0, 0, 2, 4, 68, }, + { 1, 0, 0, 2, 5, 76, }, + { 3, 0, 0, 2, 5, 76, }, + { 4, 0, 0, 2, 5, 76, }, + { 5, 0, 0, 2, 5, 60, }, + { 6, 0, 0, 2, 5, 76, }, + { 7, 0, 0, 2, 5, 60, }, + { 8, 0, 0, 2, 5, 76, }, + { 1, 0, 0, 2, 6, 76, }, + { 3, 0, 0, 2, 6, 76, }, + { 4, 0, 0, 2, 6, 76, }, + { 5, 0, 0, 2, 6, 60, }, + { 6, 0, 0, 2, 6, 76, }, + { 7, 0, 0, 2, 6, 60, }, + { 8, 0, 0, 2, 6, 76, }, + { 1, 0, 0, 2, 7, 76, }, + { 3, 0, 0, 2, 7, 76, }, + { 4, 0, 0, 2, 7, 76, }, + { 5, 0, 0, 2, 7, 60, }, + { 6, 0, 0, 2, 7, 76, }, + { 7, 0, 0, 2, 7, 60, }, + { 8, 0, 0, 2, 7, 76, }, + { 1, 0, 0, 2, 8, 76, }, + { 3, 0, 0, 2, 8, 68, }, + { 4, 0, 0, 2, 
8, 76, }, + { 5, 0, 0, 2, 8, 60, }, + { 6, 0, 0, 2, 8, 68, }, + { 7, 0, 0, 2, 8, 60, }, + { 8, 0, 0, 2, 8, 68, }, + { 1, 0, 0, 2, 9, 76, }, + { 3, 0, 0, 2, 9, 64, }, + { 4, 0, 0, 2, 9, 76, }, + { 5, 0, 0, 2, 9, 60, }, + { 6, 0, 0, 2, 9, 64, }, + { 7, 0, 0, 2, 9, 60, }, + { 8, 0, 0, 2, 9, 64, }, + { 1, 0, 0, 2, 10, 76, }, + { 3, 0, 0, 2, 10, 60, }, + { 4, 0, 0, 2, 10, 76, }, + { 5, 0, 0, 2, 10, 60, }, + { 6, 0, 0, 2, 10, 60, }, + { 7, 0, 0, 2, 10, 60, }, + { 8, 0, 0, 2, 10, 60, }, + { 1, 0, 0, 2, 11, 76, }, + { 3, 0, 0, 2, 11, 52, }, + { 4, 0, 0, 2, 11, 76, }, + { 5, 0, 0, 2, 11, 60, }, + { 6, 0, 0, 2, 11, 52, }, + { 7, 0, 0, 2, 11, 60, }, + { 8, 0, 0, 2, 11, 52, }, + { 1, 0, 0, 2, 12, 76, }, + { 3, 0, 0, 2, 12, 40, }, + { 4, 0, 0, 2, 12, 76, }, + { 5, 0, 0, 2, 12, 60, }, + { 6, 0, 0, 2, 12, 40, }, + { 7, 0, 0, 2, 12, 60, }, + { 8, 0, 0, 2, 12, 40, }, + { 1, 0, 0, 2, 13, 76, }, + { 3, 0, 0, 2, 13, 28, }, + { 4, 0, 0, 2, 13, 72, }, + { 5, 0, 0, 2, 13, 60, }, + { 6, 0, 0, 2, 13, 28, }, + { 7, 0, 0, 2, 13, 60, }, + { 8, 0, 0, 2, 13, 28, }, + { 1, 0, 0, 2, 14, 127, }, + { 3, 0, 0, 2, 14, 127, }, + { 4, 0, 0, 2, 14, 127, }, + { 5, 0, 0, 2, 14, 127, }, + { 6, 0, 0, 2, 14, 127, }, + { 7, 0, 0, 2, 14, 127, }, + { 8, 0, 0, 2, 14, 127, }, + { 1, 0, 0, 3, 1, 66, }, + { 3, 0, 0, 3, 1, 52, }, + { 4, 0, 0, 3, 1, 68, }, + { 5, 0, 0, 3, 1, 36, }, + { 6, 0, 0, 3, 1, 52, }, + { 7, 0, 0, 3, 1, 36, }, + { 8, 0, 0, 3, 1, 52, }, + { 1, 0, 0, 3, 2, 66, }, + { 3, 0, 0, 3, 2, 60, }, + { 4, 0, 0, 3, 2, 70, }, + { 5, 0, 0, 3, 2, 36, }, + { 6, 0, 0, 3, 2, 60, }, + { 7, 0, 0, 3, 2, 36, }, + { 8, 0, 0, 3, 2, 60, }, + { 1, 0, 0, 3, 3, 66, }, + { 3, 0, 0, 3, 3, 64, }, + { 4, 0, 0, 3, 3, 70, }, + { 5, 0, 0, 3, 3, 36, }, + { 6, 0, 0, 3, 3, 64, }, + { 7, 0, 0, 3, 3, 36, }, + { 8, 0, 0, 3, 3, 64, }, + { 1, 0, 0, 3, 4, 66, }, + { 3, 0, 0, 3, 4, 68, }, + { 4, 0, 0, 3, 4, 70, }, + { 5, 0, 0, 3, 4, 36, }, + { 6, 0, 0, 3, 4, 68, }, + { 7, 0, 0, 3, 4, 36, }, + { 8, 0, 0, 3, 4, 68, }, + { 1, 0, 0, 3, 5, 66, }, + { 3, 0, 0, 3, 5, 76, }, + { 4, 0, 0, 3, 5, 70, }, + { 5, 0, 0, 3, 5, 36, }, + { 6, 0, 0, 3, 5, 76, }, + { 7, 0, 0, 3, 5, 36, }, + { 8, 0, 0, 3, 5, 76, }, + { 1, 0, 0, 3, 6, 66, }, + { 3, 0, 0, 3, 6, 76, }, + { 4, 0, 0, 3, 6, 70, }, + { 5, 0, 0, 3, 6, 36, }, + { 6, 0, 0, 3, 6, 76, }, + { 7, 0, 0, 3, 6, 36, }, + { 8, 0, 0, 3, 6, 76, }, + { 1, 0, 0, 3, 7, 66, }, + { 3, 0, 0, 3, 7, 76, }, + { 4, 0, 0, 3, 7, 70, }, + { 5, 0, 0, 3, 7, 36, }, + { 6, 0, 0, 3, 7, 76, }, + { 7, 0, 0, 3, 7, 36, }, + { 8, 0, 0, 3, 7, 76, }, + { 1, 0, 0, 3, 8, 66, }, + { 3, 0, 0, 3, 8, 68, }, + { 4, 0, 0, 3, 8, 70, }, + { 5, 0, 0, 3, 8, 36, }, + { 6, 0, 0, 3, 8, 68, }, + { 7, 0, 0, 3, 8, 36, }, + { 8, 0, 0, 3, 8, 68, }, + { 1, 0, 0, 3, 9, 66, }, + { 3, 0, 0, 3, 9, 64, }, + { 4, 0, 0, 3, 9, 70, }, + { 5, 0, 0, 3, 9, 36, }, + { 6, 0, 0, 3, 9, 64, }, + { 7, 0, 0, 3, 9, 36, }, + { 8, 0, 0, 3, 9, 64, }, + { 1, 0, 0, 3, 10, 66, }, + { 3, 0, 0, 3, 10, 60, }, + { 4, 0, 0, 3, 10, 70, }, + { 5, 0, 0, 3, 10, 36, }, + { 6, 0, 0, 3, 10, 60, }, + { 7, 0, 0, 3, 10, 36, }, + { 8, 0, 0, 3, 10, 60, }, + { 1, 0, 0, 3, 11, 66, }, + { 3, 0, 0, 3, 11, 52, }, + { 4, 0, 0, 3, 11, 70, }, + { 5, 0, 0, 3, 11, 36, }, + { 6, 0, 0, 3, 11, 52, }, + { 7, 0, 0, 3, 11, 36, }, + { 8, 0, 0, 3, 11, 52, }, + { 1, 0, 0, 3, 12, 66, }, + { 3, 0, 0, 3, 12, 40, }, + { 4, 0, 0, 3, 12, 70, }, + { 5, 0, 0, 3, 12, 36, }, + { 6, 0, 0, 3, 12, 40, }, + { 7, 0, 0, 3, 12, 36, }, + { 8, 0, 0, 3, 12, 40, }, + { 1, 0, 0, 3, 13, 66, }, + { 3, 0, 0, 3, 13, 28, }, + { 4, 0, 0, 3, 13, 62, }, + { 5, 0, 0, 3, 13, 
36, }, + { 6, 0, 0, 3, 13, 28, }, + { 7, 0, 0, 3, 13, 36, }, + { 8, 0, 0, 3, 13, 28, }, + { 1, 0, 0, 3, 14, 127, }, + { 3, 0, 0, 3, 14, 127, }, + { 4, 0, 0, 3, 14, 127, }, + { 5, 0, 0, 3, 14, 127, }, + { 6, 0, 0, 3, 14, 127, }, + { 7, 0, 0, 3, 14, 127, }, + { 8, 0, 0, 3, 14, 127, }, + { 1, 0, 1, 2, 1, 127, }, + { 3, 0, 1, 2, 1, 127, }, + { 4, 0, 1, 2, 1, 127, }, + { 5, 0, 1, 2, 1, 127, }, + { 6, 0, 1, 2, 1, 127, }, + { 7, 0, 1, 2, 1, 127, }, + { 8, 0, 1, 2, 1, 127, }, + { 1, 0, 1, 2, 2, 127, }, + { 3, 0, 1, 2, 2, 127, }, + { 4, 0, 1, 2, 2, 127, }, + { 5, 0, 1, 2, 2, 127, }, + { 6, 0, 1, 2, 2, 127, }, + { 7, 0, 1, 2, 2, 127, }, + { 8, 0, 1, 2, 2, 127, }, + { 1, 0, 1, 2, 3, 72, }, + { 3, 0, 1, 2, 3, 52, }, + { 4, 0, 1, 2, 3, 72, }, + { 5, 0, 1, 2, 3, 60, }, + { 6, 0, 1, 2, 3, 52, }, + { 7, 0, 1, 2, 3, 60, }, + { 8, 0, 1, 2, 3, 52, }, + { 1, 0, 1, 2, 4, 72, }, + { 3, 0, 1, 2, 4, 52, }, + { 4, 0, 1, 2, 4, 72, }, + { 5, 0, 1, 2, 4, 60, }, + { 6, 0, 1, 2, 4, 52, }, + { 7, 0, 1, 2, 4, 60, }, + { 8, 0, 1, 2, 4, 52, }, + { 1, 0, 1, 2, 5, 72, }, + { 3, 0, 1, 2, 5, 60, }, + { 4, 0, 1, 2, 5, 72, }, + { 5, 0, 1, 2, 5, 60, }, + { 6, 0, 1, 2, 5, 60, }, + { 7, 0, 1, 2, 5, 60, }, + { 8, 0, 1, 2, 5, 60, }, + { 1, 0, 1, 2, 6, 72, }, + { 3, 0, 1, 2, 6, 64, }, + { 4, 0, 1, 2, 6, 72, }, + { 5, 0, 1, 2, 6, 60, }, + { 6, 0, 1, 2, 6, 64, }, + { 7, 0, 1, 2, 6, 60, }, + { 8, 0, 1, 2, 6, 64, }, + { 1, 0, 1, 2, 7, 72, }, + { 3, 0, 1, 2, 7, 60, }, + { 4, 0, 1, 2, 7, 72, }, + { 5, 0, 1, 2, 7, 60, }, + { 6, 0, 1, 2, 7, 60, }, + { 7, 0, 1, 2, 7, 60, }, + { 8, 0, 1, 2, 7, 60, }, + { 1, 0, 1, 2, 8, 72, }, + { 3, 0, 1, 2, 8, 52, }, + { 4, 0, 1, 2, 8, 72, }, + { 5, 0, 1, 2, 8, 60, }, + { 6, 0, 1, 2, 8, 52, }, + { 7, 0, 1, 2, 8, 60, }, + { 8, 0, 1, 2, 8, 52, }, + { 1, 0, 1, 2, 9, 72, }, + { 3, 0, 1, 2, 9, 52, }, + { 4, 0, 1, 2, 9, 72, }, + { 5, 0, 1, 2, 9, 60, }, + { 6, 0, 1, 2, 9, 52, }, + { 7, 0, 1, 2, 9, 60, }, + { 8, 0, 1, 2, 9, 52, }, + { 1, 0, 1, 2, 10, 72, }, + { 3, 0, 1, 2, 10, 40, }, + { 4, 0, 1, 2, 10, 72, }, + { 5, 0, 1, 2, 10, 60, }, + { 6, 0, 1, 2, 10, 40, }, + { 7, 0, 1, 2, 10, 60, }, + { 8, 0, 1, 2, 10, 40, }, + { 1, 0, 1, 2, 11, 72, }, + { 3, 0, 1, 2, 11, 28, }, + { 4, 0, 1, 2, 11, 70, }, + { 5, 0, 1, 2, 11, 60, }, + { 6, 0, 1, 2, 11, 28, }, + { 7, 0, 1, 2, 11, 60, }, + { 8, 0, 1, 2, 11, 28, }, + { 1, 0, 1, 2, 12, 127, }, + { 3, 0, 1, 2, 12, 127, }, + { 4, 0, 1, 2, 12, 127, }, + { 5, 0, 1, 2, 12, 127, }, + { 6, 0, 1, 2, 12, 127, }, + { 7, 0, 1, 2, 12, 127, }, + { 8, 0, 1, 2, 12, 127, }, + { 1, 0, 1, 2, 13, 127, }, + { 3, 0, 1, 2, 13, 127, }, + { 4, 0, 1, 2, 13, 127, }, + { 5, 0, 1, 2, 13, 127, }, + { 6, 0, 1, 2, 13, 127, }, + { 7, 0, 1, 2, 13, 127, }, + { 8, 0, 1, 2, 13, 127, }, + { 1, 0, 1, 2, 14, 127, }, + { 3, 0, 1, 2, 14, 127, }, + { 4, 0, 1, 2, 14, 127, }, + { 5, 0, 1, 2, 14, 127, }, + { 6, 0, 1, 2, 14, 127, }, + { 7, 0, 1, 2, 14, 127, }, + { 8, 0, 1, 2, 14, 127, }, + { 1, 0, 1, 3, 1, 127, }, + { 3, 0, 1, 3, 1, 127, }, + { 4, 0, 1, 3, 1, 127, }, + { 5, 0, 1, 3, 1, 127, }, + { 6, 0, 1, 3, 1, 127, }, + { 7, 0, 1, 3, 1, 127, }, + { 8, 0, 1, 3, 1, 127, }, + { 1, 0, 1, 3, 2, 127, }, + { 3, 0, 1, 3, 2, 127, }, + { 4, 0, 1, 3, 2, 127, }, + { 5, 0, 1, 3, 2, 127, }, + { 6, 0, 1, 3, 2, 127, }, + { 7, 0, 1, 3, 2, 127, }, + { 8, 0, 1, 3, 2, 127, }, + { 1, 0, 1, 3, 3, 66, }, + { 3, 0, 1, 3, 3, 48, }, + { 4, 0, 1, 3, 3, 66, }, + { 5, 0, 1, 3, 3, 36, }, + { 6, 0, 1, 3, 3, 48, }, + { 7, 0, 1, 3, 3, 36, }, + { 8, 0, 1, 3, 3, 48, }, + { 1, 0, 1, 3, 4, 66, }, + { 3, 0, 1, 3, 4, 48, }, + { 4, 0, 1, 3, 4, 70, }, + { 5, 0, 1, 
3, 4, 36, }, + { 6, 0, 1, 3, 4, 48, }, + { 7, 0, 1, 3, 4, 36, }, + { 8, 0, 1, 3, 4, 48, }, + { 1, 0, 1, 3, 5, 66, }, + { 3, 0, 1, 3, 5, 60, }, + { 4, 0, 1, 3, 5, 70, }, + { 5, 0, 1, 3, 5, 36, }, + { 6, 0, 1, 3, 5, 60, }, + { 7, 0, 1, 3, 5, 36, }, + { 8, 0, 1, 3, 5, 60, }, + { 1, 0, 1, 3, 6, 66, }, + { 3, 0, 1, 3, 6, 64, }, + { 4, 0, 1, 3, 6, 70, }, + { 5, 0, 1, 3, 6, 36, }, + { 6, 0, 1, 3, 6, 64, }, + { 7, 0, 1, 3, 6, 36, }, + { 8, 0, 1, 3, 6, 64, }, + { 1, 0, 1, 3, 7, 66, }, + { 3, 0, 1, 3, 7, 60, }, + { 4, 0, 1, 3, 7, 70, }, + { 5, 0, 1, 3, 7, 36, }, + { 6, 0, 1, 3, 7, 60, }, + { 7, 0, 1, 3, 7, 36, }, + { 8, 0, 1, 3, 7, 60, }, + { 1, 0, 1, 3, 8, 66, }, + { 3, 0, 1, 3, 8, 52, }, + { 4, 0, 1, 3, 8, 70, }, + { 5, 0, 1, 3, 8, 36, }, + { 6, 0, 1, 3, 8, 52, }, + { 7, 0, 1, 3, 8, 36, }, + { 8, 0, 1, 3, 8, 52, }, + { 1, 0, 1, 3, 9, 66, }, + { 3, 0, 1, 3, 9, 52, }, + { 4, 0, 1, 3, 9, 70, }, + { 5, 0, 1, 3, 9, 36, }, + { 6, 0, 1, 3, 9, 52, }, + { 7, 0, 1, 3, 9, 36, }, + { 8, 0, 1, 3, 9, 52, }, + { 1, 0, 1, 3, 10, 66, }, + { 3, 0, 1, 3, 10, 40, }, + { 4, 0, 1, 3, 10, 70, }, + { 5, 0, 1, 3, 10, 36, }, + { 6, 0, 1, 3, 10, 40, }, + { 7, 0, 1, 3, 10, 36, }, + { 8, 0, 1, 3, 10, 40, }, + { 1, 0, 1, 3, 11, 66, }, + { 3, 0, 1, 3, 11, 26, }, + { 4, 0, 1, 3, 11, 66, }, + { 5, 0, 1, 3, 11, 36, }, + { 6, 0, 1, 3, 11, 26, }, + { 7, 0, 1, 3, 11, 36, }, + { 8, 0, 1, 3, 11, 26, }, + { 1, 0, 1, 3, 12, 127, }, + { 3, 0, 1, 3, 12, 127, }, + { 4, 0, 1, 3, 12, 127, }, + { 5, 0, 1, 3, 12, 127, }, + { 6, 0, 1, 3, 12, 127, }, + { 7, 0, 1, 3, 12, 127, }, + { 8, 0, 1, 3, 12, 127, }, + { 1, 0, 1, 3, 13, 127, }, + { 3, 0, 1, 3, 13, 127, }, + { 4, 0, 1, 3, 13, 127, }, + { 5, 0, 1, 3, 13, 127, }, + { 6, 0, 1, 3, 13, 127, }, + { 7, 0, 1, 3, 13, 127, }, + { 8, 0, 1, 3, 13, 127, }, + { 1, 0, 1, 3, 14, 127, }, + { 3, 0, 1, 3, 14, 127, }, + { 4, 0, 1, 3, 14, 127, }, + { 5, 0, 1, 3, 14, 127, }, + { 6, 0, 1, 3, 14, 127, }, + { 7, 0, 1, 3, 14, 127, }, + { 8, 0, 1, 3, 14, 127, }, + { 1, 1, 0, 1, 36, 60, }, + { 3, 1, 0, 1, 36, 62, }, + { 4, 1, 0, 1, 36, 76, }, + { 5, 1, 0, 1, 36, 62, }, + { 6, 1, 0, 1, 36, 64, }, + { 7, 1, 0, 1, 36, 54, }, + { 8, 1, 0, 1, 36, 62, }, + { 1, 1, 0, 1, 40, 62, }, + { 3, 1, 0, 1, 40, 62, }, + { 4, 1, 0, 1, 40, 76, }, + { 5, 1, 0, 1, 40, 62, }, + { 6, 1, 0, 1, 40, 64, }, + { 7, 1, 0, 1, 40, 54, }, + { 8, 1, 0, 1, 40, 62, }, + { 1, 1, 0, 1, 44, 62, }, + { 3, 1, 0, 1, 44, 62, }, + { 4, 1, 0, 1, 44, 76, }, + { 5, 1, 0, 1, 44, 62, }, + { 6, 1, 0, 1, 44, 64, }, + { 7, 1, 0, 1, 44, 54, }, + { 8, 1, 0, 1, 44, 62, }, + { 1, 1, 0, 1, 48, 62, }, + { 3, 1, 0, 1, 48, 62, }, + { 4, 1, 0, 1, 48, 76, }, + { 5, 1, 0, 1, 48, 62, }, + { 6, 1, 0, 1, 48, 64, }, + { 7, 1, 0, 1, 48, 54, }, + { 8, 1, 0, 1, 48, 62, }, + { 1, 1, 0, 1, 52, 62, }, + { 3, 1, 0, 1, 52, 64, }, + { 4, 1, 0, 1, 52, 76, }, + { 5, 1, 0, 1, 52, 62, }, + { 6, 1, 0, 1, 52, 76, }, + { 7, 1, 0, 1, 52, 54, }, + { 8, 1, 0, 1, 52, 76, }, + { 1, 1, 0, 1, 56, 62, }, + { 3, 1, 0, 1, 56, 64, }, + { 4, 1, 0, 1, 56, 76, }, + { 5, 1, 0, 1, 56, 62, }, + { 6, 1, 0, 1, 56, 76, }, + { 7, 1, 0, 1, 56, 54, }, + { 8, 1, 0, 1, 56, 76, }, + { 1, 1, 0, 1, 60, 62, }, + { 3, 1, 0, 1, 60, 64, }, + { 4, 1, 0, 1, 60, 76, }, + { 5, 1, 0, 1, 60, 62, }, + { 6, 1, 0, 1, 60, 76, }, + { 7, 1, 0, 1, 60, 54, }, + { 8, 1, 0, 1, 60, 76, }, + { 1, 1, 0, 1, 64, 60, }, + { 3, 1, 0, 1, 64, 64, }, + { 4, 1, 0, 1, 64, 76, }, + { 5, 1, 0, 1, 64, 62, }, + { 6, 1, 0, 1, 64, 74, }, + { 7, 1, 0, 1, 64, 54, }, + { 8, 1, 0, 1, 64, 74, }, + { 1, 1, 0, 1, 100, 76, }, + { 3, 1, 0, 1, 100, 72, }, + { 4, 1, 0, 1, 
100, 76, }, + { 5, 1, 0, 1, 100, 62, }, + { 6, 1, 0, 1, 100, 72, }, + { 7, 1, 0, 1, 100, 54, }, + { 8, 1, 0, 1, 100, 72, }, + { 1, 1, 0, 1, 104, 76, }, + { 3, 1, 0, 1, 104, 76, }, + { 4, 1, 0, 1, 104, 76, }, + { 5, 1, 0, 1, 104, 62, }, + { 6, 1, 0, 1, 104, 76, }, + { 7, 1, 0, 1, 104, 54, }, + { 8, 1, 0, 1, 104, 76, }, + { 1, 1, 0, 1, 108, 76, }, + { 3, 1, 0, 1, 108, 76, }, + { 4, 1, 0, 1, 108, 76, }, + { 5, 1, 0, 1, 108, 62, }, + { 6, 1, 0, 1, 108, 76, }, + { 7, 1, 0, 1, 108, 54, }, + { 8, 1, 0, 1, 108, 76, }, + { 1, 1, 0, 1, 112, 76, }, + { 3, 1, 0, 1, 112, 76, }, + { 4, 1, 0, 1, 112, 76, }, + { 5, 1, 0, 1, 112, 62, }, + { 6, 1, 0, 1, 112, 76, }, + { 7, 1, 0, 1, 112, 54, }, + { 8, 1, 0, 1, 112, 76, }, + { 1, 1, 0, 1, 116, 76, }, + { 3, 1, 0, 1, 116, 76, }, + { 4, 1, 0, 1, 116, 76, }, + { 5, 1, 0, 1, 116, 62, }, + { 6, 1, 0, 1, 116, 76, }, + { 7, 1, 0, 1, 116, 54, }, + { 8, 1, 0, 1, 116, 76, }, + { 1, 1, 0, 1, 120, 76, }, + { 3, 1, 0, 1, 120, 127, }, + { 4, 1, 0, 1, 120, 76, }, + { 5, 1, 0, 1, 120, 127, }, + { 6, 1, 0, 1, 120, 76, }, + { 7, 1, 0, 1, 120, 54, }, + { 8, 1, 0, 1, 120, 76, }, + { 1, 1, 0, 1, 124, 76, }, + { 3, 1, 0, 1, 124, 127, }, + { 4, 1, 0, 1, 124, 76, }, + { 5, 1, 0, 1, 124, 127, }, + { 6, 1, 0, 1, 124, 76, }, + { 7, 1, 0, 1, 124, 54, }, + { 8, 1, 0, 1, 124, 76, }, + { 1, 1, 0, 1, 128, 76, }, + { 3, 1, 0, 1, 128, 127, }, + { 4, 1, 0, 1, 128, 76, }, + { 5, 1, 0, 1, 128, 127, }, + { 6, 1, 0, 1, 128, 76, }, + { 7, 1, 0, 1, 128, 54, }, + { 8, 1, 0, 1, 128, 76, }, + { 1, 1, 0, 1, 132, 76, }, + { 3, 1, 0, 1, 132, 76, }, + { 4, 1, 0, 1, 132, 76, }, + { 5, 1, 0, 1, 132, 62, }, + { 6, 1, 0, 1, 132, 76, }, + { 7, 1, 0, 1, 132, 54, }, + { 8, 1, 0, 1, 132, 76, }, + { 1, 1, 0, 1, 136, 76, }, + { 3, 1, 0, 1, 136, 76, }, + { 4, 1, 0, 1, 136, 76, }, + { 5, 1, 0, 1, 136, 62, }, + { 6, 1, 0, 1, 136, 76, }, + { 7, 1, 0, 1, 136, 127, }, + { 8, 1, 0, 1, 136, 76, }, + { 1, 1, 0, 1, 140, 76, }, + { 3, 1, 0, 1, 140, 72, }, + { 4, 1, 0, 1, 140, 76, }, + { 5, 1, 0, 1, 140, 62, }, + { 6, 1, 0, 1, 140, 72, }, + { 7, 1, 0, 1, 140, 127, }, + { 8, 1, 0, 1, 140, 72, }, + { 1, 1, 0, 1, 144, 127, }, + { 3, 1, 0, 1, 144, 76, }, + { 4, 1, 0, 1, 144, 76, }, + { 5, 1, 0, 1, 144, 127, }, + { 6, 1, 0, 1, 144, 76, }, + { 7, 1, 0, 1, 144, 127, }, + { 8, 1, 0, 1, 144, 76, }, + { 1, 1, 0, 1, 149, 127, }, + { 3, 1, 0, 1, 149, 76, }, + { 4, 1, 0, 1, 149, 74, }, + { 5, 1, 0, 1, 149, 76, }, + { 6, 1, 0, 1, 149, 76, }, + { 7, 1, 0, 1, 149, 54, }, + { 8, 1, 0, 1, 149, 76, }, + { 1, 1, 0, 1, 153, 127, }, + { 3, 1, 0, 1, 153, 76, }, + { 4, 1, 0, 1, 153, 74, }, + { 5, 1, 0, 1, 153, 76, }, + { 6, 1, 0, 1, 153, 76, }, + { 7, 1, 0, 1, 153, 54, }, + { 8, 1, 0, 1, 153, 76, }, + { 1, 1, 0, 1, 157, 127, }, + { 3, 1, 0, 1, 157, 76, }, + { 4, 1, 0, 1, 157, 74, }, + { 5, 1, 0, 1, 157, 76, }, + { 6, 1, 0, 1, 157, 76, }, + { 7, 1, 0, 1, 157, 54, }, + { 8, 1, 0, 1, 157, 76, }, + { 1, 1, 0, 1, 161, 127, }, + { 3, 1, 0, 1, 161, 76, }, + { 4, 1, 0, 1, 161, 74, }, + { 5, 1, 0, 1, 161, 76, }, + { 6, 1, 0, 1, 161, 76, }, + { 7, 1, 0, 1, 161, 54, }, + { 8, 1, 0, 1, 161, 76, }, + { 1, 1, 0, 1, 165, 127, }, + { 3, 1, 0, 1, 165, 76, }, + { 4, 1, 0, 1, 165, 74, }, + { 5, 1, 0, 1, 165, 76, }, + { 6, 1, 0, 1, 165, 76, }, + { 7, 1, 0, 1, 165, 54, }, + { 8, 1, 0, 1, 165, 76, }, + { 1, 1, 0, 2, 36, 62, }, + { 3, 1, 0, 2, 36, 62, }, + { 4, 1, 0, 2, 36, 76, }, + { 5, 1, 0, 2, 36, 62, }, + { 6, 1, 0, 2, 36, 64, }, + { 7, 1, 0, 2, 36, 54, }, + { 8, 1, 0, 2, 36, 62, }, + { 1, 1, 0, 2, 40, 62, }, + { 3, 1, 0, 2, 40, 62, }, + { 4, 1, 0, 2, 40, 76, }, + { 5, 
1, 0, 2, 40, 62, }, + { 6, 1, 0, 2, 40, 64, }, + { 7, 1, 0, 2, 40, 54, }, + { 8, 1, 0, 2, 40, 62, }, + { 1, 1, 0, 2, 44, 62, }, + { 3, 1, 0, 2, 44, 62, }, + { 4, 1, 0, 2, 44, 76, }, + { 5, 1, 0, 2, 44, 62, }, + { 6, 1, 0, 2, 44, 64, }, + { 7, 1, 0, 2, 44, 54, }, + { 8, 1, 0, 2, 44, 62, }, + { 1, 1, 0, 2, 48, 62, }, + { 3, 1, 0, 2, 48, 62, }, + { 4, 1, 0, 2, 48, 76, }, + { 5, 1, 0, 2, 48, 62, }, + { 6, 1, 0, 2, 48, 64, }, + { 7, 1, 0, 2, 48, 54, }, + { 8, 1, 0, 2, 48, 62, }, + { 1, 1, 0, 2, 52, 62, }, + { 3, 1, 0, 2, 52, 64, }, + { 4, 1, 0, 2, 52, 76, }, + { 5, 1, 0, 2, 52, 62, }, + { 6, 1, 0, 2, 52, 76, }, + { 7, 1, 0, 2, 52, 54, }, + { 8, 1, 0, 2, 52, 76, }, + { 1, 1, 0, 2, 56, 62, }, + { 3, 1, 0, 2, 56, 64, }, + { 4, 1, 0, 2, 56, 76, }, + { 5, 1, 0, 2, 56, 62, }, + { 6, 1, 0, 2, 56, 76, }, + { 7, 1, 0, 2, 56, 54, }, + { 8, 1, 0, 2, 56, 76, }, + { 1, 1, 0, 2, 60, 62, }, + { 3, 1, 0, 2, 60, 64, }, + { 4, 1, 0, 2, 60, 76, }, + { 5, 1, 0, 2, 60, 62, }, + { 6, 1, 0, 2, 60, 76, }, + { 7, 1, 0, 2, 60, 54, }, + { 8, 1, 0, 2, 60, 76, }, + { 1, 1, 0, 2, 64, 60, }, + { 3, 1, 0, 2, 64, 64, }, + { 4, 1, 0, 2, 64, 74, }, + { 5, 1, 0, 2, 64, 62, }, + { 6, 1, 0, 2, 64, 74, }, + { 7, 1, 0, 2, 64, 54, }, + { 8, 1, 0, 2, 64, 74, }, + { 1, 1, 0, 2, 100, 76, }, + { 3, 1, 0, 2, 100, 70, }, + { 4, 1, 0, 2, 100, 76, }, + { 5, 1, 0, 2, 100, 62, }, + { 6, 1, 0, 2, 100, 70, }, + { 7, 1, 0, 2, 100, 54, }, + { 8, 1, 0, 2, 100, 70, }, + { 1, 1, 0, 2, 104, 76, }, + { 3, 1, 0, 2, 104, 76, }, + { 4, 1, 0, 2, 104, 76, }, + { 5, 1, 0, 2, 104, 62, }, + { 6, 1, 0, 2, 104, 76, }, + { 7, 1, 0, 2, 104, 54, }, + { 8, 1, 0, 2, 104, 76, }, + { 1, 1, 0, 2, 108, 76, }, + { 3, 1, 0, 2, 108, 76, }, + { 4, 1, 0, 2, 108, 76, }, + { 5, 1, 0, 2, 108, 62, }, + { 6, 1, 0, 2, 108, 76, }, + { 7, 1, 0, 2, 108, 54, }, + { 8, 1, 0, 2, 108, 76, }, + { 1, 1, 0, 2, 112, 76, }, + { 3, 1, 0, 2, 112, 76, }, + { 4, 1, 0, 2, 112, 76, }, + { 5, 1, 0, 2, 112, 62, }, + { 6, 1, 0, 2, 112, 76, }, + { 7, 1, 0, 2, 112, 54, }, + { 8, 1, 0, 2, 112, 76, }, + { 1, 1, 0, 2, 116, 76, }, + { 3, 1, 0, 2, 116, 76, }, + { 4, 1, 0, 2, 116, 76, }, + { 5, 1, 0, 2, 116, 62, }, + { 6, 1, 0, 2, 116, 76, }, + { 7, 1, 0, 2, 116, 54, }, + { 8, 1, 0, 2, 116, 76, }, + { 1, 1, 0, 2, 120, 76, }, + { 3, 1, 0, 2, 120, 127, }, + { 4, 1, 0, 2, 120, 76, }, + { 5, 1, 0, 2, 120, 127, }, + { 6, 1, 0, 2, 120, 76, }, + { 7, 1, 0, 2, 120, 54, }, + { 8, 1, 0, 2, 120, 76, }, + { 1, 1, 0, 2, 124, 76, }, + { 3, 1, 0, 2, 124, 127, }, + { 4, 1, 0, 2, 124, 76, }, + { 5, 1, 0, 2, 124, 127, }, + { 6, 1, 0, 2, 124, 76, }, + { 7, 1, 0, 2, 124, 54, }, + { 8, 1, 0, 2, 124, 76, }, + { 1, 1, 0, 2, 128, 76, }, + { 3, 1, 0, 2, 128, 127, }, + { 4, 1, 0, 2, 128, 76, }, + { 5, 1, 0, 2, 128, 127, }, + { 6, 1, 0, 2, 128, 76, }, + { 7, 1, 0, 2, 128, 54, }, + { 8, 1, 0, 2, 128, 76, }, + { 1, 1, 0, 2, 132, 76, }, + { 3, 1, 0, 2, 132, 76, }, + { 4, 1, 0, 2, 132, 76, }, + { 5, 1, 0, 2, 132, 62, }, + { 6, 1, 0, 2, 132, 76, }, + { 7, 1, 0, 2, 132, 54, }, + { 8, 1, 0, 2, 132, 76, }, + { 1, 1, 0, 2, 136, 76, }, + { 3, 1, 0, 2, 136, 76, }, + { 4, 1, 0, 2, 136, 76, }, + { 5, 1, 0, 2, 136, 62, }, + { 6, 1, 0, 2, 136, 76, }, + { 7, 1, 0, 2, 136, 127, }, + { 8, 1, 0, 2, 136, 76, }, + { 1, 1, 0, 2, 140, 76, }, + { 3, 1, 0, 2, 140, 70, }, + { 4, 1, 0, 2, 140, 76, }, + { 5, 1, 0, 2, 140, 62, }, + { 6, 1, 0, 2, 140, 70, }, + { 7, 1, 0, 2, 140, 127, }, + { 8, 1, 0, 2, 140, 70, }, + { 1, 1, 0, 2, 144, 127, }, + { 3, 1, 0, 2, 144, 76, }, + { 4, 1, 0, 2, 144, 76, }, + { 5, 1, 0, 2, 144, 127, }, + { 6, 1, 0, 2, 144, 76, }, + { 7, 1, 0, 
2, 144, 127, }, + { 8, 1, 0, 2, 144, 76, }, + { 1, 1, 0, 2, 149, 127, }, + { 3, 1, 0, 2, 149, 76, }, + { 4, 1, 0, 2, 149, 74, }, + { 5, 1, 0, 2, 149, 76, }, + { 6, 1, 0, 2, 149, 76, }, + { 7, 1, 0, 2, 149, 54, }, + { 8, 1, 0, 2, 149, 76, }, + { 1, 1, 0, 2, 153, 127, }, + { 3, 1, 0, 2, 153, 76, }, + { 4, 1, 0, 2, 153, 74, }, + { 5, 1, 0, 2, 153, 76, }, + { 6, 1, 0, 2, 153, 76, }, + { 7, 1, 0, 2, 153, 54, }, + { 8, 1, 0, 2, 153, 76, }, + { 1, 1, 0, 2, 157, 127, }, + { 3, 1, 0, 2, 157, 76, }, + { 4, 1, 0, 2, 157, 74, }, + { 5, 1, 0, 2, 157, 76, }, + { 6, 1, 0, 2, 157, 76, }, + { 7, 1, 0, 2, 157, 54, }, + { 8, 1, 0, 2, 157, 76, }, + { 1, 1, 0, 2, 161, 127, }, + { 3, 1, 0, 2, 161, 76, }, + { 4, 1, 0, 2, 161, 74, }, + { 5, 1, 0, 2, 161, 76, }, + { 6, 1, 0, 2, 161, 76, }, + { 7, 1, 0, 2, 161, 54, }, + { 8, 1, 0, 2, 161, 76, }, + { 1, 1, 0, 2, 165, 127, }, + { 3, 1, 0, 2, 165, 76, }, + { 4, 1, 0, 2, 165, 74, }, + { 5, 1, 0, 2, 165, 76, }, + { 6, 1, 0, 2, 165, 76, }, + { 7, 1, 0, 2, 165, 54, }, + { 8, 1, 0, 2, 165, 76, }, + { 1, 1, 0, 3, 36, 50, }, + { 3, 1, 0, 3, 36, 38, }, + { 4, 1, 0, 3, 36, 66, }, + { 5, 1, 0, 3, 36, 38, }, + { 6, 1, 0, 3, 36, 52, }, + { 7, 1, 0, 3, 36, 30, }, + { 8, 1, 0, 3, 36, 50, }, + { 1, 1, 0, 3, 40, 50, }, + { 3, 1, 0, 3, 40, 38, }, + { 4, 1, 0, 3, 40, 66, }, + { 5, 1, 0, 3, 40, 38, }, + { 6, 1, 0, 3, 40, 52, }, + { 7, 1, 0, 3, 40, 30, }, + { 8, 1, 0, 3, 40, 50, }, + { 1, 1, 0, 3, 44, 50, }, + { 3, 1, 0, 3, 44, 38, }, + { 4, 1, 0, 3, 44, 66, }, + { 5, 1, 0, 3, 44, 38, }, + { 6, 1, 0, 3, 44, 52, }, + { 7, 1, 0, 3, 44, 30, }, + { 8, 1, 0, 3, 44, 50, }, + { 1, 1, 0, 3, 48, 50, }, + { 3, 1, 0, 3, 48, 38, }, + { 4, 1, 0, 3, 48, 66, }, + { 5, 1, 0, 3, 48, 38, }, + { 6, 1, 0, 3, 48, 52, }, + { 7, 1, 0, 3, 48, 30, }, + { 8, 1, 0, 3, 48, 50, }, + { 1, 1, 0, 3, 52, 50, }, + { 3, 1, 0, 3, 52, 40, }, + { 4, 1, 0, 3, 52, 66, }, + { 5, 1, 0, 3, 52, 38, }, + { 6, 1, 0, 3, 52, 68, }, + { 7, 1, 0, 3, 52, 30, }, + { 8, 1, 0, 3, 52, 68, }, + { 1, 1, 0, 3, 56, 50, }, + { 3, 1, 0, 3, 56, 40, }, + { 4, 1, 0, 3, 56, 66, }, + { 5, 1, 0, 3, 56, 38, }, + { 6, 1, 0, 3, 56, 68, }, + { 7, 1, 0, 3, 56, 30, }, + { 8, 1, 0, 3, 56, 68, }, + { 1, 1, 0, 3, 60, 50, }, + { 3, 1, 0, 3, 60, 40, }, + { 4, 1, 0, 3, 60, 66, }, + { 5, 1, 0, 3, 60, 38, }, + { 6, 1, 0, 3, 60, 66, }, + { 7, 1, 0, 3, 60, 30, }, + { 8, 1, 0, 3, 60, 66, }, + { 1, 1, 0, 3, 64, 50, }, + { 3, 1, 0, 3, 64, 40, }, + { 4, 1, 0, 3, 64, 66, }, + { 5, 1, 0, 3, 64, 38, }, + { 6, 1, 0, 3, 64, 68, }, + { 7, 1, 0, 3, 64, 30, }, + { 8, 1, 0, 3, 64, 68, }, + { 1, 1, 0, 3, 100, 70, }, + { 3, 1, 0, 3, 100, 60, }, + { 4, 1, 0, 3, 100, 64, }, + { 5, 1, 0, 3, 100, 38, }, + { 6, 1, 0, 3, 100, 60, }, + { 7, 1, 0, 3, 100, 30, }, + { 8, 1, 0, 3, 100, 60, }, + { 1, 1, 0, 3, 104, 70, }, + { 3, 1, 0, 3, 104, 68, }, + { 4, 1, 0, 3, 104, 64, }, + { 5, 1, 0, 3, 104, 38, }, + { 6, 1, 0, 3, 104, 68, }, + { 7, 1, 0, 3, 104, 30, }, + { 8, 1, 0, 3, 104, 68, }, + { 1, 1, 0, 3, 108, 70, }, + { 3, 1, 0, 3, 108, 68, }, + { 4, 1, 0, 3, 108, 64, }, + { 5, 1, 0, 3, 108, 38, }, + { 6, 1, 0, 3, 108, 68, }, + { 7, 1, 0, 3, 108, 30, }, + { 8, 1, 0, 3, 108, 68, }, + { 1, 1, 0, 3, 112, 70, }, + { 3, 1, 0, 3, 112, 68, }, + { 4, 1, 0, 3, 112, 64, }, + { 5, 1, 0, 3, 112, 38, }, + { 6, 1, 0, 3, 112, 68, }, + { 7, 1, 0, 3, 112, 30, }, + { 8, 1, 0, 3, 112, 68, }, + { 1, 1, 0, 3, 116, 70, }, + { 3, 1, 0, 3, 116, 68, }, + { 4, 1, 0, 3, 116, 64, }, + { 5, 1, 0, 3, 116, 38, }, + { 6, 1, 0, 3, 116, 68, }, + { 7, 1, 0, 3, 116, 30, }, + { 8, 1, 0, 3, 116, 68, }, + { 1, 1, 0, 3, 120, 70, }, + { 
3, 1, 0, 3, 120, 127, }, + { 4, 1, 0, 3, 120, 64, }, + { 5, 1, 0, 3, 120, 127, }, + { 6, 1, 0, 3, 120, 68, }, + { 7, 1, 0, 3, 120, 30, }, + { 8, 1, 0, 3, 120, 68, }, + { 1, 1, 0, 3, 124, 70, }, + { 3, 1, 0, 3, 124, 127, }, + { 4, 1, 0, 3, 124, 64, }, + { 5, 1, 0, 3, 124, 127, }, + { 6, 1, 0, 3, 124, 68, }, + { 7, 1, 0, 3, 124, 30, }, + { 8, 1, 0, 3, 124, 68, }, + { 1, 1, 0, 3, 128, 70, }, + { 3, 1, 0, 3, 128, 127, }, + { 4, 1, 0, 3, 128, 64, }, + { 5, 1, 0, 3, 128, 127, }, + { 6, 1, 0, 3, 128, 68, }, + { 7, 1, 0, 3, 128, 30, }, + { 8, 1, 0, 3, 128, 68, }, + { 1, 1, 0, 3, 132, 70, }, + { 3, 1, 0, 3, 132, 68, }, + { 4, 1, 0, 3, 132, 64, }, + { 5, 1, 0, 3, 132, 38, }, + { 6, 1, 0, 3, 132, 68, }, + { 7, 1, 0, 3, 132, 30, }, + { 8, 1, 0, 3, 132, 68, }, + { 1, 1, 0, 3, 136, 70, }, + { 3, 1, 0, 3, 136, 68, }, + { 4, 1, 0, 3, 136, 64, }, + { 5, 1, 0, 3, 136, 38, }, + { 6, 1, 0, 3, 136, 68, }, + { 7, 1, 0, 3, 136, 127, }, + { 8, 1, 0, 3, 136, 68, }, + { 1, 1, 0, 3, 140, 70, }, + { 3, 1, 0, 3, 140, 60, }, + { 4, 1, 0, 3, 140, 64, }, + { 5, 1, 0, 3, 140, 38, }, + { 6, 1, 0, 3, 140, 60, }, + { 7, 1, 0, 3, 140, 127, }, + { 8, 1, 0, 3, 140, 60, }, + { 1, 1, 0, 3, 144, 127, }, + { 3, 1, 0, 3, 144, 68, }, + { 4, 1, 0, 3, 144, 64, }, + { 5, 1, 0, 3, 144, 127, }, + { 6, 1, 0, 3, 144, 68, }, + { 7, 1, 0, 3, 144, 127, }, + { 8, 1, 0, 3, 144, 68, }, + { 1, 1, 0, 3, 149, 127, }, + { 3, 1, 0, 3, 149, 76, }, + { 4, 1, 0, 3, 149, 60, }, + { 5, 1, 0, 3, 149, 76, }, + { 6, 1, 0, 3, 149, 76, }, + { 7, 1, 0, 3, 149, 30, }, + { 8, 1, 0, 3, 149, 72, }, + { 1, 1, 0, 3, 153, 127, }, + { 3, 1, 0, 3, 153, 76, }, + { 4, 1, 0, 3, 153, 60, }, + { 5, 1, 0, 3, 153, 76, }, + { 6, 1, 0, 3, 153, 76, }, + { 7, 1, 0, 3, 153, 30, }, + { 8, 1, 0, 3, 153, 76, }, + { 1, 1, 0, 3, 157, 127, }, + { 3, 1, 0, 3, 157, 76, }, + { 4, 1, 0, 3, 157, 60, }, + { 5, 1, 0, 3, 157, 76, }, + { 6, 1, 0, 3, 157, 76, }, + { 7, 1, 0, 3, 157, 30, }, + { 8, 1, 0, 3, 157, 76, }, + { 1, 1, 0, 3, 161, 127, }, + { 3, 1, 0, 3, 161, 76, }, + { 4, 1, 0, 3, 161, 60, }, + { 5, 1, 0, 3, 161, 76, }, + { 6, 1, 0, 3, 161, 76, }, + { 7, 1, 0, 3, 161, 30, }, + { 8, 1, 0, 3, 161, 76, }, + { 1, 1, 0, 3, 165, 127, }, + { 3, 1, 0, 3, 165, 76, }, + { 4, 1, 0, 3, 165, 60, }, + { 5, 1, 0, 3, 165, 76, }, + { 6, 1, 0, 3, 165, 76, }, + { 7, 1, 0, 3, 165, 30, }, + { 8, 1, 0, 3, 165, 76, }, + { 1, 1, 1, 2, 38, 62, }, + { 3, 1, 1, 2, 38, 64, }, + { 4, 1, 1, 2, 38, 72, }, + { 5, 1, 1, 2, 38, 64, }, + { 6, 1, 1, 2, 38, 64, }, + { 7, 1, 1, 2, 38, 54, }, + { 8, 1, 1, 2, 38, 62, }, + { 1, 1, 1, 2, 46, 62, }, + { 3, 1, 1, 2, 46, 64, }, + { 4, 1, 1, 2, 46, 72, }, + { 5, 1, 1, 2, 46, 64, }, + { 6, 1, 1, 2, 46, 64, }, + { 7, 1, 1, 2, 46, 54, }, + { 8, 1, 1, 2, 46, 62, }, + { 1, 1, 1, 2, 54, 62, }, + { 3, 1, 1, 2, 54, 64, }, + { 4, 1, 1, 2, 54, 72, }, + { 5, 1, 1, 2, 54, 64, }, + { 6, 1, 1, 2, 54, 72, }, + { 7, 1, 1, 2, 54, 54, }, + { 8, 1, 1, 2, 54, 72, }, + { 1, 1, 1, 2, 62, 62, }, + { 3, 1, 1, 2, 62, 64, }, + { 4, 1, 1, 2, 62, 70, }, + { 5, 1, 1, 2, 62, 64, }, + { 6, 1, 1, 2, 62, 64, }, + { 7, 1, 1, 2, 62, 54, }, + { 8, 1, 1, 2, 62, 64, }, + { 1, 1, 1, 2, 102, 72, }, + { 3, 1, 1, 2, 102, 58, }, + { 4, 1, 1, 2, 102, 72, }, + { 5, 1, 1, 2, 102, 64, }, + { 6, 1, 1, 2, 102, 58, }, + { 7, 1, 1, 2, 102, 54, }, + { 8, 1, 1, 2, 102, 58, }, + { 1, 1, 1, 2, 110, 72, }, + { 3, 1, 1, 2, 110, 72, }, + { 4, 1, 1, 2, 110, 72, }, + { 5, 1, 1, 2, 110, 64, }, + { 6, 1, 1, 2, 110, 72, }, + { 7, 1, 1, 2, 110, 54, }, + { 8, 1, 1, 2, 110, 72, }, + { 1, 1, 1, 2, 118, 72, }, + { 3, 1, 1, 2, 118, 127, }, + { 4, 1, 1, 
2, 118, 72, }, + { 5, 1, 1, 2, 118, 127, }, + { 6, 1, 1, 2, 118, 72, }, + { 7, 1, 1, 2, 118, 54, }, + { 8, 1, 1, 2, 118, 72, }, + { 1, 1, 1, 2, 126, 72, }, + { 3, 1, 1, 2, 126, 127, }, + { 4, 1, 1, 2, 126, 72, }, + { 5, 1, 1, 2, 126, 127, }, + { 6, 1, 1, 2, 126, 72, }, + { 7, 1, 1, 2, 126, 54, }, + { 8, 1, 1, 2, 126, 72, }, + { 1, 1, 1, 2, 134, 72, }, + { 3, 1, 1, 2, 134, 72, }, + { 4, 1, 1, 2, 134, 72, }, + { 5, 1, 1, 2, 134, 64, }, + { 6, 1, 1, 2, 134, 72, }, + { 7, 1, 1, 2, 134, 127, }, + { 8, 1, 1, 2, 134, 72, }, + { 1, 1, 1, 2, 142, 127, }, + { 3, 1, 1, 2, 142, 72, }, + { 4, 1, 1, 2, 142, 72, }, + { 5, 1, 1, 2, 142, 127, }, + { 6, 1, 1, 2, 142, 72, }, + { 7, 1, 1, 2, 142, 127, }, + { 8, 1, 1, 2, 142, 72, }, + { 1, 1, 1, 2, 151, 127, }, + { 3, 1, 1, 2, 151, 72, }, + { 4, 1, 1, 2, 151, 72, }, + { 5, 1, 1, 2, 151, 72, }, + { 6, 1, 1, 2, 151, 72, }, + { 7, 1, 1, 2, 151, 54, }, + { 8, 1, 1, 2, 151, 72, }, + { 1, 1, 1, 2, 159, 127, }, + { 3, 1, 1, 2, 159, 72, }, + { 4, 1, 1, 2, 159, 72, }, + { 5, 1, 1, 2, 159, 72, }, + { 6, 1, 1, 2, 159, 72, }, + { 7, 1, 1, 2, 159, 54, }, + { 8, 1, 1, 2, 159, 72, }, + { 1, 1, 1, 3, 38, 50, }, + { 3, 1, 1, 3, 38, 40, }, + { 4, 1, 1, 3, 38, 62, }, + { 5, 1, 1, 3, 38, 40, }, + { 6, 1, 1, 3, 38, 52, }, + { 7, 1, 1, 3, 38, 30, }, + { 8, 1, 1, 3, 38, 50, }, + { 1, 1, 1, 3, 46, 50, }, + { 3, 1, 1, 3, 46, 40, }, + { 4, 1, 1, 3, 46, 62, }, + { 5, 1, 1, 3, 46, 40, }, + { 6, 1, 1, 3, 46, 52, }, + { 7, 1, 1, 3, 46, 30, }, + { 8, 1, 1, 3, 46, 50, }, + { 1, 1, 1, 3, 54, 50, }, + { 3, 1, 1, 3, 54, 40, }, + { 4, 1, 1, 3, 54, 62, }, + { 5, 1, 1, 3, 54, 40, }, + { 6, 1, 1, 3, 54, 68, }, + { 7, 1, 1, 3, 54, 30, }, + { 8, 1, 1, 3, 54, 68, }, + { 1, 1, 1, 3, 62, 48, }, + { 3, 1, 1, 3, 62, 40, }, + { 4, 1, 1, 3, 62, 58, }, + { 5, 1, 1, 3, 62, 40, }, + { 6, 1, 1, 3, 62, 58, }, + { 7, 1, 1, 3, 62, 30, }, + { 8, 1, 1, 3, 62, 58, }, + { 1, 1, 1, 3, 102, 70, }, + { 3, 1, 1, 3, 102, 54, }, + { 4, 1, 1, 3, 102, 64, }, + { 5, 1, 1, 3, 102, 40, }, + { 6, 1, 1, 3, 102, 54, }, + { 7, 1, 1, 3, 102, 30, }, + { 8, 1, 1, 3, 102, 54, }, + { 1, 1, 1, 3, 110, 70, }, + { 3, 1, 1, 3, 110, 68, }, + { 4, 1, 1, 3, 110, 64, }, + { 5, 1, 1, 3, 110, 40, }, + { 6, 1, 1, 3, 110, 68, }, + { 7, 1, 1, 3, 110, 30, }, + { 8, 1, 1, 3, 110, 68, }, + { 1, 1, 1, 3, 118, 70, }, + { 3, 1, 1, 3, 118, 127, }, + { 4, 1, 1, 3, 118, 64, }, + { 5, 1, 1, 3, 118, 127, }, + { 6, 1, 1, 3, 118, 68, }, + { 7, 1, 1, 3, 118, 30, }, + { 8, 1, 1, 3, 118, 68, }, + { 1, 1, 1, 3, 126, 70, }, + { 3, 1, 1, 3, 126, 127, }, + { 4, 1, 1, 3, 126, 64, }, + { 5, 1, 1, 3, 126, 127, }, + { 6, 1, 1, 3, 126, 68, }, + { 7, 1, 1, 3, 126, 30, }, + { 8, 1, 1, 3, 126, 68, }, + { 1, 1, 1, 3, 134, 70, }, + { 3, 1, 1, 3, 134, 68, }, + { 4, 1, 1, 3, 134, 64, }, + { 5, 1, 1, 3, 134, 40, }, + { 6, 1, 1, 3, 134, 68, }, + { 7, 1, 1, 3, 134, 127, }, + { 8, 1, 1, 3, 134, 68, }, + { 1, 1, 1, 3, 142, 127, }, + { 3, 1, 1, 3, 142, 68, }, + { 4, 1, 1, 3, 142, 64, }, + { 5, 1, 1, 3, 142, 127, }, + { 6, 1, 1, 3, 142, 68, }, + { 7, 1, 1, 3, 142, 127, }, + { 8, 1, 1, 3, 142, 68, }, + { 1, 1, 1, 3, 151, 127, }, + { 3, 1, 1, 3, 151, 72, }, + { 4, 1, 1, 3, 151, 66, }, + { 5, 1, 1, 3, 151, 72, }, + { 6, 1, 1, 3, 151, 72, }, + { 7, 1, 1, 3, 151, 30, }, + { 8, 1, 1, 3, 151, 68, }, + { 1, 1, 1, 3, 159, 127, }, + { 3, 1, 1, 3, 159, 72, }, + { 4, 1, 1, 3, 159, 66, }, + { 5, 1, 1, 3, 159, 72, }, + { 6, 1, 1, 3, 159, 72, }, + { 7, 1, 1, 3, 159, 30, }, + { 8, 1, 1, 3, 159, 72, }, + { 1, 1, 2, 4, 42, 64, }, + { 3, 1, 2, 4, 42, 64, }, + { 4, 1, 2, 4, 42, 68, }, + { 5, 1, 2, 4, 42, 64, 
},
+ { 6, 1, 2, 4, 42, 64, },
+ { 7, 1, 2, 4, 42, 54, },
+ { 8, 1, 2, 4, 42, 62, },
+ { 1, 1, 2, 4, 58, 64, },
+ { 3, 1, 2, 4, 58, 62, },
+ { 4, 1, 2, 4, 58, 64, },
+ { 5, 1, 2, 4, 58, 64, },
+ { 6, 1, 2, 4, 58, 62, },
+ { 7, 1, 2, 4, 58, 54, },
+ { 8, 1, 2, 4, 58, 62, },
+ { 1, 1, 2, 4, 106, 72, },
+ { 3, 1, 2, 4, 106, 58, },
+ { 4, 1, 2, 4, 106, 66, },
+ { 5, 1, 2, 4, 106, 64, },
+ { 6, 1, 2, 4, 106, 58, },
+ { 7, 1, 2, 4, 106, 54, },
+ { 8, 1, 2, 4, 106, 58, },
+ { 1, 1, 2, 4, 122, 72, },
+ { 3, 1, 2, 4, 122, 127, },
+ { 4, 1, 2, 4, 122, 68, },
+ { 5, 1, 2, 4, 122, 127, },
+ { 6, 1, 2, 4, 122, 72, },
+ { 7, 1, 2, 4, 122, 54, },
+ { 8, 1, 2, 4, 122, 72, },
+ { 1, 1, 2, 4, 138, 127, },
+ { 3, 1, 2, 4, 138, 72, },
+ { 4, 1, 2, 4, 138, 68, },
+ { 5, 1, 2, 4, 138, 127, },
+ { 6, 1, 2, 4, 138, 72, },
+ { 7, 1, 2, 4, 138, 127, },
+ { 8, 1, 2, 4, 138, 72, },
+ { 1, 1, 2, 4, 155, 127, },
+ { 3, 1, 2, 4, 155, 72, },
+ { 4, 1, 2, 4, 155, 68, },
+ { 5, 1, 2, 4, 155, 72, },
+ { 6, 1, 2, 4, 155, 72, },
+ { 7, 1, 2, 4, 155, 54, },
+ { 8, 1, 2, 4, 155, 68, },
+ { 1, 1, 2, 5, 42, 50, },
+ { 3, 1, 2, 5, 42, 40, },
+ { 4, 1, 2, 5, 42, 58, },
+ { 5, 1, 2, 5, 42, 40, },
+ { 6, 1, 2, 5, 42, 52, },
+ { 7, 1, 2, 5, 42, 30, },
+ { 8, 1, 2, 5, 42, 50, },
+ { 1, 1, 2, 5, 58, 50, },
+ { 3, 1, 2, 5, 58, 40, },
+ { 4, 1, 2, 5, 58, 56, },
+ { 5, 1, 2, 5, 58, 40, },
+ { 6, 1, 2, 5, 58, 52, },
+ { 7, 1, 2, 5, 58, 30, },
+ { 8, 1, 2, 5, 58, 52, },
+ { 1, 1, 2, 5, 106, 72, },
+ { 3, 1, 2, 5, 106, 50, },
+ { 4, 1, 2, 5, 106, 56, },
+ { 5, 1, 2, 5, 106, 40, },
+ { 6, 1, 2, 5, 106, 50, },
+ { 7, 1, 2, 5, 106, 30, },
+ { 8, 1, 2, 5, 106, 50, },
+ { 1, 1, 2, 5, 122, 72, },
+ { 3, 1, 2, 5, 122, 127, },
+ { 4, 1, 2, 5, 122, 56, },
+ { 5, 1, 2, 5, 122, 127, },
+ { 6, 1, 2, 5, 122, 66, },
+ { 7, 1, 2, 5, 122, 30, },
+ { 8, 1, 2, 5, 122, 66, },
+ { 1, 1, 2, 5, 138, 127, },
+ { 3, 1, 2, 5, 138, 66, },
+ { 4, 1, 2, 5, 138, 58, },
+ { 5, 1, 2, 5, 138, 127, },
+ { 6, 1, 2, 5, 138, 66, },
+ { 7, 1, 2, 5, 138, 127, },
+ { 8, 1, 2, 5, 138, 66, },
+ { 1, 1, 2, 5, 155, 127, },
+ { 3, 1, 2, 5, 155, 62, },
+ { 4, 1, 2, 5, 155, 58, },
+ { 5, 1, 2, 5, 155, 72, },
+ { 6, 1, 2, 5, 155, 62, },
+ { 7, 1, 2, 5, 155, 30, },
+ { 8, 1, 2, 5, 155, 62, },
};
RTW_DECL_TABLE_TXPWR_LMT(rtw8822c_txpwr_lmt_type0);
+static const u32 rtw8822c_dpk_afe_no_dpk[] = {
+ 0x18a4, BIT(7), 0,
+ 0x41a4, BIT(7), 0,
+ 0x1c38, MASKDWORD, 0xffa1005e,
+ 0x1830, MASKDWORD, 0x700b8041,
+ 0x1830, MASKDWORD, 0x70144041,
+ 0x1830, MASKDWORD, 0x70244041,
+ 0x1830, MASKDWORD, 0x70344041,
+ 0x1830, MASKDWORD, 0x70444041,
+ 0x1830, MASKDWORD, 0x705b8041,
+ 0x1830, MASKDWORD, 0x70644041,
+ 0x4130, MASKDWORD, 0x700b8041,
+ 0x4130, MASKDWORD, 0x70144041,
+ 0x4130, MASKDWORD, 0x70244041,
+ 0x4130, MASKDWORD, 0x70344041,
+ 0x4130, MASKDWORD, 0x70444041,
+ 0x4130, MASKDWORD, 0x705b8041,
+ 0x4130, MASKDWORD, 0x70644041,
+ 0x1830, MASKDWORD, 0x707b8041,
+ 0x1830, MASKDWORD, 0x708b8041,
+ 0x1830, MASKDWORD, 0x709b8041,
+ 0x1830, MASKDWORD, 0x70ab8041,
+ 0x1830, MASKDWORD, 0x70bb8041,
+ 0x1830, MASKDWORD, 0x70cb8041,
+ 0x1830, MASKDWORD, 0x70db8041,
+ 0x1830, MASKDWORD, 0x70eb8041,
+ 0x1830, MASKDWORD, 0x70fb8041,
+ 0x4130, MASKDWORD, 0x707b8041,
+ 0x4130, MASKDWORD, 0x708b8041,
+ 0x4130, MASKDWORD, 0x709b8041,
+ 0x4130, MASKDWORD, 0x70ab8041,
+ 0x4130, MASKDWORD, 0x70bb8041,
+ 0x4130, MASKDWORD, 0x70cb8041,
+ 0x4130, MASKDWORD, 0x70db8041,
+ 0x4130, MASKDWORD, 0x70eb8041,
+ 0x4130, MASKDWORD, 0x70fb8041,
+};
+
+RTW_DECL_TABLE_DPK(rtw8822c_dpk_afe_no_dpk);
+
+static const u32 rtw8822c_dpk_afe_is_dpk[] = {
+ 0x1c38, MASKDWORD, 0xFFFFFFFF,
+ 0x1830, MASKDWORD, 0x700f0001,
+ 0x1830, MASKDWORD, 0x700f0001,
+ 0x1830, MASKDWORD, 0x701f0001,
+ 0x1830, MASKDWORD, 0x702f0001,
+ 0x1830, MASKDWORD, 0x703f0001,
+ 0x1830, MASKDWORD, 0x704f0001,
+ 0x1830, MASKDWORD, 0x705f0001,
+ 0x1830, MASKDWORD, 0x706f0001,
+ 0x1830, MASKDWORD, 0x707f0001,
+ 0x1830, MASKDWORD, 0x708f0001,
+ 0x1830, MASKDWORD, 0x709f0001,
+ 0x1830, MASKDWORD, 0x70af0001,
+ 0x1830, MASKDWORD, 0x70bf0001,
+ 0x1830, MASKDWORD, 0x70cf0001,
+ 0x1830, MASKDWORD, 0x70df0001,
+ 0x1830, MASKDWORD, 0x70ef0001,
+ 0x1830, MASKDWORD, 0x70ff0001,
+ 0x1830, MASKDWORD, 0x70ff0001,
+ 0x4130, MASKDWORD, 0x700f0001,
+ 0x4130, MASKDWORD, 0x700f0001,
+ 0x4130, MASKDWORD, 0x701f0001,
+ 0x4130, MASKDWORD, 0x702f0001,
+ 0x4130, MASKDWORD, 0x703f0001,
+ 0x4130, MASKDWORD, 0x704f0001,
+ 0x4130, MASKDWORD, 0x705f0001,
+ 0x4130, MASKDWORD, 0x706f0001,
+ 0x4130, MASKDWORD, 0x707f0001,
+ 0x4130, MASKDWORD, 0x708f0001,
+ 0x4130, MASKDWORD, 0x709f0001,
+ 0x4130, MASKDWORD, 0x70af0001,
+ 0x4130, MASKDWORD, 0x70bf0001,
+ 0x4130, MASKDWORD, 0x70cf0001,
+ 0x4130, MASKDWORD, 0x70df0001,
+ 0x4130, MASKDWORD, 0x70ef0001,
+ 0x4130, MASKDWORD, 0x70ff0001,
+ 0x4130, MASKDWORD, 0x70ff0001,
+ 0x18a4, BIT(7), 1,
+ 0x41a4, BIT(7), 1,
+};
+
+RTW_DECL_TABLE_DPK(rtw8822c_dpk_afe_is_dpk);
+
+static const u32 rtw8822c_dpk_mac_bb[] = {
+ 0x1e24, BIT(17), 0x1,
+ 0x1d58, GENMASK(11, 3), 0x1ff,
+ 0x1864, BIT(31), 0x1,
+ 0x4164, BIT(31), 0x1,
+ 0x180c, BIT(27), 0x1,
+ 0x410c, BIT(27), 0x1,
+ 0x186c, BIT(7), 0x1,
+ 0x416c, BIT(7), 0x1,
+ 0x180c, GENMASK(1, 0), 0x0,
+ 0x410c, GENMASK(1, 0), 0x0,
+ 0x1a14, GENMASK(9, 8), 0x3,
+ 0x80c, GENMASK(3, 0), 0x8,
+ 0x824, GENMASK(19, 16), 0x3,
+ 0x824, GENMASK(27, 24), 0x3,
+};
+
+RTW_DECL_TABLE_DPK(rtw8822c_dpk_mac_bb);
+
static const u32 rtw8822c_array_mp_cal_init[] = {
0x1b00, 0x00000008,
0x1b00, 0x00A70008,
@@ -10310,6 +13601,7 @@ static const u32 rtw8822c_array_mp_cal_init[] = {
0x1b9c, 0x00000000,
0x1bc0, 0x01000000,
0x1bcc, 0x00000000,
+ 0x1bd8, 0xe0000001,
0x1be4, 0x00000000,
0x1bec, 0x40000000,
0x1b40, 0x40000000,
@@ -10573,6 +13865,7 @@ static const u32 rtw8822c_array_mp_cal_init[] = {
0x1b9c, 0x00000000,
0x1bc0, 0x01000000,
0x1bcc, 0x00000000,
+ 0x1bd8, 0xe0000001,
0x1be4, 0x00000000,
0x1bec, 0x40000000,
0x1b60, 0x1F100000,
@@ -10779,1656 +14072,1970 @@ static const u32 rtw8822c_array_mp_cal_init[] = {
0x1b80, 0x00020257,
0x1b80, 0x30000265,
0x1b80, 0x30000267,
- 0x1b80, 0xa5100275,
- 0x1b80, 0xa5100277,
- 0x1b80, 0xe3520285,
- 0x1b80, 0xe3520287,
- 0x1b80, 0xf01d0295,
- 0x1b80, 0xf01d0297,
- 0x1b80, 0xf11d02a5,
- 0x1b80, 0xf11d02a7,
- 0x1b80, 0xf21d02b5,
- 0x1b80, 0xf21d02b7,
- 0x1b80, 0xf31d02c5,
- 0x1b80, 0xf31d02c7,
- 0x1b80, 0xf41d02d5,
- 0x1b80, 0xf41d02d7,
- 0x1b80, 0xf51d02e5,
- 0x1b80, 0xf51d02e7,
- 0x1b80, 0xf61d02f5,
- 0x1b80, 0xf61d02f7,
- 0x1b80, 0xf71d0305,
- 0x1b80, 0xf71d0307,
- 0x1b80, 0xf81d0315,
- 0x1b80, 0xf81d0317,
- 0x1b80, 0xf91d0325,
- 0x1b80, 0xf91d0327,
- 0x1b80, 0xfa1d0335,
- 0x1b80, 0xfa1d0337,
- 0x1b80, 0xfb1d0345,
- 0x1b80, 0xfb1d0347,
- 0x1b80, 0xfc1d0355,
- 0x1b80, 0xfc1d0357,
- 0x1b80, 0xfd1d0365,
- 0x1b80, 0xfd1d0367,
- 0x1b80, 0xf21d0375,
- 0x1b80, 0xf21d0377,
- 0x1b80, 0xf31d0385,
- 0x1b80, 0xf31d0387,
- 0x1b80, 0xf41d0395,
- 0x1b80, 0xf41d0397,
- 0x1b80, 0xf51d03a5,
- 0x1b80, 0xf51d03a7,
- 0x1b80, 0xf61d03b5,
- 0x1b80, 0xf61d03b7,
- 0x1b80, 0xf71d03c5,
- 0x1b80, 0xf71d03c7,
- 0x1b80, 0xf81d03d5,
- 0x1b80, 0xf81d03d7,
- 0x1b80, 0xf91d03e5,
- 0x1b80, 0xf91d03e7,
- 0x1b80,
0xfa1d03f5, - 0x1b80, 0xfa1d03f7, - 0x1b80, 0xfb1d0405, - 0x1b80, 0xfb1d0407, - 0x1b80, 0xfc1d0415, - 0x1b80, 0xfc1d0417, - 0x1b80, 0xfd1d0425, - 0x1b80, 0xfd1d0427, - 0x1b80, 0xfe1d0435, - 0x1b80, 0xfe1d0437, - 0x1b80, 0xff1d0445, - 0x1b80, 0xff1d0447, - 0x1b80, 0x00010455, - 0x1b80, 0x00010457, - 0x1b80, 0x30620465, - 0x1b80, 0x30620467, - 0x1b80, 0x307a0475, - 0x1b80, 0x307a0477, - 0x1b80, 0x307c0485, - 0x1b80, 0x307c0487, - 0x1b80, 0x30eb0495, - 0x1b80, 0x30eb0497, - 0x1b80, 0x308004a5, - 0x1b80, 0x308004a7, - 0x1b80, 0x308c04b5, - 0x1b80, 0x308c04b7, - 0x1b80, 0x309804c5, - 0x1b80, 0x309804c7, - 0x1b80, 0x307f04d5, - 0x1b80, 0x307f04d7, - 0x1b80, 0x308b04e5, - 0x1b80, 0x308b04e7, - 0x1b80, 0x309704f5, - 0x1b80, 0x309704f7, - 0x1b80, 0x30ef0505, - 0x1b80, 0x30ef0507, - 0x1b80, 0x30fa0515, - 0x1b80, 0x30fa0517, - 0x1b80, 0x31050525, - 0x1b80, 0x31050527, - 0x1b80, 0x316a0535, - 0x1b80, 0x316a0537, - 0x1b80, 0x307a0545, - 0x1b80, 0x307a0547, - 0x1b80, 0x30e90555, - 0x1b80, 0x30e90557, - 0x1b80, 0x31870565, - 0x1b80, 0x31870567, - 0x1b80, 0x31a00575, - 0x1b80, 0x31a00577, - 0x1b80, 0x31ba0585, - 0x1b80, 0x31ba0587, - 0x1b80, 0x31c20595, - 0x1b80, 0x31c20597, - 0x1b80, 0x31ca05a5, - 0x1b80, 0x31ca05a7, - 0x1b80, 0x31d205b5, - 0x1b80, 0x31d205b7, - 0x1b80, 0x31da05c5, - 0x1b80, 0x31da05c7, - 0x1b80, 0x31e905d5, - 0x1b80, 0x31e905d7, - 0x1b80, 0x31f805e5, - 0x1b80, 0x31f805e7, - 0x1b80, 0x31fe05f5, - 0x1b80, 0x31fe05f7, - 0x1b80, 0x32040605, - 0x1b80, 0x32040607, - 0x1b80, 0x320a0615, - 0x1b80, 0x320a0617, - 0x1b80, 0xe2eb0625, - 0x1b80, 0xe2eb0627, - 0x1b80, 0x4d040635, - 0x1b80, 0x4d040637, - 0x1b80, 0x20800645, - 0x1b80, 0x20800647, - 0x1b80, 0x00000655, - 0x1b80, 0x00000657, - 0x1b80, 0x4d000665, - 0x1b80, 0x4d000667, - 0x1b80, 0x55070675, - 0x1b80, 0x55070677, - 0x1b80, 0xe2e30685, - 0x1b80, 0xe2e30687, - 0x1b80, 0xe2e30695, - 0x1b80, 0xe2e30697, - 0x1b80, 0x4d0406a5, - 0x1b80, 0x4d0406a7, - 0x1b80, 0x208806b5, - 0x1b80, 0x208806b7, - 0x1b80, 0x020006c5, - 0x1b80, 0x020006c7, - 0x1b80, 0x4d0006d5, - 0x1b80, 0x4d0006d7, - 0x1b80, 0x550f06e5, - 0x1b80, 0x550f06e7, - 0x1b80, 0xe2e306f5, - 0x1b80, 0xe2e306f7, - 0x1b80, 0x4f020705, - 0x1b80, 0x4f020707, - 0x1b80, 0x4e000715, - 0x1b80, 0x4e000717, - 0x1b80, 0x53020725, - 0x1b80, 0x53020727, - 0x1b80, 0x52010735, - 0x1b80, 0x52010737, - 0x1b80, 0xe2e70745, - 0x1b80, 0xe2e70747, - 0x1b80, 0x4d080755, - 0x1b80, 0x4d080757, - 0x1b80, 0x57100765, - 0x1b80, 0x57100767, - 0x1b80, 0x57000775, - 0x1b80, 0x57000777, - 0x1b80, 0x4d000785, - 0x1b80, 0x4d000787, - 0x1b80, 0x00010795, - 0x1b80, 0x00010797, - 0x1b80, 0xe2eb07a5, - 0x1b80, 0xe2eb07a7, - 0x1b80, 0x000107b5, - 0x1b80, 0x000107b7, - 0x1b80, 0x620607c5, - 0x1b80, 0x620607c7, - 0x1b80, 0xe2eb07d5, - 0x1b80, 0xe2eb07d7, - 0x1b80, 0x000107e5, - 0x1b80, 0x000107e7, - 0x1b80, 0x620607f5, - 0x1b80, 0x620607f7, - 0x1b80, 0x30ad0805, - 0x1b80, 0x30ad0807, - 0x1b80, 0x00260815, - 0x1b80, 0x00260817, - 0x1b80, 0xe3450825, - 0x1b80, 0xe3450827, - 0x1b80, 0x00020835, - 0x1b80, 0x00020837, - 0x1b80, 0x54ec0845, - 0x1b80, 0x54ec0847, - 0x1b80, 0x0ba60855, - 0x1b80, 0x0ba60857, - 0x1b80, 0x00260865, - 0x1b80, 0x00260867, - 0x1b80, 0xe3450875, - 0x1b80, 0xe3450877, - 0x1b80, 0x00020885, - 0x1b80, 0x00020887, - 0x1b80, 0x63c30895, - 0x1b80, 0x63c30897, - 0x1b80, 0x30d908a5, - 0x1b80, 0x30d908a7, - 0x1b80, 0x620608b5, - 0x1b80, 0x620608b7, - 0x1b80, 0x30a508c5, - 0x1b80, 0x30a508c7, - 0x1b80, 0x002408d5, - 0x1b80, 0x002408d7, - 0x1b80, 0xe34508e5, - 0x1b80, 0xe34508e7, - 0x1b80, 0x000208f5, - 0x1b80, 0x000208f7, 
- 0x1b80, 0x54ea0905, - 0x1b80, 0x54ea0907, - 0x1b80, 0x0ba60915, - 0x1b80, 0x0ba60917, - 0x1b80, 0x00240925, - 0x1b80, 0x00240927, - 0x1b80, 0xe3450935, - 0x1b80, 0xe3450937, - 0x1b80, 0x00020945, - 0x1b80, 0x00020947, - 0x1b80, 0x63c30955, - 0x1b80, 0x63c30957, - 0x1b80, 0x30d90965, - 0x1b80, 0x30d90967, + 0x1b80, 0xa5110275, + 0x1b80, 0xa5110277, + 0x1b80, 0xe3ef0285, + 0x1b80, 0xe3ef0287, + 0x1b80, 0xf01f0295, + 0x1b80, 0xf01f0297, + 0x1b80, 0xf11f02a5, + 0x1b80, 0xf11f02a7, + 0x1b80, 0xf21f02b5, + 0x1b80, 0xf21f02b7, + 0x1b80, 0xf31f02c5, + 0x1b80, 0xf31f02c7, + 0x1b80, 0xf41f02d5, + 0x1b80, 0xf41f02d7, + 0x1b80, 0xf51f02e5, + 0x1b80, 0xf51f02e7, + 0x1b80, 0xf61f02f5, + 0x1b80, 0xf61f02f7, + 0x1b80, 0xf71f0305, + 0x1b80, 0xf71f0307, + 0x1b80, 0xf81f0315, + 0x1b80, 0xf81f0317, + 0x1b80, 0xf91f0325, + 0x1b80, 0xf91f0327, + 0x1b80, 0xfa1f0335, + 0x1b80, 0xfa1f0337, + 0x1b80, 0xfb1f0345, + 0x1b80, 0xfb1f0347, + 0x1b80, 0xfc1f0355, + 0x1b80, 0xfc1f0357, + 0x1b80, 0xfd1f0365, + 0x1b80, 0xfd1f0367, + 0x1b80, 0xfe1f0375, + 0x1b80, 0xfe1f0377, + 0x1b80, 0xf11f0385, + 0x1b80, 0xf11f0387, + 0x1b80, 0xf21f0395, + 0x1b80, 0xf21f0397, + 0x1b80, 0xf31f03a5, + 0x1b80, 0xf31f03a7, + 0x1b80, 0xf41f03b5, + 0x1b80, 0xf41f03b7, + 0x1b80, 0xf51f03c5, + 0x1b80, 0xf51f03c7, + 0x1b80, 0xf61f03d5, + 0x1b80, 0xf61f03d7, + 0x1b80, 0xf71f03e5, + 0x1b80, 0xf71f03e7, + 0x1b80, 0xf81f03f5, + 0x1b80, 0xf81f03f7, + 0x1b80, 0xf91f0405, + 0x1b80, 0xf91f0407, + 0x1b80, 0xfa1f0415, + 0x1b80, 0xfa1f0417, + 0x1b80, 0xfb1f0425, + 0x1b80, 0xfb1f0427, + 0x1b80, 0xfc1f0435, + 0x1b80, 0xfc1f0437, + 0x1b80, 0xfd1f0445, + 0x1b80, 0xfd1f0447, + 0x1b80, 0xfe1f0455, + 0x1b80, 0xfe1f0457, + 0x1b80, 0xff1f0465, + 0x1b80, 0xff1f0467, + 0x1b80, 0x00010475, + 0x1b80, 0x00010477, + 0x1b80, 0x30660485, + 0x1b80, 0x30660487, + 0x1b80, 0x307e0495, + 0x1b80, 0x307e0497, + 0x1b80, 0x308204a5, + 0x1b80, 0x308204a7, + 0x1b80, 0x310c04b5, + 0x1b80, 0x310c04b7, + 0x1b80, 0x308904c5, + 0x1b80, 0x308904c7, + 0x1b80, 0x309804d5, + 0x1b80, 0x309804d7, + 0x1b80, 0x30a704e5, + 0x1b80, 0x30a704e7, + 0x1b80, 0x308804f5, + 0x1b80, 0x308804f7, + 0x1b80, 0x30970505, + 0x1b80, 0x30970507, + 0x1b80, 0x30a60515, + 0x1b80, 0x30a60517, + 0x1b80, 0x31100525, + 0x1b80, 0x31100527, + 0x1b80, 0x311b0535, + 0x1b80, 0x311b0537, + 0x1b80, 0x31260545, + 0x1b80, 0x31260547, + 0x1b80, 0x31ae0555, + 0x1b80, 0x31ae0557, + 0x1b80, 0x318b0565, + 0x1b80, 0x318b0567, + 0x1b80, 0x31cb0575, + 0x1b80, 0x31cb0577, + 0x1b80, 0x307e0585, + 0x1b80, 0x307e0587, + 0x1b80, 0x310a0595, + 0x1b80, 0x310a0597, + 0x1b80, 0x31db05a5, + 0x1b80, 0x31db05a7, + 0x1b80, 0x31f405b5, + 0x1b80, 0x31f405b7, + 0x1b80, 0x320e05c5, + 0x1b80, 0x320e05c7, + 0x1b80, 0x321605d5, + 0x1b80, 0x321605d7, + 0x1b80, 0x321e05e5, + 0x1b80, 0x321e05e7, + 0x1b80, 0x322605f5, + 0x1b80, 0x322605f7, + 0x1b80, 0x322e0605, + 0x1b80, 0x322e0607, + 0x1b80, 0x323d0615, + 0x1b80, 0x323d0617, + 0x1b80, 0x324c0625, + 0x1b80, 0x324c0627, + 0x1b80, 0x32520635, + 0x1b80, 0x32520637, + 0x1b80, 0x32580645, + 0x1b80, 0x32580647, + 0x1b80, 0x325e0655, + 0x1b80, 0x325e0657, + 0x1b80, 0xe3880665, + 0x1b80, 0xe3880667, + 0x1b80, 0x4d040675, + 0x1b80, 0x4d040677, + 0x1b80, 0x20800685, + 0x1b80, 0x20800687, + 0x1b80, 0x00000695, + 0x1b80, 0x00000697, + 0x1b80, 0x4d0006a5, + 0x1b80, 0x4d0006a7, + 0x1b80, 0x550706b5, + 0x1b80, 0x550706b7, + 0x1b80, 0xe38006c5, + 0x1b80, 0xe38006c7, + 0x1b80, 0xe38006d5, + 0x1b80, 0xe38006d7, + 0x1b80, 0x4d0406e5, + 0x1b80, 0x4d0406e7, + 0x1b80, 0x208806f5, + 0x1b80, 0x208806f7, + 0x1b80, 0x02000705, + 0x1b80, 
0x02000707, + 0x1b80, 0x4d000715, + 0x1b80, 0x4d000717, + 0x1b80, 0x550f0725, + 0x1b80, 0x550f0727, + 0x1b80, 0xe3800735, + 0x1b80, 0xe3800737, + 0x1b80, 0x4f020745, + 0x1b80, 0x4f020747, + 0x1b80, 0x4e000755, + 0x1b80, 0x4e000757, + 0x1b80, 0x53020765, + 0x1b80, 0x53020767, + 0x1b80, 0x52010775, + 0x1b80, 0x52010777, + 0x1b80, 0xe3840785, + 0x1b80, 0xe3840787, + 0x1b80, 0x4d080795, + 0x1b80, 0x4d080797, + 0x1b80, 0x571007a5, + 0x1b80, 0x571007a7, + 0x1b80, 0x570007b5, + 0x1b80, 0x570007b7, + 0x1b80, 0x4d0007c5, + 0x1b80, 0x4d0007c7, + 0x1b80, 0x000107d5, + 0x1b80, 0x000107d7, + 0x1b80, 0xe38807e5, + 0x1b80, 0xe38807e7, + 0x1b80, 0x0bbd07f5, + 0x1b80, 0x0bbd07f7, + 0x1b80, 0xe3e20805, + 0x1b80, 0xe3e20807, + 0x1b80, 0x00010815, + 0x1b80, 0x00010817, + 0x1b80, 0x62060825, + 0x1b80, 0x62060827, + 0x1b80, 0xe3880835, + 0x1b80, 0xe3880837, + 0x1b80, 0x0bbd0845, + 0x1b80, 0x0bbd0847, + 0x1b80, 0xe3e20855, + 0x1b80, 0xe3e20857, + 0x1b80, 0x00010865, + 0x1b80, 0x00010867, + 0x1b80, 0x00010875, + 0x1b80, 0x00010877, + 0x1b80, 0x62060885, + 0x1b80, 0x62060887, + 0x1b80, 0x30bc0895, + 0x1b80, 0x30bc0897, + 0x1b80, 0x002608a5, + 0x1b80, 0x002608a7, + 0x1b80, 0xe3e208b5, + 0x1b80, 0xe3e208b7, + 0x1b80, 0x000208c5, + 0x1b80, 0x000208c7, + 0x1b80, 0x54ec08d5, + 0x1b80, 0x54ec08d7, + 0x1b80, 0x0ba608e5, + 0x1b80, 0x0ba608e7, + 0x1b80, 0x002608f5, + 0x1b80, 0x002608f7, + 0x1b80, 0xe3e20905, + 0x1b80, 0xe3e20907, + 0x1b80, 0x00020915, + 0x1b80, 0x00020917, + 0x1b80, 0xf7f50925, + 0x1b80, 0xf7f50927, + 0x1b80, 0x00300935, + 0x1b80, 0x00300937, + 0x1b80, 0x63c30945, + 0x1b80, 0x63c30947, + 0x1b80, 0x00020955, + 0x1b80, 0x00020957, + 0x1b80, 0x318b0965, + 0x1b80, 0x318b0967, 0x1b80, 0x62060975, 0x1b80, 0x62060977, - 0x1b80, 0x6c100985, - 0x1b80, 0x6c100987, - 0x1b80, 0x6d0f0995, - 0x1b80, 0x6d0f0997, - 0x1b80, 0xe2eb09a5, - 0x1b80, 0xe2eb09a7, - 0x1b80, 0xe34509b5, - 0x1b80, 0xe34509b7, - 0x1b80, 0x6c2409c5, - 0x1b80, 0x6c2409c7, - 0x1b80, 0xe2eb09d5, - 0x1b80, 0xe2eb09d7, - 0x1b80, 0xe34509e5, - 0x1b80, 0xe34509e7, - 0x1b80, 0x6c4409f5, - 0x1b80, 0x6c4409f7, - 0x1b80, 0xe2eb0a05, - 0x1b80, 0xe2eb0a07, - 0x1b80, 0xe3450a15, - 0x1b80, 0xe3450a17, - 0x1b80, 0x6c640a25, - 0x1b80, 0x6c640a27, - 0x1b80, 0xe2eb0a35, - 0x1b80, 0xe2eb0a37, - 0x1b80, 0xe3450a45, - 0x1b80, 0xe3450a47, - 0x1b80, 0x0baa0a55, - 0x1b80, 0x0baa0a57, - 0x1b80, 0x6c840a65, - 0x1b80, 0x6c840a67, - 0x1b80, 0x6d0f0a75, - 0x1b80, 0x6d0f0a77, - 0x1b80, 0xe2eb0a85, - 0x1b80, 0xe2eb0a87, - 0x1b80, 0xe3450a95, - 0x1b80, 0xe3450a97, - 0x1b80, 0x6ca40aa5, - 0x1b80, 0x6ca40aa7, - 0x1b80, 0xe2eb0ab5, - 0x1b80, 0xe2eb0ab7, - 0x1b80, 0xe3450ac5, - 0x1b80, 0xe3450ac7, - 0x1b80, 0x0bac0ad5, - 0x1b80, 0x0bac0ad7, - 0x1b80, 0x6cc40ae5, - 0x1b80, 0x6cc40ae7, - 0x1b80, 0x6d0f0af5, - 0x1b80, 0x6d0f0af7, - 0x1b80, 0xe2eb0b05, - 0x1b80, 0xe2eb0b07, - 0x1b80, 0xe3450b15, - 0x1b80, 0xe3450b17, - 0x1b80, 0x6ce40b25, - 0x1b80, 0x6ce40b27, - 0x1b80, 0xe2eb0b35, - 0x1b80, 0xe2eb0b37, - 0x1b80, 0xe3450b45, - 0x1b80, 0xe3450b47, - 0x1b80, 0x6cf40b55, - 0x1b80, 0x6cf40b57, - 0x1b80, 0xe2eb0b65, - 0x1b80, 0xe2eb0b67, - 0x1b80, 0xe3450b75, - 0x1b80, 0xe3450b77, - 0x1b80, 0x6c0c0b85, - 0x1b80, 0x6c0c0b87, - 0x1b80, 0x6d000b95, - 0x1b80, 0x6d000b97, - 0x1b80, 0xe2eb0ba5, - 0x1b80, 0xe2eb0ba7, - 0x1b80, 0xe3450bb5, - 0x1b80, 0xe3450bb7, - 0x1b80, 0x6c1c0bc5, - 0x1b80, 0x6c1c0bc7, - 0x1b80, 0xe2eb0bd5, - 0x1b80, 0xe2eb0bd7, - 0x1b80, 0xe3450be5, - 0x1b80, 0xe3450be7, - 0x1b80, 0x6c3c0bf5, - 0x1b80, 0x6c3c0bf7, - 0x1b80, 0xe2eb0c05, - 0x1b80, 0xe2eb0c07, - 0x1b80, 0xe3450c15, - 
0x1b80, 0xe3450c17, - 0x1b80, 0xf4bf0c25, - 0x1b80, 0xf4bf0c27, - 0x1b80, 0xf7be0c35, - 0x1b80, 0xf7be0c37, - 0x1b80, 0x6c5c0c45, - 0x1b80, 0x6c5c0c47, - 0x1b80, 0xe2eb0c55, - 0x1b80, 0xe2eb0c57, - 0x1b80, 0xe3450c65, - 0x1b80, 0xe3450c67, - 0x1b80, 0x6c7c0c75, - 0x1b80, 0x6c7c0c77, - 0x1b80, 0xe2eb0c85, - 0x1b80, 0xe2eb0c87, - 0x1b80, 0xe3450c95, - 0x1b80, 0xe3450c97, - 0x1b80, 0xf5c30ca5, - 0x1b80, 0xf5c30ca7, - 0x1b80, 0xf8c20cb5, - 0x1b80, 0xf8c20cb7, - 0x1b80, 0x6c9c0cc5, - 0x1b80, 0x6c9c0cc7, - 0x1b80, 0xe2eb0cd5, - 0x1b80, 0xe2eb0cd7, - 0x1b80, 0xe3450ce5, - 0x1b80, 0xe3450ce7, - 0x1b80, 0x6cbc0cf5, - 0x1b80, 0x6cbc0cf7, - 0x1b80, 0xe2eb0d05, - 0x1b80, 0xe2eb0d07, - 0x1b80, 0xe3450d15, - 0x1b80, 0xe3450d17, - 0x1b80, 0x6cdc0d25, - 0x1b80, 0x6cdc0d27, - 0x1b80, 0xe2eb0d35, - 0x1b80, 0xe2eb0d37, - 0x1b80, 0xe3450d45, - 0x1b80, 0xe3450d47, - 0x1b80, 0x6cf00d55, - 0x1b80, 0x6cf00d57, - 0x1b80, 0xe2eb0d65, - 0x1b80, 0xe2eb0d67, - 0x1b80, 0xe3450d75, - 0x1b80, 0xe3450d77, - 0x1b80, 0x63c30d85, - 0x1b80, 0x63c30d87, - 0x1b80, 0x55010d95, - 0x1b80, 0x55010d97, - 0x1b80, 0x57040da5, - 0x1b80, 0x57040da7, - 0x1b80, 0x57000db5, - 0x1b80, 0x57000db7, - 0x1b80, 0x96000dc5, - 0x1b80, 0x96000dc7, - 0x1b80, 0x57080dd5, - 0x1b80, 0x57080dd7, - 0x1b80, 0x57000de5, - 0x1b80, 0x57000de7, - 0x1b80, 0x95000df5, - 0x1b80, 0x95000df7, - 0x1b80, 0x4d000e05, - 0x1b80, 0x4d000e07, - 0x1b80, 0x63050e15, - 0x1b80, 0x63050e17, - 0x1b80, 0x7b400e25, - 0x1b80, 0x7b400e27, - 0x1b80, 0x7a000e35, - 0x1b80, 0x7a000e37, - 0x1b80, 0x79000e45, - 0x1b80, 0x79000e47, - 0x1b80, 0x7f400e55, - 0x1b80, 0x7f400e57, - 0x1b80, 0x7e000e65, - 0x1b80, 0x7e000e67, - 0x1b80, 0x7d000e75, - 0x1b80, 0x7d000e77, - 0x1b80, 0x00010e85, - 0x1b80, 0x00010e87, - 0x1b80, 0xe3170e95, - 0x1b80, 0xe3170e97, - 0x1b80, 0x00010ea5, - 0x1b80, 0x00010ea7, - 0x1b80, 0x5c320eb5, - 0x1b80, 0x5c320eb7, - 0x1b80, 0xe3410ec5, - 0x1b80, 0xe3410ec7, - 0x1b80, 0xe3170ed5, - 0x1b80, 0xe3170ed7, - 0x1b80, 0x00010ee5, - 0x1b80, 0x00010ee7, - 0x1b80, 0x31260ef5, - 0x1b80, 0x31260ef7, - 0x1b80, 0x00260f05, - 0x1b80, 0x00260f07, - 0x1b80, 0xe34a0f15, - 0x1b80, 0xe34a0f17, - 0x1b80, 0x00020f25, - 0x1b80, 0x00020f27, - 0x1b80, 0x54ec0f35, - 0x1b80, 0x54ec0f37, - 0x1b80, 0x0ba60f45, - 0x1b80, 0x0ba60f47, - 0x1b80, 0x00260f55, - 0x1b80, 0x00260f57, - 0x1b80, 0xe34a0f65, - 0x1b80, 0xe34a0f67, - 0x1b80, 0x00020f75, - 0x1b80, 0x00020f77, - 0x1b80, 0x63830f85, - 0x1b80, 0x63830f87, - 0x1b80, 0x30d90f95, - 0x1b80, 0x30d90f97, - 0x1b80, 0x311a0fa5, - 0x1b80, 0x311a0fa7, - 0x1b80, 0x00240fb5, - 0x1b80, 0x00240fb7, - 0x1b80, 0xe34a0fc5, - 0x1b80, 0xe34a0fc7, - 0x1b80, 0x00020fd5, - 0x1b80, 0x00020fd7, - 0x1b80, 0x54ea0fe5, - 0x1b80, 0x54ea0fe7, - 0x1b80, 0x0ba60ff5, - 0x1b80, 0x0ba60ff7, - 0x1b80, 0x00241005, - 0x1b80, 0x00241007, - 0x1b80, 0xe34a1015, - 0x1b80, 0xe34a1017, - 0x1b80, 0x00021025, - 0x1b80, 0x00021027, - 0x1b80, 0x63831035, - 0x1b80, 0x63831037, - 0x1b80, 0x30d91045, - 0x1b80, 0x30d91047, - 0x1b80, 0x5c321055, - 0x1b80, 0x5c321057, - 0x1b80, 0x54e61065, - 0x1b80, 0x54e61067, - 0x1b80, 0x6e101075, - 0x1b80, 0x6e101077, - 0x1b80, 0x6f0f1085, - 0x1b80, 0x6f0f1087, - 0x1b80, 0xe3171095, - 0x1b80, 0xe3171097, - 0x1b80, 0xe34a10a5, - 0x1b80, 0xe34a10a7, - 0x1b80, 0x5c3210b5, - 0x1b80, 0x5c3210b7, - 0x1b80, 0x54e710c5, - 0x1b80, 0x54e710c7, - 0x1b80, 0x6e2410d5, - 0x1b80, 0x6e2410d7, - 0x1b80, 0xe31710e5, - 0x1b80, 0xe31710e7, - 0x1b80, 0xe34a10f5, - 0x1b80, 0xe34a10f7, - 0x1b80, 0x5c321105, - 0x1b80, 0x5c321107, - 0x1b80, 0x54e81115, - 0x1b80, 0x54e81117, - 0x1b80, 
0x6e441125, - 0x1b80, 0x6e441127, - 0x1b80, 0xe3171135, - 0x1b80, 0xe3171137, - 0x1b80, 0xe34a1145, - 0x1b80, 0xe34a1147, - 0x1b80, 0x5c321155, - 0x1b80, 0x5c321157, - 0x1b80, 0x54e91165, - 0x1b80, 0x54e91167, - 0x1b80, 0x6e641175, - 0x1b80, 0x6e641177, - 0x1b80, 0xe3171185, - 0x1b80, 0xe3171187, - 0x1b80, 0xe34a1195, - 0x1b80, 0xe34a1197, - 0x1b80, 0x5c3211a5, - 0x1b80, 0x5c3211a7, - 0x1b80, 0x54ea11b5, - 0x1b80, 0x54ea11b7, - 0x1b80, 0x0baa11c5, - 0x1b80, 0x0baa11c7, - 0x1b80, 0x6e8411d5, - 0x1b80, 0x6e8411d7, - 0x1b80, 0x6f0f11e5, - 0x1b80, 0x6f0f11e7, - 0x1b80, 0xe31711f5, - 0x1b80, 0xe31711f7, - 0x1b80, 0xe34a1205, - 0x1b80, 0xe34a1207, - 0x1b80, 0x5c321215, - 0x1b80, 0x5c321217, - 0x1b80, 0x54eb1225, - 0x1b80, 0x54eb1227, - 0x1b80, 0x6ea41235, - 0x1b80, 0x6ea41237, - 0x1b80, 0xe3171245, - 0x1b80, 0xe3171247, - 0x1b80, 0xe34a1255, - 0x1b80, 0xe34a1257, + 0x1b80, 0x30b40985, + 0x1b80, 0x30b40987, + 0x1b80, 0x00240995, + 0x1b80, 0x00240997, + 0x1b80, 0xe3e209a5, + 0x1b80, 0xe3e209a7, + 0x1b80, 0x000209b5, + 0x1b80, 0x000209b7, + 0x1b80, 0x54ea09c5, + 0x1b80, 0x54ea09c7, + 0x1b80, 0x0ba609d5, + 0x1b80, 0x0ba609d7, + 0x1b80, 0x002409e5, + 0x1b80, 0x002409e7, + 0x1b80, 0xe3e209f5, + 0x1b80, 0xe3e209f7, + 0x1b80, 0x00020a05, + 0x1b80, 0x00020a07, + 0x1b80, 0xf8e60a15, + 0x1b80, 0xf8e60a17, + 0x1b80, 0x00300a25, + 0x1b80, 0x00300a27, + 0x1b80, 0x63c30a35, + 0x1b80, 0x63c30a37, + 0x1b80, 0x00020a45, + 0x1b80, 0x00020a47, + 0x1b80, 0x318b0a55, + 0x1b80, 0x318b0a57, + 0x1b80, 0x62060a65, + 0x1b80, 0x62060a67, + 0x1b80, 0x6c100a75, + 0x1b80, 0x6c100a77, + 0x1b80, 0x6d0f0a85, + 0x1b80, 0x6d0f0a87, + 0x1b80, 0xe3880a95, + 0x1b80, 0xe3880a97, + 0x1b80, 0xe3e20aa5, + 0x1b80, 0xe3e20aa7, + 0x1b80, 0x6c240ab5, + 0x1b80, 0x6c240ab7, + 0x1b80, 0xe3880ac5, + 0x1b80, 0xe3880ac7, + 0x1b80, 0xe3e20ad5, + 0x1b80, 0xe3e20ad7, + 0x1b80, 0x6c440ae5, + 0x1b80, 0x6c440ae7, + 0x1b80, 0xe3880af5, + 0x1b80, 0xe3880af7, + 0x1b80, 0xe3e20b05, + 0x1b80, 0xe3e20b07, + 0x1b80, 0x6c640b15, + 0x1b80, 0x6c640b17, + 0x1b80, 0xe3880b25, + 0x1b80, 0xe3880b27, + 0x1b80, 0xe3e20b35, + 0x1b80, 0xe3e20b37, + 0x1b80, 0x0baa0b45, + 0x1b80, 0x0baa0b47, + 0x1b80, 0x6c840b55, + 0x1b80, 0x6c840b57, + 0x1b80, 0x6d0f0b65, + 0x1b80, 0x6d0f0b67, + 0x1b80, 0xe3880b75, + 0x1b80, 0xe3880b77, + 0x1b80, 0xe3e20b85, + 0x1b80, 0xe3e20b87, + 0x1b80, 0x6ca40b95, + 0x1b80, 0x6ca40b97, + 0x1b80, 0xe3880ba5, + 0x1b80, 0xe3880ba7, + 0x1b80, 0xe3e20bb5, + 0x1b80, 0xe3e20bb7, + 0x1b80, 0x0bac0bc5, + 0x1b80, 0x0bac0bc7, + 0x1b80, 0x6cc40bd5, + 0x1b80, 0x6cc40bd7, + 0x1b80, 0x6d0f0be5, + 0x1b80, 0x6d0f0be7, + 0x1b80, 0xe3880bf5, + 0x1b80, 0xe3880bf7, + 0x1b80, 0xe3e20c05, + 0x1b80, 0xe3e20c07, + 0x1b80, 0x6ce40c15, + 0x1b80, 0x6ce40c17, + 0x1b80, 0xe3880c25, + 0x1b80, 0xe3880c27, + 0x1b80, 0xe3e20c35, + 0x1b80, 0xe3e20c37, + 0x1b80, 0x6cf40c45, + 0x1b80, 0x6cf40c47, + 0x1b80, 0xe3880c55, + 0x1b80, 0xe3880c57, + 0x1b80, 0xe3e20c65, + 0x1b80, 0xe3e20c67, + 0x1b80, 0x6c0c0c75, + 0x1b80, 0x6c0c0c77, + 0x1b80, 0x6d000c85, + 0x1b80, 0x6d000c87, + 0x1b80, 0xe3880c95, + 0x1b80, 0xe3880c97, + 0x1b80, 0xe3e20ca5, + 0x1b80, 0xe3e20ca7, + 0x1b80, 0x6c1c0cb5, + 0x1b80, 0x6c1c0cb7, + 0x1b80, 0xe3880cc5, + 0x1b80, 0xe3880cc7, + 0x1b80, 0xe3e20cd5, + 0x1b80, 0xe3e20cd7, + 0x1b80, 0x6c3c0ce5, + 0x1b80, 0x6c3c0ce7, + 0x1b80, 0xe3880cf5, + 0x1b80, 0xe3880cf7, + 0x1b80, 0xe3e20d05, + 0x1b80, 0xe3e20d07, + 0x1b80, 0xf4b90d15, + 0x1b80, 0xf4b90d17, + 0x1b80, 0xf7b80d25, + 0x1b80, 0xf7b80d27, + 0x1b80, 0x6c5c0d35, + 0x1b80, 0x6c5c0d37, + 0x1b80, 0xe3880d45, + 0x1b80, 0xe3880d47, 
+ 0x1b80, 0xe3e20d55, + 0x1b80, 0xe3e20d57, + 0x1b80, 0x6c7c0d65, + 0x1b80, 0x6c7c0d67, + 0x1b80, 0xe3880d75, + 0x1b80, 0xe3880d77, + 0x1b80, 0xe3e20d85, + 0x1b80, 0xe3e20d87, + 0x1b80, 0xf5c00d95, + 0x1b80, 0xf5c00d97, + 0x1b80, 0xf8bf0da5, + 0x1b80, 0xf8bf0da7, + 0x1b80, 0x6c9c0db5, + 0x1b80, 0x6c9c0db7, + 0x1b80, 0xe3880dc5, + 0x1b80, 0xe3880dc7, + 0x1b80, 0xe3e20dd5, + 0x1b80, 0xe3e20dd7, + 0x1b80, 0x6cbc0de5, + 0x1b80, 0x6cbc0de7, + 0x1b80, 0xe3880df5, + 0x1b80, 0xe3880df7, + 0x1b80, 0xe3e20e05, + 0x1b80, 0xe3e20e07, + 0x1b80, 0x6cdc0e15, + 0x1b80, 0x6cdc0e17, + 0x1b80, 0xe3880e25, + 0x1b80, 0xe3880e27, + 0x1b80, 0xe3e20e35, + 0x1b80, 0xe3e20e37, + 0x1b80, 0x6cf00e45, + 0x1b80, 0x6cf00e47, + 0x1b80, 0xe3880e55, + 0x1b80, 0xe3880e57, + 0x1b80, 0xe3e20e65, + 0x1b80, 0xe3e20e67, + 0x1b80, 0xf9a00e75, + 0x1b80, 0xf9a00e77, + 0x1b80, 0x00300e85, + 0x1b80, 0x00300e87, + 0x1b80, 0x63c30e95, + 0x1b80, 0x63c30e97, + 0x1b80, 0x00020ea5, + 0x1b80, 0x00020ea7, + 0x1b80, 0x318b0eb5, + 0x1b80, 0x318b0eb7, + 0x1b80, 0x00300ec5, + 0x1b80, 0x00300ec7, + 0x1b80, 0x00000ed5, + 0x1b80, 0x00000ed7, + 0x1b80, 0x00020ee5, + 0x1b80, 0x00020ee7, + 0x1b80, 0x55010ef5, + 0x1b80, 0x55010ef7, + 0x1b80, 0x57040f05, + 0x1b80, 0x57040f07, + 0x1b80, 0x57000f15, + 0x1b80, 0x57000f17, + 0x1b80, 0x96000f25, + 0x1b80, 0x96000f27, + 0x1b80, 0x00070f35, + 0x1b80, 0x00070f37, + 0x1b80, 0x5be00f45, + 0x1b80, 0x5be00f47, + 0x1b80, 0x5a000f55, + 0x1b80, 0x5a000f57, + 0x1b80, 0x59000f65, + 0x1b80, 0x59000f67, + 0x1b80, 0x58000f75, + 0x1b80, 0x58000f77, + 0x1b80, 0x00040f85, + 0x1b80, 0x00040f87, + 0x1b80, 0x57080f95, + 0x1b80, 0x57080f97, + 0x1b80, 0x57000fa5, + 0x1b80, 0x57000fa7, + 0x1b80, 0x95000fb5, + 0x1b80, 0x95000fb7, + 0x1b80, 0x00070fc5, + 0x1b80, 0x00070fc7, + 0x1b80, 0x58010fd5, + 0x1b80, 0x58010fd7, + 0x1b80, 0x00040fe5, + 0x1b80, 0x00040fe7, + 0x1b80, 0x00300ff5, + 0x1b80, 0x00300ff7, + 0x1b80, 0x00001005, + 0x1b80, 0x00001007, + 0x1b80, 0x00021015, + 0x1b80, 0x00021017, + 0x1b80, 0x63051025, + 0x1b80, 0x63051027, + 0x1b80, 0x7b401035, + 0x1b80, 0x7b401037, + 0x1b80, 0x7a001045, + 0x1b80, 0x7a001047, + 0x1b80, 0x79001055, + 0x1b80, 0x79001057, + 0x1b80, 0x7f401065, + 0x1b80, 0x7f401067, + 0x1b80, 0x7e001075, + 0x1b80, 0x7e001077, + 0x1b80, 0x7d001085, + 0x1b80, 0x7d001087, + 0x1b80, 0x00011095, + 0x1b80, 0x00011097, + 0x1b80, 0xe3b410a5, + 0x1b80, 0xe3b410a7, + 0x1b80, 0x000110b5, + 0x1b80, 0x000110b7, + 0x1b80, 0x5c3210c5, + 0x1b80, 0x5c3210c7, + 0x1b80, 0x54fd10d5, + 0x1b80, 0x54fd10d7, + 0x1b80, 0xe3b410e5, + 0x1b80, 0xe3b410e7, + 0x1b80, 0x000110f5, + 0x1b80, 0x000110f7, + 0x1b80, 0x31471105, + 0x1b80, 0x31471107, + 0x1b80, 0x00261115, + 0x1b80, 0x00261117, + 0x1b80, 0xe3e71125, + 0x1b80, 0xe3e71127, + 0x1b80, 0x00021135, + 0x1b80, 0x00021137, + 0x1b80, 0x54ec1145, + 0x1b80, 0x54ec1147, + 0x1b80, 0x0ba61155, + 0x1b80, 0x0ba61157, + 0x1b80, 0x00261165, + 0x1b80, 0x00261167, + 0x1b80, 0xe3e71175, + 0x1b80, 0xe3e71177, + 0x1b80, 0x00021185, + 0x1b80, 0x00021187, + 0x1b80, 0x63431195, + 0x1b80, 0x63431197, + 0x1b80, 0x30ec11a5, + 0x1b80, 0x30ec11a7, + 0x1b80, 0x313b11b5, + 0x1b80, 0x313b11b7, + 0x1b80, 0x002411c5, + 0x1b80, 0x002411c7, + 0x1b80, 0xe3e711d5, + 0x1b80, 0xe3e711d7, + 0x1b80, 0x000211e5, + 0x1b80, 0x000211e7, + 0x1b80, 0x54ea11f5, + 0x1b80, 0x54ea11f7, + 0x1b80, 0x0ba61205, + 0x1b80, 0x0ba61207, + 0x1b80, 0x00241215, + 0x1b80, 0x00241217, + 0x1b80, 0xe3e71225, + 0x1b80, 0xe3e71227, + 0x1b80, 0x00021235, + 0x1b80, 0x00021237, + 0x1b80, 0x63431245, + 0x1b80, 0x63431247, + 0x1b80, 0x30ec1255, + 0x1b80, 
0x30ec1257, 0x1b80, 0x5c321265, 0x1b80, 0x5c321267, - 0x1b80, 0x54ec1275, - 0x1b80, 0x54ec1277, - 0x1b80, 0x0bac1285, - 0x1b80, 0x0bac1287, - 0x1b80, 0x6ec41295, - 0x1b80, 0x6ec41297, - 0x1b80, 0x6f0f12a5, - 0x1b80, 0x6f0f12a7, - 0x1b80, 0xe31712b5, - 0x1b80, 0xe31712b7, - 0x1b80, 0xe34a12c5, - 0x1b80, 0xe34a12c7, - 0x1b80, 0x5c3212d5, - 0x1b80, 0x5c3212d7, - 0x1b80, 0x54ed12e5, - 0x1b80, 0x54ed12e7, - 0x1b80, 0x6ee412f5, - 0x1b80, 0x6ee412f7, - 0x1b80, 0xe3171305, - 0x1b80, 0xe3171307, - 0x1b80, 0xe34a1315, - 0x1b80, 0xe34a1317, - 0x1b80, 0x5c321325, - 0x1b80, 0x5c321327, - 0x1b80, 0x54ee1335, - 0x1b80, 0x54ee1337, - 0x1b80, 0x6ef41345, - 0x1b80, 0x6ef41347, - 0x1b80, 0xe3171355, - 0x1b80, 0xe3171357, - 0x1b80, 0xe34a1365, - 0x1b80, 0xe34a1367, - 0x1b80, 0x5c321375, - 0x1b80, 0x5c321377, - 0x1b80, 0x54ef1385, - 0x1b80, 0x54ef1387, - 0x1b80, 0x6e0c1395, - 0x1b80, 0x6e0c1397, - 0x1b80, 0x6f0013a5, - 0x1b80, 0x6f0013a7, - 0x1b80, 0xe31713b5, - 0x1b80, 0xe31713b7, - 0x1b80, 0xe34a13c5, - 0x1b80, 0xe34a13c7, - 0x1b80, 0x5c3213d5, - 0x1b80, 0x5c3213d7, - 0x1b80, 0x54f013e5, - 0x1b80, 0x54f013e7, - 0x1b80, 0x6e1c13f5, - 0x1b80, 0x6e1c13f7, - 0x1b80, 0xe3171405, - 0x1b80, 0xe3171407, - 0x1b80, 0xe34a1415, - 0x1b80, 0xe34a1417, + 0x1b80, 0x54e61275, + 0x1b80, 0x54e61277, + 0x1b80, 0x6e101285, + 0x1b80, 0x6e101287, + 0x1b80, 0x6f0f1295, + 0x1b80, 0x6f0f1297, + 0x1b80, 0xe3b412a5, + 0x1b80, 0xe3b412a7, + 0x1b80, 0xe3e712b5, + 0x1b80, 0xe3e712b7, + 0x1b80, 0x5c3212c5, + 0x1b80, 0x5c3212c7, + 0x1b80, 0x54e712d5, + 0x1b80, 0x54e712d7, + 0x1b80, 0x6e2412e5, + 0x1b80, 0x6e2412e7, + 0x1b80, 0xe3b412f5, + 0x1b80, 0xe3b412f7, + 0x1b80, 0xe3e71305, + 0x1b80, 0xe3e71307, + 0x1b80, 0x5c321315, + 0x1b80, 0x5c321317, + 0x1b80, 0x54e81325, + 0x1b80, 0x54e81327, + 0x1b80, 0x6e441335, + 0x1b80, 0x6e441337, + 0x1b80, 0xe3b41345, + 0x1b80, 0xe3b41347, + 0x1b80, 0xe3e71355, + 0x1b80, 0xe3e71357, + 0x1b80, 0x5c321365, + 0x1b80, 0x5c321367, + 0x1b80, 0x54e91375, + 0x1b80, 0x54e91377, + 0x1b80, 0x6e641385, + 0x1b80, 0x6e641387, + 0x1b80, 0xe3b41395, + 0x1b80, 0xe3b41397, + 0x1b80, 0xe3e713a5, + 0x1b80, 0xe3e713a7, + 0x1b80, 0x5c3213b5, + 0x1b80, 0x5c3213b7, + 0x1b80, 0x54ea13c5, + 0x1b80, 0x54ea13c7, + 0x1b80, 0x0baa13d5, + 0x1b80, 0x0baa13d7, + 0x1b80, 0x6e8413e5, + 0x1b80, 0x6e8413e7, + 0x1b80, 0x6f0f13f5, + 0x1b80, 0x6f0f13f7, + 0x1b80, 0xe3b41405, + 0x1b80, 0xe3b41407, + 0x1b80, 0xe3e71415, + 0x1b80, 0xe3e71417, 0x1b80, 0x5c321425, 0x1b80, 0x5c321427, - 0x1b80, 0x54f11435, - 0x1b80, 0x54f11437, - 0x1b80, 0x6e3c1445, - 0x1b80, 0x6e3c1447, - 0x1b80, 0xe3171455, - 0x1b80, 0xe3171457, - 0x1b80, 0xe34a1465, - 0x1b80, 0xe34a1467, - 0x1b80, 0xfaa91475, - 0x1b80, 0xfaa91477, - 0x1b80, 0x5c321485, - 0x1b80, 0x5c321487, - 0x1b80, 0x54f21495, - 0x1b80, 0x54f21497, - 0x1b80, 0x6e5c14a5, - 0x1b80, 0x6e5c14a7, - 0x1b80, 0xe31714b5, - 0x1b80, 0xe31714b7, - 0x1b80, 0xe34a14c5, - 0x1b80, 0xe34a14c7, - 0x1b80, 0x5c3214d5, - 0x1b80, 0x5c3214d7, - 0x1b80, 0x54f314e5, - 0x1b80, 0x54f314e7, - 0x1b80, 0x6e7c14f5, - 0x1b80, 0x6e7c14f7, - 0x1b80, 0xe3171505, - 0x1b80, 0xe3171507, - 0x1b80, 0xe34a1515, - 0x1b80, 0xe34a1517, - 0x1b80, 0xfba91525, - 0x1b80, 0xfba91527, + 0x1b80, 0x54eb1435, + 0x1b80, 0x54eb1437, + 0x1b80, 0x6ea41445, + 0x1b80, 0x6ea41447, + 0x1b80, 0xe3b41455, + 0x1b80, 0xe3b41457, + 0x1b80, 0xe3e71465, + 0x1b80, 0xe3e71467, + 0x1b80, 0x5c321475, + 0x1b80, 0x5c321477, + 0x1b80, 0x54ec1485, + 0x1b80, 0x54ec1487, + 0x1b80, 0x0bac1495, + 0x1b80, 0x0bac1497, + 0x1b80, 0x6ec414a5, + 0x1b80, 0x6ec414a7, + 0x1b80, 0x6f0f14b5, + 
0x1b80, 0x6f0f14b7, + 0x1b80, 0xe3b414c5, + 0x1b80, 0xe3b414c7, + 0x1b80, 0xe3e714d5, + 0x1b80, 0xe3e714d7, + 0x1b80, 0x5c3214e5, + 0x1b80, 0x5c3214e7, + 0x1b80, 0x54ed14f5, + 0x1b80, 0x54ed14f7, + 0x1b80, 0x6ee41505, + 0x1b80, 0x6ee41507, + 0x1b80, 0xe3b41515, + 0x1b80, 0xe3b41517, + 0x1b80, 0xe3e71525, + 0x1b80, 0xe3e71527, 0x1b80, 0x5c321535, 0x1b80, 0x5c321537, - 0x1b80, 0x54f41545, - 0x1b80, 0x54f41547, - 0x1b80, 0x6e9c1555, - 0x1b80, 0x6e9c1557, - 0x1b80, 0xe3171565, - 0x1b80, 0xe3171567, - 0x1b80, 0xe34a1575, - 0x1b80, 0xe34a1577, + 0x1b80, 0x54ee1545, + 0x1b80, 0x54ee1547, + 0x1b80, 0x6ef41555, + 0x1b80, 0x6ef41557, + 0x1b80, 0xe3b41565, + 0x1b80, 0xe3b41567, + 0x1b80, 0xe3e71575, + 0x1b80, 0xe3e71577, 0x1b80, 0x5c321585, 0x1b80, 0x5c321587, - 0x1b80, 0x54f51595, - 0x1b80, 0x54f51597, - 0x1b80, 0x6ebc15a5, - 0x1b80, 0x6ebc15a7, - 0x1b80, 0xe31715b5, - 0x1b80, 0xe31715b7, - 0x1b80, 0xe34a15c5, - 0x1b80, 0xe34a15c7, - 0x1b80, 0x5c3215d5, - 0x1b80, 0x5c3215d7, - 0x1b80, 0x54f615e5, - 0x1b80, 0x54f615e7, - 0x1b80, 0x6edc15f5, - 0x1b80, 0x6edc15f7, - 0x1b80, 0xe3171605, - 0x1b80, 0xe3171607, - 0x1b80, 0xe34a1615, - 0x1b80, 0xe34a1617, - 0x1b80, 0x5c321625, - 0x1b80, 0x5c321627, - 0x1b80, 0x54f71635, - 0x1b80, 0x54f71637, - 0x1b80, 0x6ef01645, - 0x1b80, 0x6ef01647, - 0x1b80, 0xe3171655, - 0x1b80, 0xe3171657, - 0x1b80, 0xe34a1665, - 0x1b80, 0xe34a1667, - 0x1b80, 0x63831675, - 0x1b80, 0x63831677, - 0x1b80, 0x30d91685, - 0x1b80, 0x30d91687, - 0x1b80, 0x00011695, - 0x1b80, 0x00011697, - 0x1b80, 0x000416a5, - 0x1b80, 0x000416a7, - 0x1b80, 0x550116b5, - 0x1b80, 0x550116b7, - 0x1b80, 0x5c3116c5, - 0x1b80, 0x5c3116c7, - 0x1b80, 0x5f8216d5, - 0x1b80, 0x5f8216d7, - 0x1b80, 0x660516e5, - 0x1b80, 0x660516e7, - 0x1b80, 0x000616f5, - 0x1b80, 0x000616f7, - 0x1b80, 0x5d801705, - 0x1b80, 0x5d801707, - 0x1b80, 0x09001715, - 0x1b80, 0x09001717, - 0x1b80, 0x0a011725, - 0x1b80, 0x0a011727, - 0x1b80, 0x0b401735, - 0x1b80, 0x0b401737, - 0x1b80, 0x0d001745, - 0x1b80, 0x0d001747, - 0x1b80, 0x0f011755, - 0x1b80, 0x0f011757, - 0x1b80, 0x002a1765, - 0x1b80, 0x002a1767, - 0x1b80, 0x055a1775, - 0x1b80, 0x055a1777, - 0x1b80, 0x05db1785, - 0x1b80, 0x05db1787, - 0x1b80, 0xe3351795, - 0x1b80, 0xe3351797, - 0x1b80, 0xe2e317a5, - 0x1b80, 0xe2e317a7, - 0x1b80, 0x000617b5, - 0x1b80, 0x000617b7, - 0x1b80, 0x06da17c5, - 0x1b80, 0x06da17c7, - 0x1b80, 0x07db17d5, - 0x1b80, 0x07db17d7, - 0x1b80, 0xe33517e5, - 0x1b80, 0xe33517e7, - 0x1b80, 0xe2e317f5, - 0x1b80, 0xe2e317f7, - 0x1b80, 0xe32c1805, - 0x1b80, 0xe32c1807, - 0x1b80, 0x00021815, - 0x1b80, 0x00021817, - 0x1b80, 0xe3311825, - 0x1b80, 0xe3311827, - 0x1b80, 0x5d001835, - 0x1b80, 0x5d001837, - 0x1b80, 0x00041845, - 0x1b80, 0x00041847, - 0x1b80, 0x5fa21855, - 0x1b80, 0x5fa21857, - 0x1b80, 0x00011865, - 0x1b80, 0x00011867, - 0x1b80, 0xe2571875, - 0x1b80, 0xe2571877, - 0x1b80, 0x74081885, - 0x1b80, 0x74081887, - 0x1b80, 0xe2a11895, - 0x1b80, 0xe2a11897, - 0x1b80, 0xe28318a5, - 0x1b80, 0xe28318a7, - 0x1b80, 0xe2c118b5, - 0x1b80, 0xe2c118b7, - 0x1b80, 0xb90018c5, - 0x1b80, 0xb90018c7, - 0x1b80, 0x990018d5, - 0x1b80, 0x990018d7, - 0x1b80, 0x000618e5, - 0x1b80, 0x000618e7, - 0x1b80, 0x770018f5, - 0x1b80, 0x770018f7, - 0x1b80, 0x00041905, - 0x1b80, 0x00041907, - 0x1b80, 0x49041915, - 0x1b80, 0x49041917, - 0x1b80, 0x4bb01925, - 0x1b80, 0x4bb01927, - 0x1b80, 0x00061935, - 0x1b80, 0x00061937, - 0x1b80, 0x75041945, - 0x1b80, 0x75041947, - 0x1b80, 0x77081955, - 0x1b80, 0x77081957, - 0x1b80, 0x00071965, - 0x1b80, 0x00071967, - 0x1b80, 0x77101975, - 0x1b80, 0x77101977, - 0x1b80, 0x00041985, 
- 0x1b80, 0x00041987, - 0x1b80, 0x44801995, - 0x1b80, 0x44801997, - 0x1b80, 0x45ff19a5, - 0x1b80, 0x45ff19a7, - 0x1b80, 0x463f19b5, - 0x1b80, 0x463f19b7, - 0x1b80, 0x473119c5, - 0x1b80, 0x473119c7, - 0x1b80, 0x400819d5, - 0x1b80, 0x400819d7, - 0x1b80, 0xe23e19e5, - 0x1b80, 0xe23e19e7, - 0x1b80, 0x000119f5, - 0x1b80, 0x000119f7, - 0x1b80, 0xe2571a05, - 0x1b80, 0xe2571a07, - 0x1b80, 0x74081a15, - 0x1b80, 0x74081a17, - 0x1b80, 0xe2b11a25, - 0x1b80, 0xe2b11a27, - 0x1b80, 0xe2831a35, - 0x1b80, 0xe2831a37, - 0x1b80, 0xe2c71a45, - 0x1b80, 0xe2c71a47, - 0x1b80, 0xb9001a55, - 0x1b80, 0xb9001a57, - 0x1b80, 0x99001a65, - 0x1b80, 0x99001a67, - 0x1b80, 0x00061a75, - 0x1b80, 0x00061a77, - 0x1b80, 0x77001a85, - 0x1b80, 0x77001a87, - 0x1b80, 0x00051a95, - 0x1b80, 0x00051a97, - 0x1b80, 0x61041aa5, - 0x1b80, 0x61041aa7, - 0x1b80, 0x63b01ab5, - 0x1b80, 0x63b01ab7, - 0x1b80, 0x00061ac5, - 0x1b80, 0x00061ac7, - 0x1b80, 0x75081ad5, - 0x1b80, 0x75081ad7, - 0x1b80, 0x77081ae5, - 0x1b80, 0x77081ae7, - 0x1b80, 0x00071af5, - 0x1b80, 0x00071af7, - 0x1b80, 0x77201b05, - 0x1b80, 0x77201b07, - 0x1b80, 0x00051b15, - 0x1b80, 0x00051b17, - 0x1b80, 0x5c801b25, - 0x1b80, 0x5c801b27, - 0x1b80, 0x5dff1b35, - 0x1b80, 0x5dff1b37, - 0x1b80, 0x5e3f1b45, - 0x1b80, 0x5e3f1b47, - 0x1b80, 0x5f311b55, - 0x1b80, 0x5f311b57, - 0x1b80, 0x00041b65, - 0x1b80, 0x00041b67, - 0x1b80, 0x400a1b75, - 0x1b80, 0x400a1b77, - 0x1b80, 0xe23e1b85, - 0x1b80, 0xe23e1b87, - 0x1b80, 0x00011b95, - 0x1b80, 0x00011b97, - 0x1b80, 0xe2571ba5, - 0x1b80, 0xe2571ba7, - 0x1b80, 0x74081bb5, - 0x1b80, 0x74081bb7, - 0x1b80, 0xe2a11bc5, - 0x1b80, 0xe2a11bc7, - 0x1b80, 0xe2831bd5, - 0x1b80, 0xe2831bd7, - 0x1b80, 0xe2c11be5, - 0x1b80, 0xe2c11be7, - 0x1b80, 0xe2cd1bf5, - 0x1b80, 0xe2cd1bf7, - 0x1b80, 0xe2101c05, - 0x1b80, 0xe2101c07, - 0x1b80, 0x00011c15, - 0x1b80, 0x00011c17, - 0x1b80, 0xe2571c25, - 0x1b80, 0xe2571c27, - 0x1b80, 0x74081c35, - 0x1b80, 0x74081c37, - 0x1b80, 0xe2b11c45, - 0x1b80, 0xe2b11c47, - 0x1b80, 0xe2831c55, - 0x1b80, 0xe2831c57, - 0x1b80, 0xe2c71c65, - 0x1b80, 0xe2c71c67, - 0x1b80, 0xe2cd1c75, - 0x1b80, 0xe2cd1c77, - 0x1b80, 0xe2261c85, - 0x1b80, 0xe2261c87, - 0x1b80, 0x00011c95, - 0x1b80, 0x00011c97, - 0x1b80, 0xe26d1ca5, - 0x1b80, 0xe26d1ca7, - 0x1b80, 0x74001cb5, - 0x1b80, 0x74001cb7, - 0x1b80, 0xe2a11cc5, - 0x1b80, 0xe2a11cc7, - 0x1b80, 0xe2921cd5, - 0x1b80, 0xe2921cd7, - 0x1b80, 0xe2c11ce5, - 0x1b80, 0xe2c11ce7, - 0x1b80, 0xe2cd1cf5, - 0x1b80, 0xe2cd1cf7, - 0x1b80, 0xe2101d05, - 0x1b80, 0xe2101d07, - 0x1b80, 0x00011d15, - 0x1b80, 0x00011d17, - 0x1b80, 0xe26d1d25, - 0x1b80, 0xe26d1d27, - 0x1b80, 0x74001d35, - 0x1b80, 0x74001d37, - 0x1b80, 0xe2b11d45, - 0x1b80, 0xe2b11d47, - 0x1b80, 0xe2921d55, - 0x1b80, 0xe2921d57, - 0x1b80, 0xe2c71d65, - 0x1b80, 0xe2c71d67, - 0x1b80, 0xe2cd1d75, - 0x1b80, 0xe2cd1d77, - 0x1b80, 0xe2261d85, - 0x1b80, 0xe2261d87, - 0x1b80, 0x00011d95, - 0x1b80, 0x00011d97, - 0x1b80, 0x00041da5, - 0x1b80, 0x00041da7, - 0x1b80, 0x445b1db5, - 0x1b80, 0x445b1db7, - 0x1b80, 0x47b01dc5, - 0x1b80, 0x47b01dc7, - 0x1b80, 0x47301dd5, - 0x1b80, 0x47301dd7, - 0x1b80, 0x47001de5, - 0x1b80, 0x47001de7, - 0x1b80, 0x00061df5, - 0x1b80, 0x00061df7, - 0x1b80, 0x77081e05, - 0x1b80, 0x77081e07, - 0x1b80, 0x00041e15, - 0x1b80, 0x00041e17, - 0x1b80, 0x49401e25, - 0x1b80, 0x49401e27, - 0x1b80, 0x4bb01e35, - 0x1b80, 0x4bb01e37, - 0x1b80, 0x00071e45, - 0x1b80, 0x00071e47, - 0x1b80, 0x54401e55, - 0x1b80, 0x54401e57, - 0x1b80, 0x00041e65, - 0x1b80, 0x00041e67, - 0x1b80, 0x40081e75, - 0x1b80, 0x40081e77, - 0x1b80, 0x00011e85, - 0x1b80, 0x00011e87, - 0x1b80, 
0x00051e95, - 0x1b80, 0x00051e97, - 0x1b80, 0x5c5b1ea5, - 0x1b80, 0x5c5b1ea7, - 0x1b80, 0x5fb01eb5, - 0x1b80, 0x5fb01eb7, - 0x1b80, 0x5f301ec5, - 0x1b80, 0x5f301ec7, - 0x1b80, 0x5f001ed5, - 0x1b80, 0x5f001ed7, - 0x1b80, 0x00061ee5, - 0x1b80, 0x00061ee7, - 0x1b80, 0x77081ef5, - 0x1b80, 0x77081ef7, - 0x1b80, 0x00051f05, - 0x1b80, 0x00051f07, - 0x1b80, 0x61401f15, - 0x1b80, 0x61401f17, - 0x1b80, 0x63b01f25, - 0x1b80, 0x63b01f27, - 0x1b80, 0x00071f35, - 0x1b80, 0x00071f37, - 0x1b80, 0x54401f45, - 0x1b80, 0x54401f47, - 0x1b80, 0x00041f55, - 0x1b80, 0x00041f57, - 0x1b80, 0x40081f65, - 0x1b80, 0x40081f67, - 0x1b80, 0x00011f75, - 0x1b80, 0x00011f77, - 0x1b80, 0xe2571f85, - 0x1b80, 0xe2571f87, - 0x1b80, 0x74081f95, - 0x1b80, 0x74081f97, - 0x1b80, 0xe2a11fa5, - 0x1b80, 0xe2a11fa7, - 0x1b80, 0x00041fb5, - 0x1b80, 0x00041fb7, - 0x1b80, 0x40081fc5, - 0x1b80, 0x40081fc7, - 0x1b80, 0x00011fd5, - 0x1b80, 0x00011fd7, - 0x1b80, 0xe2571fe5, - 0x1b80, 0xe2571fe7, - 0x1b80, 0x74081ff5, - 0x1b80, 0x74081ff7, - 0x1b80, 0xe2b12005, - 0x1b80, 0xe2b12007, - 0x1b80, 0x00042015, - 0x1b80, 0x00042017, - 0x1b80, 0x40082025, - 0x1b80, 0x40082027, - 0x1b80, 0x00012035, - 0x1b80, 0x00012037, - 0x1b80, 0xe26d2045, - 0x1b80, 0xe26d2047, - 0x1b80, 0x74002055, - 0x1b80, 0x74002057, - 0x1b80, 0xe2a12065, - 0x1b80, 0xe2a12067, - 0x1b80, 0x00042075, - 0x1b80, 0x00042077, - 0x1b80, 0x40082085, - 0x1b80, 0x40082087, - 0x1b80, 0x00012095, - 0x1b80, 0x00012097, - 0x1b80, 0xe26d20a5, - 0x1b80, 0xe26d20a7, - 0x1b80, 0x740020b5, - 0x1b80, 0x740020b7, - 0x1b80, 0xe2b120c5, - 0x1b80, 0xe2b120c7, - 0x1b80, 0x000420d5, - 0x1b80, 0x000420d7, - 0x1b80, 0x400820e5, - 0x1b80, 0x400820e7, - 0x1b80, 0x000120f5, - 0x1b80, 0x000120f7, - 0x1b80, 0x00042105, - 0x1b80, 0x00042107, - 0x1b80, 0x49042115, - 0x1b80, 0x49042117, - 0x1b80, 0x4bb02125, - 0x1b80, 0x4bb02127, - 0x1b80, 0x00062135, - 0x1b80, 0x00062137, - 0x1b80, 0x75042145, - 0x1b80, 0x75042147, - 0x1b80, 0x77082155, - 0x1b80, 0x77082157, - 0x1b80, 0x00042165, - 0x1b80, 0x00042167, - 0x1b80, 0x44802175, - 0x1b80, 0x44802177, - 0x1b80, 0x45ff2185, - 0x1b80, 0x45ff2187, - 0x1b80, 0x463f2195, - 0x1b80, 0x463f2197, - 0x1b80, 0x473121a5, - 0x1b80, 0x473121a7, - 0x1b80, 0x400821b5, - 0x1b80, 0x400821b7, - 0x1b80, 0xe23e21c5, - 0x1b80, 0xe23e21c7, - 0x1b80, 0x000421d5, - 0x1b80, 0x000421d7, - 0x1b80, 0x400c21e5, - 0x1b80, 0x400c21e7, - 0x1b80, 0x000621f5, - 0x1b80, 0x000621f7, - 0x1b80, 0x75002205, - 0x1b80, 0x75002207, - 0x1b80, 0x00042215, - 0x1b80, 0x00042217, - 0x1b80, 0x445b2225, - 0x1b80, 0x445b2227, - 0x1b80, 0x47002235, - 0x1b80, 0x47002237, - 0x1b80, 0x40082245, - 0x1b80, 0x40082247, + 0x1b80, 0x54ef1595, + 0x1b80, 0x54ef1597, + 0x1b80, 0x6e0c15a5, + 0x1b80, 0x6e0c15a7, + 0x1b80, 0x6f0015b5, + 0x1b80, 0x6f0015b7, + 0x1b80, 0xe3b415c5, + 0x1b80, 0xe3b415c7, + 0x1b80, 0xe3e715d5, + 0x1b80, 0xe3e715d7, + 0x1b80, 0x5c3215e5, + 0x1b80, 0x5c3215e7, + 0x1b80, 0x54f015f5, + 0x1b80, 0x54f015f7, + 0x1b80, 0x6e1c1605, + 0x1b80, 0x6e1c1607, + 0x1b80, 0xe3b41615, + 0x1b80, 0xe3b41617, + 0x1b80, 0xe3e71625, + 0x1b80, 0xe3e71627, + 0x1b80, 0x5c321635, + 0x1b80, 0x5c321637, + 0x1b80, 0x54f11645, + 0x1b80, 0x54f11647, + 0x1b80, 0x6e3c1655, + 0x1b80, 0x6e3c1657, + 0x1b80, 0xe3b41665, + 0x1b80, 0xe3b41667, + 0x1b80, 0xe3e71675, + 0x1b80, 0xe3e71677, + 0x1b80, 0xfaa91685, + 0x1b80, 0xfaa91687, + 0x1b80, 0x5c321695, + 0x1b80, 0x5c321697, + 0x1b80, 0x54f216a5, + 0x1b80, 0x54f216a7, + 0x1b80, 0x6e5c16b5, + 0x1b80, 0x6e5c16b7, + 0x1b80, 0xe3b416c5, + 0x1b80, 0xe3b416c7, + 0x1b80, 0xe3e716d5, + 0x1b80, 0xe3e716d7, 
+ 0x1b80, 0x5c3216e5, + 0x1b80, 0x5c3216e7, + 0x1b80, 0x54f316f5, + 0x1b80, 0x54f316f7, + 0x1b80, 0x6e7c1705, + 0x1b80, 0x6e7c1707, + 0x1b80, 0xe3b41715, + 0x1b80, 0xe3b41717, + 0x1b80, 0xe3e71725, + 0x1b80, 0xe3e71727, + 0x1b80, 0xfba91735, + 0x1b80, 0xfba91737, + 0x1b80, 0x5c321745, + 0x1b80, 0x5c321747, + 0x1b80, 0x54f41755, + 0x1b80, 0x54f41757, + 0x1b80, 0x6e9c1765, + 0x1b80, 0x6e9c1767, + 0x1b80, 0xe3b41775, + 0x1b80, 0xe3b41777, + 0x1b80, 0xe3e71785, + 0x1b80, 0xe3e71787, + 0x1b80, 0x5c321795, + 0x1b80, 0x5c321797, + 0x1b80, 0x54f517a5, + 0x1b80, 0x54f517a7, + 0x1b80, 0x6ebc17b5, + 0x1b80, 0x6ebc17b7, + 0x1b80, 0xe3b417c5, + 0x1b80, 0xe3b417c7, + 0x1b80, 0xe3e717d5, + 0x1b80, 0xe3e717d7, + 0x1b80, 0x5c3217e5, + 0x1b80, 0x5c3217e7, + 0x1b80, 0x54f617f5, + 0x1b80, 0x54f617f7, + 0x1b80, 0x6edc1805, + 0x1b80, 0x6edc1807, + 0x1b80, 0xe3b41815, + 0x1b80, 0xe3b41817, + 0x1b80, 0xe3e71825, + 0x1b80, 0xe3e71827, + 0x1b80, 0x5c321835, + 0x1b80, 0x5c321837, + 0x1b80, 0x54f71845, + 0x1b80, 0x54f71847, + 0x1b80, 0x6ef01855, + 0x1b80, 0x6ef01857, + 0x1b80, 0xe3b41865, + 0x1b80, 0xe3b41867, + 0x1b80, 0xe3e71875, + 0x1b80, 0xe3e71877, + 0x1b80, 0x63431885, + 0x1b80, 0x63431887, + 0x1b80, 0x30ec1895, + 0x1b80, 0x30ec1897, + 0x1b80, 0x000118a5, + 0x1b80, 0x000118a7, + 0x1b80, 0x63c318b5, + 0x1b80, 0x63c318b7, + 0x1b80, 0x003018c5, + 0x1b80, 0x003018c7, + 0x1b80, 0x000018d5, + 0x1b80, 0x000018d7, + 0x1b80, 0x000218e5, + 0x1b80, 0x000218e7, + 0x1b80, 0x550118f5, + 0x1b80, 0x550118f7, + 0x1b80, 0x57041905, + 0x1b80, 0x57041907, + 0x1b80, 0x57001915, + 0x1b80, 0x57001917, + 0x1b80, 0x96001925, + 0x1b80, 0x96001927, + 0x1b80, 0x00301935, + 0x1b80, 0x00301937, + 0x1b80, 0x00071945, + 0x1b80, 0x00071947, + 0x1b80, 0x5be01955, + 0x1b80, 0x5be01957, + 0x1b80, 0x5a001965, + 0x1b80, 0x5a001967, + 0x1b80, 0x59001975, + 0x1b80, 0x59001977, + 0x1b80, 0x58001985, + 0x1b80, 0x58001987, + 0x1b80, 0x00041995, + 0x1b80, 0x00041997, + 0x1b80, 0x000219a5, + 0x1b80, 0x000219a7, + 0x1b80, 0x570819b5, + 0x1b80, 0x570819b7, + 0x1b80, 0x570019c5, + 0x1b80, 0x570019c7, + 0x1b80, 0x950019d5, + 0x1b80, 0x950019d7, + 0x1b80, 0x003019e5, + 0x1b80, 0x003019e7, + 0x1b80, 0x000719f5, + 0x1b80, 0x000719f7, + 0x1b80, 0x58011a05, + 0x1b80, 0x58011a07, + 0x1b80, 0x00041a15, + 0x1b80, 0x00041a17, + 0x1b80, 0x00021a25, + 0x1b80, 0x00021a27, + 0x1b80, 0x00301a35, + 0x1b80, 0x00301a37, + 0x1b80, 0x00001a45, + 0x1b80, 0x00001a47, + 0x1b80, 0x00021a55, + 0x1b80, 0x00021a57, + 0x1b80, 0x63051a65, + 0x1b80, 0x63051a67, + 0x1b80, 0x7b401a75, + 0x1b80, 0x7b401a77, + 0x1b80, 0x7a001a85, + 0x1b80, 0x7a001a87, + 0x1b80, 0x79001a95, + 0x1b80, 0x79001a97, + 0x1b80, 0x7f401aa5, + 0x1b80, 0x7f401aa7, + 0x1b80, 0x7e001ab5, + 0x1b80, 0x7e001ab7, + 0x1b80, 0x7d001ac5, + 0x1b80, 0x7d001ac7, + 0x1b80, 0x00011ad5, + 0x1b80, 0x00011ad7, + 0x1b80, 0x00041ae5, + 0x1b80, 0x00041ae7, + 0x1b80, 0x55011af5, + 0x1b80, 0x55011af7, + 0x1b80, 0x5c311b05, + 0x1b80, 0x5c311b07, + 0x1b80, 0x5f821b15, + 0x1b80, 0x5f821b17, + 0x1b80, 0x66051b25, + 0x1b80, 0x66051b27, + 0x1b80, 0x00061b35, + 0x1b80, 0x00061b37, + 0x1b80, 0x5d801b45, + 0x1b80, 0x5d801b47, + 0x1b80, 0x09001b55, + 0x1b80, 0x09001b57, + 0x1b80, 0x0a011b65, + 0x1b80, 0x0a011b67, + 0x1b80, 0x0b401b75, + 0x1b80, 0x0b401b77, + 0x1b80, 0x0d001b85, + 0x1b80, 0x0d001b87, + 0x1b80, 0x0f011b95, + 0x1b80, 0x0f011b97, + 0x1b80, 0x002a1ba5, + 0x1b80, 0x002a1ba7, + 0x1b80, 0x055a1bb5, + 0x1b80, 0x055a1bb7, + 0x1b80, 0x05db1bc5, + 0x1b80, 0x05db1bc7, + 0x1b80, 0xe3d21bd5, + 0x1b80, 0xe3d21bd7, + 0x1b80, 0xe3801be5, + 0x1b80, 
0xe3801be7, + 0x1b80, 0x00061bf5, + 0x1b80, 0x00061bf7, + 0x1b80, 0x06da1c05, + 0x1b80, 0x06da1c07, + 0x1b80, 0x07db1c15, + 0x1b80, 0x07db1c17, + 0x1b80, 0xe3d21c25, + 0x1b80, 0xe3d21c27, + 0x1b80, 0xe3801c35, + 0x1b80, 0xe3801c37, + 0x1b80, 0xe3c91c45, + 0x1b80, 0xe3c91c47, + 0x1b80, 0x00021c55, + 0x1b80, 0x00021c57, + 0x1b80, 0xe3ce1c65, + 0x1b80, 0xe3ce1c67, + 0x1b80, 0x5d001c75, + 0x1b80, 0x5d001c77, + 0x1b80, 0x00041c85, + 0x1b80, 0x00041c87, + 0x1b80, 0x5fa21c95, + 0x1b80, 0x5fa21c97, + 0x1b80, 0x00011ca5, + 0x1b80, 0x00011ca7, + 0x1b80, 0x00041cb5, + 0x1b80, 0x00041cb7, + 0x1b80, 0xe2711cc5, + 0x1b80, 0xe2711cc7, + 0x1b80, 0xe2821cd5, + 0x1b80, 0xe2821cd7, + 0x1b80, 0xe28b1ce5, + 0x1b80, 0xe28b1ce7, + 0x1b80, 0xe29c1cf5, + 0x1b80, 0xe29c1cf7, + 0x1b80, 0x00051d05, + 0x1b80, 0x00051d07, + 0x1b80, 0xe2641d15, + 0x1b80, 0xe2641d17, + 0x1b80, 0xe2711d25, + 0x1b80, 0xe2711d27, + 0x1b80, 0xe28b1d35, + 0x1b80, 0xe28b1d37, + 0x1b80, 0xe29c1d45, + 0x1b80, 0xe29c1d47, + 0x1b80, 0x00061d55, + 0x1b80, 0x00061d57, + 0x1b80, 0xe2641d65, + 0x1b80, 0xe2641d67, + 0x1b80, 0xe2711d75, + 0x1b80, 0xe2711d77, + 0x1b80, 0xe2821d85, + 0x1b80, 0xe2821d87, + 0x1b80, 0xe28b1d95, + 0x1b80, 0xe28b1d97, + 0x1b80, 0x00011da5, + 0x1b80, 0x00011da7, + 0x1b80, 0xe2f41db5, + 0x1b80, 0xe2f41db7, + 0x1b80, 0x74081dc5, + 0x1b80, 0x74081dc7, + 0x1b80, 0xe33e1dd5, + 0x1b80, 0xe33e1dd7, + 0x1b80, 0xe3201de5, + 0x1b80, 0xe3201de7, + 0x1b80, 0xe35e1df5, + 0x1b80, 0xe35e1df7, + 0x1b80, 0xb9001e05, + 0x1b80, 0xb9001e07, + 0x1b80, 0x99001e15, + 0x1b80, 0x99001e17, + 0x1b80, 0x00061e25, + 0x1b80, 0x00061e27, + 0x1b80, 0x77001e35, + 0x1b80, 0x77001e37, + 0x1b80, 0x00041e45, + 0x1b80, 0x00041e47, + 0x1b80, 0x49041e55, + 0x1b80, 0x49041e57, + 0x1b80, 0x4bb01e65, + 0x1b80, 0x4bb01e67, + 0x1b80, 0x00061e75, + 0x1b80, 0x00061e77, + 0x1b80, 0x75041e85, + 0x1b80, 0x75041e87, + 0x1b80, 0x77081e95, + 0x1b80, 0x77081e97, + 0x1b80, 0x00071ea5, + 0x1b80, 0x00071ea7, + 0x1b80, 0x77101eb5, + 0x1b80, 0x77101eb7, + 0x1b80, 0x00041ec5, + 0x1b80, 0x00041ec7, + 0x1b80, 0x44801ed5, + 0x1b80, 0x44801ed7, + 0x1b80, 0x45ff1ee5, + 0x1b80, 0x45ff1ee7, + 0x1b80, 0x463f1ef5, + 0x1b80, 0x463f1ef7, + 0x1b80, 0x47311f05, + 0x1b80, 0x47311f07, + 0x1b80, 0x40081f15, + 0x1b80, 0x40081f17, + 0x1b80, 0xe2db1f25, + 0x1b80, 0xe2db1f27, + 0x1b80, 0x00011f35, + 0x1b80, 0x00011f37, + 0x1b80, 0xe2f41f45, + 0x1b80, 0xe2f41f47, + 0x1b80, 0x74081f55, + 0x1b80, 0x74081f57, + 0x1b80, 0xe34e1f65, + 0x1b80, 0xe34e1f67, + 0x1b80, 0xe3201f75, + 0x1b80, 0xe3201f77, + 0x1b80, 0xe3641f85, + 0x1b80, 0xe3641f87, + 0x1b80, 0xb9001f95, + 0x1b80, 0xb9001f97, + 0x1b80, 0x99001fa5, + 0x1b80, 0x99001fa7, + 0x1b80, 0x00061fb5, + 0x1b80, 0x00061fb7, + 0x1b80, 0x77001fc5, + 0x1b80, 0x77001fc7, + 0x1b80, 0x00051fd5, + 0x1b80, 0x00051fd7, + 0x1b80, 0x61041fe5, + 0x1b80, 0x61041fe7, + 0x1b80, 0x63b01ff5, + 0x1b80, 0x63b01ff7, + 0x1b80, 0x00062005, + 0x1b80, 0x00062007, + 0x1b80, 0x75082015, + 0x1b80, 0x75082017, + 0x1b80, 0x77082025, + 0x1b80, 0x77082027, + 0x1b80, 0x00072035, + 0x1b80, 0x00072037, + 0x1b80, 0x77202045, + 0x1b80, 0x77202047, + 0x1b80, 0x00052055, + 0x1b80, 0x00052057, + 0x1b80, 0x5c802065, + 0x1b80, 0x5c802067, + 0x1b80, 0x5dff2075, + 0x1b80, 0x5dff2077, + 0x1b80, 0x5e3f2085, + 0x1b80, 0x5e3f2087, + 0x1b80, 0x5f312095, + 0x1b80, 0x5f312097, + 0x1b80, 0x000420a5, + 0x1b80, 0x000420a7, + 0x1b80, 0x400a20b5, + 0x1b80, 0x400a20b7, + 0x1b80, 0xe2db20c5, + 0x1b80, 0xe2db20c7, + 0x1b80, 0x000120d5, + 0x1b80, 0x000120d7, + 0x1b80, 0xe2f420e5, + 0x1b80, 0xe2f420e7, + 0x1b80, 0x740820f5, 
+ 0x1b80, 0x740820f7, + 0x1b80, 0xe33e2105, + 0x1b80, 0xe33e2107, + 0x1b80, 0xe3202115, + 0x1b80, 0xe3202117, + 0x1b80, 0xe35e2125, + 0x1b80, 0xe35e2127, + 0x1b80, 0xe36a2135, + 0x1b80, 0xe36a2137, + 0x1b80, 0xe2ad2145, + 0x1b80, 0xe2ad2147, + 0x1b80, 0x00012155, + 0x1b80, 0x00012157, + 0x1b80, 0xe2f42165, + 0x1b80, 0xe2f42167, + 0x1b80, 0x74082175, + 0x1b80, 0x74082177, + 0x1b80, 0xe34e2185, + 0x1b80, 0xe34e2187, + 0x1b80, 0xe3202195, + 0x1b80, 0xe3202197, + 0x1b80, 0xe36421a5, + 0x1b80, 0xe36421a7, + 0x1b80, 0xe36a21b5, + 0x1b80, 0xe36a21b7, + 0x1b80, 0xe2c321c5, + 0x1b80, 0xe2c321c7, + 0x1b80, 0x000121d5, + 0x1b80, 0x000121d7, + 0x1b80, 0xe30a21e5, + 0x1b80, 0xe30a21e7, + 0x1b80, 0x740021f5, + 0x1b80, 0x740021f7, + 0x1b80, 0xe33e2205, + 0x1b80, 0xe33e2207, + 0x1b80, 0xe32f2215, + 0x1b80, 0xe32f2217, + 0x1b80, 0xe35e2225, + 0x1b80, 0xe35e2227, + 0x1b80, 0xe36a2235, + 0x1b80, 0xe36a2237, + 0x1b80, 0xe2ad2245, + 0x1b80, 0xe2ad2247, 0x1b80, 0x00012255, 0x1b80, 0x00012257, - 0x1b80, 0x00052265, - 0x1b80, 0x00052267, - 0x1b80, 0x61042275, - 0x1b80, 0x61042277, - 0x1b80, 0x63b02285, - 0x1b80, 0x63b02287, - 0x1b80, 0x00062295, - 0x1b80, 0x00062297, - 0x1b80, 0x750822a5, - 0x1b80, 0x750822a7, - 0x1b80, 0x770822b5, - 0x1b80, 0x770822b7, - 0x1b80, 0x000522c5, - 0x1b80, 0x000522c7, - 0x1b80, 0x5c8022d5, - 0x1b80, 0x5c8022d7, - 0x1b80, 0x5dff22e5, - 0x1b80, 0x5dff22e7, - 0x1b80, 0x5e3f22f5, - 0x1b80, 0x5e3f22f7, - 0x1b80, 0x5f312305, - 0x1b80, 0x5f312307, - 0x1b80, 0x00042315, - 0x1b80, 0x00042317, - 0x1b80, 0x400a2325, - 0x1b80, 0x400a2327, - 0x1b80, 0xe23e2335, - 0x1b80, 0xe23e2337, - 0x1b80, 0x00042345, - 0x1b80, 0x00042347, - 0x1b80, 0x400c2355, - 0x1b80, 0x400c2357, - 0x1b80, 0x00062365, - 0x1b80, 0x00062367, - 0x1b80, 0x75002375, - 0x1b80, 0x75002377, - 0x1b80, 0x00052385, - 0x1b80, 0x00052387, - 0x1b80, 0x5c5b2395, - 0x1b80, 0x5c5b2397, - 0x1b80, 0x5f0023a5, - 0x1b80, 0x5f0023a7, - 0x1b80, 0x000423b5, - 0x1b80, 0x000423b7, - 0x1b80, 0x400823c5, - 0x1b80, 0x400823c7, - 0x1b80, 0x000123d5, - 0x1b80, 0x000123d7, - 0x1b80, 0x000723e5, - 0x1b80, 0x000723e7, - 0x1b80, 0x4c1223f5, - 0x1b80, 0x4c1223f7, - 0x1b80, 0x4e202405, - 0x1b80, 0x4e202407, - 0x1b80, 0x00052415, - 0x1b80, 0x00052417, - 0x1b80, 0x598f2425, - 0x1b80, 0x598f2427, - 0x1b80, 0x40022435, - 0x1b80, 0x40022437, - 0x1b80, 0x4c012445, - 0x1b80, 0x4c012447, - 0x1b80, 0x4c002455, - 0x1b80, 0x4c002457, - 0x1b80, 0xab002465, - 0x1b80, 0xab002467, - 0x1b80, 0x40032475, - 0x1b80, 0x40032477, - 0x1b80, 0x49802485, - 0x1b80, 0x49802487, - 0x1b80, 0x56c02495, - 0x1b80, 0x56c02497, - 0x1b80, 0x540224a5, - 0x1b80, 0x540224a7, - 0x1b80, 0x4c0124b5, - 0x1b80, 0x4c0124b7, - 0x1b80, 0x4c0024c5, - 0x1b80, 0x4c0024c7, - 0x1b80, 0xab0024d5, - 0x1b80, 0xab0024d7, - 0x1b80, 0x540024e5, - 0x1b80, 0x540024e7, - 0x1b80, 0x000724f5, - 0x1b80, 0x000724f7, - 0x1b80, 0x4c002505, - 0x1b80, 0x4c002507, - 0x1b80, 0x4e002515, - 0x1b80, 0x4e002517, - 0x1b80, 0x00052525, - 0x1b80, 0x00052527, - 0x1b80, 0x40042535, - 0x1b80, 0x40042537, - 0x1b80, 0x4c012545, - 0x1b80, 0x4c012547, - 0x1b80, 0x4c002555, - 0x1b80, 0x4c002557, - 0x1b80, 0x00012565, - 0x1b80, 0x00012567, - 0x1b80, 0x00042575, - 0x1b80, 0x00042577, - 0x1b80, 0x44802585, - 0x1b80, 0x44802587, - 0x1b80, 0x4b002595, - 0x1b80, 0x4b002597, - 0x1b80, 0x000525a5, - 0x1b80, 0x000525a7, - 0x1b80, 0x5c8025b5, - 0x1b80, 0x5c8025b7, - 0x1b80, 0x630025c5, - 0x1b80, 0x630025c7, - 0x1b80, 0x000725d5, - 0x1b80, 0x000725d7, - 0x1b80, 0x780c25e5, - 0x1b80, 0x780c25e7, - 0x1b80, 0x791925f5, - 0x1b80, 0x791925f7, - 0x1b80, 
0x7a002605, - 0x1b80, 0x7a002607, - 0x1b80, 0x7b822615, - 0x1b80, 0x7b822617, - 0x1b80, 0x7b022625, - 0x1b80, 0x7b022627, - 0x1b80, 0x78142635, - 0x1b80, 0x78142637, - 0x1b80, 0x79ee2645, - 0x1b80, 0x79ee2647, - 0x1b80, 0x7a012655, - 0x1b80, 0x7a012657, - 0x1b80, 0x7b832665, - 0x1b80, 0x7b832667, - 0x1b80, 0x7b032675, - 0x1b80, 0x7b032677, - 0x1b80, 0x78282685, - 0x1b80, 0x78282687, - 0x1b80, 0x79b42695, - 0x1b80, 0x79b42697, - 0x1b80, 0x7a0026a5, - 0x1b80, 0x7a0026a7, - 0x1b80, 0x7b0026b5, - 0x1b80, 0x7b0026b7, - 0x1b80, 0x000126c5, - 0x1b80, 0x000126c7, - 0x1b80, 0x000426d5, - 0x1b80, 0x000426d7, - 0x1b80, 0x448026e5, - 0x1b80, 0x448026e7, + 0x1b80, 0xe30a2265, + 0x1b80, 0xe30a2267, + 0x1b80, 0x74002275, + 0x1b80, 0x74002277, + 0x1b80, 0xe34e2285, + 0x1b80, 0xe34e2287, + 0x1b80, 0xe32f2295, + 0x1b80, 0xe32f2297, + 0x1b80, 0xe36422a5, + 0x1b80, 0xe36422a7, + 0x1b80, 0xe36a22b5, + 0x1b80, 0xe36a22b7, + 0x1b80, 0xe2c322c5, + 0x1b80, 0xe2c322c7, + 0x1b80, 0x000122d5, + 0x1b80, 0x000122d7, + 0x1b80, 0x000422e5, + 0x1b80, 0x000422e7, + 0x1b80, 0x445b22f5, + 0x1b80, 0x445b22f7, + 0x1b80, 0x47b02305, + 0x1b80, 0x47b02307, + 0x1b80, 0x47302315, + 0x1b80, 0x47302317, + 0x1b80, 0x47002325, + 0x1b80, 0x47002327, + 0x1b80, 0x00062335, + 0x1b80, 0x00062337, + 0x1b80, 0x77082345, + 0x1b80, 0x77082347, + 0x1b80, 0x00042355, + 0x1b80, 0x00042357, + 0x1b80, 0x49402365, + 0x1b80, 0x49402367, + 0x1b80, 0x4bb02375, + 0x1b80, 0x4bb02377, + 0x1b80, 0x00072385, + 0x1b80, 0x00072387, + 0x1b80, 0x54402395, + 0x1b80, 0x54402397, + 0x1b80, 0x000423a5, + 0x1b80, 0x000423a7, + 0x1b80, 0x400823b5, + 0x1b80, 0x400823b7, + 0x1b80, 0x000123c5, + 0x1b80, 0x000123c7, + 0x1b80, 0x000523d5, + 0x1b80, 0x000523d7, + 0x1b80, 0x5c5b23e5, + 0x1b80, 0x5c5b23e7, + 0x1b80, 0x5fb023f5, + 0x1b80, 0x5fb023f7, + 0x1b80, 0x5f302405, + 0x1b80, 0x5f302407, + 0x1b80, 0x5f002415, + 0x1b80, 0x5f002417, + 0x1b80, 0x00062425, + 0x1b80, 0x00062427, + 0x1b80, 0x77082435, + 0x1b80, 0x77082437, + 0x1b80, 0x00052445, + 0x1b80, 0x00052447, + 0x1b80, 0x61402455, + 0x1b80, 0x61402457, + 0x1b80, 0x63b02465, + 0x1b80, 0x63b02467, + 0x1b80, 0x00072475, + 0x1b80, 0x00072477, + 0x1b80, 0x54402485, + 0x1b80, 0x54402487, + 0x1b80, 0x00042495, + 0x1b80, 0x00042497, + 0x1b80, 0x400824a5, + 0x1b80, 0x400824a7, + 0x1b80, 0x000124b5, + 0x1b80, 0x000124b7, + 0x1b80, 0xe2f424c5, + 0x1b80, 0xe2f424c7, + 0x1b80, 0x740824d5, + 0x1b80, 0x740824d7, + 0x1b80, 0xe33e24e5, + 0x1b80, 0xe33e24e7, + 0x1b80, 0x000424f5, + 0x1b80, 0x000424f7, + 0x1b80, 0x40082505, + 0x1b80, 0x40082507, + 0x1b80, 0x00012515, + 0x1b80, 0x00012517, + 0x1b80, 0xe2f42525, + 0x1b80, 0xe2f42527, + 0x1b80, 0x74082535, + 0x1b80, 0x74082537, + 0x1b80, 0xe34e2545, + 0x1b80, 0xe34e2547, + 0x1b80, 0x00042555, + 0x1b80, 0x00042557, + 0x1b80, 0x40082565, + 0x1b80, 0x40082567, + 0x1b80, 0x00012575, + 0x1b80, 0x00012577, + 0x1b80, 0xe30a2585, + 0x1b80, 0xe30a2587, + 0x1b80, 0x74002595, + 0x1b80, 0x74002597, + 0x1b80, 0xe33e25a5, + 0x1b80, 0xe33e25a7, + 0x1b80, 0x000425b5, + 0x1b80, 0x000425b7, + 0x1b80, 0x400825c5, + 0x1b80, 0x400825c7, + 0x1b80, 0x000125d5, + 0x1b80, 0x000125d7, + 0x1b80, 0xe30a25e5, + 0x1b80, 0xe30a25e7, + 0x1b80, 0x740025f5, + 0x1b80, 0x740025f7, + 0x1b80, 0xe34e2605, + 0x1b80, 0xe34e2607, + 0x1b80, 0x00042615, + 0x1b80, 0x00042617, + 0x1b80, 0x40082625, + 0x1b80, 0x40082627, + 0x1b80, 0x00012635, + 0x1b80, 0x00012637, + 0x1b80, 0x40ff2645, + 0x1b80, 0x40ff2647, + 0x1b80, 0x411f2655, + 0x1b80, 0x411f2657, + 0x1b80, 0x42002665, + 0x1b80, 0x42002667, + 0x1b80, 0x43002675, + 0x1b80, 0x43002677, 
+ 0x1b80, 0x44ff2685, + 0x1b80, 0x44ff2687, + 0x1b80, 0x451f2695, + 0x1b80, 0x451f2697, + 0x1b80, 0x460026a5, + 0x1b80, 0x460026a7, + 0x1b80, 0x470026b5, + 0x1b80, 0x470026b7, + 0x1b80, 0x48ff26c5, + 0x1b80, 0x48ff26c7, + 0x1b80, 0x491f26d5, + 0x1b80, 0x491f26d7, + 0x1b80, 0x4a0026e5, + 0x1b80, 0x4a0026e7, 0x1b80, 0x4b0026f5, 0x1b80, 0x4b0026f7, - 0x1b80, 0x00052705, - 0x1b80, 0x00052707, - 0x1b80, 0x5c802715, - 0x1b80, 0x5c802717, - 0x1b80, 0x63002725, - 0x1b80, 0x63002727, - 0x1b80, 0x00072735, - 0x1b80, 0x00072737, - 0x1b80, 0x78102745, - 0x1b80, 0x78102747, - 0x1b80, 0x79132755, - 0x1b80, 0x79132757, - 0x1b80, 0x7a002765, - 0x1b80, 0x7a002767, - 0x1b80, 0x7b802775, - 0x1b80, 0x7b802777, - 0x1b80, 0x7b002785, - 0x1b80, 0x7b002787, - 0x1b80, 0x78db2795, - 0x1b80, 0x78db2797, - 0x1b80, 0x790027a5, - 0x1b80, 0x790027a7, - 0x1b80, 0x7a0027b5, - 0x1b80, 0x7a0027b7, - 0x1b80, 0x7b8127c5, - 0x1b80, 0x7b8127c7, - 0x1b80, 0x7b0127d5, - 0x1b80, 0x7b0127d7, - 0x1b80, 0x782827e5, - 0x1b80, 0x782827e7, - 0x1b80, 0x79b427f5, - 0x1b80, 0x79b427f7, - 0x1b80, 0x7a002805, - 0x1b80, 0x7a002807, - 0x1b80, 0x7b002815, - 0x1b80, 0x7b002817, - 0x1b80, 0x00012825, - 0x1b80, 0x00012827, - 0x1b80, 0x00072835, - 0x1b80, 0x00072837, - 0x1b80, 0x783e2845, - 0x1b80, 0x783e2847, - 0x1b80, 0x79f92855, - 0x1b80, 0x79f92857, - 0x1b80, 0x7a012865, - 0x1b80, 0x7a012867, - 0x1b80, 0x7b822875, - 0x1b80, 0x7b822877, - 0x1b80, 0x7b022885, - 0x1b80, 0x7b022887, - 0x1b80, 0x78a92895, - 0x1b80, 0x78a92897, - 0x1b80, 0x79ed28a5, - 0x1b80, 0x79ed28a7, - 0x1b80, 0x7b8328b5, - 0x1b80, 0x7b8328b7, - 0x1b80, 0x7b0328c5, - 0x1b80, 0x7b0328c7, - 0x1b80, 0x782828d5, - 0x1b80, 0x782828d7, - 0x1b80, 0x79b428e5, - 0x1b80, 0x79b428e7, - 0x1b80, 0x7a0028f5, - 0x1b80, 0x7a0028f7, - 0x1b80, 0x7b002905, - 0x1b80, 0x7b002907, - 0x1b80, 0x00012915, - 0x1b80, 0x00012917, - 0x1b80, 0x00072925, - 0x1b80, 0x00072927, - 0x1b80, 0x78ae2935, - 0x1b80, 0x78ae2937, - 0x1b80, 0x79fa2945, - 0x1b80, 0x79fa2947, - 0x1b80, 0x7a012955, - 0x1b80, 0x7a012957, - 0x1b80, 0x7b802965, - 0x1b80, 0x7b802967, - 0x1b80, 0x7b002975, - 0x1b80, 0x7b002977, - 0x1b80, 0x787a2985, - 0x1b80, 0x787a2987, - 0x1b80, 0x79f12995, - 0x1b80, 0x79f12997, - 0x1b80, 0x7b8129a5, - 0x1b80, 0x7b8129a7, - 0x1b80, 0x7b0129b5, - 0x1b80, 0x7b0129b7, - 0x1b80, 0x782829c5, - 0x1b80, 0x782829c7, - 0x1b80, 0x79b429d5, - 0x1b80, 0x79b429d7, - 0x1b80, 0x7a0029e5, - 0x1b80, 0x7a0029e7, - 0x1b80, 0x7b0029f5, - 0x1b80, 0x7b0029f7, - 0x1b80, 0x00012a05, - 0x1b80, 0x00012a07, - 0x1b80, 0x00072a15, - 0x1b80, 0x00072a17, - 0x1b80, 0x75002a25, - 0x1b80, 0x75002a27, - 0x1b80, 0x76022a35, - 0x1b80, 0x76022a37, - 0x1b80, 0x77152a45, - 0x1b80, 0x77152a47, - 0x1b80, 0x00062a55, - 0x1b80, 0x00062a57, - 0x1b80, 0x74002a65, - 0x1b80, 0x74002a67, - 0x1b80, 0x76002a75, - 0x1b80, 0x76002a77, - 0x1b80, 0x77002a85, - 0x1b80, 0x77002a87, - 0x1b80, 0x75102a95, - 0x1b80, 0x75102a97, - 0x1b80, 0x75002aa5, - 0x1b80, 0x75002aa7, - 0x1b80, 0xb3002ab5, - 0x1b80, 0xb3002ab7, - 0x1b80, 0x93002ac5, - 0x1b80, 0x93002ac7, - 0x1b80, 0x00072ad5, - 0x1b80, 0x00072ad7, - 0x1b80, 0x76002ae5, - 0x1b80, 0x76002ae7, - 0x1b80, 0x77002af5, - 0x1b80, 0x77002af7, - 0x1b80, 0x00012b05, - 0x1b80, 0x00012b07, - 0x1b80, 0x00072b15, - 0x1b80, 0x00072b17, - 0x1b80, 0x75002b25, - 0x1b80, 0x75002b27, - 0x1b80, 0x76022b35, - 0x1b80, 0x76022b37, - 0x1b80, 0x77252b45, - 0x1b80, 0x77252b47, - 0x1b80, 0x00062b55, - 0x1b80, 0x00062b57, - 0x1b80, 0x74002b65, - 0x1b80, 0x74002b67, - 0x1b80, 0x76002b75, - 0x1b80, 0x76002b77, - 0x1b80, 0x77012b85, - 0x1b80, 
0x77012b87, - 0x1b80, 0x75102b95, - 0x1b80, 0x75102b97, - 0x1b80, 0x75002ba5, - 0x1b80, 0x75002ba7, - 0x1b80, 0xb3002bb5, - 0x1b80, 0xb3002bb7, - 0x1b80, 0x93002bc5, - 0x1b80, 0x93002bc7, - 0x1b80, 0x00072bd5, - 0x1b80, 0x00072bd7, - 0x1b80, 0x76002be5, - 0x1b80, 0x76002be7, - 0x1b80, 0x77002bf5, - 0x1b80, 0x77002bf7, - 0x1b80, 0x00012c05, - 0x1b80, 0x00012c07, - 0x1b80, 0x00042c15, - 0x1b80, 0x00042c17, - 0x1b80, 0x44802c25, - 0x1b80, 0x44802c27, - 0x1b80, 0x47302c35, - 0x1b80, 0x47302c37, - 0x1b80, 0x00062c45, - 0x1b80, 0x00062c47, - 0x1b80, 0x776c2c55, - 0x1b80, 0x776c2c57, - 0x1b80, 0x00012c65, - 0x1b80, 0x00012c67, - 0x1b80, 0x00052c75, - 0x1b80, 0x00052c77, - 0x1b80, 0x5c802c85, - 0x1b80, 0x5c802c87, - 0x1b80, 0x5f302c95, - 0x1b80, 0x5f302c97, - 0x1b80, 0x00062ca5, - 0x1b80, 0x00062ca7, - 0x1b80, 0x776d2cb5, - 0x1b80, 0x776d2cb7, - 0x1b80, 0x00012cc5, - 0x1b80, 0x00012cc7, - 0x1b80, 0xb9002cd5, - 0x1b80, 0xb9002cd7, - 0x1b80, 0x99002ce5, - 0x1b80, 0x99002ce7, - 0x1b80, 0x00062cf5, - 0x1b80, 0x00062cf7, - 0x1b80, 0x77002d05, - 0x1b80, 0x77002d07, - 0x1b80, 0x98052d15, - 0x1b80, 0x98052d17, - 0x1b80, 0x00042d25, - 0x1b80, 0x00042d27, - 0x1b80, 0x40082d35, - 0x1b80, 0x40082d37, - 0x1b80, 0x4a022d45, - 0x1b80, 0x4a022d47, - 0x1b80, 0x30192d55, - 0x1b80, 0x30192d57, - 0x1b80, 0x00012d65, - 0x1b80, 0x00012d67, - 0x1b80, 0x7b482d75, - 0x1b80, 0x7b482d77, - 0x1b80, 0x7a902d85, - 0x1b80, 0x7a902d87, - 0x1b80, 0x79002d95, - 0x1b80, 0x79002d97, - 0x1b80, 0x55032da5, - 0x1b80, 0x55032da7, - 0x1b80, 0x32e32db5, - 0x1b80, 0x32e32db7, - 0x1b80, 0x7b382dc5, - 0x1b80, 0x7b382dc7, - 0x1b80, 0x7a802dd5, - 0x1b80, 0x7a802dd7, - 0x1b80, 0x550b2de5, - 0x1b80, 0x550b2de7, - 0x1b80, 0x32e32df5, - 0x1b80, 0x32e32df7, - 0x1b80, 0x7b402e05, - 0x1b80, 0x7b402e07, - 0x1b80, 0x7a002e15, - 0x1b80, 0x7a002e17, - 0x1b80, 0x55132e25, - 0x1b80, 0x55132e27, - 0x1b80, 0x74012e35, - 0x1b80, 0x74012e37, - 0x1b80, 0x74002e45, - 0x1b80, 0x74002e47, - 0x1b80, 0x8e002e55, - 0x1b80, 0x8e002e57, - 0x1b80, 0x00012e65, - 0x1b80, 0x00012e67, - 0x1b80, 0x57022e75, - 0x1b80, 0x57022e77, - 0x1b80, 0x57002e85, - 0x1b80, 0x57002e87, - 0x1b80, 0x97002e95, - 0x1b80, 0x97002e97, - 0x1b80, 0x00012ea5, - 0x1b80, 0x00012ea7, - 0x1b80, 0x4f782eb5, - 0x1b80, 0x4f782eb7, - 0x1b80, 0x53882ec5, - 0x1b80, 0x53882ec7, - 0x1b80, 0xe2f72ed5, - 0x1b80, 0xe2f72ed7, - 0x1b80, 0x54802ee5, - 0x1b80, 0x54802ee7, - 0x1b80, 0x54002ef5, - 0x1b80, 0x54002ef7, - 0x1b80, 0x54812f05, - 0x1b80, 0x54812f07, - 0x1b80, 0x54002f15, - 0x1b80, 0x54002f17, - 0x1b80, 0x54822f25, - 0x1b80, 0x54822f27, - 0x1b80, 0x54002f35, - 0x1b80, 0x54002f37, - 0x1b80, 0xe3022f45, - 0x1b80, 0xe3022f47, - 0x1b80, 0xbf1d2f55, - 0x1b80, 0xbf1d2f57, - 0x1b80, 0x30192f65, - 0x1b80, 0x30192f67, - 0x1b80, 0xe2d72f75, - 0x1b80, 0xe2d72f77, - 0x1b80, 0xe2dc2f85, - 0x1b80, 0xe2dc2f87, - 0x1b80, 0xe2e02f95, - 0x1b80, 0xe2e02f97, - 0x1b80, 0xe2e72fa5, - 0x1b80, 0xe2e72fa7, - 0x1b80, 0xe3412fb5, - 0x1b80, 0xe3412fb7, - 0x1b80, 0x55132fc5, - 0x1b80, 0x55132fc7, - 0x1b80, 0xe2e32fd5, - 0x1b80, 0xe2e32fd7, - 0x1b80, 0x55152fe5, - 0x1b80, 0x55152fe7, - 0x1b80, 0xe2e72ff5, - 0x1b80, 0xe2e72ff7, - 0x1b80, 0xe3413005, - 0x1b80, 0xe3413007, - 0x1b80, 0x00013015, - 0x1b80, 0x00013017, - 0x1b80, 0x54bf3025, - 0x1b80, 0x54bf3027, - 0x1b80, 0x54c03035, - 0x1b80, 0x54c03037, - 0x1b80, 0x54a33045, - 0x1b80, 0x54a33047, - 0x1b80, 0x54c13055, - 0x1b80, 0x54c13057, - 0x1b80, 0x54a43065, - 0x1b80, 0x54a43067, - 0x1b80, 0x4c183075, - 0x1b80, 0x4c183077, - 0x1b80, 0xbf073085, - 0x1b80, 0xbf073087, - 0x1b80, 0x54c23095, 
- 0x1b80, 0x54c23097, - 0x1b80, 0x54a430a5, - 0x1b80, 0x54a430a7, - 0x1b80, 0xbf0430b5, - 0x1b80, 0xbf0430b7, - 0x1b80, 0x54c130c5, - 0x1b80, 0x54c130c7, - 0x1b80, 0x54a330d5, - 0x1b80, 0x54a330d7, - 0x1b80, 0xbf0130e5, - 0x1b80, 0xbf0130e7, - 0x1b80, 0xe34f30f5, - 0x1b80, 0xe34f30f7, - 0x1b80, 0x54df3105, - 0x1b80, 0x54df3107, - 0x1b80, 0x00013115, - 0x1b80, 0x00013117, - 0x1b80, 0x54bf3125, - 0x1b80, 0x54bf3127, - 0x1b80, 0x54e53135, - 0x1b80, 0x54e53137, - 0x1b80, 0x050a3145, - 0x1b80, 0x050a3147, - 0x1b80, 0x54df3155, - 0x1b80, 0x54df3157, - 0x1b80, 0x00013165, - 0x1b80, 0x00013167, - 0x1b80, 0x7f403175, - 0x1b80, 0x7f403177, - 0x1b80, 0x7e003185, - 0x1b80, 0x7e003187, - 0x1b80, 0x7d003195, - 0x1b80, 0x7d003197, - 0x1b80, 0x550131a5, - 0x1b80, 0x550131a7, - 0x1b80, 0x5c3131b5, - 0x1b80, 0x5c3131b7, - 0x1b80, 0xe2e331c5, - 0x1b80, 0xe2e331c7, - 0x1b80, 0xe2e731d5, - 0x1b80, 0xe2e731d7, - 0x1b80, 0x548031e5, - 0x1b80, 0x548031e7, - 0x1b80, 0x540031f5, - 0x1b80, 0x540031f7, - 0x1b80, 0x54813205, - 0x1b80, 0x54813207, - 0x1b80, 0x54003215, - 0x1b80, 0x54003217, - 0x1b80, 0x54823225, - 0x1b80, 0x54823227, - 0x1b80, 0x54003235, - 0x1b80, 0x54003237, - 0x1b80, 0xe3023245, - 0x1b80, 0xe3023247, - 0x1b80, 0xbfed3255, - 0x1b80, 0xbfed3257, - 0x1b80, 0x30193265, - 0x1b80, 0x30193267, - 0x1b80, 0x74023275, - 0x1b80, 0x74023277, - 0x1b80, 0x003f3285, - 0x1b80, 0x003f3287, - 0x1b80, 0x74003295, - 0x1b80, 0x74003297, - 0x1b80, 0x000232a5, - 0x1b80, 0x000232a7, - 0x1b80, 0x000132b5, - 0x1b80, 0x000132b7, - 0x1b80, 0x000632c5, - 0x1b80, 0x000632c7, - 0x1b80, 0x5a8032d5, - 0x1b80, 0x5a8032d7, - 0x1b80, 0x5a0032e5, - 0x1b80, 0x5a0032e7, - 0x1b80, 0x920032f5, - 0x1b80, 0x920032f7, - 0x1b80, 0x00013305, - 0x1b80, 0x00013307, - 0x1b80, 0x5b8f3315, - 0x1b80, 0x5b8f3317, - 0x1b80, 0x5b0f3325, - 0x1b80, 0x5b0f3327, - 0x1b80, 0x91003335, - 0x1b80, 0x91003337, - 0x1b80, 0x00013345, - 0x1b80, 0x00013347, - 0x1b80, 0x00063355, - 0x1b80, 0x00063357, - 0x1b80, 0x5d803365, - 0x1b80, 0x5d803367, - 0x1b80, 0x5e563375, - 0x1b80, 0x5e563377, - 0x1b80, 0x00043385, - 0x1b80, 0x00043387, - 0x1b80, 0x4d083395, - 0x1b80, 0x4d083397, - 0x1b80, 0x571033a5, - 0x1b80, 0x571033a7, - 0x1b80, 0x570033b5, - 0x1b80, 0x570033b7, - 0x1b80, 0x4d0033c5, - 0x1b80, 0x4d0033c7, - 0x1b80, 0x000633d5, - 0x1b80, 0x000633d7, - 0x1b80, 0x5d0033e5, - 0x1b80, 0x5d0033e7, - 0x1b80, 0x000433f5, - 0x1b80, 0x000433f7, - 0x1b80, 0x00013405, - 0x1b80, 0x00013407, - 0x1b80, 0x549f3415, - 0x1b80, 0x549f3417, - 0x1b80, 0x54ff3425, - 0x1b80, 0x54ff3427, - 0x1b80, 0x54003435, - 0x1b80, 0x54003437, - 0x1b80, 0x00013445, - 0x1b80, 0x00013447, - 0x1b80, 0x5c313455, - 0x1b80, 0x5c313457, - 0x1b80, 0x07143465, - 0x1b80, 0x07143467, - 0x1b80, 0x54003475, - 0x1b80, 0x54003477, - 0x1b80, 0x5c323485, - 0x1b80, 0x5c323487, - 0x1b80, 0x00013495, - 0x1b80, 0x00013497, - 0x1b80, 0x5c3234a5, - 0x1b80, 0x5c3234a7, - 0x1b80, 0x071434b5, - 0x1b80, 0x071434b7, - 0x1b80, 0x540034c5, - 0x1b80, 0x540034c7, - 0x1b80, 0x5c3134d5, - 0x1b80, 0x5c3134d7, - 0x1b80, 0x000134e5, - 0x1b80, 0x000134e7, - 0x1b80, 0x4c9834f5, - 0x1b80, 0x4c9834f7, - 0x1b80, 0x4c183505, - 0x1b80, 0x4c183507, - 0x1b80, 0x00013515, - 0x1b80, 0x00013517, - 0x1b80, 0x5c323525, - 0x1b80, 0x5c323527, - 0x1b80, 0x62043535, - 0x1b80, 0x62043537, - 0x1b80, 0x63033545, - 0x1b80, 0x63033547, - 0x1b80, 0x66073555, - 0x1b80, 0x66073557, - 0x1b80, 0x7b403565, - 0x1b80, 0x7b403567, - 0x1b80, 0x7a003575, - 0x1b80, 0x7a003577, - 0x1b80, 0x79003585, - 0x1b80, 0x79003587, - 0x1b80, 0x7f403595, - 0x1b80, 0x7f403597, - 0x1b80, 
0x7e0035a5, - 0x1b80, 0x7e0035a7, - 0x1b80, 0x7d0035b5, - 0x1b80, 0x7d0035b7, - 0x1b80, 0x090135c5, - 0x1b80, 0x090135c7, - 0x1b80, 0x0c0135d5, - 0x1b80, 0x0c0135d7, - 0x1b80, 0x0ba635e5, - 0x1b80, 0x0ba635e7, - 0x1b80, 0x000135f5, - 0x1b80, 0x000135f7, + 0x1b80, 0x00012705, + 0x1b80, 0x00012707, + 0x1b80, 0x4cff2715, + 0x1b80, 0x4cff2717, + 0x1b80, 0x4d1f2725, + 0x1b80, 0x4d1f2727, + 0x1b80, 0x4e002735, + 0x1b80, 0x4e002737, + 0x1b80, 0x4f002745, + 0x1b80, 0x4f002747, + 0x1b80, 0x50ff2755, + 0x1b80, 0x50ff2757, + 0x1b80, 0x511f2765, + 0x1b80, 0x511f2767, + 0x1b80, 0x52002775, + 0x1b80, 0x52002777, + 0x1b80, 0x53002785, + 0x1b80, 0x53002787, + 0x1b80, 0x54ff2795, + 0x1b80, 0x54ff2797, + 0x1b80, 0x551f27a5, + 0x1b80, 0x551f27a7, + 0x1b80, 0x560027b5, + 0x1b80, 0x560027b7, + 0x1b80, 0x570027c5, + 0x1b80, 0x570027c7, + 0x1b80, 0x58ff27d5, + 0x1b80, 0x58ff27d7, + 0x1b80, 0x591f27e5, + 0x1b80, 0x591f27e7, + 0x1b80, 0x5a0027f5, + 0x1b80, 0x5a0027f7, + 0x1b80, 0x5b002805, + 0x1b80, 0x5b002807, + 0x1b80, 0x00012815, + 0x1b80, 0x00012817, + 0x1b80, 0x5cff2825, + 0x1b80, 0x5cff2827, + 0x1b80, 0x5d1f2835, + 0x1b80, 0x5d1f2837, + 0x1b80, 0x5e002845, + 0x1b80, 0x5e002847, + 0x1b80, 0x5f002855, + 0x1b80, 0x5f002857, + 0x1b80, 0x60ff2865, + 0x1b80, 0x60ff2867, + 0x1b80, 0x611f2875, + 0x1b80, 0x611f2877, + 0x1b80, 0x62002885, + 0x1b80, 0x62002887, + 0x1b80, 0x63002895, + 0x1b80, 0x63002897, + 0x1b80, 0x000128a5, + 0x1b80, 0x000128a7, + 0x1b80, 0x64ff28b5, + 0x1b80, 0x64ff28b7, + 0x1b80, 0x651f28c5, + 0x1b80, 0x651f28c7, + 0x1b80, 0x660028d5, + 0x1b80, 0x660028d7, + 0x1b80, 0x670028e5, + 0x1b80, 0x670028e7, + 0x1b80, 0x68ff28f5, + 0x1b80, 0x68ff28f7, + 0x1b80, 0x691f2905, + 0x1b80, 0x691f2907, + 0x1b80, 0x6a002915, + 0x1b80, 0x6a002917, + 0x1b80, 0x6b002925, + 0x1b80, 0x6b002927, + 0x1b80, 0x6cff2935, + 0x1b80, 0x6cff2937, + 0x1b80, 0x6d1f2945, + 0x1b80, 0x6d1f2947, + 0x1b80, 0x6e002955, + 0x1b80, 0x6e002957, + 0x1b80, 0x6f002965, + 0x1b80, 0x6f002967, + 0x1b80, 0x70ff2975, + 0x1b80, 0x70ff2977, + 0x1b80, 0x711f2985, + 0x1b80, 0x711f2987, + 0x1b80, 0x72002995, + 0x1b80, 0x72002997, + 0x1b80, 0x730029a5, + 0x1b80, 0x730029a7, + 0x1b80, 0x000129b5, + 0x1b80, 0x000129b7, + 0x1b80, 0x70ff29c5, + 0x1b80, 0x70ff29c7, + 0x1b80, 0x711f29d5, + 0x1b80, 0x711f29d7, + 0x1b80, 0x720029e5, + 0x1b80, 0x720029e7, + 0x1b80, 0x730029f5, + 0x1b80, 0x730029f7, + 0x1b80, 0x74ff2a05, + 0x1b80, 0x74ff2a07, + 0x1b80, 0x751f2a15, + 0x1b80, 0x751f2a17, + 0x1b80, 0x76002a25, + 0x1b80, 0x76002a27, + 0x1b80, 0x77002a35, + 0x1b80, 0x77002a37, + 0x1b80, 0x78ff2a45, + 0x1b80, 0x78ff2a47, + 0x1b80, 0x791f2a55, + 0x1b80, 0x791f2a57, + 0x1b80, 0x7a002a65, + 0x1b80, 0x7a002a67, + 0x1b80, 0x7b002a75, + 0x1b80, 0x7b002a77, + 0x1b80, 0x7cff2a85, + 0x1b80, 0x7cff2a87, + 0x1b80, 0x7d1f2a95, + 0x1b80, 0x7d1f2a97, + 0x1b80, 0x7e002aa5, + 0x1b80, 0x7e002aa7, + 0x1b80, 0x7f002ab5, + 0x1b80, 0x7f002ab7, + 0x1b80, 0x00012ac5, + 0x1b80, 0x00012ac7, + 0x1b80, 0x00042ad5, + 0x1b80, 0x00042ad7, + 0x1b80, 0x49042ae5, + 0x1b80, 0x49042ae7, + 0x1b80, 0x4bb02af5, + 0x1b80, 0x4bb02af7, + 0x1b80, 0x00062b05, + 0x1b80, 0x00062b07, + 0x1b80, 0x75042b15, + 0x1b80, 0x75042b17, + 0x1b80, 0x77082b25, + 0x1b80, 0x77082b27, + 0x1b80, 0x00042b35, + 0x1b80, 0x00042b37, + 0x1b80, 0x44802b45, + 0x1b80, 0x44802b47, + 0x1b80, 0x45ff2b55, + 0x1b80, 0x45ff2b57, + 0x1b80, 0x463f2b65, + 0x1b80, 0x463f2b67, + 0x1b80, 0x47312b75, + 0x1b80, 0x47312b77, + 0x1b80, 0x40082b85, + 0x1b80, 0x40082b87, + 0x1b80, 0xe2db2b95, + 0x1b80, 0xe2db2b97, + 0x1b80, 0x00042ba5, + 0x1b80, 0x00042ba7, 
+ 0x1b80, 0x400c2bb5, + 0x1b80, 0x400c2bb7, + 0x1b80, 0x00062bc5, + 0x1b80, 0x00062bc7, + 0x1b80, 0x75002bd5, + 0x1b80, 0x75002bd7, + 0x1b80, 0x00042be5, + 0x1b80, 0x00042be7, + 0x1b80, 0x445b2bf5, + 0x1b80, 0x445b2bf7, + 0x1b80, 0x47002c05, + 0x1b80, 0x47002c07, + 0x1b80, 0x40082c15, + 0x1b80, 0x40082c17, + 0x1b80, 0x00012c25, + 0x1b80, 0x00012c27, + 0x1b80, 0x00052c35, + 0x1b80, 0x00052c37, + 0x1b80, 0x61042c45, + 0x1b80, 0x61042c47, + 0x1b80, 0x63b02c55, + 0x1b80, 0x63b02c57, + 0x1b80, 0x00062c65, + 0x1b80, 0x00062c67, + 0x1b80, 0x75082c75, + 0x1b80, 0x75082c77, + 0x1b80, 0x77082c85, + 0x1b80, 0x77082c87, + 0x1b80, 0x00052c95, + 0x1b80, 0x00052c97, + 0x1b80, 0x5c802ca5, + 0x1b80, 0x5c802ca7, + 0x1b80, 0x5dff2cb5, + 0x1b80, 0x5dff2cb7, + 0x1b80, 0x5e3f2cc5, + 0x1b80, 0x5e3f2cc7, + 0x1b80, 0x5f312cd5, + 0x1b80, 0x5f312cd7, + 0x1b80, 0x00042ce5, + 0x1b80, 0x00042ce7, + 0x1b80, 0x400a2cf5, + 0x1b80, 0x400a2cf7, + 0x1b80, 0xe2db2d05, + 0x1b80, 0xe2db2d07, + 0x1b80, 0x00042d15, + 0x1b80, 0x00042d17, + 0x1b80, 0x400c2d25, + 0x1b80, 0x400c2d27, + 0x1b80, 0x00062d35, + 0x1b80, 0x00062d37, + 0x1b80, 0x75002d45, + 0x1b80, 0x75002d47, + 0x1b80, 0x00052d55, + 0x1b80, 0x00052d57, + 0x1b80, 0x5c5b2d65, + 0x1b80, 0x5c5b2d67, + 0x1b80, 0x5f002d75, + 0x1b80, 0x5f002d77, + 0x1b80, 0x00042d85, + 0x1b80, 0x00042d87, + 0x1b80, 0x40082d95, + 0x1b80, 0x40082d97, + 0x1b80, 0x00012da5, + 0x1b80, 0x00012da7, + 0x1b80, 0x00072db5, + 0x1b80, 0x00072db7, + 0x1b80, 0x4c122dc5, + 0x1b80, 0x4c122dc7, + 0x1b80, 0x4e202dd5, + 0x1b80, 0x4e202dd7, + 0x1b80, 0x00052de5, + 0x1b80, 0x00052de7, + 0x1b80, 0x598f2df5, + 0x1b80, 0x598f2df7, + 0x1b80, 0x40022e05, + 0x1b80, 0x40022e07, + 0x1b80, 0x4c012e15, + 0x1b80, 0x4c012e17, + 0x1b80, 0x4c002e25, + 0x1b80, 0x4c002e27, + 0x1b80, 0xab002e35, + 0x1b80, 0xab002e37, + 0x1b80, 0x40032e45, + 0x1b80, 0x40032e47, + 0x1b80, 0x49802e55, + 0x1b80, 0x49802e57, + 0x1b80, 0x56c02e65, + 0x1b80, 0x56c02e67, + 0x1b80, 0x54022e75, + 0x1b80, 0x54022e77, + 0x1b80, 0x4c012e85, + 0x1b80, 0x4c012e87, + 0x1b80, 0x4c002e95, + 0x1b80, 0x4c002e97, + 0x1b80, 0xab002ea5, + 0x1b80, 0xab002ea7, + 0x1b80, 0x54002eb5, + 0x1b80, 0x54002eb7, + 0x1b80, 0x00072ec5, + 0x1b80, 0x00072ec7, + 0x1b80, 0x4c002ed5, + 0x1b80, 0x4c002ed7, + 0x1b80, 0x4e002ee5, + 0x1b80, 0x4e002ee7, + 0x1b80, 0x00052ef5, + 0x1b80, 0x00052ef7, + 0x1b80, 0x40042f05, + 0x1b80, 0x40042f07, + 0x1b80, 0x4c012f15, + 0x1b80, 0x4c012f17, + 0x1b80, 0x4c002f25, + 0x1b80, 0x4c002f27, + 0x1b80, 0x00012f35, + 0x1b80, 0x00012f37, + 0x1b80, 0x00042f45, + 0x1b80, 0x00042f47, + 0x1b80, 0x44802f55, + 0x1b80, 0x44802f57, + 0x1b80, 0x4b002f65, + 0x1b80, 0x4b002f67, + 0x1b80, 0x00052f75, + 0x1b80, 0x00052f77, + 0x1b80, 0x5c802f85, + 0x1b80, 0x5c802f87, + 0x1b80, 0x63002f95, + 0x1b80, 0x63002f97, + 0x1b80, 0x00072fa5, + 0x1b80, 0x00072fa7, + 0x1b80, 0x780c2fb5, + 0x1b80, 0x780c2fb7, + 0x1b80, 0x79192fc5, + 0x1b80, 0x79192fc7, + 0x1b80, 0x7a002fd5, + 0x1b80, 0x7a002fd7, + 0x1b80, 0x7b822fe5, + 0x1b80, 0x7b822fe7, + 0x1b80, 0x7b022ff5, + 0x1b80, 0x7b022ff7, + 0x1b80, 0x78143005, + 0x1b80, 0x78143007, + 0x1b80, 0x79ee3015, + 0x1b80, 0x79ee3017, + 0x1b80, 0x7a013025, + 0x1b80, 0x7a013027, + 0x1b80, 0x7b833035, + 0x1b80, 0x7b833037, + 0x1b80, 0x7b033045, + 0x1b80, 0x7b033047, + 0x1b80, 0x78283055, + 0x1b80, 0x78283057, + 0x1b80, 0x79b43065, + 0x1b80, 0x79b43067, + 0x1b80, 0x7a003075, + 0x1b80, 0x7a003077, + 0x1b80, 0x7b003085, + 0x1b80, 0x7b003087, + 0x1b80, 0x00013095, + 0x1b80, 0x00013097, + 0x1b80, 0x000430a5, + 0x1b80, 0x000430a7, + 0x1b80, 0x448030b5, + 0x1b80, 
0x448030b7, + 0x1b80, 0x4b0030c5, + 0x1b80, 0x4b0030c7, + 0x1b80, 0x000530d5, + 0x1b80, 0x000530d7, + 0x1b80, 0x5c8030e5, + 0x1b80, 0x5c8030e7, + 0x1b80, 0x630030f5, + 0x1b80, 0x630030f7, + 0x1b80, 0x00073105, + 0x1b80, 0x00073107, + 0x1b80, 0x78103115, + 0x1b80, 0x78103117, + 0x1b80, 0x79133125, + 0x1b80, 0x79133127, + 0x1b80, 0x7a003135, + 0x1b80, 0x7a003137, + 0x1b80, 0x7b803145, + 0x1b80, 0x7b803147, + 0x1b80, 0x7b003155, + 0x1b80, 0x7b003157, + 0x1b80, 0x78db3165, + 0x1b80, 0x78db3167, + 0x1b80, 0x79003175, + 0x1b80, 0x79003177, + 0x1b80, 0x7a003185, + 0x1b80, 0x7a003187, + 0x1b80, 0x7b813195, + 0x1b80, 0x7b813197, + 0x1b80, 0x7b0131a5, + 0x1b80, 0x7b0131a7, + 0x1b80, 0x782831b5, + 0x1b80, 0x782831b7, + 0x1b80, 0x79b431c5, + 0x1b80, 0x79b431c7, + 0x1b80, 0x7a0031d5, + 0x1b80, 0x7a0031d7, + 0x1b80, 0x7b0031e5, + 0x1b80, 0x7b0031e7, + 0x1b80, 0x000131f5, + 0x1b80, 0x000131f7, + 0x1b80, 0x00073205, + 0x1b80, 0x00073207, + 0x1b80, 0x783e3215, + 0x1b80, 0x783e3217, + 0x1b80, 0x79f93225, + 0x1b80, 0x79f93227, + 0x1b80, 0x7a013235, + 0x1b80, 0x7a013237, + 0x1b80, 0x7b823245, + 0x1b80, 0x7b823247, + 0x1b80, 0x7b023255, + 0x1b80, 0x7b023257, + 0x1b80, 0x78a93265, + 0x1b80, 0x78a93267, + 0x1b80, 0x79ed3275, + 0x1b80, 0x79ed3277, + 0x1b80, 0x7b833285, + 0x1b80, 0x7b833287, + 0x1b80, 0x7b033295, + 0x1b80, 0x7b033297, + 0x1b80, 0x782832a5, + 0x1b80, 0x782832a7, + 0x1b80, 0x79b432b5, + 0x1b80, 0x79b432b7, + 0x1b80, 0x7a0032c5, + 0x1b80, 0x7a0032c7, + 0x1b80, 0x7b0032d5, + 0x1b80, 0x7b0032d7, + 0x1b80, 0x000132e5, + 0x1b80, 0x000132e7, + 0x1b80, 0x000732f5, + 0x1b80, 0x000732f7, + 0x1b80, 0x78ae3305, + 0x1b80, 0x78ae3307, + 0x1b80, 0x79fa3315, + 0x1b80, 0x79fa3317, + 0x1b80, 0x7a013325, + 0x1b80, 0x7a013327, + 0x1b80, 0x7b803335, + 0x1b80, 0x7b803337, + 0x1b80, 0x7b003345, + 0x1b80, 0x7b003347, + 0x1b80, 0x787a3355, + 0x1b80, 0x787a3357, + 0x1b80, 0x79f13365, + 0x1b80, 0x79f13367, + 0x1b80, 0x7b813375, + 0x1b80, 0x7b813377, + 0x1b80, 0x7b013385, + 0x1b80, 0x7b013387, + 0x1b80, 0x78283395, + 0x1b80, 0x78283397, + 0x1b80, 0x79b433a5, + 0x1b80, 0x79b433a7, + 0x1b80, 0x7a0033b5, + 0x1b80, 0x7a0033b7, + 0x1b80, 0x7b0033c5, + 0x1b80, 0x7b0033c7, + 0x1b80, 0x000133d5, + 0x1b80, 0x000133d7, + 0x1b80, 0x000733e5, + 0x1b80, 0x000733e7, + 0x1b80, 0x750033f5, + 0x1b80, 0x750033f7, + 0x1b80, 0x76023405, + 0x1b80, 0x76023407, + 0x1b80, 0x77153415, + 0x1b80, 0x77153417, + 0x1b80, 0x00063425, + 0x1b80, 0x00063427, + 0x1b80, 0x74003435, + 0x1b80, 0x74003437, + 0x1b80, 0x76003445, + 0x1b80, 0x76003447, + 0x1b80, 0x77003455, + 0x1b80, 0x77003457, + 0x1b80, 0x75103465, + 0x1b80, 0x75103467, + 0x1b80, 0x75003475, + 0x1b80, 0x75003477, + 0x1b80, 0xb3003485, + 0x1b80, 0xb3003487, + 0x1b80, 0x93003495, + 0x1b80, 0x93003497, + 0x1b80, 0x000734a5, + 0x1b80, 0x000734a7, + 0x1b80, 0x760034b5, + 0x1b80, 0x760034b7, + 0x1b80, 0x770034c5, + 0x1b80, 0x770034c7, + 0x1b80, 0x000134d5, + 0x1b80, 0x000134d7, + 0x1b80, 0x000734e5, + 0x1b80, 0x000734e7, + 0x1b80, 0x750034f5, + 0x1b80, 0x750034f7, + 0x1b80, 0x76023505, + 0x1b80, 0x76023507, + 0x1b80, 0x77253515, + 0x1b80, 0x77253517, + 0x1b80, 0x00063525, + 0x1b80, 0x00063527, + 0x1b80, 0x74003535, + 0x1b80, 0x74003537, + 0x1b80, 0x76003545, + 0x1b80, 0x76003547, + 0x1b80, 0x77013555, + 0x1b80, 0x77013557, + 0x1b80, 0x75103565, + 0x1b80, 0x75103567, + 0x1b80, 0x75003575, + 0x1b80, 0x75003577, + 0x1b80, 0xb3003585, + 0x1b80, 0xb3003587, + 0x1b80, 0x93003595, + 0x1b80, 0x93003597, + 0x1b80, 0x000735a5, + 0x1b80, 0x000735a7, + 0x1b80, 0x760035b5, + 0x1b80, 0x760035b7, + 0x1b80, 0x770035c5, 
+ 0x1b80, 0x770035c7, + 0x1b80, 0x000135d5, + 0x1b80, 0x000135d7, + 0x1b80, 0x000435e5, + 0x1b80, 0x000435e7, + 0x1b80, 0x448035f5, + 0x1b80, 0x448035f7, + 0x1b80, 0x47303605, + 0x1b80, 0x47303607, + 0x1b80, 0x00063615, + 0x1b80, 0x00063617, + 0x1b80, 0x776c3625, + 0x1b80, 0x776c3627, + 0x1b80, 0x00013635, + 0x1b80, 0x00013637, + 0x1b80, 0x00053645, + 0x1b80, 0x00053647, + 0x1b80, 0x5c803655, + 0x1b80, 0x5c803657, + 0x1b80, 0x5f303665, + 0x1b80, 0x5f303667, + 0x1b80, 0x00063675, + 0x1b80, 0x00063677, + 0x1b80, 0x776d3685, + 0x1b80, 0x776d3687, + 0x1b80, 0x00013695, + 0x1b80, 0x00013697, + 0x1b80, 0xb90036a5, + 0x1b80, 0xb90036a7, + 0x1b80, 0x990036b5, + 0x1b80, 0x990036b7, + 0x1b80, 0x000636c5, + 0x1b80, 0x000636c7, + 0x1b80, 0x770036d5, + 0x1b80, 0x770036d7, + 0x1b80, 0x980536e5, + 0x1b80, 0x980536e7, + 0x1b80, 0x000436f5, + 0x1b80, 0x000436f7, + 0x1b80, 0x40083705, + 0x1b80, 0x40083707, + 0x1b80, 0x4a023715, + 0x1b80, 0x4a023717, + 0x1b80, 0x30193725, + 0x1b80, 0x30193727, + 0x1b80, 0x00013735, + 0x1b80, 0x00013737, + 0x1b80, 0x7b483745, + 0x1b80, 0x7b483747, + 0x1b80, 0x7a903755, + 0x1b80, 0x7a903757, + 0x1b80, 0x79003765, + 0x1b80, 0x79003767, + 0x1b80, 0x55033775, + 0x1b80, 0x55033777, + 0x1b80, 0x33803785, + 0x1b80, 0x33803787, + 0x1b80, 0x7b383795, + 0x1b80, 0x7b383797, + 0x1b80, 0x7a8037a5, + 0x1b80, 0x7a8037a7, + 0x1b80, 0x550b37b5, + 0x1b80, 0x550b37b7, + 0x1b80, 0x338037c5, + 0x1b80, 0x338037c7, + 0x1b80, 0x7b4037d5, + 0x1b80, 0x7b4037d7, + 0x1b80, 0x7a0037e5, + 0x1b80, 0x7a0037e7, + 0x1b80, 0x551337f5, + 0x1b80, 0x551337f7, + 0x1b80, 0x74013805, + 0x1b80, 0x74013807, + 0x1b80, 0x74003815, + 0x1b80, 0x74003817, + 0x1b80, 0x8e003825, + 0x1b80, 0x8e003827, + 0x1b80, 0x00013835, + 0x1b80, 0x00013837, + 0x1b80, 0x57023845, + 0x1b80, 0x57023847, + 0x1b80, 0x57003855, + 0x1b80, 0x57003857, + 0x1b80, 0x97003865, + 0x1b80, 0x97003867, + 0x1b80, 0x00013875, + 0x1b80, 0x00013877, + 0x1b80, 0x4f783885, + 0x1b80, 0x4f783887, + 0x1b80, 0x53883895, + 0x1b80, 0x53883897, + 0x1b80, 0xe39438a5, + 0x1b80, 0xe39438a7, + 0x1b80, 0x548038b5, + 0x1b80, 0x548038b7, + 0x1b80, 0x540038c5, + 0x1b80, 0x540038c7, + 0x1b80, 0x548138d5, + 0x1b80, 0x548138d7, + 0x1b80, 0x540038e5, + 0x1b80, 0x540038e7, + 0x1b80, 0x548238f5, + 0x1b80, 0x548238f7, + 0x1b80, 0x54003905, + 0x1b80, 0x54003907, + 0x1b80, 0xe39f3915, + 0x1b80, 0xe39f3917, + 0x1b80, 0xbf1d3925, + 0x1b80, 0xbf1d3927, + 0x1b80, 0x30193935, + 0x1b80, 0x30193937, + 0x1b80, 0xe3743945, + 0x1b80, 0xe3743947, + 0x1b80, 0xe3793955, + 0x1b80, 0xe3793957, + 0x1b80, 0xe37d3965, + 0x1b80, 0xe37d3967, + 0x1b80, 0xe3843975, + 0x1b80, 0xe3843977, + 0x1b80, 0xe3de3985, + 0x1b80, 0xe3de3987, + 0x1b80, 0x55133995, + 0x1b80, 0x55133997, + 0x1b80, 0xe38039a5, + 0x1b80, 0xe38039a7, + 0x1b80, 0x551539b5, + 0x1b80, 0x551539b7, + 0x1b80, 0xe38439c5, + 0x1b80, 0xe38439c7, + 0x1b80, 0xe3de39d5, + 0x1b80, 0xe3de39d7, + 0x1b80, 0x000139e5, + 0x1b80, 0x000139e7, + 0x1b80, 0x54bf39f5, + 0x1b80, 0x54bf39f7, + 0x1b80, 0x54c03a05, + 0x1b80, 0x54c03a07, + 0x1b80, 0x54a33a15, + 0x1b80, 0x54a33a17, + 0x1b80, 0x54c13a25, + 0x1b80, 0x54c13a27, + 0x1b80, 0x54a43a35, + 0x1b80, 0x54a43a37, + 0x1b80, 0x4c183a45, + 0x1b80, 0x4c183a47, + 0x1b80, 0xbf073a55, + 0x1b80, 0xbf073a57, + 0x1b80, 0x54c23a65, + 0x1b80, 0x54c23a67, + 0x1b80, 0x54a43a75, + 0x1b80, 0x54a43a77, + 0x1b80, 0xbf043a85, + 0x1b80, 0xbf043a87, + 0x1b80, 0x54c13a95, + 0x1b80, 0x54c13a97, + 0x1b80, 0x54a33aa5, + 0x1b80, 0x54a33aa7, + 0x1b80, 0xbf013ab5, + 0x1b80, 0xbf013ab7, + 0x1b80, 0xe3ec3ac5, + 0x1b80, 0xe3ec3ac7, + 0x1b80, 
0x54df3ad5, + 0x1b80, 0x54df3ad7, + 0x1b80, 0x00013ae5, + 0x1b80, 0x00013ae7, + 0x1b80, 0x54bf3af5, + 0x1b80, 0x54bf3af7, + 0x1b80, 0x54e53b05, + 0x1b80, 0x54e53b07, + 0x1b80, 0x050a3b15, + 0x1b80, 0x050a3b17, + 0x1b80, 0x54df3b25, + 0x1b80, 0x54df3b27, + 0x1b80, 0x00013b35, + 0x1b80, 0x00013b37, + 0x1b80, 0x7f403b45, + 0x1b80, 0x7f403b47, + 0x1b80, 0x7e003b55, + 0x1b80, 0x7e003b57, + 0x1b80, 0x7d003b65, + 0x1b80, 0x7d003b67, + 0x1b80, 0x55013b75, + 0x1b80, 0x55013b77, + 0x1b80, 0x5c313b85, + 0x1b80, 0x5c313b87, + 0x1b80, 0xe3803b95, + 0x1b80, 0xe3803b97, + 0x1b80, 0xe3843ba5, + 0x1b80, 0xe3843ba7, + 0x1b80, 0x54803bb5, + 0x1b80, 0x54803bb7, + 0x1b80, 0x54003bc5, + 0x1b80, 0x54003bc7, + 0x1b80, 0x54813bd5, + 0x1b80, 0x54813bd7, + 0x1b80, 0x54003be5, + 0x1b80, 0x54003be7, + 0x1b80, 0x54823bf5, + 0x1b80, 0x54823bf7, + 0x1b80, 0x54003c05, + 0x1b80, 0x54003c07, + 0x1b80, 0xe39f3c15, + 0x1b80, 0xe39f3c17, + 0x1b80, 0xbfed3c25, + 0x1b80, 0xbfed3c27, + 0x1b80, 0x30193c35, + 0x1b80, 0x30193c37, + 0x1b80, 0x74023c45, + 0x1b80, 0x74023c47, + 0x1b80, 0x003f3c55, + 0x1b80, 0x003f3c57, + 0x1b80, 0x74003c65, + 0x1b80, 0x74003c67, + 0x1b80, 0x00023c75, + 0x1b80, 0x00023c77, + 0x1b80, 0x00013c85, + 0x1b80, 0x00013c87, + 0x1b80, 0x00063c95, + 0x1b80, 0x00063c97, + 0x1b80, 0x5a803ca5, + 0x1b80, 0x5a803ca7, + 0x1b80, 0x5a003cb5, + 0x1b80, 0x5a003cb7, + 0x1b80, 0x92003cc5, + 0x1b80, 0x92003cc7, + 0x1b80, 0x00013cd5, + 0x1b80, 0x00013cd7, + 0x1b80, 0x5b8f3ce5, + 0x1b80, 0x5b8f3ce7, + 0x1b80, 0x5b0f3cf5, + 0x1b80, 0x5b0f3cf7, + 0x1b80, 0x91003d05, + 0x1b80, 0x91003d07, + 0x1b80, 0x00013d15, + 0x1b80, 0x00013d17, + 0x1b80, 0x00063d25, + 0x1b80, 0x00063d27, + 0x1b80, 0x5d803d35, + 0x1b80, 0x5d803d37, + 0x1b80, 0x5e563d45, + 0x1b80, 0x5e563d47, + 0x1b80, 0x00043d55, + 0x1b80, 0x00043d57, + 0x1b80, 0x4d083d65, + 0x1b80, 0x4d083d67, + 0x1b80, 0x57103d75, + 0x1b80, 0x57103d77, + 0x1b80, 0x57003d85, + 0x1b80, 0x57003d87, + 0x1b80, 0x4d003d95, + 0x1b80, 0x4d003d97, + 0x1b80, 0x00063da5, + 0x1b80, 0x00063da7, + 0x1b80, 0x5d003db5, + 0x1b80, 0x5d003db7, + 0x1b80, 0x00043dc5, + 0x1b80, 0x00043dc7, + 0x1b80, 0x00013dd5, + 0x1b80, 0x00013dd7, + 0x1b80, 0x549f3de5, + 0x1b80, 0x549f3de7, + 0x1b80, 0x54ff3df5, + 0x1b80, 0x54ff3df7, + 0x1b80, 0x54003e05, + 0x1b80, 0x54003e07, + 0x1b80, 0x00013e15, + 0x1b80, 0x00013e17, + 0x1b80, 0x5c313e25, + 0x1b80, 0x5c313e27, + 0x1b80, 0x07143e35, + 0x1b80, 0x07143e37, + 0x1b80, 0x54003e45, + 0x1b80, 0x54003e47, + 0x1b80, 0x5c323e55, + 0x1b80, 0x5c323e57, + 0x1b80, 0x00013e65, + 0x1b80, 0x00013e67, + 0x1b80, 0x5c323e75, + 0x1b80, 0x5c323e77, + 0x1b80, 0x07143e85, + 0x1b80, 0x07143e87, + 0x1b80, 0x54003e95, + 0x1b80, 0x54003e97, + 0x1b80, 0x5c313ea5, + 0x1b80, 0x5c313ea7, + 0x1b80, 0x00013eb5, + 0x1b80, 0x00013eb7, + 0x1b80, 0x4c983ec5, + 0x1b80, 0x4c983ec7, + 0x1b80, 0x4c183ed5, + 0x1b80, 0x4c183ed7, + 0x1b80, 0x00013ee5, + 0x1b80, 0x00013ee7, + 0x1b80, 0x5c323ef5, + 0x1b80, 0x5c323ef7, + 0x1b80, 0x62043f05, + 0x1b80, 0x62043f07, + 0x1b80, 0x63033f15, + 0x1b80, 0x63033f17, + 0x1b80, 0x66073f25, + 0x1b80, 0x66073f27, + 0x1b80, 0x7b403f35, + 0x1b80, 0x7b403f37, + 0x1b80, 0x7a003f45, + 0x1b80, 0x7a003f47, + 0x1b80, 0x79003f55, + 0x1b80, 0x79003f57, + 0x1b80, 0x7f403f65, + 0x1b80, 0x7f403f67, + 0x1b80, 0x7e003f75, + 0x1b80, 0x7e003f77, + 0x1b80, 0x7d003f85, + 0x1b80, 0x7d003f87, + 0x1b80, 0x09013f95, + 0x1b80, 0x09013f97, + 0x1b80, 0x0c013fa5, + 0x1b80, 0x0c013fa7, + 0x1b80, 0x0ba63fb5, + 0x1b80, 0x0ba63fb7, + 0x1b80, 0x00013fc5, + 0x1b80, 0x00013fc7, 0x1b80, 0x00000006, 0x1b80, 0x00000002, }; 
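The array that ends here is typical of the rtw8822c tables in this patch: a flat sequence of (address, value) pairs, with the register address (0x1b80 throughout this block) at even indices and the value to program at odd indices. A minimal sketch of how such a pairwise table could be replayed onto the chip, assuming rtw88's rtw_write32() register primitive; the driver's real loader dispatches through struct rtw_table callbacks, so this loop is illustrative only:

/* Replay a flat { addr, val } register table onto the hardware.
 * Illustrative sketch: rtw88's actual table loader goes through
 * struct rtw_table's configuration callbacks.
 */
static void replay_reg_pairs(struct rtw_dev *rtwdev,
                             const u32 *tbl, size_t n_entries)
{
        size_t i;

        /* entries come in pairs: even index = address, odd = value */
        for (i = 0; i + 1 < n_entries; i += 2)
                rtw_write32(rtwdev, tbl[i], tbl[i + 1]);
}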
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.h b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.h
index 06e207dd8e5f..80c06c4f8184 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.h
@@ -12,6 +12,9 @@ extern const struct rtw_table rtw8822c_bb_pg_type0_tbl;
 extern const struct rtw_table rtw8822c_rf_a_tbl;
 extern const struct rtw_table rtw8822c_rf_b_tbl;
 extern const struct rtw_table rtw8822c_txpwr_lmt_type0_tbl;
+extern const struct rtw_table rtw8822c_dpk_afe_no_dpk_tbl;
+extern const struct rtw_table rtw8822c_dpk_afe_is_dpk_tbl;
+extern const struct rtw_table rtw8822c_dpk_mac_bb_tbl;
 extern const struct rtw_table rtw8822c_array_mp_cal_init_tbl;
 
 #endif
diff --git a/drivers/net/wireless/realtek/rtw88/rx.c b/drivers/net/wireless/realtek/rtw88/rx.c
index 4d837f0c6d5f..48b9ed49b79a 100644
--- a/drivers/net/wireless/realtek/rtw88/rx.c
+++ b/drivers/net/wireless/realtek/rtw88/rx.c
@@ -90,6 +90,7 @@ void rtw_rx_fill_rx_status(struct rtw_dev *rtwdev,
                            u8 *phy_status)
 {
         struct ieee80211_hw *hw = rtwdev->hw;
+        u8 path;
 
         memset(rx_status, 0, sizeof(*rx_status));
         rx_status->freq = hw->conf.chandef.chan->center_freq;
@@ -146,6 +147,10 @@ void rtw_rx_fill_rx_status(struct rtw_dev *rtwdev,
                 rx_status->bw = RATE_INFO_BW_20;
 
         rx_status->signal = pkt_stat->signal_power;
+        for (path = 0; path < rtwdev->hal.rf_path_num; path++) {
+                rx_status->chains |= BIT(path);
+                rx_status->chain_signal[path] = pkt_stat->rx_power[path];
+        }
 
         rtw_rx_addr_match(rtwdev, pkt_stat, hdr);
 }
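The rx.c hunk above exports per-antenna signal strength to mac80211 by setting one bit in rx_status->chains per active RF path and mirroring the per-path power into chain_signal[]. A condensed sketch of the pattern; the helper name and the s8 dBm array are assumptions for illustration:

#include <net/mac80211.h>

/* Report per-chain signal to mac80211: one chains bit per RF path,
 * plus the matching dBm value in chain_signal[]. */
static void fill_chain_signal(struct ieee80211_rx_status *rx_status,
                              const s8 *rx_power, u8 n_paths)
{
        u8 path;

        for (path = 0; path < n_paths; path++) {
                rx_status->chains |= BIT(path);
                rx_status->chain_signal[path] = rx_power[path];
        }
}

With the chain fields filled in, userspace tools such as iw can show per-antenna signal levels instead of only the combined value.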
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index 49df3bb08d41..ce5e92d82efc 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -1818,7 +1818,8 @@ out:
         return status;
 }
 
-static int rsi_mac80211_cancel_roc(struct ieee80211_hw *hw)
+static int rsi_mac80211_cancel_roc(struct ieee80211_hw *hw,
+                                   struct ieee80211_vif *vif)
 {
         struct rsi_hw *adapter = hw->priv;
         struct rsi_common *common = adapter->priv;
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
index b42cd50b837e..1bebba4e8527 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
@@ -230,19 +230,16 @@ static void rsi_reset_card(struct sdio_func *pfunction)
                 rsi_dbg(ERR_ZONE, "%s: CMD0 failed : %d\n", __func__, err);
 
         /* Issue CMD5, arg = 0 */
-        if (!host->ocr_avail) {
-                err = rsi_issue_sdiocommand(pfunction, SD_IO_SEND_OP_COND, 0,
-                                            (MMC_RSP_R4 | MMC_CMD_BCR), &resp);
-                if (err)
-                        rsi_dbg(ERR_ZONE, "%s: CMD5 failed : %d\n",
-                                __func__, err);
-
-                host->ocr_avail = resp;
-        }
+        err = rsi_issue_sdiocommand(pfunction, SD_IO_SEND_OP_COND, 0,
+                                    (MMC_RSP_R4 | MMC_CMD_BCR), &resp);
+        if (err)
+                rsi_dbg(ERR_ZONE, "%s: CMD5 failed : %d\n",
+                        __func__, err);
+        card->ocr = resp;
         /* Issue CMD5, arg = ocr. Wait till card is ready  */
         for (i = 0; i < 100; i++) {
                 err = rsi_issue_sdiocommand(pfunction, SD_IO_SEND_OP_COND,
-                                            host->ocr_avail,
+                                            card->ocr,
                                             (MMC_RSP_R4 | MMC_CMD_BCR), &resp);
                 if (err) {
                         rsi_dbg(ERR_ZONE, "%s: CMD5 failed : %d\n",
@@ -844,11 +841,11 @@ static int rsi_init_sdio_interface(struct rsi_hw *adapter,
                                    struct sdio_func *pfunction)
 {
         struct rsi_91x_sdiodev *rsi_91x_dev;
-        int status = -ENOMEM;
+        int status;
 
         rsi_91x_dev = kzalloc(sizeof(*rsi_91x_dev), GFP_KERNEL);
         if (!rsi_91x_dev)
-                return status;
+                return -ENOMEM;
 
         adapter->rsi_dev = rsi_91x_dev;
@@ -890,7 +887,7 @@ static int rsi_init_sdio_interface(struct rsi_hw *adapter,
 #ifdef CONFIG_RSI_DEBUGFS
         adapter->num_debugfs_entries = MAX_DEBUGFS_ENTRIES;
 #endif
-        return status;
+        return 0;
 fail:
         sdio_disable_func(pfunction);
         sdio_release_host(pfunction);
@@ -944,7 +941,7 @@ static int rsi_sdio_ta_reset(struct rsi_hw *adapter)
         put_unaligned_le32(TA_HOLD_THREAD_VALUE, data);
         addr = TA_HOLD_THREAD_REG | RSI_SD_REQUEST_MASTER;
         status = rsi_sdio_write_register_multiple(adapter, addr,
-                                                  (u8 *)&data,
+                                                  (u8 *)data,
                                                   RSI_9116_REG_SIZE);
         if (status < 0) {
                 rsi_dbg(ERR_ZONE, "Unable to hold TA threads\n");
@@ -954,7 +951,7 @@ static int rsi_sdio_ta_reset(struct rsi_hw *adapter)
         put_unaligned_le32(TA_SOFT_RST_CLR, data);
         addr = TA_SOFT_RESET_REG | RSI_SD_REQUEST_MASTER;
         status = rsi_sdio_write_register_multiple(adapter, addr,
-                                                  (u8 *)&data,
+                                                  (u8 *)data,
                                                   RSI_9116_REG_SIZE);
         if (status < 0) {
                 rsi_dbg(ERR_ZONE, "Unable to get TA out of reset\n");
@@ -964,7 +961,7 @@ static int rsi_sdio_ta_reset(struct rsi_hw *adapter)
         put_unaligned_le32(TA_PC_ZERO, data);
         addr = TA_TH0_PC_REG | RSI_SD_REQUEST_MASTER;
         status = rsi_sdio_write_register_multiple(adapter, addr,
-                                                  (u8 *)&data,
+                                                  (u8 *)data,
                                                   RSI_9116_REG_SIZE);
         if (status < 0) {
                 rsi_dbg(ERR_ZONE, "Unable to Reset TA PC value\n");
@@ -975,7 +972,7 @@ static int rsi_sdio_ta_reset(struct rsi_hw *adapter)
         put_unaligned_le32(TA_RELEASE_THREAD_VALUE, data);
         addr = TA_RELEASE_THREAD_REG | RSI_SD_REQUEST_MASTER;
         status = rsi_sdio_write_register_multiple(adapter, addr,
-                                                  (u8 *)&data,
+                                                  (u8 *)data,
                                                   RSI_9116_REG_SIZE);
         if (status < 0) {
                 rsi_dbg(ERR_ZONE, "Unable to release TA threads\n");
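The four (u8 *)&data to (u8 *)data hunks in rsi_sdio_ta_reset() are a pointer-level fix: data there names a buffer, so &data yields the address of the buffer object itself, not a plain byte pointer into its contents. When data is a heap-allocated buffer this is a real bug, because the card would be sent the raw bytes of the pointer variable; even when data is a local array the cast through &data merely happens to produce the same address. A small self-contained C sketch of the difference, with hypothetical names:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for rsi_sdio_write_register_multiple(): dump what would
 * actually be sent to the device. */
static void send_bytes(const uint8_t *buf, size_t len)
{
        for (size_t i = 0; i < len; i++)
                printf("%02x ", buf[i]);
        printf("\n");
}

int main(void)
{
        uint32_t *data = malloc(4);

        if (!data)
                return 1;
        memcpy(data, "\x12\x34\x56\x78", 4);

        send_bytes((uint8_t *)data, 4);  /* correct: the buffer contents */
        send_bytes((uint8_t *)&data, 4); /* wrong: raw bytes of the pointer
                                          * variable itself */
        free(data);
        return 0;
}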
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index b74dc8bc9755..547ad538d8b6 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -5749,7 +5749,8 @@ static void wlcore_roc_complete_work(struct work_struct *work)
         ieee80211_remain_on_channel_expired(wl->hw);
 }
 
-static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
+static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
+                                              struct ieee80211_vif *vif)
 {
         struct wl1271 *wl = hw->priv;
 
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index a25b17932edb..007bf6803293 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -1226,7 +1226,6 @@ fail:
 static int wl3501_close(struct net_device *dev)
 {
         struct wl3501_card *this = netdev_priv(dev);
-        int rc = -ENODEV;
         unsigned long flags;
         struct pcmcia_device *link;
         link = this->p_dev;
@@ -1241,10 +1240,9 @@ static int wl3501_close(struct net_device *dev)
         /* Mask interrupts from the SUTRO */
         wl3501_block_interrupt(this);
 
-        rc = 0;
         printk(KERN_INFO "%s: WL3501 closed\n", dev->name);
         spin_unlock_irqrestore(&this->lock, flags);
-        return rc;
+        return 0;
 }
 
 /**
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_chip.c b/drivers/net/wireless/zydas/zd1211rw/zd_chip.c
index 40c0a86dbfc7..0af4b1986e48 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_chip.c
@@ -41,8 +41,7 @@ void zd_chip_clear(struct zd_chip *chip)
 static int scnprint_mac_oui(struct zd_chip *chip, char *buffer, size_t size)
 {
         u8 *addr = zd_mac_get_perm_addr(zd_chip_to_mac(chip));
-        return scnprintf(buffer, size, "%02x-%02x-%02x",
-                         addr[0], addr[1], addr[2]);
+        return scnprintf(buffer, size, "%3phD", addr);
 }
 
 /* Prints an identifier line, which will support debugging. */
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
index da7e63fca9f5..a9999d10ae81 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
@@ -223,7 +223,6 @@
 void zd_mac_clear(struct zd_mac *mac)
 {
         flush_workqueue(zd_workqueue);
         zd_chip_clear(&mac->chip);
-        lockdep_assert_held(&mac->lock);
         ZD_MEMCLEAR(mac, sizeof(struct zd_mac));
 }
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
index 1965cd0fafc4..4e44ea8c652d 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
@@ -1597,11 +1597,6 @@ static int zd_ep_regs_out_msg(struct usb_device *udev, void *data, int len,
         }
 }
 
-static int usb_int_regs_length(unsigned int count)
-{
-        return sizeof(struct usb_int_regs) + count * sizeof(struct reg_data);
-}
-
 static void prepare_read_regs_int(struct zd_usb *usb,
                                   struct usb_req_read_regs *req,
                                   unsigned int count)
@@ -1636,10 +1631,10 @@ static bool check_read_regs(struct zd_usb *usb, struct usb_req_read_regs *req,
         /* The created block size seems to be larger than expected.
          * However results appear to be correct.
          */
-        if (rr->length < usb_int_regs_length(count)) {
+        if (rr->length < struct_size(regs, regs, count)) {
                 dev_dbg_f(zd_usb_dev(usb),
-                          "error: actual length %d less than expected %d\n",
-                          rr->length, usb_int_regs_length(count));
+                          "error: actual length %d less than expected %ld\n",
+                          rr->length, struct_size(regs, regs, count));
                 return false;
         }
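The zd_usb.c hunk above swaps an open-coded size computation for struct_size() from <linux/overflow.h>: given a pointer p to a struct ending in a flexible array member, struct_size(p, member, n) evaluates to sizeof(*p) + n * sizeof(p->member[0]), saturating instead of wrapping on overflow. A minimal sketch of the idiom with a made-up struct; demo_regs is hypothetical, and since the computation happens entirely inside sizeof, passing the not-yet-assigned pointer is the standard usage:

#include <linux/overflow.h>
#include <linux/slab.h>

/* Hypothetical wire message: fixed header plus 'count' register slots. */
struct demo_regs {
        __le16 length;
        struct {
                __le16 addr;
                __le16 value;
        } regs[];       /* flexible array member, 'count' entries follow */
};

static struct demo_regs *demo_regs_alloc(unsigned int count)
{
        struct demo_regs *r;

        /* struct_size() == sizeof(*r) + count * sizeof(r->regs[0]),
         * clamped to SIZE_MAX on overflow so kzalloc() fails cleanly
         * instead of allocating a too-small buffer. */
        r = kzalloc(struct_size(r, regs, count), GFP_KERNEL);
        return r;
}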
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index c9262ffeefe4..0020b2e8c279 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -136,12 +136,12 @@ static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
 
 static u16 frag_get_pending_idx(skb_frag_t *frag)
 {
-        return (u16)frag->page_offset;
+        return (u16)skb_frag_off(frag);
 }
 
 static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
 {
-        frag->page_offset = pending_idx;
+        skb_frag_off_set(frag, pending_idx);
 }
 
 static inline pending_ring_idx_t pending_index(unsigned i)
@@ -1057,7 +1057,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
                         int j;
                         skb->truesize += skb->data_len;
                         for (j = 0; j < i; j++)
-                                put_page(frags[j].page.p);
+                                put_page(skb_frag_page(&frags[j]));
                         return -ENOMEM;
                 }
 
@@ -1069,8 +1069,8 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
                         BUG();
 
                 offset += len;
-                frags[i].page.p = page;
-                frags[i].page_offset = 0;
+                __skb_frag_set_page(&frags[i], page);
+                skb_frag_off_set(&frags[i], 0);
                 skb_frag_size_set(&frags[i], len);
         }
 
@@ -1655,9 +1655,6 @@ static int __init netback_init(void)
 
 #ifdef CONFIG_DEBUG_FS
         xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
-        if (IS_ERR_OR_NULL(xen_netback_dbg_root))
-                pr_warn("Init of debugfs returned %ld!\n",
-                        PTR_ERR(xen_netback_dbg_root));
 #endif /* CONFIG_DEBUG_FS */
         return 0;
 
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 41034264bd34..f533b7372d59 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -170,50 +170,26 @@ DEFINE_SHOW_ATTRIBUTE(xenvif_ctrl);
 
 static void xenvif_debugfs_addif(struct xenvif *vif)
 {
-        struct dentry *pfile;
         int i;
 
-        if (IS_ERR_OR_NULL(xen_netback_dbg_root))
-                return;
-
         vif->xenvif_dbg_root = debugfs_create_dir(vif->dev->name,
                                                   xen_netback_dbg_root);
-        if (!IS_ERR_OR_NULL(vif->xenvif_dbg_root)) {
-                for (i = 0; i < vif->num_queues; ++i) {
-                        char filename[sizeof("io_ring_q") + 4];
-
-                        snprintf(filename, sizeof(filename), "io_ring_q%d", i);
-                        pfile = debugfs_create_file(filename,
-                                                    0600,
-                                                    vif->xenvif_dbg_root,
-                                                    &vif->queues[i],
-                                                    &xenvif_dbg_io_ring_ops_fops);
-                        if (IS_ERR_OR_NULL(pfile))
-                                pr_warn("Creation of io_ring file returned %ld!\n",
-                                        PTR_ERR(pfile));
-                }
+        for (i = 0; i < vif->num_queues; ++i) {
+                char filename[sizeof("io_ring_q") + 4];
 
-                if (vif->ctrl_irq) {
-                        pfile = debugfs_create_file("ctrl",
-                                                    0400,
-                                                    vif->xenvif_dbg_root,
-                                                    vif,
-                                                    &xenvif_ctrl_fops);
-                        if (IS_ERR_OR_NULL(pfile))
-                                pr_warn("Creation of ctrl file returned %ld!\n",
-                                        PTR_ERR(pfile));
-                }
-        } else
-                netdev_warn(vif->dev,
-                            "Creation of vif debugfs dir returned %ld!\n",
-                            PTR_ERR(vif->xenvif_dbg_root));
+                snprintf(filename, sizeof(filename), "io_ring_q%d", i);
+                debugfs_create_file(filename, 0600, vif->xenvif_dbg_root,
+                                    &vif->queues[i],
+                                    &xenvif_dbg_io_ring_ops_fops);
+        }
+
+        if (vif->ctrl_irq)
+                debugfs_create_file("ctrl", 0400, vif->xenvif_dbg_root, vif,
+                                    &xenvif_ctrl_fops);
 }
 
 static void xenvif_debugfs_delif(struct xenvif *vif)
 {
-        if (IS_ERR_OR_NULL(xen_netback_dbg_root))
-                return;
-
         debugfs_remove_recursive(vif->xenvif_dbg_root);
         vif->xenvif_dbg_root = NULL;
 }
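Both xen-netback hunks above delete return-value checks around debugfs calls. That matches the debugfs design rule that callers should not check results: debugfs_create_dir() and debugfs_create_file() may hand back an error pointer, but every debugfs API accepts such a pointer as a parent and quietly no-ops, so the driver behaves the same whether or not debugfs is available. A condensed sketch of the resulting idiom; the demo_* names are invented:

#include <linux/debugfs.h>

static struct dentry *demo_dbg_root;

/* No error checks on purpose: a failed create returns an error
 * pointer that later debugfs calls accept as a parent and ignore. */
static void demo_debugfs_init(void *priv,
                              const struct file_operations *fops)
{
        demo_dbg_root = debugfs_create_dir("demo", NULL);
        debugfs_create_file("state", 0400, demo_dbg_root, priv, fops);
}

static void demo_debugfs_exit(void)
{
        debugfs_remove_recursive(demo_dbg_root);
        demo_dbg_root = NULL;
}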
*/ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - tx = xennet_make_txreqs(queue, tx, skb, - skb_frag_page(frag), frag->page_offset, + tx = xennet_make_txreqs(queue, tx, skb, skb_frag_page(frag), + skb_frag_off(frag), skb_frag_size(frag)); } @@ -906,7 +906,7 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue, __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); } if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) { - queue->rx.rsp_cons = ++cons; + queue->rx.rsp_cons = ++cons + skb_queue_len(list); kfree_skb(nskb); return ~0U; } @@ -1040,7 +1040,7 @@ err: if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD) NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD; - skb_shinfo(skb)->frags[0].page_offset = rx->offset; + skb_frag_off_set(&skb_shinfo(skb)->frags[0], rx->offset); skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status); skb->data_len = rx->status; skb->len += rx->status; diff --git a/drivers/nfc/nxp-nci/Kconfig b/drivers/nfc/nxp-nci/Kconfig index 12df2c8cc51d..e1f71deab6fc 100644 --- a/drivers/nfc/nxp-nci/Kconfig +++ b/drivers/nfc/nxp-nci/Kconfig @@ -2,10 +2,9 @@ config NFC_NXP_NCI tristate "NXP-NCI NFC driver" depends on NFC_NCI - default n ---help--- - Generic core driver for NXP NCI chips such as the NPC100 - or PN7150 families. + Generic core driver for NXP NCI chips such as the NPC100 (PN547), + NPC300 (PN548) or PN7150 families. This is a driver based on the NCI NFC kernel layers and will thus not work with NXP libnfc library. @@ -23,4 +22,4 @@ config NFC_NXP_NCI_I2C To compile this driver as a module, choose m here. The module will be called nxp_nci_i2c. - Say Y if unsure. + Say N if unsure. diff --git a/drivers/nfc/nxp-nci/core.c b/drivers/nfc/nxp-nci/core.c index 8dafc696719f..a0ce95a287c5 100644 --- a/drivers/nfc/nxp-nci/core.c +++ b/drivers/nfc/nxp-nci/core.c @@ -11,10 +11,8 @@ */ #include <linux/delay.h> -#include <linux/gpio.h> #include <linux/module.h> #include <linux/nfc.h> -#include <linux/platform_data/nxp-nci.h> #include <net/nfc/nci_core.h> diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c index 4aeb3861b409..307bd2afbe05 100644 --- a/drivers/nfc/nxp-nci/i2c.c +++ b/drivers/nfc/nxp-nci/i2c.c @@ -12,8 +12,6 @@ * Copyright (C) 2012 Intel Corporation. All rights reserved. */ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include <linux/acpi.h> #include <linux/delay.h> #include <linux/i2c.h> @@ -21,9 +19,6 @@ #include <linux/module.h> #include <linux/nfc.h> #include <linux/gpio/consumer.h> -#include <linux/of_gpio.h> -#include <linux/of_irq.h> -#include <linux/platform_data/nxp-nci.h> #include <asm/unaligned.h> #include <net/nfc/nfc.h> @@ -38,8 +33,8 @@ struct nxp_nci_i2c_phy { struct i2c_client *i2c_dev; struct nci_dev *ndev; - unsigned int gpio_en; - unsigned int gpio_fw; + struct gpio_desc *gpiod_en; + struct gpio_desc *gpiod_fw; int hard_fault; /* * < 0 if hardware error occurred (e.g. i2c err) @@ -52,8 +47,8 @@ static int nxp_nci_i2c_set_mode(void *phy_id, { struct nxp_nci_i2c_phy *phy = (struct nxp_nci_i2c_phy *) phy_id; - gpio_set_value(phy->gpio_fw, (mode == NXP_NCI_MODE_FW) ? 1 : 0); - gpio_set_value(phy->gpio_en, (mode != NXP_NCI_MODE_COLD) ? 1 : 0); + gpiod_set_value(phy->gpiod_fw, (mode == NXP_NCI_MODE_FW) ? 1 : 0); + gpiod_set_value(phy->gpiod_en, (mode != NXP_NCI_MODE_COLD) ? 
1 : 0); usleep_range(10000, 15000); if (mode == NXP_NCI_MODE_COLD) @@ -250,116 +245,55 @@ exit_irq_none: return IRQ_NONE; } -static int nxp_nci_i2c_parse_devtree(struct i2c_client *client) -{ - struct nxp_nci_i2c_phy *phy = i2c_get_clientdata(client); - struct device_node *pp; - int r; - - pp = client->dev.of_node; - if (!pp) - return -ENODEV; - - r = of_get_named_gpio(pp, "enable-gpios", 0); - if (r == -EPROBE_DEFER) - r = of_get_named_gpio(pp, "enable-gpios", 0); - if (r < 0) { - nfc_err(&client->dev, "Failed to get EN gpio, error: %d\n", r); - return r; - } - phy->gpio_en = r; - - r = of_get_named_gpio(pp, "firmware-gpios", 0); - if (r == -EPROBE_DEFER) - r = of_get_named_gpio(pp, "firmware-gpios", 0); - if (r < 0) { - nfc_err(&client->dev, "Failed to get FW gpio, error: %d\n", r); - return r; - } - phy->gpio_fw = r; - - return 0; -} - -static int nxp_nci_i2c_acpi_config(struct nxp_nci_i2c_phy *phy) -{ - struct i2c_client *client = phy->i2c_dev; - struct gpio_desc *gpiod_en, *gpiod_fw; +static const struct acpi_gpio_params firmware_gpios = { 1, 0, false }; +static const struct acpi_gpio_params enable_gpios = { 2, 0, false }; - gpiod_en = devm_gpiod_get_index(&client->dev, NULL, 2, GPIOD_OUT_LOW); - gpiod_fw = devm_gpiod_get_index(&client->dev, NULL, 1, GPIOD_OUT_LOW); - - if (IS_ERR(gpiod_en) || IS_ERR(gpiod_fw)) { - nfc_err(&client->dev, "No GPIOs\n"); - return -EINVAL; - } - - phy->gpio_en = desc_to_gpio(gpiod_en); - phy->gpio_fw = desc_to_gpio(gpiod_fw); - - return 0; -} +static const struct acpi_gpio_mapping acpi_nxp_nci_gpios[] = { + { "enable-gpios", &enable_gpios, 1 }, + { "firmware-gpios", &firmware_gpios, 1 }, + { } +}; static int nxp_nci_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { + struct device *dev = &client->dev; struct nxp_nci_i2c_phy *phy; - struct nxp_nci_nfc_platform_data *pdata; int r; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { nfc_err(&client->dev, "Need I2C_FUNC_I2C\n"); - r = -ENODEV; - goto probe_exit; + return -ENODEV; } phy = devm_kzalloc(&client->dev, sizeof(struct nxp_nci_i2c_phy), GFP_KERNEL); - if (!phy) { - r = -ENOMEM; - goto probe_exit; - } + if (!phy) + return -ENOMEM; phy->i2c_dev = client; i2c_set_clientdata(client, phy); - pdata = client->dev.platform_data; - - if (!pdata && client->dev.of_node) { - r = nxp_nci_i2c_parse_devtree(client); - if (r < 0) { - nfc_err(&client->dev, "Failed to get DT data\n"); - goto probe_exit; - } - } else if (pdata) { - phy->gpio_en = pdata->gpio_en; - phy->gpio_fw = pdata->gpio_fw; - } else if (ACPI_HANDLE(&client->dev)) { - r = nxp_nci_i2c_acpi_config(phy); - if (r < 0) - goto probe_exit; - goto nci_probe; - } else { - nfc_err(&client->dev, "No platform data\n"); - r = -EINVAL; - goto probe_exit; - } + r = devm_acpi_dev_add_driver_gpios(dev, acpi_nxp_nci_gpios); + if (r) + dev_dbg(dev, "Unable to add GPIO mapping table\n"); - r = devm_gpio_request_one(&phy->i2c_dev->dev, phy->gpio_en, - GPIOF_OUT_INIT_LOW, "nxp_nci_en"); - if (r < 0) - goto probe_exit; + phy->gpiod_en = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW); + if (IS_ERR(phy->gpiod_en)) { + nfc_err(dev, "Failed to get EN gpio\n"); + return PTR_ERR(phy->gpiod_en); + } - r = devm_gpio_request_one(&phy->i2c_dev->dev, phy->gpio_fw, - GPIOF_OUT_INIT_LOW, "nxp_nci_fw"); - if (r < 0) - goto probe_exit; + phy->gpiod_fw = devm_gpiod_get(dev, "firmware", GPIOD_OUT_LOW); + if (IS_ERR(phy->gpiod_fw)) { + nfc_err(dev, "Failed to get FW gpio\n"); + return PTR_ERR(phy->gpiod_fw); + } -nci_probe: r = nxp_nci_probe(phy, &client->dev, &i2c_phy_ops, NXP_NCI_I2C_MAX_PAYLOAD,
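The conversion above routes ACPI and device-tree systems through the same GPIO descriptor API: an acpi_gpio_mapping table gives the ACPI GpioIo resources DT-style names, after which devm_gpiod_get() resolves the line for either firmware. Note that devm_acpi_dev_add_driver_gpios() fails (with -EINVAL) on non-ACPI systems, so its result must not abort the probe. A condensed sketch of the pattern, using hypothetical demo_* names:

#include <linux/acpi.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>

static const struct acpi_gpio_params demo_enable_gpio = { 0, 0, false };

static const struct acpi_gpio_mapping demo_acpi_gpios[] = {
        { "enable-gpios", &demo_enable_gpio, 1 },
        { }
};

static int demo_probe(struct i2c_client *client)
{
        struct device *dev = &client->dev;
        struct gpio_desc *enable;

        /* Name the ACPI GpioIo resources; harmless to fail on DT systems */
        if (devm_acpi_dev_add_driver_gpios(dev, demo_acpi_gpios))
                dev_dbg(dev, "no ACPI GPIO mapping added\n");

        /* Resolves "enable-gpios" on DT, or the mapping above on ACPI */
        enable = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
        if (IS_ERR(enable))
                return PTR_ERR(enable);

        gpiod_set_value(enable, 1);     /* logical level, polarity-aware */
        return 0;
}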
&phy->ndev); if (r < 0) - goto probe_exit; + return r; r = request_threaded_irq(client->irq, NULL, nxp_nci_i2c_irq_thread_fn, @@ -368,7 +302,6 @@ nci_probe: if (r < 0) nfc_err(&client->dev, "Unable to register IRQ handler\n"); -probe_exit: return r; } @@ -390,14 +323,15 @@ MODULE_DEVICE_TABLE(i2c, nxp_nci_i2c_id_table); static const struct of_device_id of_nxp_nci_i2c_match[] = { { .compatible = "nxp,nxp-nci-i2c", }, - {}, + {} }; MODULE_DEVICE_TABLE(of, of_nxp_nci_i2c_match); #ifdef CONFIG_ACPI -static struct acpi_device_id acpi_id[] = { +static const struct acpi_device_id acpi_id[] = { + { "NXP1001" }, { "NXP7471" }, - { }, + { } }; MODULE_DEVICE_TABLE(acpi, acpi_id); #endif @@ -406,7 +340,7 @@ static struct i2c_driver nxp_nci_i2c_driver = { .driver = { .name = NXP_NCI_I2C_DRIVER_NAME, .acpi_match_table = ACPI_PTR(acpi_id), - .of_match_table = of_match_ptr(of_nxp_nci_i2c_match), + .of_match_table = of_nxp_nci_i2c_match, }, .probe = nxp_nci_i2c_probe, .id_table = nxp_nci_i2c_id_table, diff --git a/drivers/nfc/nxp-nci/nxp-nci.h b/drivers/nfc/nxp-nci/nxp-nci.h index 6fe7c45544bf..ae3fb2735a4e 100644 --- a/drivers/nfc/nxp-nci/nxp-nci.h +++ b/drivers/nfc/nxp-nci/nxp-nci.h @@ -14,7 +14,6 @@ #include <linux/completion.h> #include <linux/firmware.h> #include <linux/nfc.h> -#include <linux/platform_data/nxp-nci.h> #include <net/nfc/nci_core.h> diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index 2ab92409210a..c313de96a357 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig @@ -182,6 +182,7 @@ config PCI_LABEL config PCI_HYPERV tristate "Hyper-V PCI Frontend" depends on X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64 + select PCI_HYPERV_INTERFACE help The PCI device frontend driver allows the kernel to import arbitrary PCI devices from a PCI backend to support PCI driver domains. diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig index fe9f9f13ce11..70e078238899 100644 --- a/drivers/pci/controller/Kconfig +++ b/drivers/pci/controller/Kconfig @@ -281,5 +281,12 @@ config VMD To compile this driver as a module, choose M here: the module will be called vmd. +config PCI_HYPERV_INTERFACE + tristate "Hyper-V PCI Interface" + depends on X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64 + help + The Hyper-V PCI Interface is a helper driver that allows other drivers to + have a common interface with the Hyper-V PCI frontend driver. + source "drivers/pci/controller/dwc/Kconfig" endmenu diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile index d56a507495c5..a2a22c9d91af 100644 --- a/drivers/pci/controller/Makefile +++ b/drivers/pci/controller/Makefile @@ -4,6 +4,7 @@ obj-$(CONFIG_PCIE_CADENCE_HOST) += pcie-cadence-host.o obj-$(CONFIG_PCIE_CADENCE_EP) += pcie-cadence-ep.o obj-$(CONFIG_PCI_FTPCI100) += pci-ftpci100.o obj-$(CONFIG_PCI_HYPERV) += pci-hyperv.o +obj-$(CONFIG_PCI_HYPERV_INTERFACE) += pci-hyperv-intf.o obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o obj-$(CONFIG_PCI_AARDVARK) += pci-aardvark.o obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o diff --git a/drivers/pci/controller/pci-hyperv-intf.c b/drivers/pci/controller/pci-hyperv-intf.c new file mode 100644 index 000000000000..cc96be450360 --- /dev/null +++ b/drivers/pci/controller/pci-hyperv-intf.c @@ -0,0 +1,67 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Microsoft Corporation. + * + * Author: + * Haiyang Zhang <haiyangz@microsoft.com> + * + * This small module is a helper driver that allows other drivers to + * have a common interface with the Hyper-V PCI frontend driver.
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/hyperv.h> + +struct hyperv_pci_block_ops hvpci_block_ops; +EXPORT_SYMBOL_GPL(hvpci_block_ops); + +int hyperv_read_cfg_blk(struct pci_dev *dev, void *buf, unsigned int buf_len, + unsigned int block_id, unsigned int *bytes_returned) +{ + if (!hvpci_block_ops.read_block) + return -EOPNOTSUPP; + + return hvpci_block_ops.read_block(dev, buf, buf_len, block_id, + bytes_returned); +} +EXPORT_SYMBOL_GPL(hyperv_read_cfg_blk); + +int hyperv_write_cfg_blk(struct pci_dev *dev, void *buf, unsigned int len, + unsigned int block_id) +{ + if (!hvpci_block_ops.write_block) + return -EOPNOTSUPP; + + return hvpci_block_ops.write_block(dev, buf, len, block_id); +} +EXPORT_SYMBOL_GPL(hyperv_write_cfg_blk); + +int hyperv_reg_block_invalidate(struct pci_dev *dev, void *context, + void (*block_invalidate)(void *context, + u64 block_mask)) +{ + if (!hvpci_block_ops.reg_blk_invalidate) + return -EOPNOTSUPP; + + return hvpci_block_ops.reg_blk_invalidate(dev, context, + block_invalidate); +} +EXPORT_SYMBOL_GPL(hyperv_reg_block_invalidate); + +static void __exit exit_hv_pci_intf(void) +{ +} + +static int __init init_hv_pci_intf(void) +{ + return 0; +} + +module_init(init_hv_pci_intf); +module_exit(exit_hv_pci_intf); + +MODULE_DESCRIPTION("Hyper-V PCI Interface"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index 97056f3dd317..0ca73c851e0f 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -365,6 +365,39 @@ struct pci_delete_interrupt { struct tran_int_desc int_desc; } __packed; +/* + * Note: the VM must pass a valid block id, wslot and bytes_requested. + */ +struct pci_read_block { + struct pci_message message_type; + u32 block_id; + union win_slot_encoding wslot; + u32 bytes_requested; +} __packed; + +struct pci_read_block_response { + struct vmpacket_descriptor hdr; + u32 status; + u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX]; +} __packed; + +/* + * Note: the VM must pass a valid block id, wslot and byte_count. + */ +struct pci_write_block { + struct pci_message message_type; + u32 block_id; + union win_slot_encoding wslot; + u32 byte_count; + u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX]; +} __packed; + +struct pci_dev_inval_block { + struct pci_incoming_message incoming; + union win_slot_encoding wslot; + u64 block_mask; +} __packed; + struct pci_dev_incoming { struct pci_incoming_message incoming; union win_slot_encoding wslot; @@ -499,6 +532,9 @@ struct hv_pci_dev { struct hv_pcibus_device *hbus; struct work_struct wrk; + void (*block_invalidate)(void *context, u64 block_mask); + void *invalidate_context; + /* * What would be observed if one wrote 0xFFFFFFFF to a BAR and then * read it back, for each of the BAR offsets within config space. @@ -817,6 +853,253 @@ static struct pci_ops hv_pcifront_ops = { .write = hv_pcifront_write_config, }; +/* + * Paravirtual backchannel + * + * Hyper-V SR-IOV provides a backchannel mechanism in software for + * communication between a VF driver and a PF driver. These + * "configuration blocks" are similar in concept to PCI configuration space, + * but instead of doing reads and writes in 32-bit chunks through a very slow + * path, packets of up to 128 bytes can be sent or received asynchronously. + * + * Nearly every SR-IOV device contains just such a communications channel in + * hardware, so using this one in software is usually optional. 
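The wrappers in pci-hyperv-intf.c above give VF-side consumers a call that simply degrades to -EOPNOTSUPP until the pci-hyperv module fills in hvpci_block_ops at init time, so consumers need no hard module dependency. A hedged sketch of a consumer, assuming a hypothetical block ID of 1 and a 64-byte buffer:

#include <linux/hyperv.h>
#include <linux/pci.h>

/* Hypothetical consumer: read config block 1 of a Hyper-V VF device. */
static int demo_read_block(struct pci_dev *vf_dev)
{
        u8 buf[64];
        unsigned int returned = 0;
        int ret;

        /* Returns -EOPNOTSUPP while pci-hyperv is not loaded */
        ret = hyperv_read_cfg_blk(vf_dev, buf, sizeof(buf), 1, &returned);
        if (ret)
                return ret;

        pr_info("got %u bytes from config block 1\n", returned);
        return 0;
}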
Using the + * software channel, however, allows driver implementers to leverage software + * tools that fuzz the communications channel looking for vulnerabilities. + * + * The usage model for these packets puts the responsibility for reading or + * writing on the VF driver. The VF driver sends a read or a write packet, + * indicating which "block" is being referred to by number. + * + * If the PF driver wishes to initiate communication, it can "invalidate" one or + * more of the first 64 blocks. This invalidation is delivered via a callback + * supplied by the VF driver to this driver. + * + * No protocol is implied, except that supplied by the PF and VF drivers. + */ + +struct hv_read_config_compl { + struct hv_pci_compl comp_pkt; + void *buf; + unsigned int len; + unsigned int bytes_returned; +}; + +/** + * hv_pci_read_config_compl() - Invoked when a response packet + * for a read config block operation arrives. + * @context: Identifies the read config operation + * @resp: The response packet itself + * @resp_packet_size: Size in bytes of the response packet + */ +static void hv_pci_read_config_compl(void *context, struct pci_response *resp, + int resp_packet_size) +{ + struct hv_read_config_compl *comp = context; + struct pci_read_block_response *read_resp = + (struct pci_read_block_response *)resp; + unsigned int data_len, hdr_len; + + hdr_len = offsetof(struct pci_read_block_response, bytes); + if (resp_packet_size < hdr_len) { + comp->comp_pkt.completion_status = -1; + goto out; + } + + data_len = resp_packet_size - hdr_len; + if (data_len > 0 && read_resp->status == 0) { + comp->bytes_returned = min(comp->len, data_len); + memcpy(comp->buf, read_resp->bytes, comp->bytes_returned); + } else { + comp->bytes_returned = 0; + } + + comp->comp_pkt.completion_status = read_resp->status; +out: + complete(&comp->comp_pkt.host_event); +} + +/** + * hv_read_config_block() - Sends a read config block request to + * the back-end driver running in the Hyper-V parent partition. + * @pdev: The PCI driver's representation for this device. + * @buf: Buffer into which the config block will be copied. + * @len: Size in bytes of buf. + * @block_id: Identifies the config block which has been requested. + * @bytes_returned: Size which came back from the back-end driver.
+ * + * Return: 0 on success, -errno on failure + */ +int hv_read_config_block(struct pci_dev *pdev, void *buf, unsigned int len, + unsigned int block_id, unsigned int *bytes_returned) +{ + struct hv_pcibus_device *hbus = + container_of(pdev->bus->sysdata, struct hv_pcibus_device, + sysdata); + struct { + struct pci_packet pkt; + char buf[sizeof(struct pci_read_block)]; + } pkt; + struct hv_read_config_compl comp_pkt; + struct pci_read_block *read_blk; + int ret; + + if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX) + return -EINVAL; + + init_completion(&comp_pkt.comp_pkt.host_event); + comp_pkt.buf = buf; + comp_pkt.len = len; + + memset(&pkt, 0, sizeof(pkt)); + pkt.pkt.completion_func = hv_pci_read_config_compl; + pkt.pkt.compl_ctxt = &comp_pkt; + read_blk = (struct pci_read_block *)&pkt.pkt.message; + read_blk->message_type.type = PCI_READ_BLOCK; + read_blk->wslot.slot = devfn_to_wslot(pdev->devfn); + read_blk->block_id = block_id; + read_blk->bytes_requested = len; + + ret = vmbus_sendpacket(hbus->hdev->channel, read_blk, + sizeof(*read_blk), (unsigned long)&pkt.pkt, + VM_PKT_DATA_INBAND, + VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); + if (ret) + return ret; + + ret = wait_for_response(hbus->hdev, &comp_pkt.comp_pkt.host_event); + if (ret) + return ret; + + if (comp_pkt.comp_pkt.completion_status != 0 || + comp_pkt.bytes_returned == 0) { + dev_err(&hbus->hdev->device, + "Read Config Block failed: 0x%x, bytes_returned=%d\n", + comp_pkt.comp_pkt.completion_status, + comp_pkt.bytes_returned); + return -EIO; + } + + *bytes_returned = comp_pkt.bytes_returned; + return 0; +} + +/** + * hv_pci_write_config_compl() - Invoked when a response packet for a write + * config block operation arrives. + * @context: Identifies the write config operation + * @resp: The response packet itself + * @resp_packet_size: Size in bytes of the response packet + */ +static void hv_pci_write_config_compl(void *context, struct pci_response *resp, + int resp_packet_size) +{ + struct hv_pci_compl *comp_pkt = context; + + comp_pkt->completion_status = resp->status; + complete(&comp_pkt->host_event); +} + +/** + * hv_write_config_block() - Sends a write config block request to the + * back-end driver running in the Hyper-V parent partition. + * @pdev: The PCI driver's representation for this device. + * @buf: Buffer from which the config block will be copied. + * @len: Size in bytes of buf. + * @block_id: Identifies the config block which is being written. 
+ * + * Return: 0 on success, -errno on failure + */ +int hv_write_config_block(struct pci_dev *pdev, void *buf, unsigned int len, + unsigned int block_id) +{ + struct hv_pcibus_device *hbus = + container_of(pdev->bus->sysdata, struct hv_pcibus_device, + sysdata); + struct { + struct pci_packet pkt; + char buf[sizeof(struct pci_write_block)]; + u32 reserved; + } pkt; + struct hv_pci_compl comp_pkt; + struct pci_write_block *write_blk; + u32 pkt_size; + int ret; + + if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX) + return -EINVAL; + + init_completion(&comp_pkt.host_event); + + memset(&pkt, 0, sizeof(pkt)); + pkt.pkt.completion_func = hv_pci_write_config_compl; + pkt.pkt.compl_ctxt = &comp_pkt; + write_blk = (struct pci_write_block *)&pkt.pkt.message; + write_blk->message_type.type = PCI_WRITE_BLOCK; + write_blk->wslot.slot = devfn_to_wslot(pdev->devfn); + write_blk->block_id = block_id; + write_blk->byte_count = len; + memcpy(write_blk->bytes, buf, len); + pkt_size = offsetof(struct pci_write_block, bytes) + len; + /* + * This quirk is required on some hosts shipped around 2018, because + * these hosts don't check the pkt_size correctly (new hosts have been + * fixed since early 2019). The quirk is also safe on very old hosts + * and new hosts, because, on them, what really matters is the length + * specified in write_blk->byte_count. + */ + pkt_size += sizeof(pkt.reserved); + + ret = vmbus_sendpacket(hbus->hdev->channel, write_blk, pkt_size, + (unsigned long)&pkt.pkt, VM_PKT_DATA_INBAND, + VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); + if (ret) + return ret; + + ret = wait_for_response(hbus->hdev, &comp_pkt.host_event); + if (ret) + return ret; + + if (comp_pkt.completion_status != 0) { + dev_err(&hbus->hdev->device, + "Write Config Block failed: 0x%x\n", + comp_pkt.completion_status); + return -EIO; + } + + return 0; +} + +/** + * hv_register_block_invalidate() - Invoked when a config block invalidation + * arrives from the back-end driver. + * @pdev: The PCI driver's representation for this device. + * @context: Identifies the device. + * @block_invalidate: Identifies all of the blocks being invalidated. 
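Both config-block paths above share one shape: a stack-allocated completion context, vmbus_sendpacket() with the context as transaction cookie, then a bounded wait until the channel callback completes it. A generic sketch of that shape with hypothetical demo_* names, not the driver's actual API:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

struct demo_req {
        struct completion done;
        int status;
};

/* Called from the channel/interrupt callback when the response arrives. */
static void demo_complete(struct demo_req *req, int status)
{
        req->status = status;
        complete(&req->done);
}

/* Issue side: send, then block until demo_complete() runs or we time out. */
static int demo_transact(void)
{
        struct demo_req req;

        init_completion(&req.done);
        /* ... send the request, passing &req as the transaction cookie ... */
        if (!wait_for_completion_timeout(&req.done, 10 * HZ))
                return -ETIMEDOUT;

        return req.status ? -EIO : 0;
}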
+ * + * Return: 0 on success, -errno on failure + */ +int hv_register_block_invalidate(struct pci_dev *pdev, void *context, + void (*block_invalidate)(void *context, + u64 block_mask)) +{ + struct hv_pcibus_device *hbus = + container_of(pdev->bus->sysdata, struct hv_pcibus_device, + sysdata); + struct hv_pci_dev *hpdev; + + hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn)); + if (!hpdev) + return -ENODEV; + + hpdev->block_invalidate = block_invalidate; + hpdev->invalidate_context = context; + + put_pcichild(hpdev); + return 0; + +} + /* Interrupt management hooks */ static void hv_int_desc_free(struct hv_pci_dev *hpdev, struct tran_int_desc *int_desc) @@ -1968,6 +2251,7 @@ static void hv_pci_onchannelcallback(void *context) struct pci_response *response; struct pci_incoming_message *new_message; struct pci_bus_relations *bus_rel; + struct pci_dev_inval_block *inval; struct pci_dev_incoming *dev_message; struct hv_pci_dev *hpdev; @@ -2045,6 +2329,21 @@ static void hv_pci_onchannelcallback(void *context) } break; + case PCI_INVALIDATE_BLOCK: + + inval = (struct pci_dev_inval_block *)buffer; + hpdev = get_pcichild_wslot(hbus, + inval->wslot.slot); + if (hpdev) { + if (hpdev->block_invalidate) { + hpdev->block_invalidate( + hpdev->invalidate_context, + inval->block_mask); + } + put_pcichild(hpdev); + } + break; + default: dev_warn(&hbus->hdev->device, "Unimplemented protocol message %x\n", @@ -2751,10 +3050,19 @@ static struct hv_driver hv_pci_drv = { static void __exit exit_hv_pci_drv(void) { vmbus_driver_unregister(&hv_pci_drv); + + hvpci_block_ops.read_block = NULL; + hvpci_block_ops.write_block = NULL; + hvpci_block_ops.reg_blk_invalidate = NULL; } static int __init init_hv_pci_drv(void) { + /* Initialize PCI block r/w interface */ + hvpci_block_ops.read_block = hv_read_config_block; + hvpci_block_ops.write_block = hv_write_config_block; + hvpci_block_ops.reg_blk_invalidate = hv_register_block_invalidate; + return vmbus_driver_register(&hv_pci_drv); } diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c index 18ffe449efdf..9c18476d8d10 100644 --- a/drivers/ptp/ptp_chardev.c +++ b/drivers/ptp/ptp_chardev.c @@ -126,7 +126,9 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) switch (cmd) { case PTP_CLOCK_GETCAPS: + case PTP_CLOCK_GETCAPS2: memset(&caps, 0, sizeof(caps)); + caps.max_adj = ptp->info->max_adj; caps.n_alarm = ptp->info->n_alarm; caps.n_ext_ts = ptp->info->n_ext_ts; @@ -139,11 +141,24 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) break; case PTP_EXTTS_REQUEST: + case PTP_EXTTS_REQUEST2: + memset(&req, 0, sizeof(req)); + if (copy_from_user(&req.extts, (void __user *)arg, sizeof(req.extts))) { err = -EFAULT; break; } + if (((req.extts.flags & ~PTP_EXTTS_VALID_FLAGS) || + req.extts.rsv[0] || req.extts.rsv[1]) && + cmd == PTP_EXTTS_REQUEST2) { + err = -EINVAL; + break; + } else if (cmd == PTP_EXTTS_REQUEST) { + req.extts.flags &= ~PTP_EXTTS_VALID_FLAGS; + req.extts.rsv[0] = 0; + req.extts.rsv[1] = 0; + } if (req.extts.index >= ops->n_ext_ts) { err = -EINVAL; break; @@ -154,11 +169,27 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) break; case PTP_PEROUT_REQUEST: + case PTP_PEROUT_REQUEST2: + memset(&req, 0, sizeof(req)); + if (copy_from_user(&req.perout, (void __user *)arg, sizeof(req.perout))) { err = -EFAULT; break; } + if (((req.perout.flags & ~PTP_PEROUT_VALID_FLAGS) || + req.perout.rsv[0] || req.perout.rsv[1] || + req.perout.rsv[2] || req.perout.rsv[3]) && + cmd 
== PTP_PEROUT_REQUEST2) { + err = -EINVAL; + break; + } else if (cmd == PTP_PEROUT_REQUEST) { + req.perout.flags &= ~PTP_PEROUT_VALID_FLAGS; + req.perout.rsv[0] = 0; + req.perout.rsv[1] = 0; + req.perout.rsv[2] = 0; + req.perout.rsv[3] = 0; + } if (req.perout.index >= ops->n_per_out) { err = -EINVAL; break; @@ -169,6 +200,9 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) break; case PTP_ENABLE_PPS: + case PTP_ENABLE_PPS2: + memset(&req, 0, sizeof(req)); + if (!capable(CAP_SYS_TIME)) return -EPERM; req.type = PTP_CLK_REQ_PPS; @@ -177,6 +211,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) break; case PTP_SYS_OFFSET_PRECISE: + case PTP_SYS_OFFSET_PRECISE2: if (!ptp->info->getcrosststamp) { err = -EOPNOTSUPP; break; @@ -201,6 +236,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) break; case PTP_SYS_OFFSET_EXTENDED: + case PTP_SYS_OFFSET_EXTENDED2: if (!ptp->info->gettimex64) { err = -EOPNOTSUPP; break; @@ -232,6 +268,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) break; case PTP_SYS_OFFSET: + case PTP_SYS_OFFSET2: sysoff = memdup_user((void __user *)arg, sizeof(*sysoff)); if (IS_ERR(sysoff)) { err = PTR_ERR(sysoff); @@ -266,10 +303,23 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) break; case PTP_PIN_GETFUNC: + case PTP_PIN_GETFUNC2: if (copy_from_user(&pd, (void __user *)arg, sizeof(pd))) { err = -EFAULT; break; } + if ((pd.rsv[0] || pd.rsv[1] || pd.rsv[2] + || pd.rsv[3] || pd.rsv[4]) + && cmd == PTP_PIN_GETFUNC2) { + err = -EINVAL; + break; + } else if (cmd == PTP_PIN_GETFUNC) { + pd.rsv[0] = 0; + pd.rsv[1] = 0; + pd.rsv[2] = 0; + pd.rsv[3] = 0; + pd.rsv[4] = 0; + } pin_index = pd.index; if (pin_index >= ops->n_pins) { err = -EINVAL; @@ -285,10 +335,23 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) break; case PTP_PIN_SETFUNC: + case PTP_PIN_SETFUNC2: if (copy_from_user(&pd, (void __user *)arg, sizeof(pd))) { err = -EFAULT; break; } + if ((pd.rsv[0] || pd.rsv[1] || pd.rsv[2] + || pd.rsv[3] || pd.rsv[4]) + && cmd == PTP_PIN_SETFUNC2) { + err = -EINVAL; + break; + } else if (cmd == PTP_PIN_SETFUNC) { + pd.rsv[0] = 0; + pd.rsv[1] = 0; + pd.rsv[2] = 0; + pd.rsv[3] = 0; + pd.rsv[4] = 0; + } pin_index = pd.index; if (pin_index >= ops->n_pins) { err = -EINVAL; diff --git a/drivers/ptp/ptp_dte.c b/drivers/ptp/ptp_dte.c index 5b6393e3ea27..0dcfdc806f57 100644 --- a/drivers/ptp/ptp_dte.c +++ b/drivers/ptp/ptp_dte.c @@ -248,11 +248,8 @@ static int ptp_dte_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ptp_dte->regs = devm_ioremap_resource(dev, res); - if (IS_ERR(ptp_dte->regs)) { - dev_err(dev, - "%s: io remap failed\n", __func__); + if (IS_ERR(ptp_dte->regs)) return PTR_ERR(ptp_dte->regs); - } spin_lock_init(&ptp_dte->lock); diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index a06944399865..a58b45df95d7 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h @@ -206,8 +206,6 @@ struct qdio_output_q { struct qdio_outbuf_state *sbal_state; /* timer to check for more outbound work */ struct timer_list timer; - /* used SBALs before tasklet schedule */ - int scan_threshold; }; /* @@ -295,6 +293,7 @@ struct qdio_irq { struct qdio_ssqd_desc ssqd_desc; void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *); + unsigned int scan_threshold; /* used SBALs before tasklet schedule */ int perf_stat_enabled; struct qdr *qdr; diff --git 
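The pattern in the PTP hunks above: the new *2 ioctls reject requests carrying unknown flags or non-zero reserved fields, while the original ioctls scrub those fields so old userspace keeps working unchanged. A small userspace sketch issuing a v2 request, assuming a clock at /dev/ptp0 with external-timestamp support:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
        struct ptp_extts_request req;
        int fd = open("/dev/ptp0", O_RDWR);

        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* v2 ioctls demand zeroed reserved fields and known flags only */
        memset(&req, 0, sizeof(req));
        req.index = 0;
        req.flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE;

        if (ioctl(fd, PTP_EXTTS_REQUEST2, &req))
                perror("PTP_EXTTS_REQUEST2");

        return 0;
}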
a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 4142c85e77d8..5b63c505a2f7 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -647,8 +647,6 @@ static void qdio_kick_handler(struct qdio_q *q, unsigned int count) qperf_inc(q, outbound_handler); DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x", start, count); - if (q->u.out.use_cq) - qdio_handle_aobs(q, start, count); } q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count, @@ -774,8 +772,11 @@ static inline int qdio_outbound_q_moved(struct qdio_q *q, unsigned int start) count = get_outbound_buffer_frontier(q, start); - if (count) + if (count) { DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr); + if (q->u.out.use_cq) + qdio_handle_aobs(q, start, count); + } return count; } @@ -879,7 +880,7 @@ static inline void qdio_check_outbound_pci_queues(struct qdio_irq *irq) struct qdio_q *out; int i; - if (!pci_out_supported(irq)) + if (!pci_out_supported(irq) || !irq->scan_threshold) return; for_each_output_queue(irq, out, i) @@ -972,7 +973,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr) } } - if (!pci_out_supported(irq_ptr)) + if (!pci_out_supported(irq_ptr) || !irq_ptr->scan_threshold) return; for_each_output_queue(irq_ptr, q, i) { @@ -1527,6 +1528,7 @@ set: static int handle_outbound(struct qdio_q *q, unsigned int callflags, int bufnr, int count) { + const unsigned int scan_threshold = q->irq_ptr->scan_threshold; unsigned char state = 0; int used, rc = 0; @@ -1565,8 +1567,12 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags, rc = qdio_kick_outbound_q(q, 0); } + /* Let drivers implement their own completion scanning: */ + if (!scan_threshold) + return rc; + /* in case of SIGA errors we must process the error immediately */ - if (used >= q->u.out.scan_threshold || rc) + if (used >= scan_threshold || rc) qdio_tasklet_schedule(q); else /* free the SBALs in case of no further traffic */ @@ -1655,6 +1661,44 @@ rescan: } EXPORT_SYMBOL(qdio_start_irq); +static int __qdio_inspect_queue(struct qdio_q *q, unsigned int *bufnr, + unsigned int *error) +{ + unsigned int start = q->first_to_check; + int count; + + count = q->is_input_q ? qdio_inbound_q_moved(q, start) : + qdio_outbound_q_moved(q, start); + if (count == 0) + return 0; + + *bufnr = start; + *error = q->qdio_error; + + /* for the next time */ + q->first_to_check = add_buf(start, count); + q->qdio_error = 0; + + return count; +} + +int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr, bool is_input, + unsigned int *bufnr, unsigned int *error) +{ + struct qdio_irq *irq_ptr = cdev->private->qdio_data; + struct qdio_q *q; + + if (!irq_ptr) + return -ENODEV; + q = is_input ? 
irq_ptr->input_qs[nr] : irq_ptr->output_qs[nr]; + + if (need_siga_sync(q)) + qdio_siga_sync_q(q); + + return __qdio_inspect_queue(q, bufnr, error); +} +EXPORT_SYMBOL_GPL(qdio_inspect_queue); + /** * qdio_get_next_buffers - process input buffers * @cdev: associated ccw_device for the qdio subchannel @@ -1672,13 +1716,10 @@ int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr, { struct qdio_q *q; struct qdio_irq *irq_ptr = cdev->private->qdio_data; - unsigned int start; - int count; if (!irq_ptr) return -ENODEV; q = irq_ptr->input_qs[nr]; - start = q->first_to_check; /* * Cannot rely on automatic sync after interrupt since queues may @@ -1689,25 +1730,11 @@ int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr, qdio_check_outbound_pci_queues(irq_ptr); - count = qdio_inbound_q_moved(q, start); - if (count == 0) - return 0; - - start = add_buf(start, count); - q->first_to_check = start; - /* Note: upper-layer MUST stop processing immediately here ... */ if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) return -EIO; - *bufnr = q->first_to_kick; - *error = q->qdio_error; - - /* for the next time */ - q->first_to_kick = add_buf(q->first_to_kick, count); - q->qdio_error = 0; - - return count; + return __qdio_inspect_queue(q, bufnr, error); } EXPORT_SYMBOL(qdio_get_next_buffers); diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index d4101cecdc8d..f4ca1d29d61b 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c @@ -248,7 +248,6 @@ static void setup_queues(struct qdio_irq *irq_ptr, output_sbal_state_array += QDIO_MAX_BUFFERS_PER_Q; q->is_input_q = 0; - q->u.out.scan_threshold = qdio_init->scan_threshold; setup_storage_lists(q, irq_ptr, output_sbal_array, i); output_sbal_array += QDIO_MAX_BUFFERS_PER_Q; @@ -474,6 +473,7 @@ int qdio_setup_irq(struct qdio_initialize *init_data) irq_ptr->nr_input_qs = init_data->no_input_qs; irq_ptr->nr_output_qs = init_data->no_output_qs; irq_ptr->cdev = init_data->cdev; + irq_ptr->scan_threshold = init_data->scan_threshold; ccw_device_get_schid(irq_ptr->cdev, &irq_ptr->schid); setup_queues(irq_ptr, init_data); diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c index 4a8a5373cb35..3ce99e4db44d 100644 --- a/drivers/s390/net/ctcm_fsms.c +++ b/drivers/s390/net/ctcm_fsms.c @@ -307,8 +307,7 @@ static void chx_txdone(fsm_instance *fi, int event, void *arg) ch->ccw[1].count = ch->trans_skb->len; fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); ch->prof.send_stamp = jiffies; - rc = ccw_device_start(ch->cdev, &ch->ccw[0], - (unsigned long)ch, 0xff, 0); + rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0); ch->prof.doios_multi++; if (rc != 0) { priv->stats.tx_dropped += i; @@ -417,8 +416,7 @@ static void chx_rx(fsm_instance *fi, int event, void *arg) if (ctcm_checkalloc_buffer(ch)) return; ch->ccw[1].count = ch->max_bufsize; - rc = ccw_device_start(ch->cdev, &ch->ccw[0], - (unsigned long)ch, 0xff, 0); + rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0); if (rc != 0) ctcm_ccw_check_rc(ch, rc, "normal RX"); } @@ -478,8 +476,7 @@ static void chx_firstio(fsm_instance *fi, int event, void *arg) fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? 
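qdio_inspect_queue(), added above, factors the "how far did the queue move" logic out of qdio_get_next_buffers() so that drivers running their own completion polling (qeth, further below) can call it directly. A hedged caller sketch with a hypothetical demo_poll_txq():

#include <asm/ccwdev.h>
#include <asm/qdio.h>

/* Hypothetical poll step: reap one batch of completed output buffers. */
static int demo_poll_txq(struct ccw_device *cdev, unsigned int queue_nr)
{
        unsigned int bufnr = 0, error = 0;
        int completed;

        completed = qdio_inspect_queue(cdev, queue_nr, false, &bufnr, &error);
        if (completed <= 0)
                return completed;       /* 0: nothing new, <0: -ENODEV */

        /*
         * Buffers [bufnr, bufnr + completed) are finished; error holds
         * the accumulated qdio_error for the batch.
         */
        return completed;
}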
CTC_STATE_RXINIT : CTC_STATE_TXINIT); - rc = ccw_device_start(ch->cdev, &ch->ccw[0], - (unsigned long)ch, 0xff, 0); + rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0); if (rc != 0) { fsm_deltimer(&ch->timer); fsm_newstate(fi, CTC_STATE_SETUPWAIT); @@ -527,8 +524,7 @@ static void chx_rxidle(fsm_instance *fi, int event, void *arg) return; ch->ccw[1].count = ch->max_bufsize; fsm_newstate(fi, CTC_STATE_RXIDLE); - rc = ccw_device_start(ch->cdev, &ch->ccw[0], - (unsigned long)ch, 0xff, 0); + rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0); if (rc != 0) { fsm_newstate(fi, CTC_STATE_RXINIT); ctcm_ccw_check_rc(ch, rc, "initial RX"); @@ -571,8 +567,7 @@ static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg) /* Such conditional locking is undeterministic in * static view. => ignore sparse warnings here. */ - rc = ccw_device_start(ch->cdev, &ch->ccw[6], - (unsigned long)ch, 0xff, 0); + rc = ccw_device_start(ch->cdev, &ch->ccw[6], 0, 0xff, 0); if (event == CTC_EVENT_TIMER) /* see above comments */ spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); if (rc != 0) { @@ -637,7 +632,7 @@ static void ctcm_chx_start(fsm_instance *fi, int event, void *arg) fsm_newstate(fi, CTC_STATE_STARTWAIT); fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch); spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); - rc = ccw_device_halt(ch->cdev, (unsigned long)ch); + rc = ccw_device_halt(ch->cdev, 0); spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); if (rc != 0) { if (rc != -EBUSY) @@ -672,7 +667,7 @@ static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg) * static view. => ignore sparse warnings here. */ oldstate = fsm_getstate(fi); fsm_newstate(fi, CTC_STATE_TERM); - rc = ccw_device_halt(ch->cdev, (unsigned long)ch); + rc = ccw_device_halt(ch->cdev, 0); if (event == CTC_EVENT_STOP) spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); @@ -799,7 +794,7 @@ static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg) fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); if (!IS_MPC(ch) && (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)) { - int rc = ccw_device_halt(ch->cdev, (unsigned long)ch); + int rc = ccw_device_halt(ch->cdev, 0); if (rc != 0) ctcm_ccw_check_rc(ch, rc, "HaltIO in chx_setuperr"); @@ -851,7 +846,7 @@ static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg) /* Such conditional locking is a known problem for * sparse because its undeterministic in static view. * Warnings should be ignored here. 
*/ - rc = ccw_device_halt(ch->cdev, (unsigned long)ch); + rc = ccw_device_halt(ch->cdev, 0); if (event == CTC_EVENT_TIMER) spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); if (rc != 0) { @@ -947,8 +942,8 @@ static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg) ch2 = priv->channel[CTCM_WRITE]; fsm_newstate(ch2->fsm, CTC_STATE_DTERM); - ccw_device_halt(ch->cdev, (unsigned long)ch); - ccw_device_halt(ch2->cdev, (unsigned long)ch2); + ccw_device_halt(ch->cdev, 0); + ccw_device_halt(ch2->cdev, 0); } /** @@ -1041,8 +1036,7 @@ static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg) ctcmpc_dumpit((char *)&ch->ccw[3], sizeof(struct ccw1) * 3); - rc = ccw_device_start(ch->cdev, &ch->ccw[3], - (unsigned long)ch, 0xff, 0); + rc = ccw_device_start(ch->cdev, &ch->ccw[3], 0, 0xff, 0); if (event == CTC_EVENT_TIMER) spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); @@ -1361,8 +1355,7 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg) ch->prof.send_stamp = jiffies; if (do_debug_ccw) ctcmpc_dumpit((char *)&ch->ccw[0], sizeof(struct ccw1) * 3); - rc = ccw_device_start(ch->cdev, &ch->ccw[0], - (unsigned long)ch, 0xff, 0); + rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0); ch->prof.doios_multi++; if (rc != 0) { priv->stats.tx_dropped += i; @@ -1462,8 +1455,7 @@ again: if (dolock) spin_lock_irqsave( get_ccwdev_lock(ch->cdev), saveflags); - rc = ccw_device_start(ch->cdev, &ch->ccw[0], - (unsigned long)ch, 0xff, 0); + rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0); if (dolock) /* see remark about conditional locking */ spin_unlock_irqrestore( get_ccwdev_lock(ch->cdev), saveflags); @@ -1569,8 +1561,7 @@ void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg) if (event == CTC_EVENT_START) /* see remark about conditional locking */ spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); - rc = ccw_device_start(ch->cdev, &ch->ccw[0], - (unsigned long)ch, 0xff, 0); + rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0); if (event == CTC_EVENT_START) spin_unlock_irqrestore( get_ccwdev_lock(ch->cdev), saveflags); @@ -1825,8 +1816,7 @@ static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg) spin_lock_irqsave(get_ccwdev_lock(wch->cdev), saveflags); wch->prof.send_stamp = jiffies; - rc = ccw_device_start(wch->cdev, &wch->ccw[3], - (unsigned long) wch, 0xff, 0); + rc = ccw_device_start(wch->cdev, &wch->ccw[3], 0, 0xff, 0); spin_unlock_irqrestore(get_ccwdev_lock(wch->cdev), saveflags); if ((grp->sweep_req_pend_num == 0) && diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c index f63c5c871d3d..437a6d822105 100644 --- a/drivers/s390/net/ctcm_main.c +++ b/drivers/s390/net/ctcm_main.c @@ -569,8 +569,7 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb) fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); ch->prof.send_stamp = jiffies; - rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx], - (unsigned long)ch, 0xff, 0); + rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx], 0, 0xff, 0); spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); if (ccw_idx == 3) ch->prof.doios_single++; @@ -833,8 +832,7 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb) spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); ch->prof.send_stamp = jiffies; - rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx], - (unsigned long)ch, 0xff, 0); + rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx], 0, 
0xff, 0); spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); if (ccw_idx == 3) ch->prof.doios_single++; @@ -1074,10 +1072,8 @@ static void ctcm_free_netdevice(struct net_device *dev) if (grp) { if (grp->fsm) kfree_fsm(grp->fsm); - if (grp->xid_skb) - dev_kfree_skb(grp->xid_skb); - if (grp->rcvd_xid_skb) - dev_kfree_skb(grp->rcvd_xid_skb); + dev_kfree_skb(grp->xid_skb); + dev_kfree_skb(grp->rcvd_xid_skb); tasklet_kill(&grp->mpc_tasklet2); kfree(grp); priv->mpcg = NULL; diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c index 1534420a0243..ab316baa8284 100644 --- a/drivers/s390/net/ctcm_mpc.c +++ b/drivers/s390/net/ctcm_mpc.c @@ -1523,8 +1523,7 @@ void mpc_action_send_discontact(unsigned long thischan) unsigned long saveflags = 0; spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); - rc = ccw_device_start(ch->cdev, &ch->ccw[15], - (unsigned long)ch, 0xff, 0); + rc = ccw_device_start(ch->cdev, &ch->ccw[15], 0, 0xff, 0); spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); if (rc != 0) { @@ -1797,8 +1796,7 @@ static void mpc_action_side_xid(fsm_instance *fsm, void *arg, int side) } fsm_addtimer(&ch->timer, 5000 , CTC_EVENT_TIMER, ch); - rc = ccw_device_start(ch->cdev, &ch->ccw[8], - (unsigned long)ch, 0xff, 0); + rc = ccw_device_start(ch->cdev, &ch->ccw[8], 0, 0xff, 0); if (gotlock) /* see remark above about conditional locking */ spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index 2d9fe7e4ee40..8f08b0a2917c 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c @@ -504,7 +504,7 @@ lcs_clear_channel(struct lcs_channel *channel) LCS_DBF_TEXT(4,trace,"clearch"); LCS_DBF_TEXT_(4, trace, "%s", dev_name(&channel->ccwdev->dev)); spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); - rc = ccw_device_clear(channel->ccwdev, (addr_t) channel); + rc = ccw_device_clear(channel->ccwdev, 0); spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); if (rc) { LCS_DBF_TEXT_(4, trace, "ecsc%s", @@ -532,7 +532,7 @@ lcs_stop_channel(struct lcs_channel *channel) LCS_DBF_TEXT_(4, trace, "%s", dev_name(&channel->ccwdev->dev)); channel->state = LCS_CH_STATE_INIT; spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); - rc = ccw_device_halt(channel->ccwdev, (addr_t) channel); + rc = ccw_device_halt(channel->ccwdev, 0); spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); if (rc) { LCS_DBF_TEXT_(4, trace, "ehsc%s", @@ -1427,7 +1427,7 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) channel->state = LCS_CH_STATE_SUSPENDED; if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) { if (irb->scsw.cmd.cc != 0) { - ccw_device_halt(channel->ccwdev, (addr_t) channel); + ccw_device_halt(channel->ccwdev, 0); return; } /* The channel has been stopped by halt_IO. 
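The ctcm/lcs hunks above stop passing the channel pointer as intparm: that value is only echoed back to the driver's interrupt handler, and these handlers do not read it. A minimal sketch of the call as now written, with a hypothetical demo_start_io(); the caller is assumed to hold the ccw device lock:

#include <asm/ccwdev.h>
#include <asm/cio.h>

/* Hypothetical: kick off a prebuilt channel program on cdev. */
static int demo_start_io(struct ccw_device *cdev, struct ccw1 *cp)
{
        /*
         * intparm (3rd argument) is merely echoed back to the driver's
         * interrupt handler; 0 is fine when the handler ignores it.
         */
        return ccw_device_start(cdev, cp, 0, 0xff, 0);
}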
*/ diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 28db887d38ed..e4b55f9aa062 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -22,6 +22,7 @@ #include <linux/hashtable.h> #include <linux/ip.h> #include <linux/refcount.h> +#include <linux/timer.h> #include <linux/wait.h> #include <linux/workqueue.h> @@ -30,6 +31,7 @@ #include <net/ipv6.h> #include <net/if_inet6.h> #include <net/addrconf.h> +#include <net/sch_generic.h> #include <net/tcp.h> #include <asm/debug.h> @@ -376,6 +378,28 @@ enum qeth_header_ids { #define QETH_HDR_EXT_CSUM_TRANSP_REQ 0x20 #define QETH_HDR_EXT_UDP 0x40 /*bit off for TCP*/ +static inline bool qeth_l2_same_vlan(struct qeth_hdr_layer2 *h1, + struct qeth_hdr_layer2 *h2) +{ + return !((h1->flags[2] ^ h2->flags[2]) & QETH_LAYER2_FLAG_VLAN) && + h1->vlan_id == h2->vlan_id; +} + +static inline bool qeth_l3_iqd_same_vlan(struct qeth_hdr_layer3 *h1, + struct qeth_hdr_layer3 *h2) +{ + return !((h1->ext_flags ^ h2->ext_flags) & QETH_HDR_EXT_VLAN_FRAME) && + h1->vlan_id == h2->vlan_id; +} + +static inline bool qeth_l3_same_next_hop(struct qeth_hdr_layer3 *h1, + struct qeth_hdr_layer3 *h2) +{ + return !((h1->flags ^ h2->flags) & QETH_HDR_IPV6) && + ipv6_addr_equal(&h1->next_hop.ipv6_addr, + &h2->next_hop.ipv6_addr); +} + enum qeth_qdio_info_states { QETH_QDIO_UNINITIALIZED, QETH_QDIO_ALLOCATED, @@ -424,6 +448,7 @@ struct qeth_qdio_out_buffer { struct qdio_buffer *buffer; atomic_t state; int next_element_to_fill; + unsigned int bytes; struct sk_buff_head skb_list; int is_header[QDIO_MAX_ELEMENTS_PER_BUFFER]; @@ -473,6 +498,8 @@ struct qeth_out_q_stats { u64 tso_bytes; u64 packing_mode_switch; u64 stopped; + u64 completion_yield; + u64 completion_timer; /* rtnl_link_stats64 */ u64 tx_packets; @@ -481,6 +508,8 @@ struct qeth_out_q_stats { u64 tx_dropped; }; +#define QETH_TX_TIMER_USECS 500 + struct qeth_qdio_out_q { struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q]; struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q]; @@ -499,13 +528,36 @@ struct qeth_qdio_out_q { atomic_t used_buffers; /* indicates whether PCI flag must be set (or if one is outstanding) */ atomic_t set_pci_flags_count; + struct napi_struct napi; + struct timer_list timer; + struct qeth_hdr *prev_hdr; + u8 bulk_start; }; +#define qeth_for_each_output_queue(card, q, i) \ + for (i = 0; i < card->qdio.no_out_queues && \ + (q = card->qdio.out_qs[i]); i++) + +#define qeth_napi_to_out_queue(n) container_of(n, struct qeth_qdio_out_q, napi) + +static inline void qeth_tx_arm_timer(struct qeth_qdio_out_q *queue) +{ + if (timer_pending(&queue->timer)) + return; + mod_timer(&queue->timer, usecs_to_jiffies(QETH_TX_TIMER_USECS) + + jiffies); +} + static inline bool qeth_out_queue_is_full(struct qeth_qdio_out_q *queue) { return atomic_read(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q; } +static inline bool qeth_out_queue_is_empty(struct qeth_qdio_out_q *queue) +{ + return atomic_read(&queue->used_buffers) == 0; +} + struct qeth_qdio_info { atomic_t state; /* input */ @@ -572,15 +624,26 @@ struct qeth_channel { atomic_t irq_pending; }; +struct qeth_reply { + int (*callback)(struct qeth_card *card, struct qeth_reply *reply, + unsigned long data); + void *param; +}; + struct qeth_cmd_buffer { + struct list_head list; + struct completion done; + spinlock_t lock; unsigned int length; refcount_t ref_count; struct qeth_channel *channel; - struct qeth_reply *reply; + struct qeth_reply reply; long timeout; unsigned char *data; void (*finalize)(struct qeth_card *card, 
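qeth_tx_arm_timer() in the header hunk above is the usual "arm once and let it lapse" idiom: timer_pending() keeps an already-armed deadline from being pushed further out by subsequent calls. A generic sketch with hypothetical demo_* names:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct demo_queue {
        struct timer_list timer;
};

static void demo_arm_timer(struct demo_queue *q, unsigned int usecs)
{
        /* Don't move an already-armed deadline further out */
        if (timer_pending(&q->timer))
                return;
        mod_timer(&q->timer, jiffies + usecs_to_jiffies(usecs));
}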
struct qeth_cmd_buffer *iob); - void (*callback)(struct qeth_card *card, struct qeth_cmd_buffer *iob); + void (*callback)(struct qeth_card *card, struct qeth_cmd_buffer *iob, + unsigned int data_length); + int rc; }; static inline void qeth_get_cmd(struct qeth_cmd_buffer *iob) @@ -626,19 +689,6 @@ struct qeth_seqno { __u16 ipa; }; -struct qeth_reply { - struct list_head list; - struct completion received; - spinlock_t lock; - int (*callback)(struct qeth_card *, struct qeth_reply *, - unsigned long); - u32 seqno; - unsigned long offset; - int rc; - void *param; - refcount_t refcnt; -}; - struct qeth_card_blkt { int time_total; int inter_packet; @@ -651,10 +701,11 @@ struct qeth_card_blkt { struct qeth_card_info { unsigned short unit_addr2; unsigned short cula; - unsigned short chpid; + u8 chpid; __u16 func_level; char mcl_level[QETH_MCL_LENGTH + 1]; u8 open_when_online:1; + u8 promisc_mode:1; u8 use_v1_blkt:1; u8 is_vm_nic:1; int mac_bits; @@ -664,7 +715,6 @@ struct qeth_card_info { int unique_id; bool layer_enforced; struct qeth_card_blkt blkt; - enum qeth_ipa_promisc_modes promisc_mode; __u32 diagass_support; __u32 hwtrap; }; @@ -994,6 +1044,7 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card, struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card, enum qeth_diags_cmds sub_cmd, unsigned int data_length); +void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason); void qeth_put_cmd(struct qeth_cmd_buffer *iob); struct sk_buff *qeth_core_get_next_skb(struct qeth_card *, @@ -1005,10 +1056,9 @@ void qeth_clear_ipacmd_list(struct qeth_card *); int qeth_qdio_clear_card(struct qeth_card *, int); void qeth_clear_working_pool_list(struct qeth_card *); void qeth_drain_output_queues(struct qeth_card *card); -void qeth_setadp_promisc_mode(struct qeth_card *); +void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable); int qeth_setadpparms_change_macaddr(struct qeth_card *); void qeth_tx_timeout(struct net_device *); -void qeth_notify_reply(struct qeth_reply *reply, int reason); void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, u16 cmd_length); int qeth_query_switch_attributes(struct qeth_card *card, diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 6502b148541e..a7868c8133ee 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -63,14 +63,16 @@ static struct device *qeth_core_root_dev; static struct lock_class_key qdio_out_skb_queue_key; static void qeth_issue_next_read_cb(struct qeth_card *card, - struct qeth_cmd_buffer *iob); + struct qeth_cmd_buffer *iob, + unsigned int data_length); static void qeth_free_buffer_pool(struct qeth_card *); static int qeth_qdio_establish(struct qeth_card *); static void qeth_free_qdio_queues(struct qeth_card *card); static void qeth_notify_skbs(struct qeth_qdio_out_q *queue, struct qeth_qdio_out_buffer *buf, enum iucv_tx_notify notification); -static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf); +static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error, + int budget); static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int); static void qeth_close_dev_handler(struct work_struct *work) @@ -410,7 +412,7 @@ static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx, /* release here to avoid interleaving between outbound tasklet and inbound tasklet regarding notifications and lifecycle */ - qeth_release_skbs(c); + qeth_tx_complete_buf(c, forced_cleanup, 0); c = 
f->next_pending; WARN_ON_ONCE(head->next_pending != f); @@ -536,50 +538,28 @@ static int qeth_issue_next_read(struct qeth_card *card) return ret; } -static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card) -{ - struct qeth_reply *reply; - - reply = kzalloc(sizeof(*reply), GFP_KERNEL); - if (reply) { - refcount_set(&reply->refcnt, 1); - init_completion(&reply->received); - spin_lock_init(&reply->lock); - } - return reply; -} - -static void qeth_get_reply(struct qeth_reply *reply) -{ - refcount_inc(&reply->refcnt); -} - -static void qeth_put_reply(struct qeth_reply *reply) -{ - if (refcount_dec_and_test(&reply->refcnt)) - kfree(reply); -} - -static void qeth_enqueue_reply(struct qeth_card *card, struct qeth_reply *reply) +static void qeth_enqueue_cmd(struct qeth_card *card, + struct qeth_cmd_buffer *iob) { spin_lock_irq(&card->lock); - list_add_tail(&reply->list, &card->cmd_waiter_list); + list_add_tail(&iob->list, &card->cmd_waiter_list); spin_unlock_irq(&card->lock); } -static void qeth_dequeue_reply(struct qeth_card *card, struct qeth_reply *reply) +static void qeth_dequeue_cmd(struct qeth_card *card, + struct qeth_cmd_buffer *iob) { spin_lock_irq(&card->lock); - list_del(&reply->list); + list_del(&iob->list); spin_unlock_irq(&card->lock); } -void qeth_notify_reply(struct qeth_reply *reply, int reason) +void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason) { - reply->rc = reason; - complete(&reply->received); + iob->rc = reason; + complete(&iob->done); } -EXPORT_SYMBOL_GPL(qeth_notify_reply); +EXPORT_SYMBOL_GPL(qeth_notify_cmd); static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc, struct qeth_card *card) @@ -657,14 +637,14 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, void qeth_clear_ipacmd_list(struct qeth_card *card) { - struct qeth_reply *reply; + struct qeth_cmd_buffer *iob; unsigned long flags; QETH_CARD_TEXT(card, 4, "clipalst"); spin_lock_irqsave(&card->lock, flags); - list_for_each_entry(reply, &card->cmd_waiter_list, list) - qeth_notify_reply(reply, -EIO); + list_for_each_entry(iob, &card->cmd_waiter_list, list) + qeth_notify_cmd(iob, -EIO); spin_unlock_irqrestore(&card->lock, flags); } EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list); @@ -693,8 +673,6 @@ static int qeth_check_idx_response(struct qeth_card *card, void qeth_put_cmd(struct qeth_cmd_buffer *iob) { if (refcount_dec_and_test(&iob->ref_count)) { - if (iob->reply) - qeth_put_reply(iob->reply); kfree(iob->data); kfree(iob); } @@ -702,17 +680,15 @@ void qeth_put_cmd(struct qeth_cmd_buffer *iob) EXPORT_SYMBOL_GPL(qeth_put_cmd); static void qeth_release_buffer_cb(struct qeth_card *card, - struct qeth_cmd_buffer *iob) + struct qeth_cmd_buffer *iob, + unsigned int data_length) { qeth_put_cmd(iob); } static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc) { - struct qeth_reply *reply = iob->reply; - - if (reply) - qeth_notify_reply(reply, rc); + qeth_notify_cmd(iob, rc); qeth_put_cmd(iob); } @@ -736,6 +712,9 @@ struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel, return NULL; } + init_completion(&iob->done); + spin_lock_init(&iob->lock); + INIT_LIST_HEAD(&iob->list); refcount_set(&iob->ref_count, 1); iob->channel = channel; iob->timeout = timeout; @@ -745,11 +724,13 @@ struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel, EXPORT_SYMBOL_GPL(qeth_alloc_cmd); static void qeth_issue_next_read_cb(struct qeth_card *card, - struct qeth_cmd_buffer *iob) + struct qeth_cmd_buffer *iob, + unsigned int data_length) { + struct qeth_cmd_buffer 
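The qeth rework in the hunks above folds the separate, separately-refcounted qeth_reply object into the command buffer, so a single refcount covers both the issuing context and the IRQ path. The lifetime idiom, sketched with hypothetical demo_* names:

#include <linux/refcount.h>
#include <linux/slab.h>

struct demo_cmd {
        refcount_t ref_count;
        void *data;
};

static void demo_get_cmd(struct demo_cmd *cmd)
{
        refcount_inc(&cmd->ref_count);
}

static void demo_put_cmd(struct demo_cmd *cmd)
{
        /* Whoever drops the last reference frees the whole object */
        if (refcount_dec_and_test(&cmd->ref_count)) {
                kfree(cmd->data);
                kfree(cmd);
        }
}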
*request = NULL; struct qeth_ipa_cmd *cmd = NULL; struct qeth_reply *reply = NULL; - struct qeth_reply *r; + struct qeth_cmd_buffer *tmp; unsigned long flags; int rc = 0; @@ -784,44 +765,39 @@ static void qeth_issue_next_read_cb(struct qeth_card *card, /* match against pending cmd requests */ spin_lock_irqsave(&card->lock, flags); - list_for_each_entry(r, &card->cmd_waiter_list, list) { - if ((r->seqno == QETH_IDX_COMMAND_SEQNO) || - (cmd && (r->seqno == cmd->hdr.seqno))) { - reply = r; + list_for_each_entry(tmp, &card->cmd_waiter_list, list) { + if (!IS_IPA(tmp->data) || + __ipa_cmd(tmp)->hdr.seqno == cmd->hdr.seqno) { + request = tmp; /* take the object outside the lock */ - qeth_get_reply(reply); + qeth_get_cmd(request); break; } } spin_unlock_irqrestore(&card->lock, flags); - if (!reply) + if (!request) goto out; + reply = &request->reply; if (!reply->callback) { rc = 0; goto no_callback; } - spin_lock_irqsave(&reply->lock, flags); - if (reply->rc) { + spin_lock_irqsave(&request->lock, flags); + if (request->rc) /* Bail out when the requestor has already left: */ - rc = reply->rc; - } else { - if (cmd) { - reply->offset = (u16)((char *)cmd - (char *)iob->data); - rc = reply->callback(card, reply, (unsigned long)cmd); - } else { - rc = reply->callback(card, reply, (unsigned long)iob); - } - } - spin_unlock_irqrestore(&reply->lock, flags); + rc = request->rc; + else + rc = reply->callback(card, reply, cmd ? (unsigned long)cmd : + (unsigned long)iob); + spin_unlock_irqrestore(&request->lock, flags); no_callback: if (rc <= 0) - qeth_notify_reply(reply, rc); - qeth_put_reply(reply); - + qeth_notify_cmd(request, rc); + qeth_put_cmd(request); out: memcpy(&card->seqno.pdu_hdr_ack, QETH_PDU_HEADER_SEQ_NO(iob->data), @@ -1072,8 +1048,16 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, } } - if (iob && iob->callback) - iob->callback(card, iob); + if (iob) { + /* sanity check: */ + if (irb->scsw.cmd.count > iob->length) { + qeth_cancel_cmd(iob, -EIO); + goto out; + } + if (iob->callback) + iob->callback(card, iob, + iob->length - irb->scsw.cmd.count); + } out: wake_up(&card->wait_q); @@ -1094,22 +1078,52 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *q, } } -static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf) +static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error, + int budget) { + struct qeth_qdio_out_q *queue = buf->q; struct sk_buff *skb; /* release may never happen from within CQ tasklet scope */ WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ); if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING) - qeth_notify_skbs(buf->q, buf, TX_NOTIFY_GENERALERROR); + qeth_notify_skbs(queue, buf, TX_NOTIFY_GENERALERROR); - while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) - consume_skb(skb); + /* Empty buffer? */ + if (buf->next_element_to_fill == 0) + return; + + QETH_TXQ_STAT_INC(queue, bufs); + QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill); + while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) { + unsigned int bytes = qdisc_pkt_len(skb); + bool is_tso = skb_is_gso(skb); + unsigned int packets; + + packets = is_tso ? 
skb_shinfo(skb)->gso_segs : 1; + if (error) { + QETH_TXQ_STAT_ADD(queue, tx_errors, packets); + } else { + QETH_TXQ_STAT_ADD(queue, tx_packets, packets); + QETH_TXQ_STAT_ADD(queue, tx_bytes, bytes); + if (skb->ip_summed == CHECKSUM_PARTIAL) + QETH_TXQ_STAT_ADD(queue, skbs_csum, packets); + if (skb_is_nonlinear(skb)) + QETH_TXQ_STAT_INC(queue, skbs_sg); + if (is_tso) { + QETH_TXQ_STAT_INC(queue, skbs_tso); + QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes); + } + } + + napi_consume_skb(skb, budget); + } } static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, - struct qeth_qdio_out_buffer *buf) + struct qeth_qdio_out_buffer *buf, + bool error, int budget) { int i; @@ -1117,7 +1131,7 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ) atomic_dec(&queue->set_pci_flags_count); - qeth_release_skbs(buf); + qeth_tx_complete_buf(buf, error, budget); for (i = 0; i < queue->max_elements; ++i) { if (buf->buffer->element[i].addr && buf->is_header[i]) @@ -1128,6 +1142,7 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements); buf->next_element_to_fill = 0; + buf->bytes = 0; atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY); } @@ -1139,7 +1154,7 @@ static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free) if (!q->bufs[j]) continue; qeth_cleanup_handled_pending(q, j, 1); - qeth_clear_output_buffer(q, q->bufs[j]); + qeth_clear_output_buffer(q, q->bufs[j], true, 0); if (free) { kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]); q->bufs[j] = NULL; @@ -1652,7 +1667,6 @@ static void qeth_mpc_finalize_cmd(struct qeth_card *card, memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data), &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH); - iob->reply->seqno = QETH_IDX_COMMAND_SEQNO; iob->callback = qeth_release_buffer_cb; } @@ -1703,29 +1717,19 @@ static int qeth_send_control_data(struct qeth_card *card, void *reply_param) { struct qeth_channel *channel = iob->channel; + struct qeth_reply *reply = &iob->reply; long timeout = iob->timeout; int rc; - struct qeth_reply *reply = NULL; QETH_CARD_TEXT(card, 2, "sendctl"); - reply = qeth_alloc_reply(card); - if (!reply) { - qeth_put_cmd(iob); - return -ENOMEM; - } reply->callback = reply_cb; reply->param = reply_param; - /* pairs with qeth_put_cmd(): */ - qeth_get_reply(reply); - iob->reply = reply; - timeout = wait_event_interruptible_timeout(card->wait_q, qeth_trylock_channel(channel), timeout); if (timeout <= 0) { - qeth_put_reply(reply); qeth_put_cmd(iob); return (timeout == -ERESTARTSYS) ? 
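qeth_tx_complete_buf() above frees completed TX skbs with napi_consume_skb(skb, budget): with a real NAPI budget the skbs take the bulk-free path, while budget 0 (teardown and other non-NAPI callers) falls back to dev_consume_skb_any() semantics. A minimal sketch:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/*
 * Hypothetical TX completion: 'budget' is the NAPI poll budget, or 0
 * when called outside NAPI context (e.g. during queue teardown).
 */
static void demo_tx_complete(struct sk_buff_head *done, int budget)
{
        struct sk_buff *skb;

        while ((skb = __skb_dequeue(done)) != NULL)
                napi_consume_skb(skb, budget);
}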
-EINTR : -ETIME; } @@ -1734,7 +1738,10 @@ static int qeth_send_control_data(struct qeth_card *card, iob->finalize(card, iob); QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN)); - qeth_enqueue_reply(card, reply); + qeth_enqueue_cmd(card, iob); + + /* This pairs with iob->callback, and keeps the iob alive after IO: */ + qeth_get_cmd(iob); QETH_CARD_TEXT(card, 6, "noirqpnd"); spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); @@ -1745,51 +1752,74 @@ static int qeth_send_control_data(struct qeth_card *card, QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n", CARD_DEVID(card), rc); QETH_CARD_TEXT_(card, 2, " err%d", rc); - qeth_dequeue_reply(card, reply); - qeth_put_reply(reply); + qeth_dequeue_cmd(card, iob); qeth_put_cmd(iob); atomic_set(&channel->irq_pending, 0); wake_up(&card->wait_q); - return rc; + goto out; } - timeout = wait_for_completion_interruptible_timeout(&reply->received, + timeout = wait_for_completion_interruptible_timeout(&iob->done, timeout); if (timeout <= 0) rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME; - qeth_dequeue_reply(card, reply); + qeth_dequeue_cmd(card, iob); if (reply_cb) { /* Wait until the callback for a late reply has completed: */ - spin_lock_irq(&reply->lock); + spin_lock_irq(&iob->lock); if (rc) /* Zap any callback that's still pending: */ - reply->rc = rc; - spin_unlock_irq(&reply->lock); + iob->rc = rc; + spin_unlock_irq(&iob->lock); } if (!rc) - rc = reply->rc; - qeth_put_reply(reply); + rc = iob->rc; + +out: + qeth_put_cmd(iob); return rc; } +struct qeth_node_desc { + struct node_descriptor nd1; + struct node_descriptor nd2; + struct node_descriptor nd3; +}; + static void qeth_read_conf_data_cb(struct qeth_card *card, - struct qeth_cmd_buffer *iob) + struct qeth_cmd_buffer *iob, + unsigned int data_length) { - unsigned char *prcd = iob->data; + struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data; + int rc = 0; + u8 *tag; QETH_CARD_TEXT(card, 2, "cfgunit"); - card->info.chpid = prcd[30]; - card->info.unit_addr2 = prcd[31]; - card->info.cula = prcd[63]; - card->info.is_vm_nic = ((prcd[0x10] == _ascebc['V']) && - (prcd[0x11] == _ascebc['M'])); - card->info.use_v1_blkt = prcd[74] == 0xF0 && prcd[75] == 0xF0 && - prcd[76] >= 0xF1 && prcd[76] <= 0xF4; - - qeth_notify_reply(iob->reply, 0); + + if (data_length < sizeof(*nd)) { + rc = -EINVAL; + goto out; + } + + card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] && + nd->nd1.plant[1] == _ascebc['M']; + tag = (u8 *)&nd->nd1.tag; + card->info.chpid = tag[0]; + card->info.unit_addr2 = tag[1]; + + tag = (u8 *)&nd->nd2.tag; + card->info.cula = tag[1]; + + card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 && + nd->nd3.model[1] == 0xF0 && + nd->nd3.model[2] >= 0xF1 && + nd->nd3.model[2] <= 0xF4; + +out: + qeth_notify_cmd(iob, rc); qeth_put_cmd(iob); } @@ -1803,6 +1833,8 @@ static int qeth_read_conf_data(struct qeth_card *card) ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD); if (!ciw || ciw->cmd == 0) return -EOPNOTSUPP; + if (ciw->count < sizeof(struct qeth_node_desc)) + return -EINVAL; iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT); if (!iob) @@ -1850,7 +1882,8 @@ static int qeth_idx_check_activate_response(struct qeth_card *card, } static void qeth_idx_activate_read_channel_cb(struct qeth_card *card, - struct qeth_cmd_buffer *iob) + struct qeth_cmd_buffer *iob, + unsigned int data_length) { struct qeth_channel *channel = iob->channel; u16 peer_level; @@ -1878,12 +1911,13 @@ static void 
qeth_idx_activate_read_channel_cb(struct qeth_card *card, QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH); out: - qeth_notify_reply(iob->reply, rc); + qeth_notify_cmd(iob, rc); qeth_put_cmd(iob); } static void qeth_idx_activate_write_channel_cb(struct qeth_card *card, - struct qeth_cmd_buffer *iob) + struct qeth_cmd_buffer *iob, + unsigned int data_length) { struct qeth_channel *channel = iob->channel; u16 peer_level; @@ -1905,7 +1939,7 @@ static void qeth_idx_activate_write_channel_cb(struct qeth_card *card, } out: - qeth_notify_reply(iob->reply, rc); + qeth_notify_cmd(iob, rc); qeth_put_cmd(iob); } @@ -2253,6 +2287,14 @@ static struct qeth_qdio_out_q *qeth_alloc_output_queue(void) return q; } +static void qeth_tx_completion_timer(struct timer_list *timer) +{ + struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer); + + napi_schedule(&queue->napi); + QETH_TXQ_STAT_INC(queue, completion_timer); +} + static int qeth_alloc_qdio_queues(struct qeth_card *card) { int i, j; @@ -2274,17 +2316,22 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card) /* outbound */ for (i = 0; i < card->qdio.no_out_queues; ++i) { - card->qdio.out_qs[i] = qeth_alloc_output_queue(); - if (!card->qdio.out_qs[i]) + struct qeth_qdio_out_q *queue; + + queue = qeth_alloc_output_queue(); + if (!queue) goto out_freeoutq; QETH_CARD_TEXT_(card, 2, "outq %i", i); - QETH_CARD_HEX(card, 2, &card->qdio.out_qs[i], sizeof(void *)); - card->qdio.out_qs[i]->card = card; - card->qdio.out_qs[i]->queue_no = i; + QETH_CARD_HEX(card, 2, &queue, sizeof(void *)); + card->qdio.out_qs[i] = queue; + queue->card = card; + queue->queue_no = i; + timer_setup(&queue->timer, qeth_tx_completion_timer, 0); + /* give outbound qeth_qdio_buffers their qdio_buffers */ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { - WARN_ON(card->qdio.out_qs[i]->bufs[j] != NULL); - if (qeth_init_qdio_out_buf(card->qdio.out_qs[i], j)) + WARN_ON(queue->bufs[j]); + if (qeth_init_qdio_out_buf(queue, j)) goto out_freeoutqbufs; } } @@ -2624,9 +2671,12 @@ int qeth_init_qdio_queues(struct qeth_card *card) queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card); queue->next_buf_to_fill = 0; queue->do_pack = 0; + queue->prev_hdr = NULL; + queue->bulk_start = 0; atomic_set(&queue->used_buffers, 0); atomic_set(&queue->set_pci_flags_count, 0); atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); + netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i)); } return 0; } @@ -2638,8 +2688,7 @@ static void qeth_ipa_finalize_cmd(struct qeth_card *card, qeth_mpc_finalize_cmd(card, iob); /* override with IPA-specific values: */ - __ipa_cmd(iob)->hdr.seqno = card->seqno.ipa; - iob->reply->seqno = card->seqno.ipa++; + __ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++; } void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, @@ -3196,6 +3245,7 @@ static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue) static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, int count) { + struct qeth_card *card = queue->card; struct qeth_qdio_out_buffer *buf; int rc; int i; @@ -3239,14 +3289,17 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, } } - QETH_TXQ_STAT_ADD(queue, bufs, count); qdio_flags = QDIO_FLAG_SYNC_OUTPUT; if (atomic_read(&queue->set_pci_flags_count)) qdio_flags |= QDIO_FLAG_PCI_OUT; rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags, queue->queue_no, index, count); + + /* Fake the TX completion interrupt: */ + if (IS_IQD(card)) + napi_schedule(&queue->napi); + if (rc) { - QETH_TXQ_STAT_ADD(queue, 
tx_errors, count); /* ignore temporary SIGA errors without busy condition */ if (rc == -ENOBUFS) return; @@ -3263,6 +3316,14 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, } } +static void qeth_flush_queue(struct qeth_qdio_out_q *queue) +{ + qeth_flush_buffers(queue, queue->bulk_start, 1); + + queue->bulk_start = QDIO_BUFNR(queue->bulk_start + 1); + queue->prev_hdr = NULL; +} + static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) { int index; @@ -3424,48 +3485,12 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev, int bidx = i % QDIO_MAX_BUFFERS_PER_Q; buffer = queue->bufs[bidx]; qeth_handle_send_error(card, buffer, qdio_error); - - if (queue->bufstates && - (queue->bufstates[bidx].flags & - QDIO_OUTBUF_STATE_FLAG_PENDING) != 0) { - WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED); - - if (atomic_cmpxchg(&buffer->state, - QETH_QDIO_BUF_PRIMED, - QETH_QDIO_BUF_PENDING) == - QETH_QDIO_BUF_PRIMED) { - qeth_notify_skbs(queue, buffer, - TX_NOTIFY_PENDING); - } - QETH_CARD_TEXT_(queue->card, 5, "pel%d", bidx); - - /* prepare the queue slot for re-use: */ - qeth_scrub_qdio_buffer(buffer->buffer, - queue->max_elements); - if (qeth_init_qdio_out_buf(queue, bidx)) { - QETH_CARD_TEXT(card, 2, "outofbuf"); - qeth_schedule_recovery(card); - } - } else { - if (card->options.cq == QETH_CQ_ENABLED) { - enum iucv_tx_notify n; - - n = qeth_compute_cq_notification( - buffer->buffer->element[15].sflags, 0); - qeth_notify_skbs(queue, buffer, n); - } - - qeth_clear_output_buffer(queue, buffer); - } - qeth_cleanup_handled_pending(queue, bidx, 0); + qeth_clear_output_buffer(queue, buffer, qdio_error, 0); } + atomic_sub(count, &queue->used_buffers); - /* check if we need to do something on this outbound queue */ - if (!IS_IQD(card)) - qeth_check_outbound_queue(queue); + qeth_check_outbound_queue(queue); - if (IS_IQD(card)) - __queue = qeth_iqd_translate_txq(dev, __queue); txq = netdev_get_tx_queue(dev, __queue); /* xmit may have observed the full-condition, but not yet stopped the * txq. In which case the code below won't trigger. 
So before returning, @@ -3535,7 +3560,7 @@ static int qeth_get_elements_for_frags(struct sk_buff *skb) int cnt, elements = 0; for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[cnt]; + skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt]; elements += qeth_get_elements_for_range( (addr_t)skb_frag_address(frag), @@ -3654,9 +3679,32 @@ check_layout: return 0; } -static void __qeth_fill_buffer(struct sk_buff *skb, - struct qeth_qdio_out_buffer *buf, - bool is_first_elem, unsigned int offset) +static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue, + struct qeth_qdio_out_buffer *buffer, + struct sk_buff *curr_skb, + struct qeth_hdr *curr_hdr) +{ + struct qeth_hdr *prev_hdr = queue->prev_hdr; + + if (!prev_hdr) + return true; + + /* All packets must have the same target: */ + if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) { + struct sk_buff *prev_skb = skb_peek(&buffer->skb_list); + + return ether_addr_equal(eth_hdr(prev_skb)->h_dest, + eth_hdr(curr_skb)->h_dest) && + qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2); + } + + return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) && + qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3); +} + +static unsigned int __qeth_fill_buffer(struct sk_buff *skb, + struct qeth_qdio_out_buffer *buf, + bool is_first_elem, unsigned int offset) { struct qdio_buffer *buffer = buf->buffer; int element = buf->next_element_to_fill; @@ -3713,24 +3761,21 @@ static void __qeth_fill_buffer(struct sk_buff *skb, if (buffer->element[element - 1].eflags) buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG; buf->next_element_to_fill = element; + return element; } /** * qeth_fill_buffer() - map skb into an output buffer - * @queue: QDIO queue to submit the buffer on * @buf: buffer to transport the skb * @skb: skb to map into the buffer * @hdr: qeth_hdr for this skb. Either at skb->data, or allocated * from qeth_core_header_cache. * @offset: when mapping the skb, start at skb->data + offset * @hd_len: if > 0, build a dedicated header element of this size - * flush: Prepare the buffer to be flushed, regardless of its fill level. */ -static int qeth_fill_buffer(struct qeth_qdio_out_q *queue, - struct qeth_qdio_out_buffer *buf, - struct sk_buff *skb, struct qeth_hdr *hdr, - unsigned int offset, unsigned int hd_len, - bool flush) +static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf, + struct sk_buff *skb, struct qeth_hdr *hdr, + unsigned int offset, unsigned int hd_len) { struct qdio_buffer *buffer = buf->buffer; bool is_first_elem = true; @@ -3750,35 +3795,22 @@ static int qeth_fill_buffer(struct qeth_qdio_out_q *queue, buf->next_element_to_fill++; } - __qeth_fill_buffer(skb, buf, is_first_elem, offset); - - if (!queue->do_pack) { - QETH_CARD_TEXT(queue->card, 6, "fillbfnp"); - } else { - QETH_CARD_TEXT(queue->card, 6, "fillbfpa"); - - QETH_TXQ_STAT_INC(queue, skbs_pack); - /* If the buffer still has free elements, keep using it. 
*/ - if (!flush && - buf->next_element_to_fill < queue->max_elements) - return 0; - } - - /* flush out the buffer */ - atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED); - queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) % - QDIO_MAX_BUFFERS_PER_Q; - return 1; + return __qeth_fill_buffer(skb, buf, is_first_elem, offset); } -static int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue, - struct sk_buff *skb, struct qeth_hdr *hdr, - unsigned int offset, unsigned int hd_len) +static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue, + struct sk_buff *skb, unsigned int elements, + struct qeth_hdr *hdr, unsigned int offset, + unsigned int hd_len) { - int index = queue->next_buf_to_fill; - struct qeth_qdio_out_buffer *buffer = queue->bufs[index]; + struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start]; + unsigned int bytes = qdisc_pkt_len(skb); + unsigned int next_element; struct netdev_queue *txq; bool stopped = false; + bool flush; + + txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb)); /* Just a sanity check, the wake/stop logic should ensure that we always * get a free buffer. @@ -3786,9 +3818,19 @@ static int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue, if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) return -EBUSY; - txq = netdev_get_tx_queue(queue->card->dev, skb_get_queue_mapping(skb)); + if ((buffer->next_element_to_fill + elements > queue->max_elements) || + !qeth_iqd_may_bulk(queue, buffer, skb, hdr)) { + atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); + qeth_flush_queue(queue); + buffer = queue->bufs[queue->bulk_start]; + + /* Sanity-check again: */ + if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) + return -EBUSY; + } - if (atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) { + if (buffer->next_element_to_fill == 0 && + atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) { /* If a TX completion happens right _here_ and misses to wake * the txq, then our re-check below will catch the race. 
*/ @@ -3797,8 +3839,17 @@ static int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue, stopped = true; } - qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len, stopped); - qeth_flush_buffers(queue, index, 1); + next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len); + buffer->bytes += bytes; + queue->prev_hdr = hdr; + + flush = __netdev_tx_sent_queue(txq, bytes, + !stopped && netdev_xmit_more()); + + if (flush || next_element >= queue->max_elements) { + atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); + qeth_flush_queue(queue); + } if (stopped && !qeth_out_queue_is_full(queue)) netif_tx_start_queue(txq); @@ -3811,6 +3862,7 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, int elements_needed) { struct qeth_qdio_out_buffer *buffer; + unsigned int next_element; struct netdev_queue *txq; bool stopped = false; int start_index; @@ -3873,8 +3925,17 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, stopped = true; } - flush_count += qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len, - stopped); + next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len); + + if (queue->do_pack) + QETH_TXQ_STAT_INC(queue, skbs_pack); + if (!queue->do_pack || stopped || next_element >= queue->max_elements) { + flush_count++; + atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); + queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) % + QDIO_MAX_BUFFERS_PER_Q; + } + if (flush_count) qeth_flush_buffers(queue, start_index, flush_count); else if (!atomic_read(&queue->set_pci_flags_count)) @@ -3941,7 +4002,6 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb, unsigned int hd_len = 0; unsigned int elements; int push_len, rc; - bool is_sg; if (is_tso) { hw_hdr_len = sizeof(struct qeth_hdr_tso); @@ -3970,10 +4030,9 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb, qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr, frame_len - proto_len, skb, proto_len); - is_sg = skb_is_nonlinear(skb); if (IS_IQD(card)) { - rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset, - hd_len); + rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset, + hd_len); } else { /* TODO: drop skb_orphan() once TX completion is fast enough */ skb_orphan(skb); @@ -3981,18 +4040,9 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb, hd_len, elements); } - if (!rc) { - QETH_TXQ_STAT_ADD(queue, buf_elements, elements); - if (is_sg) - QETH_TXQ_STAT_INC(queue, skbs_sg); - if (is_tso) { - QETH_TXQ_STAT_INC(queue, skbs_tso); - QETH_TXQ_STAT_ADD(queue, tso_bytes, frame_len); - } - } else { - if (!push_len) - kmem_cache_free(qeth_core_header_cache, hdr); - } + if (rc && !push_len) + kmem_cache_free(qeth_core_header_cache, hdr); + return rc; } EXPORT_SYMBOL_GPL(qeth_xmit); @@ -4014,23 +4064,14 @@ static int qeth_setadp_promisc_mode_cb(struct qeth_card *card, return (cmd->hdr.return_code) ? -EIO : 0; } -void qeth_setadp_promisc_mode(struct qeth_card *card) +void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable) { - enum qeth_ipa_promisc_modes mode; - struct net_device *dev = card->dev; + enum qeth_ipa_promisc_modes mode = enable ? 
SET_PROMISC_MODE_ON : + SET_PROMISC_MODE_OFF; struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; QETH_CARD_TEXT(card, 4, "setprom"); - - if (((dev->flags & IFF_PROMISC) && - (card->info.promisc_mode == SET_PROMISC_MODE_ON)) || - (!(dev->flags & IFF_PROMISC) && - (card->info.promisc_mode == SET_PROMISC_MODE_OFF))) - return; - mode = SET_PROMISC_MODE_OFF; - if (dev->flags & IFF_PROMISC) - mode = SET_PROMISC_MODE_ON; QETH_CARD_TEXT_(card, 4, "mode:%x", mode); iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE, @@ -4298,20 +4339,16 @@ static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum) } static int qeth_snmp_command_cb(struct qeth_card *card, - struct qeth_reply *reply, unsigned long sdata) + struct qeth_reply *reply, unsigned long data) { - struct qeth_ipa_cmd *cmd; - struct qeth_arp_query_info *qinfo; - unsigned char *data; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + struct qeth_arp_query_info *qinfo = reply->param; + struct qeth_ipacmd_setadpparms *adp_cmd; + unsigned int data_len; void *snmp_data; - __u16 data_len; QETH_CARD_TEXT(card, 3, "snpcmdcb"); - cmd = (struct qeth_ipa_cmd *) sdata; - data = (unsigned char *)((char *)cmd - reply->offset); - qinfo = (struct qeth_arp_query_info *) reply->param; - if (cmd->hdr.return_code) { QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code); return -EIO; @@ -4322,15 +4359,14 @@ static int qeth_snmp_command_cb(struct qeth_card *card, QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code); return -EIO; } - data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data)); - if (cmd->data.setadapterparms.hdr.seq_no == 1) { - snmp_data = &cmd->data.setadapterparms.data.snmp; - data_len -= offsetof(struct qeth_ipa_cmd, - data.setadapterparms.data.snmp); + + adp_cmd = &cmd->data.setadapterparms; + data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr); + if (adp_cmd->hdr.seq_no == 1) { + snmp_data = &adp_cmd->data.snmp; } else { - snmp_data = &cmd->data.setadapterparms.data.snmp.request; - data_len -= offsetof(struct qeth_ipa_cmd, - data.setadapterparms.data.snmp.request); + snmp_data = &adp_cmd->data.snmp.request; + data_len -= offsetof(struct qeth_snmp_cmd, request); } /* check if there is enough room in userspace */ @@ -4741,7 +4777,7 @@ static int qeth_qdio_establish(struct qeth_card *card) init_data.input_sbal_addr_array = in_sbal_ptrs; init_data.output_sbal_addr_array = out_sbal_ptrs; init_data.output_sbal_state_array = card->qdio.out_bufstates; - init_data.scan_threshold = IS_IQD(card) ? 1 : 32; + init_data.scan_threshold = IS_IQD(card) ? 
0 : 32; if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED, QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) { @@ -5155,6 +5191,107 @@ out: } EXPORT_SYMBOL_GPL(qeth_poll); +static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue, + unsigned int bidx, bool error, int budget) +{ + struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx]; + u8 sflags = buffer->buffer->element[15].sflags; + struct qeth_card *card = queue->card; + + if (queue->bufstates && (queue->bufstates[bidx].flags & + QDIO_OUTBUF_STATE_FLAG_PENDING)) { + WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED); + + if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED, + QETH_QDIO_BUF_PENDING) == + QETH_QDIO_BUF_PRIMED) + qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING); + + QETH_CARD_TEXT_(card, 5, "pel%u", bidx); + + /* prepare the queue slot for re-use: */ + qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements); + if (qeth_init_qdio_out_buf(queue, bidx)) { + QETH_CARD_TEXT(card, 2, "outofbuf"); + qeth_schedule_recovery(card); + } + + return; + } + + if (card->options.cq == QETH_CQ_ENABLED) + qeth_notify_skbs(queue, buffer, + qeth_compute_cq_notification(sflags, 0)); + qeth_clear_output_buffer(queue, buffer, error, budget); +} + +static int qeth_tx_poll(struct napi_struct *napi, int budget) +{ + struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi); + unsigned int queue_no = queue->queue_no; + struct qeth_card *card = queue->card; + struct net_device *dev = card->dev; + unsigned int work_done = 0; + struct netdev_queue *txq; + + txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no)); + + while (1) { + unsigned int start, error, i; + unsigned int packets = 0; + unsigned int bytes = 0; + int completed; + + if (qeth_out_queue_is_empty(queue)) { + napi_complete(napi); + return 0; + } + + /* Give the CPU a breather: */ + if (work_done >= QDIO_MAX_BUFFERS_PER_Q) { + QETH_TXQ_STAT_INC(queue, completion_yield); + if (napi_complete_done(napi, 0)) + napi_schedule(napi); + return 0; + } + + completed = qdio_inspect_queue(CARD_DDEV(card), queue_no, false, + &start, &error); + if (completed <= 0) { + /* Ensure we see TX completion for pending work: */ + if (napi_complete_done(napi, 0)) + qeth_tx_arm_timer(queue); + return 0; + } + + for (i = start; i < start + completed; i++) { + struct qeth_qdio_out_buffer *buffer; + unsigned int bidx = QDIO_BUFNR(i); + + buffer = queue->bufs[bidx]; + packets += skb_queue_len(&buffer->skb_list); + bytes += buffer->bytes; + + qeth_handle_send_error(card, buffer, error); + qeth_iqd_tx_complete(queue, bidx, error, budget); + qeth_cleanup_handled_pending(queue, bidx, false); + } + + netdev_tx_completed_queue(txq, packets, bytes); + atomic_sub(completed, &queue->used_buffers); + work_done += completed; + + /* xmit may have observed the full-condition, but not yet + * stopped the txq. In which case the code below won't trigger. + * So before returning, xmit will re-check the txq's fill level + * and wake it up if needed. 
+ */ + if (netif_tx_queue_stopped(txq) && + !qeth_out_queue_is_full(queue)) + netif_tx_wake_queue(txq); + } +} + static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd) { if (!cmd->hdr.return_code) @@ -6101,6 +6238,17 @@ int qeth_open(struct net_device *dev) napi_enable(&card->napi); local_bh_disable(); napi_schedule(&card->napi); + if (IS_IQD(card)) { + struct qeth_qdio_out_q *queue; + unsigned int i; + + qeth_for_each_output_queue(card, queue, i) { + netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll, + QETH_NAPI_WEIGHT); + napi_enable(&queue->napi); + napi_schedule(&queue->napi); + } + } /* kick-start the NAPI softirq: */ local_bh_enable(); return 0; @@ -6112,7 +6260,26 @@ int qeth_stop(struct net_device *dev) struct qeth_card *card = dev->ml_priv; QETH_CARD_TEXT(card, 4, "qethstop"); - netif_tx_disable(dev); + if (IS_IQD(card)) { + struct qeth_qdio_out_q *queue; + unsigned int i; + + /* Quiesce the NAPI instances: */ + qeth_for_each_output_queue(card, queue, i) { + napi_disable(&queue->napi); + del_timer_sync(&queue->timer); + } + + /* Stop .ndo_start_xmit, might still access queue->napi. */ + netif_tx_disable(dev); + + /* Queues may get re-allocated, so remove the NAPIs here. */ + qeth_for_each_output_queue(card, queue, i) + netif_napi_del(&queue->napi); + } else { + netif_tx_disable(dev); + } + napi_disable(&card->napi); return 0; } diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h index 75b5834ed28d..6420b58cf42b 100644 --- a/drivers/s390/net/qeth_core_mpc.h +++ b/drivers/s390/net/qeth_core_mpc.h @@ -27,7 +27,6 @@ extern unsigned char IPA_PDU_HEADER[]; #define QETH_TIMEOUT (10 * HZ) #define QETH_IPA_TIMEOUT (45 * HZ) -#define QETH_IDX_COMMAND_SEQNO 0xffff0000 #define QETH_CLEAR_CHANNEL_PARM -10 #define QETH_HALT_CHANNEL_PARM -11 diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c index 4166eb29f0bd..096698df3886 100644 --- a/drivers/s390/net/qeth_ethtool.c +++ b/drivers/s390/net/qeth_ethtool.c @@ -39,6 +39,8 @@ static const struct qeth_stats txq_stats[] = { QETH_TXQ_STAT("TSO bytes", tso_bytes), QETH_TXQ_STAT("Packing mode switches", packing_mode_switch), QETH_TXQ_STAT("Queue stopped", stopped), + QETH_TXQ_STAT("Completion yield", completion_yield), + QETH_TXQ_STAT("Completion timer", completion_timer), }; static const struct qeth_stats card_stats[] = { diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index cbead3d1b2fd..b8799cd3e7aa 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -175,10 +175,8 @@ static void qeth_l2_fill_header(struct qeth_qdio_out_q *queue, hdr->hdr.l2.id = QETH_HEADER_TYPE_L2_TSO; } else { hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2; - if (skb->ip_summed == CHECKSUM_PARTIAL) { + if (skb->ip_summed == CHECKSUM_PARTIAL) qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv); - QETH_TXQ_STAT_INC(queue, skbs_csum); - } } /* set byte byte 3 to casting flags */ @@ -439,23 +437,14 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p) return 0; } -static void qeth_promisc_to_bridge(struct qeth_card *card) +static void qeth_l2_promisc_to_bridge(struct qeth_card *card, bool enable) { - struct net_device *dev = card->dev; - enum qeth_ipa_promisc_modes promisc_mode; int role; int rc; QETH_CARD_TEXT(card, 3, "pmisc2br"); - if (!card->options.sbp.reflect_promisc) - return; - promisc_mode = (dev->flags & IFF_PROMISC) ? 
SET_PROMISC_MODE_ON - : SET_PROMISC_MODE_OFF; - if (promisc_mode == card->info.promisc_mode) - return; - - if (promisc_mode == SET_PROMISC_MODE_ON) { + if (enable) { if (card->options.sbp.reflect_promisc_primary) role = QETH_SBP_ROLE_PRIMARY; else @@ -464,14 +453,26 @@ static void qeth_promisc_to_bridge(struct qeth_card *card) role = QETH_SBP_ROLE_NONE; rc = qeth_bridgeport_setrole(card, role); - QETH_CARD_TEXT_(card, 2, "bpm%c%04x", - (promisc_mode == SET_PROMISC_MODE_ON) ? '+' : '-', rc); + QETH_CARD_TEXT_(card, 2, "bpm%c%04x", enable ? '+' : '-', rc); if (!rc) { card->options.sbp.role = role; - card->info.promisc_mode = promisc_mode; + card->info.promisc_mode = enable; } +} +static void qeth_l2_set_promisc_mode(struct qeth_card *card) +{ + bool enable = card->dev->flags & IFF_PROMISC; + + if (card->info.promisc_mode == enable) + return; + + if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) + qeth_setadp_promisc_mode(card, enable); + else if (card->options.sbp.reflect_promisc) + qeth_l2_promisc_to_bridge(card, enable); } + /* New MAC address is added to the hash table and marked to be written on card * only if there is not in the hash table storage already * @@ -539,10 +540,7 @@ static void qeth_l2_rx_mode_work(struct work_struct *work) } } - if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) - qeth_setadp_promisc_mode(card); - else - qeth_promisc_to_bridge(card); + qeth_l2_set_promisc_mode(card); } static int qeth_l2_xmit_osn(struct qeth_card *card, struct sk_buff *skb, @@ -588,9 +586,10 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, struct qeth_card *card = dev->ml_priv; u16 txq = skb_get_queue_mapping(skb); struct qeth_qdio_out_q *queue; - int tx_bytes = skb->len; int rc; + if (!skb_is_gso(skb)) + qdisc_skb_cb(skb)->pkt_len = skb->len; if (IS_IQD(card)) txq = qeth_iqd_translate_txq(dev, txq); queue = card->qdio.out_qs[txq]; @@ -601,11 +600,8 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, rc = qeth_xmit(card, skb, queue, qeth_get_ip_version(skb), qeth_l2_fill_header); - if (!rc) { - QETH_TXQ_STAT_INC(queue, tx_packets); - QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes); + if (!rc) return NETDEV_TX_OK; - } QETH_TXQ_STAT_INC(queue, tx_dropped); kfree_skb(skb); @@ -1000,9 +996,10 @@ struct qeth_discipline qeth_l2_discipline = { EXPORT_SYMBOL_GPL(qeth_l2_discipline); static void qeth_osn_assist_cb(struct qeth_card *card, - struct qeth_cmd_buffer *iob) + struct qeth_cmd_buffer *iob, + unsigned int data_length) { - qeth_notify_reply(iob->reply, 0); + qeth_notify_cmd(iob, 0); qeth_put_cmd(iob); } @@ -1703,7 +1700,6 @@ static int qeth_l2_vnicc_makerc(struct qeth_card *card, u16 ipa_rc) /* generic VNICC request call back control */ struct _qeth_l2_vnicc_request_cbctl { - u32 sub_cmd; struct { union{ u32 *sup_cmds; @@ -1721,6 +1717,7 @@ static int qeth_l2_vnicc_request_cb(struct qeth_card *card, (struct _qeth_l2_vnicc_request_cbctl *) reply->param; struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; struct qeth_ipacmd_vnicc *rep = &cmd->data.vnicc; + u32 sub_cmd = cmd->data.vnicc.hdr.sub_command; QETH_CARD_TEXT(card, 2, "vniccrcb"); if (cmd->hdr.return_code) @@ -1729,10 +1726,9 @@ static int qeth_l2_vnicc_request_cb(struct qeth_card *card, card->options.vnicc.sup_chars = rep->vnicc_cmds.supported; card->options.vnicc.cur_chars = rep->vnicc_cmds.enabled; - if (cbctl->sub_cmd == IPA_VNICC_QUERY_CMDS) + if (sub_cmd == IPA_VNICC_QUERY_CMDS) *cbctl->result.sup_cmds = rep->data.query_cmds.sup_cmds; - - if (cbctl->sub_cmd == 
IPA_VNICC_GET_TIMEOUT) + else if (sub_cmd == IPA_VNICC_GET_TIMEOUT) *cbctl->result.timeout = rep->data.getset_timeout.timeout; return 0; @@ -1760,7 +1756,6 @@ static struct qeth_cmd_buffer *qeth_l2_vnicc_build_cmd(struct qeth_card *card, /* VNICC query VNIC characteristics request */ static int qeth_l2_vnicc_query_chars(struct qeth_card *card) { - struct _qeth_l2_vnicc_request_cbctl cbctl; struct qeth_cmd_buffer *iob; QETH_CARD_TEXT(card, 2, "vniccqch"); @@ -1768,10 +1763,7 @@ static int qeth_l2_vnicc_query_chars(struct qeth_card *card) if (!iob) return -ENOMEM; - /* prepare callback control */ - cbctl.sub_cmd = IPA_VNICC_QUERY_CHARS; - - return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, &cbctl); + return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, NULL); } /* VNICC query sub commands request */ @@ -1790,7 +1782,6 @@ static int qeth_l2_vnicc_query_cmds(struct qeth_card *card, u32 vnic_char, __ipa_cmd(iob)->data.vnicc.data.query_cmds.vnic_char = vnic_char; /* prepare callback control */ - cbctl.sub_cmd = IPA_VNICC_QUERY_CMDS; cbctl.result.sup_cmds = sup_cmds; return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, &cbctl); @@ -1800,7 +1791,6 @@ static int qeth_l2_vnicc_query_cmds(struct qeth_card *card, u32 vnic_char, static int qeth_l2_vnicc_set_char(struct qeth_card *card, u32 vnic_char, u32 cmd) { - struct _qeth_l2_vnicc_request_cbctl cbctl; struct qeth_cmd_buffer *iob; QETH_CARD_TEXT(card, 2, "vniccedc"); @@ -1810,10 +1800,7 @@ static int qeth_l2_vnicc_set_char(struct qeth_card *card, u32 vnic_char, __ipa_cmd(iob)->data.vnicc.data.set_char.vnic_char = vnic_char; - /* prepare callback control */ - cbctl.sub_cmd = cmd; - - return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, &cbctl); + return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, NULL); } /* VNICC get/set timeout for characteristic request */ @@ -1837,7 +1824,6 @@ static int qeth_l2_vnicc_getset_timeout(struct qeth_card *card, u32 vnicc, getset_timeout->timeout = *timeout; /* prepare callback control */ - cbctl.sub_cmd = cmd; if (cmd == IPA_VNICC_GET_TIMEOUT) cbctl.result.timeout = timeout; diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 2dd99f103671..d7bfc7a0e4c0 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -1435,27 +1435,19 @@ static void qeth_l3_stop_card(struct qeth_card *card) flush_workqueue(card->event_wq); } -/* - * test for and Switch promiscuous mode (on or off) - * either for guestlan or HiperSocket Sniffer - */ -static void -qeth_l3_handle_promisc_mode(struct qeth_card *card) +static void qeth_l3_set_promisc_mode(struct qeth_card *card) { - struct net_device *dev = card->dev; + bool enable = card->dev->flags & IFF_PROMISC; - if (((dev->flags & IFF_PROMISC) && - (card->info.promisc_mode == SET_PROMISC_MODE_ON)) || - (!(dev->flags & IFF_PROMISC) && - (card->info.promisc_mode == SET_PROMISC_MODE_OFF))) + if (card->info.promisc_mode == enable) return; if (IS_VM_NIC(card)) { /* Guestlan trace */ if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) - qeth_setadp_promisc_mode(card); + qeth_setadp_promisc_mode(card, enable); } else if (card->options.sniffer && /* HiperSockets trace */ qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) { - if (dev->flags & IFF_PROMISC) { + if (enable) { QETH_CARD_TEXT(card, 3, "+promisc"); qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_ENABLE); } else { @@ -1502,11 +1494,9 @@ static void qeth_l3_rx_mode_work(struct work_struct *work) addr->disp_flag = 
QETH_DISP_ADDR_DELETE; } } - - if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) - return; } - qeth_l3_handle_promisc_mode(card); + + qeth_l3_set_promisc_mode(card); } static int qeth_l3_arp_makerc(u16 rc) @@ -1967,7 +1957,6 @@ static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue, /* some HW requires combined L3+L4 csum offload: */ if (ipv == 4) hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_HDR_REQ; - QETH_TXQ_STAT_INC(queue, skbs_csum); } } @@ -2054,9 +2043,10 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, u16 txq = skb_get_queue_mapping(skb); int ipv = qeth_get_ip_version(skb); struct qeth_qdio_out_q *queue; - int tx_bytes = skb->len; int rc; + if (!skb_is_gso(skb)) + qdisc_skb_cb(skb)->pkt_len = skb->len; if (IS_IQD(card)) { queue = card->qdio.out_qs[qeth_iqd_translate_txq(dev, txq)]; @@ -2079,11 +2069,8 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, else rc = qeth_xmit(card, skb, queue, ipv, qeth_l3_fill_header); - if (!rc) { - QETH_TXQ_STAT_INC(queue, tx_packets); - QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes); + if (!rc) return NETDEV_TX_OK; - } tx_drop: QETH_TXQ_STAT_INC(queue, tx_dropped); diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index 7796799bf04a..9ff9429395eb 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -346,7 +346,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp) return -ENOMEM; } frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1]; - cp = kmap_atomic(skb_frag_page(frag)) + frag->page_offset; + cp = kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag); } else { cp = skb_put(skb, tlen); } diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 00dd47bcbb1e..587d4bbb7d22 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -1522,8 +1522,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp) return -ENOMEM; } frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1]; - cp = kmap_atomic(skb_frag_page(frag)) - + frag->page_offset; + cp = kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag); } else { cp = skb_put(skb, tlen); } diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c index ba4603d76284..a20ddc301c89 100644 --- a/drivers/scsi/fcoe/fcoe_transport.c +++ b/drivers/scsi/fcoe/fcoe_transport.c @@ -308,7 +308,7 @@ EXPORT_SYMBOL_GPL(fcoe_get_wwn); u32 fcoe_fc_crc(struct fc_frame *fp) { struct sk_buff *skb = fp_skb(fp); - struct skb_frag_struct *frag; + skb_frag_t *frag; unsigned char *data; unsigned long off, len, clen; u32 crc; @@ -318,7 +318,7 @@ u32 fcoe_fc_crc(struct fc_frame *fp) for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { frag = &skb_shinfo(skb)->frags[i]; - off = frag->page_offset; + off = skb_frag_off(frag); len = skb_frag_size(frag); while (len > 0) { clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK)); diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index a42babde036d..42542720962f 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -1077,7 +1077,7 @@ static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp) return -ENOMEM; } frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1]; - cp = kmap_atomic(skb_frag_page(frag)) + frag->page_offset; + cp = kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag); } else { cp = skb_put(skb, tlen); } diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c index da2d2ab8104d..7c3ae52f2b15 100644 --- 
a/drivers/ssb/pci.c +++ b/drivers/ssb/pci.c @@ -595,7 +595,7 @@ static void sprom_extract_r8(struct ssb_sprom *out, const u16 *in) { int i; u16 o; - u16 pwr_info_offset[] = { + static const u16 pwr_info_offset[] = { SSB_SROM8_PWR_INFO_CORE0, SSB_SROM8_PWR_INFO_CORE1, SSB_SROM8_PWR_INFO_CORE2, SSB_SROM8_PWR_INFO_CORE3 }; diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index b9c7f0dc653b..6f1fa4c849a1 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -123,4 +123,6 @@ source "drivers/staging/uwb/Kconfig" source "drivers/staging/exfat/Kconfig" +source "drivers/staging/qlge/Kconfig" + endif # STAGING diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index 4b5962006c4b..a90f9b308c8d 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -52,3 +52,4 @@ obj-$(CONFIG_ISDN_CAPI) += isdn/ obj-$(CONFIG_UWB) += uwb/ obj-$(CONFIG_USB_WUSB) += wusbcore/ obj-$(CONFIG_EXFAT_FS) += exfat/ +obj-$(CONFIG_QLGE) += qlge/ diff --git a/drivers/staging/octeon/Kconfig b/drivers/staging/octeon/Kconfig index 1e3012b9991c..5319909eb2f6 100644 --- a/drivers/staging/octeon/Kconfig +++ b/drivers/staging/octeon/Kconfig @@ -1,7 +1,8 @@ # SPDX-License-Identifier: GPL-2.0 config OCTEON_ETHERNET tristate "Cavium Networks Octeon Ethernet support" - depends on CAVIUM_OCTEON_SOC && NETDEVICES + depends on CAVIUM_OCTEON_SOC || COMPILE_TEST + depends on NETDEVICES select PHYLIB select MDIO_OCTEON help diff --git a/drivers/staging/octeon/ethernet-defines.h b/drivers/staging/octeon/ethernet-defines.h index 1e114422993a..ef9e767b0e2e 100644 --- a/drivers/staging/octeon/ethernet-defines.h +++ b/drivers/staging/octeon/ethernet-defines.h @@ -21,8 +21,6 @@ #ifndef __ETHERNET_DEFINES_H__ #define __ETHERNET_DEFINES_H__ -#include <asm/octeon/cvmx-config.h> - #ifdef CONFIG_NETFILTER #define REUSE_SKBUFFS_WITHOUT_FREE 0 #else diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c index 2aee64fdaec5..ffac0c4b3f5c 100644 --- a/drivers/staging/octeon/ethernet-mdio.c +++ b/drivers/staging/octeon/ethernet-mdio.c @@ -13,15 +13,11 @@ #include <generated/utsrelease.h> #include <net/dst.h> -#include <asm/octeon/octeon.h> - -#include "ethernet-defines.h" #include "octeon-ethernet.h" +#include "ethernet-defines.h" #include "ethernet-mdio.h" #include "ethernet-util.h" -#include <asm/octeon/cvmx-gmxx-defs.h> - static void cvm_oct_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { diff --git a/drivers/staging/octeon/ethernet-mem.c b/drivers/staging/octeon/ethernet-mem.c index 0d26c4a93ec1..532594957ebc 100644 --- a/drivers/staging/octeon/ethernet-mem.c +++ b/drivers/staging/octeon/ethernet-mem.c @@ -9,13 +9,10 @@ #include <linux/netdevice.h> #include <linux/slab.h> -#include <asm/octeon/octeon.h> - +#include "octeon-ethernet.h" #include "ethernet-mem.h" #include "ethernet-defines.h" -#include <asm/octeon/cvmx-fpa.h> - /** * cvm_oct_fill_hw_skbuff - fill the supplied hardware pool with skbuffs * @pool: Pool to allocate an skbuff for diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c index c15376d33891..d91fd5ce9e68 100644 --- a/drivers/staging/octeon/ethernet-rgmii.c +++ b/drivers/staging/octeon/ethernet-rgmii.c @@ -12,19 +12,11 @@ #include <linux/ratelimit.h> #include <net/dst.h> -#include <asm/octeon/octeon.h> - -#include "ethernet-defines.h" #include "octeon-ethernet.h" +#include "ethernet-defines.h" #include "ethernet-util.h" #include "ethernet-mdio.h" -#include <asm/octeon/cvmx-helper.h> - 
-#include <asm/octeon/cvmx-ipd-defs.h> -#include <asm/octeon/cvmx-npi-defs.h> -#include <asm/octeon/cvmx-gmxx-defs.h> - static DEFINE_SPINLOCK(global_register_lock); static void cvm_oct_set_hw_preamble(struct octeon_ethernet *priv, bool enable) diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c index 5e271245273c..0e65955c746b 100644 --- a/drivers/staging/octeon/ethernet-rx.c +++ b/drivers/staging/octeon/ethernet-rx.c @@ -23,23 +23,12 @@ #include <net/xfrm.h> #endif /* CONFIG_XFRM */ -#include <asm/octeon/octeon.h> - +#include "octeon-ethernet.h" #include "ethernet-defines.h" #include "ethernet-mem.h" #include "ethernet-rx.h" -#include "octeon-ethernet.h" #include "ethernet-util.h" -#include <asm/octeon/cvmx-helper.h> -#include <asm/octeon/cvmx-wqe.h> -#include <asm/octeon/cvmx-fau.h> -#include <asm/octeon/cvmx-pow.h> -#include <asm/octeon/cvmx-pip.h> -#include <asm/octeon/cvmx-scratch.h> - -#include <asm/octeon/cvmx-gmxx-defs.h> - static atomic_t oct_rx_ready = ATOMIC_INIT(0); static struct oct_rx_group { diff --git a/drivers/staging/octeon/ethernet-rx.h b/drivers/staging/octeon/ethernet-rx.h index 096553d8fc99..ff6482fa20d6 100644 --- a/drivers/staging/octeon/ethernet-rx.h +++ b/drivers/staging/octeon/ethernet-rx.h @@ -5,8 +5,6 @@ * Copyright (c) 2003-2007 Cavium Networks */ -#include <asm/octeon/cvmx-fau.h> - void cvm_oct_poll_controller(struct net_device *dev); void cvm_oct_rx_initialize(void); void cvm_oct_rx_shutdown(void); diff --git a/drivers/staging/octeon/ethernet-sgmii.c b/drivers/staging/octeon/ethernet-sgmii.c index a4a8f094e2b4..d7fbd9159302 100644 --- a/drivers/staging/octeon/ethernet-sgmii.c +++ b/drivers/staging/octeon/ethernet-sgmii.c @@ -11,17 +11,11 @@ #include <linux/ratelimit.h> #include <net/dst.h> -#include <asm/octeon/octeon.h> - -#include "ethernet-defines.h" #include "octeon-ethernet.h" +#include "ethernet-defines.h" #include "ethernet-util.h" #include "ethernet-mdio.h" -#include <asm/octeon/cvmx-helper.h> - -#include <asm/octeon/cvmx-gmxx-defs.h> - int cvm_oct_sgmii_open(struct net_device *dev) { return cvm_oct_common_open(dev, cvm_oct_link_poll); diff --git a/drivers/staging/octeon/ethernet-spi.c b/drivers/staging/octeon/ethernet-spi.c index 01efdf2a2c20..c582403e6a1f 100644 --- a/drivers/staging/octeon/ethernet-spi.c +++ b/drivers/staging/octeon/ethernet-spi.c @@ -10,18 +10,10 @@ #include <linux/interrupt.h> #include <net/dst.h> -#include <asm/octeon/octeon.h> - -#include "ethernet-defines.h" #include "octeon-ethernet.h" +#include "ethernet-defines.h" #include "ethernet-util.h" -#include <asm/octeon/cvmx-spi.h> - -#include <asm/octeon/cvmx-npi-defs.h> -#include <asm/octeon/cvmx-spxx-defs.h> -#include <asm/octeon/cvmx-stxx-defs.h> - static int number_spi_ports; static int need_retrain[2] = { 0, 0 }; diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c index 20f513fbaa85..c64728fc21f2 100644 --- a/drivers/staging/octeon/ethernet-tx.c +++ b/drivers/staging/octeon/ethernet-tx.c @@ -22,21 +22,11 @@ #include <linux/atomic.h> #include <net/sch_generic.h> -#include <asm/octeon/octeon.h> - -#include "ethernet-defines.h" #include "octeon-ethernet.h" +#include "ethernet-defines.h" #include "ethernet-tx.h" #include "ethernet-util.h" -#include <asm/octeon/cvmx-wqe.h> -#include <asm/octeon/cvmx-fau.h> -#include <asm/octeon/cvmx-pip.h> -#include <asm/octeon/cvmx-pko.h> -#include <asm/octeon/cvmx-helper.h> - -#include <asm/octeon/cvmx-gmxx-defs.h> - #define CVM_OCT_SKB_CB(skb) ((u64 *)((skb)->cb)) /* @@ 
-280,12 +270,11 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev) hw_buffer.s.size = skb_headlen(skb); CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i; + skb_frag_t *fs = skb_shinfo(skb)->frags + i; hw_buffer.s.addr = - XKPHYS_TO_PHYS((u64)(page_address(fs->page.p) + - fs->page_offset)); - hw_buffer.s.size = fs->size; + XKPHYS_TO_PHYS((u64)skb_frag_address(fs)); + hw_buffer.s.size = skb_frag_size(fs); CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64; } hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)CVM_OCT_SKB_CB(skb)); diff --git a/drivers/staging/octeon/ethernet-util.h b/drivers/staging/octeon/ethernet-util.h index 31a82873e15c..2af83a12ca78 100644 --- a/drivers/staging/octeon/ethernet-util.h +++ b/drivers/staging/octeon/ethernet-util.h @@ -5,10 +5,6 @@ * Copyright (c) 2003-2007 Cavium Networks */ -#include <asm/octeon/cvmx-pip.h> -#include <asm/octeon/cvmx-helper.h> -#include <asm/octeon/cvmx-helper-util.h> - /** * cvm_oct_get_buffer_ptr - convert packet data address to pointer * @packet_ptr: Packet data hardware address diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c index 33762f2e9a44..cf8e9a23ebf9 100644 --- a/drivers/staging/octeon/ethernet.c +++ b/drivers/staging/octeon/ethernet.c @@ -19,24 +19,14 @@ #include <net/dst.h> -#include <asm/octeon/octeon.h> - -#include "ethernet-defines.h" #include "octeon-ethernet.h" +#include "ethernet-defines.h" #include "ethernet-mem.h" #include "ethernet-rx.h" #include "ethernet-tx.h" #include "ethernet-mdio.h" #include "ethernet-util.h" -#include <asm/octeon/cvmx-pip.h> -#include <asm/octeon/cvmx-pko.h> -#include <asm/octeon/cvmx-fau.h> -#include <asm/octeon/cvmx-ipd.h> -#include <asm/octeon/cvmx-helper.h> -#include <asm/octeon/cvmx-asxx-defs.h> -#include <asm/octeon/cvmx-gmxx-defs.h> - #define OCTEON_MAX_MTU 65392 static int num_packet_buffers = 1024; diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h index be570d33685a..a8a864b40913 100644 --- a/drivers/staging/octeon/octeon-ethernet.h +++ b/drivers/staging/octeon/octeon-ethernet.h @@ -13,7 +13,34 @@ #include <linux/of.h> #include <linux/phy.h> -#include <asm/octeon/cvmx-helper-board.h> + +#ifdef CONFIG_MIPS + +#include <asm/octeon/octeon.h> + +#include <asm/octeon/cvmx-asxx-defs.h> +#include <asm/octeon/cvmx-config.h> +#include <asm/octeon/cvmx-fau.h> +#include <asm/octeon/cvmx-gmxx-defs.h> +#include <asm/octeon/cvmx-helper.h> +#include <asm/octeon/cvmx-helper-util.h> +#include <asm/octeon/cvmx-ipd.h> +#include <asm/octeon/cvmx-ipd-defs.h> +#include <asm/octeon/cvmx-npi-defs.h> +#include <asm/octeon/cvmx-pip.h> +#include <asm/octeon/cvmx-pko.h> +#include <asm/octeon/cvmx-pow.h> +#include <asm/octeon/cvmx-scratch.h> +#include <asm/octeon/cvmx-spi.h> +#include <asm/octeon/cvmx-spxx-defs.h> +#include <asm/octeon/cvmx-stxx-defs.h> +#include <asm/octeon/cvmx-wqe.h> + +#else + +#include "octeon-stubs.h" + +#endif /** * This is the definition of the Ethernet driver's private diff --git a/drivers/staging/octeon/octeon-stubs.h b/drivers/staging/octeon/octeon-stubs.h new file mode 100644 index 000000000000..a4ac3bfb62a8 --- /dev/null +++ b/drivers/staging/octeon/octeon-stubs.h @@ -0,0 +1,1429 @@ +#define CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE 512 +#define XKPHYS_TO_PHYS(p) (p) + +#define OCTEON_IRQ_WORKQ0 0 +#define OCTEON_IRQ_RML 0 +#define OCTEON_IRQ_TIMER1 0 +#define OCTEON_IS_MODEL(x) 0 +#define octeon_has_feature(x) 0 +#define 
octeon_get_clock_rate() 0 + +#define CVMX_SYNCIOBDMA do { } while(0) + +#define CVMX_HELPER_INPUT_TAG_TYPE 0 +#define CVMX_HELPER_FIRST_MBUFF_SKIP 7 +#define CVMX_FAU_REG_END (2048) +#define CVMX_FPA_OUTPUT_BUFFER_POOL (2) +#define CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE 16 +#define CVMX_FPA_PACKET_POOL (0) +#define CVMX_FPA_PACKET_POOL_SIZE 16 +#define CVMX_FPA_WQE_POOL (1) +#define CVMX_FPA_WQE_POOL_SIZE 16 +#define CVMX_GMXX_RXX_ADR_CAM_EN(a, b) ((a)+(b)) +#define CVMX_GMXX_RXX_ADR_CTL(a, b) ((a)+(b)) +#define CVMX_GMXX_PRTX_CFG(a, b) ((a)+(b)) +#define CVMX_GMXX_RXX_FRM_MAX(a, b) ((a)+(b)) +#define CVMX_GMXX_RXX_JABBER(a, b) ((a)+(b)) +#define CVMX_IPD_CTL_STATUS 0 +#define CVMX_PIP_FRM_LEN_CHKX(a) (a) +#define CVMX_PIP_NUM_INPUT_PORTS 1 +#define CVMX_SCR_SCRATCH 0 +#define CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 2 +#define CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 2 +#define CVMX_IPD_SUB_PORT_FCS 0 +#define CVMX_SSO_WQ_IQ_DIS 0 +#define CVMX_SSO_WQ_INT 0 +#define CVMX_POW_WQ_INT 0 +#define CVMX_SSO_WQ_INT_PC 0 +#define CVMX_NPI_RSL_INT_BLOCKS 0 +#define CVMX_POW_WQ_INT_PC 0 + +typedef union { + uint64_t u64; + struct { + uint64_t bufs:8; + uint64_t ip_offset:8; + uint64_t vlan_valid:1; + uint64_t vlan_stacked:1; + uint64_t unassigned:1; + uint64_t vlan_cfi:1; + uint64_t vlan_id:12; + uint64_t pr:4; + uint64_t unassigned2:8; + uint64_t dec_ipcomp:1; + uint64_t tcp_or_udp:1; + uint64_t dec_ipsec:1; + uint64_t is_v6:1; + uint64_t software:1; + uint64_t L4_error:1; + uint64_t is_frag:1; + uint64_t IP_exc:1; + uint64_t is_bcast:1; + uint64_t is_mcast:1; + uint64_t not_IP:1; + uint64_t rcv_error:1; + uint64_t err_code:8; + } s; + struct { + uint64_t bufs:8; + uint64_t ip_offset:8; + uint64_t vlan_valid:1; + uint64_t vlan_stacked:1; + uint64_t unassigned:1; + uint64_t vlan_cfi:1; + uint64_t vlan_id:12; + uint64_t port:12; + uint64_t dec_ipcomp:1; + uint64_t tcp_or_udp:1; + uint64_t dec_ipsec:1; + uint64_t is_v6:1; + uint64_t software:1; + uint64_t L4_error:1; + uint64_t is_frag:1; + uint64_t IP_exc:1; + uint64_t is_bcast:1; + uint64_t is_mcast:1; + uint64_t not_IP:1; + uint64_t rcv_error:1; + uint64_t err_code:8; + } s_cn68xx; + + struct { + uint64_t unused1:16; + uint64_t vlan:16; + uint64_t unused2:32; + } svlan; + struct { + uint64_t bufs:8; + uint64_t unused:8; + uint64_t vlan_valid:1; + uint64_t vlan_stacked:1; + uint64_t unassigned:1; + uint64_t vlan_cfi:1; + uint64_t vlan_id:12; + uint64_t pr:4; + uint64_t unassigned2:12; + uint64_t software:1; + uint64_t unassigned3:1; + uint64_t is_rarp:1; + uint64_t is_arp:1; + uint64_t is_bcast:1; + uint64_t is_mcast:1; + uint64_t not_IP:1; + uint64_t rcv_error:1; + uint64_t err_code:8; + } snoip; + +} cvmx_pip_wqe_word2; + +union cvmx_pip_wqe_word0 { + struct { + uint64_t next_ptr:40; + uint8_t unused; + uint16_t hw_chksum; + } cn38xx; + struct { + uint64_t pknd:6; /* 0..5 */ + uint64_t unused2:2; /* 6..7 */ + uint64_t bpid:6; /* 8..13 */ + uint64_t unused1:18; /* 14..31 */ + uint64_t l2ptr:8; /* 32..39 */ + uint64_t l3ptr:8; /* 40..47 */ + uint64_t unused0:8; /* 48..55 */ + uint64_t l4ptr:8; /* 56..63 */ + } cn68xx; +}; + +union cvmx_wqe_word0 { + uint64_t u64; + union cvmx_pip_wqe_word0 pip; +}; + +union cvmx_wqe_word1 { + uint64_t u64; + struct { + uint64_t tag:32; + uint64_t tag_type:2; + uint64_t varies:14; + uint64_t len:16; + }; + struct { + uint64_t tag:32; + uint64_t tag_type:2; + uint64_t zero_2:3; + uint64_t grp:6; + uint64_t zero_1:1; + uint64_t qos:3; + uint64_t zero_0:1; + uint64_t len:16; + } cn68xx; + struct { + uint64_t tag:32; + uint64_t 
tag_type:2; + uint64_t zero_2:1; + uint64_t grp:4; + uint64_t qos:3; + uint64_t ipprt:6; + uint64_t len:16; + } cn38xx; +}; + +union cvmx_buf_ptr { + void *ptr; + uint64_t u64; + struct { + uint64_t i:1; + uint64_t back:4; + uint64_t pool:3; + uint64_t size:16; + uint64_t addr:40; + } s; +}; + +typedef struct { + union cvmx_wqe_word0 word0; + union cvmx_wqe_word1 word1; + cvmx_pip_wqe_word2 word2; + union cvmx_buf_ptr packet_ptr; + uint8_t packet_data[96]; +} cvmx_wqe_t; + +typedef union { + uint64_t u64; + struct { + uint64_t reserved_20_63:44; + uint64_t link_up:1; /**< Is the physical link up? */ + uint64_t full_duplex:1; /**< 1 if the link is full duplex */ + uint64_t speed:18; /**< Speed of the link in Mbps */ + } s; +} cvmx_helper_link_info_t; + +typedef enum { + CVMX_FAU_REG_32_START = 0, +} cvmx_fau_reg_32_t; + +typedef enum { + CVMX_FAU_OP_SIZE_8 = 0, + CVMX_FAU_OP_SIZE_16 = 1, + CVMX_FAU_OP_SIZE_32 = 2, + CVMX_FAU_OP_SIZE_64 = 3 +} cvmx_fau_op_size_t; + +typedef enum { + CVMX_SPI_MODE_UNKNOWN = 0, + CVMX_SPI_MODE_TX_HALFPLEX = 1, + CVMX_SPI_MODE_RX_HALFPLEX = 2, + CVMX_SPI_MODE_DUPLEX = 3 +} cvmx_spi_mode_t; + +typedef enum { + CVMX_HELPER_INTERFACE_MODE_DISABLED, + CVMX_HELPER_INTERFACE_MODE_RGMII, + CVMX_HELPER_INTERFACE_MODE_GMII, + CVMX_HELPER_INTERFACE_MODE_SPI, + CVMX_HELPER_INTERFACE_MODE_PCIE, + CVMX_HELPER_INTERFACE_MODE_XAUI, + CVMX_HELPER_INTERFACE_MODE_SGMII, + CVMX_HELPER_INTERFACE_MODE_PICMG, + CVMX_HELPER_INTERFACE_MODE_NPI, + CVMX_HELPER_INTERFACE_MODE_LOOP, +} cvmx_helper_interface_mode_t; + +typedef enum { + CVMX_POW_WAIT = 1, + CVMX_POW_NO_WAIT = 0, +} cvmx_pow_wait_t; + +typedef enum { + CVMX_PKO_LOCK_NONE = 0, + CVMX_PKO_LOCK_ATOMIC_TAG = 1, + CVMX_PKO_LOCK_CMD_QUEUE = 2, +} cvmx_pko_lock_t; + +typedef enum { + CVMX_PKO_SUCCESS, + CVMX_PKO_INVALID_PORT, + CVMX_PKO_INVALID_QUEUE, + CVMX_PKO_INVALID_PRIORITY, + CVMX_PKO_NO_MEMORY, + CVMX_PKO_PORT_ALREADY_SETUP, + CVMX_PKO_CMD_QUEUE_INIT_ERROR +} cvmx_pko_status_t; + +enum cvmx_pow_tag_type { + CVMX_POW_TAG_TYPE_ORDERED = 0L, + CVMX_POW_TAG_TYPE_ATOMIC = 1L, + CVMX_POW_TAG_TYPE_NULL = 2L, + CVMX_POW_TAG_TYPE_NULL_NULL = 3L +}; + +union cvmx_ipd_ctl_status { + uint64_t u64; + struct cvmx_ipd_ctl_status_s { + uint64_t reserved_18_63:46; + uint64_t use_sop:1; + uint64_t rst_done:1; + uint64_t clken:1; + uint64_t no_wptr:1; + uint64_t pq_apkt:1; + uint64_t pq_nabuf:1; + uint64_t ipd_full:1; + uint64_t pkt_off:1; + uint64_t len_m8:1; + uint64_t reset:1; + uint64_t addpkt:1; + uint64_t naddbuf:1; + uint64_t pkt_lend:1; + uint64_t wqe_lend:1; + uint64_t pbp_en:1; + uint64_t opc_mode:2; + uint64_t ipd_en:1; + } s; + struct cvmx_ipd_ctl_status_cn30xx { + uint64_t reserved_10_63:54; + uint64_t len_m8:1; + uint64_t reset:1; + uint64_t addpkt:1; + uint64_t naddbuf:1; + uint64_t pkt_lend:1; + uint64_t wqe_lend:1; + uint64_t pbp_en:1; + uint64_t opc_mode:2; + uint64_t ipd_en:1; + } cn30xx; + struct cvmx_ipd_ctl_status_cn38xxp2 { + uint64_t reserved_9_63:55; + uint64_t reset:1; + uint64_t addpkt:1; + uint64_t naddbuf:1; + uint64_t pkt_lend:1; + uint64_t wqe_lend:1; + uint64_t pbp_en:1; + uint64_t opc_mode:2; + uint64_t ipd_en:1; + } cn38xxp2; + struct cvmx_ipd_ctl_status_cn50xx { + uint64_t reserved_15_63:49; + uint64_t no_wptr:1; + uint64_t pq_apkt:1; + uint64_t pq_nabuf:1; + uint64_t ipd_full:1; + uint64_t pkt_off:1; + uint64_t len_m8:1; + uint64_t reset:1; + uint64_t addpkt:1; + uint64_t naddbuf:1; + uint64_t pkt_lend:1; + uint64_t wqe_lend:1; + uint64_t pbp_en:1; + uint64_t opc_mode:2; + uint64_t ipd_en:1; + } cn50xx; + 
struct cvmx_ipd_ctl_status_cn58xx { + uint64_t reserved_12_63:52; + uint64_t ipd_full:1; + uint64_t pkt_off:1; + uint64_t len_m8:1; + uint64_t reset:1; + uint64_t addpkt:1; + uint64_t naddbuf:1; + uint64_t pkt_lend:1; + uint64_t wqe_lend:1; + uint64_t pbp_en:1; + uint64_t opc_mode:2; + uint64_t ipd_en:1; + } cn58xx; + struct cvmx_ipd_ctl_status_cn63xxp1 { + uint64_t reserved_16_63:48; + uint64_t clken:1; + uint64_t no_wptr:1; + uint64_t pq_apkt:1; + uint64_t pq_nabuf:1; + uint64_t ipd_full:1; + uint64_t pkt_off:1; + uint64_t len_m8:1; + uint64_t reset:1; + uint64_t addpkt:1; + uint64_t naddbuf:1; + uint64_t pkt_lend:1; + uint64_t wqe_lend:1; + uint64_t pbp_en:1; + uint64_t opc_mode:2; + uint64_t ipd_en:1; + } cn63xxp1; +}; + +union cvmx_ipd_sub_port_fcs { + uint64_t u64; + struct cvmx_ipd_sub_port_fcs_s { + uint64_t port_bit:32; + uint64_t reserved_32_35:4; + uint64_t port_bit2:4; + uint64_t reserved_40_63:24; + } s; + struct cvmx_ipd_sub_port_fcs_cn30xx { + uint64_t port_bit:3; + uint64_t reserved_3_63:61; + } cn30xx; + struct cvmx_ipd_sub_port_fcs_cn38xx { + uint64_t port_bit:32; + uint64_t reserved_32_63:32; + } cn38xx; +}; + +union cvmx_ipd_sub_port_qos_cnt { + uint64_t u64; + struct cvmx_ipd_sub_port_qos_cnt_s { + uint64_t cnt:32; + uint64_t port_qos:9; + uint64_t reserved_41_63:23; + } s; +}; +typedef struct { + uint32_t dropped_octets; + uint32_t dropped_packets; + uint32_t pci_raw_packets; + uint32_t octets; + uint32_t packets; + uint32_t multicast_packets; + uint32_t broadcast_packets; + uint32_t len_64_packets; + uint32_t len_65_127_packets; + uint32_t len_128_255_packets; + uint32_t len_256_511_packets; + uint32_t len_512_1023_packets; + uint32_t len_1024_1518_packets; + uint32_t len_1519_max_packets; + uint32_t fcs_align_err_packets; + uint32_t runt_packets; + uint32_t runt_crc_packets; + uint32_t oversize_packets; + uint32_t oversize_crc_packets; + uint32_t inb_packets; + uint64_t inb_octets; + uint16_t inb_errors; +} cvmx_pip_port_status_t; + +typedef struct { + uint32_t packets; + uint64_t octets; + uint64_t doorbell; +} cvmx_pko_port_status_t; + +union cvmx_pip_frm_len_chkx { + uint64_t u64; + struct cvmx_pip_frm_len_chkx_s { + uint64_t reserved_32_63:32; + uint64_t maxlen:16; + uint64_t minlen:16; + } s; +}; + +union cvmx_gmxx_rxx_frm_ctl { + uint64_t u64; + struct cvmx_gmxx_rxx_frm_ctl_s { + uint64_t pre_chk:1; + uint64_t pre_strp:1; + uint64_t ctl_drp:1; + uint64_t ctl_bck:1; + uint64_t ctl_mcst:1; + uint64_t ctl_smac:1; + uint64_t pre_free:1; + uint64_t vlan_len:1; + uint64_t pad_len:1; + uint64_t pre_align:1; + uint64_t null_dis:1; + uint64_t reserved_11_11:1; + uint64_t ptp_mode:1; + uint64_t reserved_13_63:51; + } s; + struct cvmx_gmxx_rxx_frm_ctl_cn30xx { + uint64_t pre_chk:1; + uint64_t pre_strp:1; + uint64_t ctl_drp:1; + uint64_t ctl_bck:1; + uint64_t ctl_mcst:1; + uint64_t ctl_smac:1; + uint64_t pre_free:1; + uint64_t vlan_len:1; + uint64_t pad_len:1; + uint64_t reserved_9_63:55; + } cn30xx; + struct cvmx_gmxx_rxx_frm_ctl_cn31xx { + uint64_t pre_chk:1; + uint64_t pre_strp:1; + uint64_t ctl_drp:1; + uint64_t ctl_bck:1; + uint64_t ctl_mcst:1; + uint64_t ctl_smac:1; + uint64_t pre_free:1; + uint64_t vlan_len:1; + uint64_t reserved_8_63:56; + } cn31xx; + struct cvmx_gmxx_rxx_frm_ctl_cn50xx { + uint64_t pre_chk:1; + uint64_t pre_strp:1; + uint64_t ctl_drp:1; + uint64_t ctl_bck:1; + uint64_t ctl_mcst:1; + uint64_t ctl_smac:1; + uint64_t pre_free:1; + uint64_t reserved_7_8:2; + uint64_t pre_align:1; + uint64_t null_dis:1; + uint64_t reserved_11_63:53; + } cn50xx; + 
struct cvmx_gmxx_rxx_frm_ctl_cn56xxp1 { + uint64_t pre_chk:1; + uint64_t pre_strp:1; + uint64_t ctl_drp:1; + uint64_t ctl_bck:1; + uint64_t ctl_mcst:1; + uint64_t ctl_smac:1; + uint64_t pre_free:1; + uint64_t reserved_7_8:2; + uint64_t pre_align:1; + uint64_t reserved_10_63:54; + } cn56xxp1; + struct cvmx_gmxx_rxx_frm_ctl_cn58xx { + uint64_t pre_chk:1; + uint64_t pre_strp:1; + uint64_t ctl_drp:1; + uint64_t ctl_bck:1; + uint64_t ctl_mcst:1; + uint64_t ctl_smac:1; + uint64_t pre_free:1; + uint64_t vlan_len:1; + uint64_t pad_len:1; + uint64_t pre_align:1; + uint64_t null_dis:1; + uint64_t reserved_11_63:53; + } cn58xx; + struct cvmx_gmxx_rxx_frm_ctl_cn61xx { + uint64_t pre_chk:1; + uint64_t pre_strp:1; + uint64_t ctl_drp:1; + uint64_t ctl_bck:1; + uint64_t ctl_mcst:1; + uint64_t ctl_smac:1; + uint64_t pre_free:1; + uint64_t reserved_7_8:2; + uint64_t pre_align:1; + uint64_t null_dis:1; + uint64_t reserved_11_11:1; + uint64_t ptp_mode:1; + uint64_t reserved_13_63:51; + } cn61xx; +}; + +union cvmx_gmxx_rxx_int_reg { + uint64_t u64; + struct cvmx_gmxx_rxx_int_reg_s { + uint64_t minerr:1; + uint64_t carext:1; + uint64_t maxerr:1; + uint64_t jabber:1; + uint64_t fcserr:1; + uint64_t alnerr:1; + uint64_t lenerr:1; + uint64_t rcverr:1; + uint64_t skperr:1; + uint64_t niberr:1; + uint64_t ovrerr:1; + uint64_t pcterr:1; + uint64_t rsverr:1; + uint64_t falerr:1; + uint64_t coldet:1; + uint64_t ifgerr:1; + uint64_t phy_link:1; + uint64_t phy_spd:1; + uint64_t phy_dupx:1; + uint64_t pause_drp:1; + uint64_t loc_fault:1; + uint64_t rem_fault:1; + uint64_t bad_seq:1; + uint64_t bad_term:1; + uint64_t unsop:1; + uint64_t uneop:1; + uint64_t undat:1; + uint64_t hg2fld:1; + uint64_t hg2cc:1; + uint64_t reserved_29_63:35; + } s; + struct cvmx_gmxx_rxx_int_reg_cn30xx { + uint64_t minerr:1; + uint64_t carext:1; + uint64_t maxerr:1; + uint64_t jabber:1; + uint64_t fcserr:1; + uint64_t alnerr:1; + uint64_t lenerr:1; + uint64_t rcverr:1; + uint64_t skperr:1; + uint64_t niberr:1; + uint64_t ovrerr:1; + uint64_t pcterr:1; + uint64_t rsverr:1; + uint64_t falerr:1; + uint64_t coldet:1; + uint64_t ifgerr:1; + uint64_t phy_link:1; + uint64_t phy_spd:1; + uint64_t phy_dupx:1; + uint64_t reserved_19_63:45; + } cn30xx; + struct cvmx_gmxx_rxx_int_reg_cn50xx { + uint64_t reserved_0_0:1; + uint64_t carext:1; + uint64_t reserved_2_2:1; + uint64_t jabber:1; + uint64_t fcserr:1; + uint64_t alnerr:1; + uint64_t reserved_6_6:1; + uint64_t rcverr:1; + uint64_t skperr:1; + uint64_t niberr:1; + uint64_t ovrerr:1; + uint64_t pcterr:1; + uint64_t rsverr:1; + uint64_t falerr:1; + uint64_t coldet:1; + uint64_t ifgerr:1; + uint64_t phy_link:1; + uint64_t phy_spd:1; + uint64_t phy_dupx:1; + uint64_t pause_drp:1; + uint64_t reserved_20_63:44; + } cn50xx; + struct cvmx_gmxx_rxx_int_reg_cn52xx { + uint64_t reserved_0_0:1; + uint64_t carext:1; + uint64_t reserved_2_2:1; + uint64_t jabber:1; + uint64_t fcserr:1; + uint64_t reserved_5_6:2; + uint64_t rcverr:1; + uint64_t skperr:1; + uint64_t reserved_9_9:1; + uint64_t ovrerr:1; + uint64_t pcterr:1; + uint64_t rsverr:1; + uint64_t falerr:1; + uint64_t coldet:1; + uint64_t ifgerr:1; + uint64_t reserved_16_18:3; + uint64_t pause_drp:1; + uint64_t loc_fault:1; + uint64_t rem_fault:1; + uint64_t bad_seq:1; + uint64_t bad_term:1; + uint64_t unsop:1; + uint64_t uneop:1; + uint64_t undat:1; + uint64_t hg2fld:1; + uint64_t hg2cc:1; + uint64_t reserved_29_63:35; + } cn52xx; + struct cvmx_gmxx_rxx_int_reg_cn56xxp1 { + uint64_t reserved_0_0:1; + uint64_t carext:1; + uint64_t reserved_2_2:1; + uint64_t 
jabber:1; + uint64_t fcserr:1; + uint64_t reserved_5_6:2; + uint64_t rcverr:1; + uint64_t skperr:1; + uint64_t reserved_9_9:1; + uint64_t ovrerr:1; + uint64_t pcterr:1; + uint64_t rsverr:1; + uint64_t falerr:1; + uint64_t coldet:1; + uint64_t ifgerr:1; + uint64_t reserved_16_18:3; + uint64_t pause_drp:1; + uint64_t loc_fault:1; + uint64_t rem_fault:1; + uint64_t bad_seq:1; + uint64_t bad_term:1; + uint64_t unsop:1; + uint64_t uneop:1; + uint64_t undat:1; + uint64_t reserved_27_63:37; + } cn56xxp1; + struct cvmx_gmxx_rxx_int_reg_cn58xx { + uint64_t minerr:1; + uint64_t carext:1; + uint64_t maxerr:1; + uint64_t jabber:1; + uint64_t fcserr:1; + uint64_t alnerr:1; + uint64_t lenerr:1; + uint64_t rcverr:1; + uint64_t skperr:1; + uint64_t niberr:1; + uint64_t ovrerr:1; + uint64_t pcterr:1; + uint64_t rsverr:1; + uint64_t falerr:1; + uint64_t coldet:1; + uint64_t ifgerr:1; + uint64_t phy_link:1; + uint64_t phy_spd:1; + uint64_t phy_dupx:1; + uint64_t pause_drp:1; + uint64_t reserved_20_63:44; + } cn58xx; + struct cvmx_gmxx_rxx_int_reg_cn61xx { + uint64_t minerr:1; + uint64_t carext:1; + uint64_t reserved_2_2:1; + uint64_t jabber:1; + uint64_t fcserr:1; + uint64_t reserved_5_6:2; + uint64_t rcverr:1; + uint64_t skperr:1; + uint64_t reserved_9_9:1; + uint64_t ovrerr:1; + uint64_t pcterr:1; + uint64_t rsverr:1; + uint64_t falerr:1; + uint64_t coldet:1; + uint64_t ifgerr:1; + uint64_t reserved_16_18:3; + uint64_t pause_drp:1; + uint64_t loc_fault:1; + uint64_t rem_fault:1; + uint64_t bad_seq:1; + uint64_t bad_term:1; + uint64_t unsop:1; + uint64_t uneop:1; + uint64_t undat:1; + uint64_t hg2fld:1; + uint64_t hg2cc:1; + uint64_t reserved_29_63:35; + } cn61xx; +}; + +union cvmx_gmxx_prtx_cfg { + uint64_t u64; + struct cvmx_gmxx_prtx_cfg_s { + uint64_t reserved_22_63:42; + uint64_t pknd:6; + uint64_t reserved_14_15:2; + uint64_t tx_idle:1; + uint64_t rx_idle:1; + uint64_t reserved_9_11:3; + uint64_t speed_msb:1; + uint64_t reserved_4_7:4; + uint64_t slottime:1; + uint64_t duplex:1; + uint64_t speed:1; + uint64_t en:1; + } s; + struct cvmx_gmxx_prtx_cfg_cn30xx { + uint64_t reserved_4_63:60; + uint64_t slottime:1; + uint64_t duplex:1; + uint64_t speed:1; + uint64_t en:1; + } cn30xx; + struct cvmx_gmxx_prtx_cfg_cn52xx { + uint64_t reserved_14_63:50; + uint64_t tx_idle:1; + uint64_t rx_idle:1; + uint64_t reserved_9_11:3; + uint64_t speed_msb:1; + uint64_t reserved_4_7:4; + uint64_t slottime:1; + uint64_t duplex:1; + uint64_t speed:1; + uint64_t en:1; + } cn52xx; +}; + +union cvmx_gmxx_rxx_adr_ctl { + uint64_t u64; + struct cvmx_gmxx_rxx_adr_ctl_s { + uint64_t reserved_4_63:60; + uint64_t cam_mode:1; + uint64_t mcst:2; + uint64_t bcst:1; + } s; +}; + +union cvmx_pip_prt_tagx { + uint64_t u64; + struct cvmx_pip_prt_tagx_s { + uint64_t reserved_54_63:10; + uint64_t portadd_en:1; + uint64_t inc_hwchk:1; + uint64_t reserved_50_51:2; + uint64_t grptagbase_msb:2; + uint64_t reserved_46_47:2; + uint64_t grptagmask_msb:2; + uint64_t reserved_42_43:2; + uint64_t grp_msb:2; + uint64_t grptagbase:4; + uint64_t grptagmask:4; + uint64_t grptag:1; + uint64_t grptag_mskip:1; + uint64_t tag_mode:2; + uint64_t inc_vs:2; + uint64_t inc_vlan:1; + uint64_t inc_prt_flag:1; + uint64_t ip6_dprt_flag:1; + uint64_t ip4_dprt_flag:1; + uint64_t ip6_sprt_flag:1; + uint64_t ip4_sprt_flag:1; + uint64_t ip6_nxth_flag:1; + uint64_t ip4_pctl_flag:1; + uint64_t ip6_dst_flag:1; + uint64_t ip4_dst_flag:1; + uint64_t ip6_src_flag:1; + uint64_t ip4_src_flag:1; + uint64_t tcp6_tag_type:2; + uint64_t tcp4_tag_type:2; + uint64_t ip6_tag_type:2; + 
uint64_t ip4_tag_type:2; + uint64_t non_tag_type:2; + uint64_t grp:4; + } s; + struct cvmx_pip_prt_tagx_cn30xx { + uint64_t reserved_40_63:24; + uint64_t grptagbase:4; + uint64_t grptagmask:4; + uint64_t grptag:1; + uint64_t reserved_30_30:1; + uint64_t tag_mode:2; + uint64_t inc_vs:2; + uint64_t inc_vlan:1; + uint64_t inc_prt_flag:1; + uint64_t ip6_dprt_flag:1; + uint64_t ip4_dprt_flag:1; + uint64_t ip6_sprt_flag:1; + uint64_t ip4_sprt_flag:1; + uint64_t ip6_nxth_flag:1; + uint64_t ip4_pctl_flag:1; + uint64_t ip6_dst_flag:1; + uint64_t ip4_dst_flag:1; + uint64_t ip6_src_flag:1; + uint64_t ip4_src_flag:1; + uint64_t tcp6_tag_type:2; + uint64_t tcp4_tag_type:2; + uint64_t ip6_tag_type:2; + uint64_t ip4_tag_type:2; + uint64_t non_tag_type:2; + uint64_t grp:4; + } cn30xx; + struct cvmx_pip_prt_tagx_cn50xx { + uint64_t reserved_40_63:24; + uint64_t grptagbase:4; + uint64_t grptagmask:4; + uint64_t grptag:1; + uint64_t grptag_mskip:1; + uint64_t tag_mode:2; + uint64_t inc_vs:2; + uint64_t inc_vlan:1; + uint64_t inc_prt_flag:1; + uint64_t ip6_dprt_flag:1; + uint64_t ip4_dprt_flag:1; + uint64_t ip6_sprt_flag:1; + uint64_t ip4_sprt_flag:1; + uint64_t ip6_nxth_flag:1; + uint64_t ip4_pctl_flag:1; + uint64_t ip6_dst_flag:1; + uint64_t ip4_dst_flag:1; + uint64_t ip6_src_flag:1; + uint64_t ip4_src_flag:1; + uint64_t tcp6_tag_type:2; + uint64_t tcp4_tag_type:2; + uint64_t ip6_tag_type:2; + uint64_t ip4_tag_type:2; + uint64_t non_tag_type:2; + uint64_t grp:4; + } cn50xx; +}; + +union cvmx_spxx_int_reg { + uint64_t u64; + struct cvmx_spxx_int_reg_s { + uint64_t reserved_32_63:32; + uint64_t spf:1; + uint64_t reserved_12_30:19; + uint64_t calerr:1; + uint64_t syncerr:1; + uint64_t diperr:1; + uint64_t tpaovr:1; + uint64_t rsverr:1; + uint64_t drwnng:1; + uint64_t clserr:1; + uint64_t spiovr:1; + uint64_t reserved_2_3:2; + uint64_t abnorm:1; + uint64_t prtnxa:1; + } s; +}; + +union cvmx_spxx_int_msk { + uint64_t u64; + struct cvmx_spxx_int_msk_s { + uint64_t reserved_12_63:52; + uint64_t calerr:1; + uint64_t syncerr:1; + uint64_t diperr:1; + uint64_t tpaovr:1; + uint64_t rsverr:1; + uint64_t drwnng:1; + uint64_t clserr:1; + uint64_t spiovr:1; + uint64_t reserved_2_3:2; + uint64_t abnorm:1; + uint64_t prtnxa:1; + } s; +}; + +union cvmx_pow_wq_int { + uint64_t u64; + struct cvmx_pow_wq_int_s { + uint64_t wq_int:16; + uint64_t iq_dis:16; + uint64_t reserved_32_63:32; + } s; +}; + +union cvmx_sso_wq_int_thrx { + uint64_t u64; + struct { + uint64_t iq_thr:12; + uint64_t reserved_12_13:2; + uint64_t ds_thr:12; + uint64_t reserved_26_27:2; + uint64_t tc_thr:4; + uint64_t tc_en:1; + uint64_t reserved_33_63:31; + } s; +}; + +union cvmx_stxx_int_reg { + uint64_t u64; + struct cvmx_stxx_int_reg_s { + uint64_t reserved_9_63:55; + uint64_t syncerr:1; + uint64_t frmerr:1; + uint64_t unxfrm:1; + uint64_t nosync:1; + uint64_t diperr:1; + uint64_t datovr:1; + uint64_t ovrbst:1; + uint64_t calpar1:1; + uint64_t calpar0:1; + } s; +}; + +union cvmx_stxx_int_msk { + uint64_t u64; + struct cvmx_stxx_int_msk_s { + uint64_t reserved_8_63:56; + uint64_t frmerr:1; + uint64_t unxfrm:1; + uint64_t nosync:1; + uint64_t diperr:1; + uint64_t datovr:1; + uint64_t ovrbst:1; + uint64_t calpar1:1; + uint64_t calpar0:1; + } s; +}; + +union cvmx_pow_wq_int_pc { + uint64_t u64; + struct cvmx_pow_wq_int_pc_s { + uint64_t reserved_0_7:8; + uint64_t pc_thr:20; + uint64_t reserved_28_31:4; + uint64_t pc:28; + uint64_t reserved_60_63:4; + } s; +}; + +union cvmx_pow_wq_int_thrx { + uint64_t u64; + struct cvmx_pow_wq_int_thrx_s { + uint64_t 
reserved_29_63:35; + uint64_t tc_en:1; + uint64_t tc_thr:4; + uint64_t reserved_23_23:1; + uint64_t ds_thr:11; + uint64_t reserved_11_11:1; + uint64_t iq_thr:11; + } s; + struct cvmx_pow_wq_int_thrx_cn30xx { + uint64_t reserved_29_63:35; + uint64_t tc_en:1; + uint64_t tc_thr:4; + uint64_t reserved_18_23:6; + uint64_t ds_thr:6; + uint64_t reserved_6_11:6; + uint64_t iq_thr:6; + } cn30xx; + struct cvmx_pow_wq_int_thrx_cn31xx { + uint64_t reserved_29_63:35; + uint64_t tc_en:1; + uint64_t tc_thr:4; + uint64_t reserved_20_23:4; + uint64_t ds_thr:8; + uint64_t reserved_8_11:4; + uint64_t iq_thr:8; + } cn31xx; + struct cvmx_pow_wq_int_thrx_cn52xx { + uint64_t reserved_29_63:35; + uint64_t tc_en:1; + uint64_t tc_thr:4; + uint64_t reserved_21_23:3; + uint64_t ds_thr:9; + uint64_t reserved_9_11:3; + uint64_t iq_thr:9; + } cn52xx; + struct cvmx_pow_wq_int_thrx_cn63xx { + uint64_t reserved_29_63:35; + uint64_t tc_en:1; + uint64_t tc_thr:4; + uint64_t reserved_22_23:2; + uint64_t ds_thr:10; + uint64_t reserved_10_11:2; + uint64_t iq_thr:10; + } cn63xx; +}; + +union cvmx_npi_rsl_int_blocks { + uint64_t u64; + struct cvmx_npi_rsl_int_blocks_s { + uint64_t reserved_32_63:32; + uint64_t rint_31:1; + uint64_t iob:1; + uint64_t reserved_28_29:2; + uint64_t rint_27:1; + uint64_t rint_26:1; + uint64_t rint_25:1; + uint64_t rint_24:1; + uint64_t asx1:1; + uint64_t asx0:1; + uint64_t rint_21:1; + uint64_t pip:1; + uint64_t spx1:1; + uint64_t spx0:1; + uint64_t lmc:1; + uint64_t l2c:1; + uint64_t rint_15:1; + uint64_t reserved_13_14:2; + uint64_t pow:1; + uint64_t tim:1; + uint64_t pko:1; + uint64_t ipd:1; + uint64_t rint_8:1; + uint64_t zip:1; + uint64_t dfa:1; + uint64_t fpa:1; + uint64_t key:1; + uint64_t npi:1; + uint64_t gmx1:1; + uint64_t gmx0:1; + uint64_t mio:1; + } s; + struct cvmx_npi_rsl_int_blocks_cn30xx { + uint64_t reserved_32_63:32; + uint64_t rint_31:1; + uint64_t iob:1; + uint64_t rint_29:1; + uint64_t rint_28:1; + uint64_t rint_27:1; + uint64_t rint_26:1; + uint64_t rint_25:1; + uint64_t rint_24:1; + uint64_t asx1:1; + uint64_t asx0:1; + uint64_t rint_21:1; + uint64_t pip:1; + uint64_t spx1:1; + uint64_t spx0:1; + uint64_t lmc:1; + uint64_t l2c:1; + uint64_t rint_15:1; + uint64_t rint_14:1; + uint64_t usb:1; + uint64_t pow:1; + uint64_t tim:1; + uint64_t pko:1; + uint64_t ipd:1; + uint64_t rint_8:1; + uint64_t zip:1; + uint64_t dfa:1; + uint64_t fpa:1; + uint64_t key:1; + uint64_t npi:1; + uint64_t gmx1:1; + uint64_t gmx0:1; + uint64_t mio:1; + } cn30xx; + struct cvmx_npi_rsl_int_blocks_cn38xx { + uint64_t reserved_32_63:32; + uint64_t rint_31:1; + uint64_t iob:1; + uint64_t rint_29:1; + uint64_t rint_28:1; + uint64_t rint_27:1; + uint64_t rint_26:1; + uint64_t rint_25:1; + uint64_t rint_24:1; + uint64_t asx1:1; + uint64_t asx0:1; + uint64_t rint_21:1; + uint64_t pip:1; + uint64_t spx1:1; + uint64_t spx0:1; + uint64_t lmc:1; + uint64_t l2c:1; + uint64_t rint_15:1; + uint64_t rint_14:1; + uint64_t rint_13:1; + uint64_t pow:1; + uint64_t tim:1; + uint64_t pko:1; + uint64_t ipd:1; + uint64_t rint_8:1; + uint64_t zip:1; + uint64_t dfa:1; + uint64_t fpa:1; + uint64_t key:1; + uint64_t npi:1; + uint64_t gmx1:1; + uint64_t gmx0:1; + uint64_t mio:1; + } cn38xx; + struct cvmx_npi_rsl_int_blocks_cn50xx { + uint64_t reserved_31_63:33; + uint64_t iob:1; + uint64_t lmc1:1; + uint64_t agl:1; + uint64_t reserved_24_27:4; + uint64_t asx1:1; + uint64_t asx0:1; + uint64_t reserved_21_21:1; + uint64_t pip:1; + uint64_t spx1:1; + uint64_t spx0:1; + uint64_t lmc:1; + uint64_t l2c:1; + uint64_t reserved_15_15:1; + 
uint64_t rad:1; + uint64_t usb:1; + uint64_t pow:1; + uint64_t tim:1; + uint64_t pko:1; + uint64_t ipd:1; + uint64_t reserved_8_8:1; + uint64_t zip:1; + uint64_t dfa:1; + uint64_t fpa:1; + uint64_t key:1; + uint64_t npi:1; + uint64_t gmx1:1; + uint64_t gmx0:1; + uint64_t mio:1; + } cn50xx; +}; + +typedef union { + uint64_t u64; + struct { + uint64_t total_bytes:16; + uint64_t segs:6; + uint64_t dontfree:1; + uint64_t ignore_i:1; + uint64_t ipoffp1:7; + uint64_t gather:1; + uint64_t rsp:1; + uint64_t wqp:1; + uint64_t n2:1; + uint64_t le:1; + uint64_t reg0:11; + uint64_t subone0:1; + uint64_t reg1:11; + uint64_t subone1:1; + uint64_t size0:2; + uint64_t size1:2; + } s; +} cvmx_pko_command_word0_t; + +union cvmx_ciu_timx { + uint64_t u64; + struct cvmx_ciu_timx_s { + uint64_t reserved_37_63:27; + uint64_t one_shot:1; + uint64_t len:36; + } s; +}; + +union cvmx_gmxx_rxx_rx_inbnd { + uint64_t u64; + struct cvmx_gmxx_rxx_rx_inbnd_s { + uint64_t status:1; + uint64_t speed:2; + uint64_t duplex:1; + uint64_t reserved_4_63:60; + } s; +}; + +static inline int32_t cvmx_fau_fetch_and_add32(cvmx_fau_reg_32_t reg, + int32_t value) +{ + return value; +} + +static inline void cvmx_fau_atomic_add32(cvmx_fau_reg_32_t reg, int32_t value) +{ } + +static inline void cvmx_fau_atomic_write32(cvmx_fau_reg_32_t reg, int32_t value) +{ } + +static inline uint64_t cvmx_scratch_read64(uint64_t address) +{ + return 0; +} + +static inline void cvmx_scratch_write64(uint64_t address, uint64_t value) +{ } + +static inline int cvmx_wqe_get_grp(cvmx_wqe_t *work) +{ + return 0; +} + +static inline void *cvmx_phys_to_ptr(uint64_t physical_address) +{ + return (void *)(physical_address); +} + +static inline uint64_t cvmx_ptr_to_phys(void *ptr) +{ + return (unsigned long)ptr; +} + +static inline int cvmx_helper_get_interface_num(int ipd_port) +{ + return ipd_port; +} + +static inline int cvmx_helper_get_interface_index_num(int ipd_port) +{ + return ipd_port; +} + +static inline void cvmx_fpa_enable(void) +{ } + +static inline uint64_t cvmx_read_csr(uint64_t csr_addr) +{ + return 0; +} + +static inline void cvmx_write_csr(uint64_t csr_addr, uint64_t val) +{ } + +static inline int cvmx_helper_setup_red(int pass_thresh, int drop_thresh) +{ + return 0; +} + +static inline void *cvmx_fpa_alloc(uint64_t pool) +{ + return NULL; +} + +static inline void cvmx_fpa_free(void *ptr, uint64_t pool, + uint64_t num_cache_lines) +{ } + +static inline int octeon_is_simulation(void) +{ + return 1; +} + +static inline void cvmx_pip_get_port_status(uint64_t port_num, uint64_t clear, + cvmx_pip_port_status_t *status) +{ } + +static inline void cvmx_pko_get_port_status(uint64_t port_num, uint64_t clear, + cvmx_pko_port_status_t *status) +{ } + +static inline cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int + interface) +{ + return 0; +} + +static inline cvmx_helper_link_info_t cvmx_helper_link_get(int ipd_port) +{ + cvmx_helper_link_info_t ret = { .u64 = 0 }; + + return ret; +} + +static inline int cvmx_helper_link_set(int ipd_port, + cvmx_helper_link_info_t link_info) +{ + return 0; +} + +static inline int cvmx_helper_initialize_packet_io_global(void) +{ + return 0; +} + +static inline int cvmx_helper_get_number_of_interfaces(void) +{ + return 2; +} + +static inline int cvmx_helper_ports_on_interface(int interface) +{ + return 1; +} + +static inline int cvmx_helper_get_ipd_port(int interface, int port) +{ + return 0; +} + +static inline int cvmx_helper_ipd_and_packet_input_enable(void) +{ + return 0; +} + +static inline void 
cvmx_ipd_disable(void) +{ } + +static inline void cvmx_ipd_free_ptr(void) +{ } + +static inline void cvmx_pko_disable(void) +{ } + +static inline void cvmx_pko_shutdown(void) +{ } + +static inline int cvmx_pko_get_base_queue_per_core(int port, int core) +{ + return port; +} + +static inline int cvmx_pko_get_base_queue(int port) +{ + return port; +} + +static inline int cvmx_pko_get_num_queues(int port) +{ + return port; +} + +static inline unsigned int cvmx_get_core_num(void) +{ + return 0; +} + +static inline void cvmx_pow_work_request_async_nocheck(int scr_addr, + cvmx_pow_wait_t wait) +{ } + +static inline void cvmx_pow_work_request_async(int scr_addr, + cvmx_pow_wait_t wait) +{ } + +static inline cvmx_wqe_t *cvmx_pow_work_response_async(int scr_addr) +{ + cvmx_wqe_t *wqe = (void *)(unsigned long)scr_addr; + + return wqe; +} + +static inline cvmx_wqe_t *cvmx_pow_work_request_sync(cvmx_pow_wait_t wait) +{ + return (void *)(unsigned long)wait; +} + +static inline int cvmx_spi_restart_interface(int interface, + cvmx_spi_mode_t mode, int timeout) +{ + return 0; +} + +static inline void cvmx_fau_async_fetch_and_add32(uint64_t scraddr, + cvmx_fau_reg_32_t reg, + int32_t value) +{ } + +static inline union cvmx_gmxx_rxx_rx_inbnd cvmx_spi4000_check_speed( + int interface, + int port) +{ + union cvmx_gmxx_rxx_rx_inbnd r; + r.u64 = 0; + return r; +} + +static inline void cvmx_pko_send_packet_prepare(uint64_t port, uint64_t queue, + cvmx_pko_lock_t use_locking) +{ } + +static inline cvmx_pko_status_t cvmx_pko_send_packet_finish(uint64_t port, + uint64_t queue, cvmx_pko_command_word0_t pko_command, + union cvmx_buf_ptr packet, cvmx_pko_lock_t use_locking) +{ + cvmx_pko_status_t ret = 0; + + return ret; +} + +static inline void cvmx_wqe_set_port(cvmx_wqe_t *work, int port) +{ } + +static inline void cvmx_wqe_set_qos(cvmx_wqe_t *work, int qos) +{ } + +static inline int cvmx_wqe_get_qos(cvmx_wqe_t *work) +{ + return 0; +} + +static inline void cvmx_wqe_set_grp(cvmx_wqe_t *work, int grp) +{ } + +static inline void cvmx_pow_work_submit(cvmx_wqe_t *wqp, uint32_t tag, + enum cvmx_pow_tag_type tag_type, + uint64_t qos, uint64_t grp) +{ } + +#define CVMX_ASXX_RX_CLK_SETX(a, b) ((a)+(b)) +#define CVMX_ASXX_TX_CLK_SETX(a, b) ((a)+(b)) +#define CVMX_CIU_TIMX(a) (a) +#define CVMX_GMXX_RXX_ADR_CAM0(a, b) ((a)+(b)) +#define CVMX_GMXX_RXX_ADR_CAM1(a, b) ((a)+(b)) +#define CVMX_GMXX_RXX_ADR_CAM2(a, b) ((a)+(b)) +#define CVMX_GMXX_RXX_ADR_CAM3(a, b) ((a)+(b)) +#define CVMX_GMXX_RXX_ADR_CAM4(a, b) ((a)+(b)) +#define CVMX_GMXX_RXX_ADR_CAM5(a, b) ((a)+(b)) +#define CVMX_GMXX_RXX_FRM_CTL(a, b) ((a)+(b)) +#define CVMX_GMXX_RXX_INT_REG(a, b) ((a)+(b)) +#define CVMX_GMXX_SMACX(a, b) ((a)+(b)) +#define CVMX_PIP_PRT_TAGX(a) (a) +#define CVMX_POW_PP_GRP_MSKX(a) (a) +#define CVMX_POW_WQ_INT_THRX(a) (a) +#define CVMX_SPXX_INT_MSK(a) (a) +#define CVMX_SPXX_INT_REG(a) (a) +#define CVMX_SSO_PPX_GRP_MSK(a) (a) +#define CVMX_SSO_WQ_INT_THRX(a) (a) +#define CVMX_STXX_INT_MSK(a) (a) +#define CVMX_STXX_INT_REG(a) (a) diff --git a/drivers/staging/qlge/Kconfig b/drivers/staging/qlge/Kconfig new file mode 100644 index 000000000000..a3cb25a3ab80 --- /dev/null +++ b/drivers/staging/qlge/Kconfig @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0 + +config QLGE + tristate "QLogic QLGE 10Gb Ethernet Driver Support" + depends on ETHERNET && PCI + help + This driver supports QLogic ISP8XXX 10Gb Ethernet cards. + + To compile this driver as a module, choose M here. The module will be + called qlge. 
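For context, the octeon-stubs header added above exists so the staging octeon ethernet driver can be compile-tested on non-OCTEON kernels: every executive call collapses to a no-op or a constant. A minimal sketch of how driver code might poll for work through this API (example_rx_poll is a hypothetical name, not a function from the driver; the field paths come from the cvmx_wqe_t layout above):

static int example_rx_poll(int budget)
{
	cvmx_wqe_t *work;
	void *data;
	int len;
	int done = 0;

	while (done < budget) {
		/* With the stubs this returns NULL, so the loop is inert. */
		work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);
		if (!work)
			break;

		/* Translate the hardware buffer address to a kernel pointer. */
		data = cvmx_phys_to_ptr(work->packet_ptr.s.addr);
		len = work->word1.cn38xx.len;

		/* ... build an skb from (data, len) and pass it up ... */
		(void)data;
		(void)len;
		done++;
	}
	return done;
}

The same source then builds unchanged against either these stubs or the real OCTEON executive headers.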
diff --git a/drivers/net/ethernet/qlogic/qlge/Makefile b/drivers/staging/qlge/Makefile
index 1dc2568e820c..1dc2568e820c 100644
--- a/drivers/net/ethernet/qlogic/qlge/Makefile
+++ b/drivers/staging/qlge/Makefile
diff --git a/drivers/staging/qlge/TODO b/drivers/staging/qlge/TODO
new file mode 100644
index 000000000000..51c509084e80
--- /dev/null
+++ b/drivers/staging/qlge/TODO
@@ -0,0 +1,46 @@
+* reception stalls permanently (until admin intervention) if the rx buffer
+  queues become empty because of allocation failures (ex. under memory
+  pressure)
+* commit 7c734359d350 ("qlge: Size RX buffers based on MTU.", v2.6.33-rc1)
+  introduced dead code in the receive routines, which, by the author's own
+  admission, should be rewritten anyway; see the comment above
+  ql_build_rx_skb(). That function is now used exclusively to handle packets
+  that underwent header splitting, but it still contains code to handle
+  non-split cases.
+* truesize accounting is incorrect (ex: a 9000B frame has skb->truesize 10280
+  while containing two frags of order-1 allocations, i.e. >16K)
+* while in that area, using two 8k buffers to store one 9k frame is a poor
+  choice of buffer size.
+* in the "chain of large buffers" case, the driver uses an skb allocated with
+  headroom but only puts data in the frags.
+* rename "rx" queues to "completion" queues. Calling tx completion queues "rx
+  queues" is confusing.
+* struct rx_ring is used for rx and tx completions, with some members relevant
+  to one case only
+* there is an inordinate amount of disparate debugging code, most of which is
+  of questionable value. In particular, qlge_dbg.c has hundreds of lines of
+  code bitrotting away in ifdef land (doesn't compile since commit
+  18c49b91777c ("qlge: do vlan cleanup", v3.1-rc1), 8 years ago).
+* triggering an ethtool regdump will hexdump a 176k struct to dmesg depending
+  on some module parameters.
+* the flow control implementation in firmware is buggy (sends a flood of pause
+  frames, resets the link, device and driver buffer queues become
+  desynchronized), disable it by default
+* some structures are initialized redundantly (ex. memset 0 after
+  alloc_etherdev())
+* the driver has a habit of using runtime checks where compile-time checks are
+  possible (ex. ql_free_rx_buffers(), ql_alloc_rx_buffers())
+* reorder struct members to avoid holes if it doesn't impact performance
+* in terms of namespace, the driver uses either qlge_, ql_ (used by
+  other qlogic drivers, with clashes, ex: ql_sem_spinlock) or nothing (with
+  clashes, ex: struct ob_mac_iocb_req). Rename everything to use the "qlge_"
+  prefix.
+* avoid legacy/deprecated APIs (ex. replace pci_dma_*, replace pci_enable_msi,
+  use pci_iomap)
+* some "while" loops could be rewritten with simple "for" (ex.
+  ql_wait_reg_rdy(), ql_start_rx_ring())
+* remove duplicate and useless comments
+* fix weird line wrapping (all over, ex. the ql_set_routing_reg() calls in
+  qlge_set_multicast_list()).
+* fix weird indentation (all over, ex.
the for loops in qlge_get_stats()) +* fix checkpatch issues diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/staging/qlge/qlge.h index ad7c5eb8a3b6..ad7c5eb8a3b6 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge.h +++ b/drivers/staging/qlge/qlge.h diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/staging/qlge/qlge_dbg.c index 31389ab8bdf7..31389ab8bdf7 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c +++ b/drivers/staging/qlge/qlge_dbg.c diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/staging/qlge/qlge_ethtool.c index a6886cc5654c..a6886cc5654c 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c +++ b/drivers/staging/qlge/qlge_ethtool.c diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c index 6cae33072496..6cae33072496 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/staging/qlge/qlge_main.c diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c b/drivers/staging/qlge/qlge_mpi.c index 957c72985a06..957c72985a06 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c +++ b/drivers/staging/qlge/qlge_mpi.c diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c index 40dd573e73c3..1d1440d43002 100644 --- a/drivers/staging/unisys/visornic/visornic_main.c +++ b/drivers/staging/unisys/visornic/visornic_main.c @@ -284,9 +284,9 @@ static int visor_copy_fragsinfo_from_skb(struct sk_buff *skb, for (frag = 0; frag < numfrags; frag++) { count = add_physinfo_entries(page_to_pfn( skb_frag_page(&skb_shinfo(skb)->frags[frag])), - skb_shinfo(skb)->frags[frag].page_offset, - skb_shinfo(skb)->frags[frag].size, count, - frags_max, frags); + skb_frag_off(&skb_shinfo(skb)->frags[frag]), + skb_frag_size(&skb_shinfo(skb)->frags[frag]), + count, frags_max, frags); /* add_physinfo_entries only returns * zero if the frags array is out of room * That should never happen because we diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c index 24309d937d8c..fcdc4211e3c2 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_target.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c @@ -899,9 +899,9 @@ cxgbit_handle_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr, skb_frag_t *dfrag = &ssi->frags[pdu_cb->dfrag_idx]; sg_init_table(&ccmd->sg, 1); - sg_set_page(&ccmd->sg, dfrag->page.p, skb_frag_size(dfrag), - dfrag->page_offset); - get_page(dfrag->page.p); + sg_set_page(&ccmd->sg, skb_frag_page(dfrag), + skb_frag_size(dfrag), skb_frag_off(dfrag)); + get_page(skb_frag_page(dfrag)); cmd->se_cmd.t_data_sg = &ccmd->sg; cmd->se_cmd.t_data_nents = 1; @@ -1403,7 +1403,8 @@ static void cxgbit_lro_skb_dump(struct sk_buff *skb) pdu_cb->ddigest, pdu_cb->frags); for (i = 0; i < ssi->nr_frags; i++) pr_info("skb 0x%p, frag %d, off %u, sz %u.\n", - skb, i, ssi->frags[i].page_offset, ssi->frags[i].size); + skb, i, skb_frag_off(&ssi->frags[i]), + skb_frag_size(&ssi->frags[i])); } static void cxgbit_lro_hskb_reset(struct cxgbit_sock *csk) @@ -1447,7 +1448,7 @@ cxgbit_lro_skb_merge(struct cxgbit_sock *csk, struct sk_buff *skb, u8 pdu_idx) hpdu_cb->frags++; hpdu_cb->hfrag_idx = hfrag_idx; - len = hssi->frags[hfrag_idx].size; + len = skb_frag_size(&hssi->frags[hfrag_idx]); hskb->len += len; hskb->data_len += len; hskb->truesize += len; @@ -1467,7 +1468,7 @@ cxgbit_lro_skb_merge(struct cxgbit_sock *csk, struct sk_buff *skb, u8 pdu_idx) get_page(skb_frag_page(&hssi->frags[dfrag_idx])); - len += 
hssi->frags[dfrag_idx].size; + len += skb_frag_size(&hssi->frags[dfrag_idx]); hssi->nr_frags++; hpdu_cb->frags++; diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index 6a50e1d0529c..9f57736fe15e 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -102,7 +102,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock, struct iov_iter iov_iter; unsigned out, in; size_t nbytes; - size_t len; + size_t iov_len, payload_len; int head; spin_lock_bh(&vsock->send_pkt_list_lock); @@ -147,8 +147,24 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock, break; } - len = iov_length(&vq->iov[out], in); - iov_iter_init(&iov_iter, READ, &vq->iov[out], in, len); + iov_len = iov_length(&vq->iov[out], in); + if (iov_len < sizeof(pkt->hdr)) { + virtio_transport_free_pkt(pkt); + vq_err(vq, "Buffer len [%zu] too small\n", iov_len); + break; + } + + iov_iter_init(&iov_iter, READ, &vq->iov[out], in, iov_len); + payload_len = pkt->len - pkt->off; + + /* If the packet is greater than the space available in the + * buffer, we split it using multiple buffers. + */ + if (payload_len > iov_len - sizeof(pkt->hdr)) + payload_len = iov_len - sizeof(pkt->hdr); + + /* Set the correct length in the header */ + pkt->hdr.len = cpu_to_le32(payload_len); nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter); if (nbytes != sizeof(pkt->hdr)) { @@ -157,33 +173,47 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock, break; } - nbytes = copy_to_iter(pkt->buf, pkt->len, &iov_iter); - if (nbytes != pkt->len) { + nbytes = copy_to_iter(pkt->buf + pkt->off, payload_len, + &iov_iter); + if (nbytes != payload_len) { virtio_transport_free_pkt(pkt); vq_err(vq, "Faulted on copying pkt buf\n"); break; } - vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len); + vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len); added = true; - if (pkt->reply) { - int val; - - val = atomic_dec_return(&vsock->queued_replies); - - /* Do we have resources to resume tx processing? */ - if (val + 1 == tx_vq->num) - restart_tx = true; - } - /* Deliver to monitoring devices all correctly transmitted * packets. */ virtio_transport_deliver_tap_pkt(pkt); - total_len += pkt->len; - virtio_transport_free_pkt(pkt); + pkt->off += payload_len; + total_len += payload_len; + + /* If we didn't send all the payload we can requeue the packet + * to send it with the next available buffer. + */ + if (pkt->off < pkt->len) { + spin_lock_bh(&vsock->send_pkt_list_lock); + list_add(&pkt->list, &vsock->send_pkt_list); + spin_unlock_bh(&vsock->send_pkt_list_lock); + } else { + if (pkt->reply) { + int val; + + val = atomic_dec_return(&vsock->queued_replies); + + /* Do we have resources to resume tx + * processing? + */ + if (val + 1 == tx_vq->num) + restart_tx = true; + } + + virtio_transport_free_pkt(pkt); + } } while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len))); if (added) vhost_signal(&vsock->dev, vq); @@ -329,6 +359,8 @@ vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq, return NULL; } + pkt->buf_len = pkt->len; + nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter); if (nbytes != pkt->len) { vq_err(vq, "Expected %u byte payload, got %zu bytes\n", |
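The vhost/vsock change above makes vhost_transport_do_send_pkt() split one packet across several guest buffers: each pass sends at most iov_len - sizeof(pkt->hdr) payload bytes, rewrites hdr.len to the partial length, advances pkt->off, and requeues the packet on send_pkt_list until off reaches len. A standalone sketch of just that arithmetic, with hypothetical names (split_payload, struct example_pkt) rather than the kernel types:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Stand-in for the fields of the vsock packet used by the hunk above. */
struct example_pkt {
	uint32_t len;	/* total payload size */
	uint32_t off;	/* payload bytes already sent */
};

/*
 * How much payload fits in one buffer of iov_len bytes when hdr_len
 * bytes are taken by the header. Callers must reject iov_len < hdr_len
 * first, as the hunk above does. Advances the offset and reports
 * whether the packet needs requeueing for another buffer.
 */
static uint32_t split_payload(struct example_pkt *pkt, size_t iov_len,
			      size_t hdr_len, bool *requeue)
{
	uint32_t payload_len = pkt->len - pkt->off;

	/* Cap the chunk to the space left after the header. */
	if (payload_len > iov_len - hdr_len)
		payload_len = iov_len - hdr_len;

	pkt->off += payload_len;
	*requeue = pkt->off < pkt->len;
	return payload_len;
}

With the 44-byte virtio_vsock header, a 5000-byte payload offered 4096-byte buffers would go out as two chunks of 4052 and 948 bytes, with the packet requeued once in between.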