Diffstat (limited to 'drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c')
-rw-r--r-- | drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 592
1 file changed, 503 insertions, 89 deletions
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index a268004c8e0e..c3205ae620ce 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -68,6 +68,8 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
                                                    unsigned long *addr);
 static int hclge_set_default_loopback(struct hclge_dev *hdev);
 
+static void hclge_sync_mac_table(struct hclge_dev *hdev);
+
 static struct hnae3_ae_algo ae_algo;
 
 static struct workqueue_struct *hclge_wq;
@@ -1685,6 +1687,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
                 INIT_LIST_HEAD(&vport->vlan_list);
                 INIT_LIST_HEAD(&vport->uc_mac_list);
                 INIT_LIST_HEAD(&vport->mc_mac_list);
+                spin_lock_init(&vport->mac_list_lock);
 
                 if (i == 0)
                         ret = hclge_vport_setup(vport, tqp_main_vport);
@@ -3971,6 +3974,7 @@ static void hclge_periodic_service_task(struct hclge_dev *hdev)
          * updated when it is triggered by mbx.
          */
         hclge_update_link_status(hdev);
+        hclge_sync_mac_table(hdev);
 
         if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
                 delta = jiffies - hdev->last_serv_processed;
@@ -6922,8 +6926,16 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
 
 int hclge_vport_start(struct hclge_vport *vport)
 {
+        struct hclge_dev *hdev = vport->back;
+
         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
         vport->last_active_jiffies = jiffies;
+
+        if (test_bit(vport->vport_id, hdev->vport_config_block))
+                hclge_restore_mac_table_common(vport);
+
+        clear_bit(vport->vport_id, hdev->vport_config_block);
+
         return 0;
 }
 
@@ -7291,12 +7303,106 @@ static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
         mutex_unlock(&hdev->umv_mutex);
 }
 
+static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
+                                                  const u8 *mac_addr)
+{
+        struct hclge_mac_node *mac_node, *tmp;
+
+        list_for_each_entry_safe(mac_node, tmp, list, node)
+                if (ether_addr_equal(mac_addr, mac_node->mac_addr))
+                        return mac_node;
+
+        return NULL;
+}
+
+static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
+                                  enum HCLGE_MAC_NODE_STATE state)
+{
+        switch (state) {
+        /* from set_rx_mode or tmp_add_list */
+        case HCLGE_MAC_TO_ADD:
+                if (mac_node->state == HCLGE_MAC_TO_DEL)
+                        mac_node->state = HCLGE_MAC_ACTIVE;
+                break;
+        /* only from set_rx_mode */
+        case HCLGE_MAC_TO_DEL:
+                if (mac_node->state == HCLGE_MAC_TO_ADD) {
+                        list_del(&mac_node->node);
+                        kfree(mac_node);
+                } else {
+                        mac_node->state = HCLGE_MAC_TO_DEL;
+                }
+                break;
+        /* only from tmp_add_list, the mac_node->state won't be
+         * ACTIVE.
+         */
+        case HCLGE_MAC_ACTIVE:
+                if (mac_node->state == HCLGE_MAC_TO_ADD)
+                        mac_node->state = HCLGE_MAC_ACTIVE;
+
+                break;
+        }
+}
+
+int hclge_update_mac_list(struct hclge_vport *vport,
+                          enum HCLGE_MAC_NODE_STATE state,
+                          enum HCLGE_MAC_ADDR_TYPE mac_type,
+                          const unsigned char *addr)
+{
+        struct hclge_dev *hdev = vport->back;
+        struct hclge_mac_node *mac_node;
+        struct list_head *list;
+
+        list = (mac_type == HCLGE_MAC_ADDR_UC) ?
+               &vport->uc_mac_list : &vport->mc_mac_list;
+
+        spin_lock_bh(&vport->mac_list_lock);
+
+        /* if the mac addr is already in the mac list, no need to add a new
+         * one into it, just check the mac addr state, convert it to a new
+         * state, or just remove it, or do nothing.
+         */
+        mac_node = hclge_find_mac_node(list, addr);
+        if (mac_node) {
+                hclge_update_mac_node(mac_node, state);
+                spin_unlock_bh(&vport->mac_list_lock);
+                set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
+                return 0;
+        }
+
+        /* if this address has never been added, there is no need to delete it */
+        if (state == HCLGE_MAC_TO_DEL) {
+                spin_unlock_bh(&vport->mac_list_lock);
+                dev_err(&hdev->pdev->dev,
+                        "failed to delete address %pM from mac list\n",
+                        addr);
+                return -ENOENT;
+        }
+
+        mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
+        if (!mac_node) {
+                spin_unlock_bh(&vport->mac_list_lock);
+                return -ENOMEM;
+        }
+
+        set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
+
+        mac_node->state = state;
+        ether_addr_copy(mac_node->mac_addr, addr);
+        list_add_tail(&mac_node->node, list);
+
+        spin_unlock_bh(&vport->mac_list_lock);
+
+        return 0;
+}
+
 static int hclge_add_uc_addr(struct hnae3_handle *handle,
                              const unsigned char *addr)
 {
         struct hclge_vport *vport = hclge_get_vport(handle);
 
-        return hclge_add_uc_addr_common(vport, addr);
+        return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
+                                     addr);
 }
 
 int hclge_add_uc_addr_common(struct hclge_vport *vport,
@@ -7367,7 +7473,8 @@ static int hclge_rm_uc_addr(struct hnae3_handle *handle,
 {
         struct hclge_vport *vport = hclge_get_vport(handle);
 
-        return hclge_rm_uc_addr_common(vport, addr);
+        return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
+                                     addr);
 }
 
 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
@@ -7392,6 +7499,8 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport,
         ret = hclge_remove_mac_vlan_tbl(vport, &req);
         if (!ret)
                 hclge_update_umv_space(vport, true);
+        else if (ret == -ENOENT)
+                ret = 0;
 
         return ret;
 }
@@ -7401,7 +7510,8 @@ static int hclge_add_mc_addr(struct hnae3_handle *handle,
 {
         struct hclge_vport *vport = hclge_get_vport(handle);
 
-        return hclge_add_mc_addr_common(vport, addr);
+        return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
+                                     addr);
 }
 
 int hclge_add_mc_addr_common(struct hclge_vport *vport,
@@ -7444,7 +7554,8 @@ static int hclge_rm_mc_addr(struct hnae3_handle *handle,
 {
         struct hclge_vport *vport = hclge_get_vport(handle);
 
-        return hclge_rm_mc_addr_common(vport, addr);
+        return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
+                                     addr);
 }
 
 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
@@ -7479,111 +7590,328 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
                         /* Not all the vfid is zero, update the vfid */
                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
-        } else {
-                /* Maybe this mac address is in mta table, but it cannot be
-                 * deleted here because an entry of mta represents an address
-                 * range rather than a specific address. the delete action to
-                 * all entries will take effect in update_mta_status called by
-                 * hns3_nic_set_rx_mode.
-                 */
+        } else if (status == -ENOENT) {
                 status = 0;
         }
 
         return status;
 }
 
-void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
-                               enum HCLGE_MAC_ADDR_TYPE mac_type)
+static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
+                                      struct list_head *list,
+                                      int (*sync)(struct hclge_vport *,
+                                                  const unsigned char *))
 {
-        struct hclge_vport_mac_addr_cfg *mac_cfg;
-        struct list_head *list;
+        struct hclge_mac_node *mac_node, *tmp;
+        int ret;
 
-        if (!vport->vport_id)
-                return;
+        list_for_each_entry_safe(mac_node, tmp, list, node) {
+                ret = sync(vport, mac_node->mac_addr);
+                if (!ret) {
+                        mac_node->state = HCLGE_MAC_ACTIVE;
+                } else {
+                        set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
+                                &vport->state);
+                        break;
+                }
+        }
+}
 
-        mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
-        if (!mac_cfg)
-                return;
+static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
+                                        struct list_head *list,
+                                        int (*unsync)(struct hclge_vport *,
+                                                      const unsigned char *))
+{
+        struct hclge_mac_node *mac_node, *tmp;
+        int ret;
 
-        mac_cfg->hd_tbl_status = true;
-        memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
+        list_for_each_entry_safe(mac_node, tmp, list, node) {
+                ret = unsync(vport, mac_node->mac_addr);
+                if (!ret || ret == -ENOENT) {
+                        list_del(&mac_node->node);
+                        kfree(mac_node);
+                } else {
+                        set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
+                                &vport->state);
+                        break;
+                }
+        }
+}
 
-        list = (mac_type == HCLGE_MAC_ADDR_UC) ?
-               &vport->uc_mac_list : &vport->mc_mac_list;
+static void hclge_sync_from_add_list(struct list_head *add_list,
+                                     struct list_head *mac_list)
+{
+        struct hclge_mac_node *mac_node, *tmp, *new_node;
 
-        list_add_tail(&mac_cfg->node, list);
+        list_for_each_entry_safe(mac_node, tmp, add_list, node) {
+                /* if the mac address from tmp_add_list is not in the
+                 * uc/mc_mac_list, it means a TO_DEL request was received
+                 * during the time window of adding the mac address into the
+                 * mac table. if the mac_node state is ACTIVE, change it to
+                 * TO_DEL so that it will be removed next time. else it must
+                 * be TO_ADD, this address hasn't been added into the mac
+                 * table, so just remove the mac node.
+                 */
+                new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
+                if (new_node) {
+                        hclge_update_mac_node(new_node, mac_node->state);
+                        list_del(&mac_node->node);
+                        kfree(mac_node);
+                } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
+                        mac_node->state = HCLGE_MAC_TO_DEL;
+                        list_del(&mac_node->node);
+                        list_add_tail(&mac_node->node, mac_list);
+                } else {
+                        list_del(&mac_node->node);
+                        kfree(mac_node);
+                }
+        }
 }
 
-void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
-                              bool is_write_tbl,
-                              enum HCLGE_MAC_ADDR_TYPE mac_type)
+static void hclge_sync_from_del_list(struct list_head *del_list,
+                                     struct list_head *mac_list)
 {
-        struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
-        struct list_head *list;
-        bool uc_flag, mc_flag;
+        struct hclge_mac_node *mac_node, *tmp, *new_node;
 
-        list = (mac_type == HCLGE_MAC_ADDR_UC) ?
-               &vport->uc_mac_list : &vport->mc_mac_list;
+        list_for_each_entry_safe(mac_node, tmp, del_list, node) {
+                new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
+                if (new_node) {
+                        /* If the mac addr exists in the mac list, it means
+                         * a new TO_ADD request was received during the time
+                         * window of configuring the mac address. Since the
+                         * mac node state is TO_ADD and the address is already
+                         * in the hardware (the delete failed), we just need
+                         * to change the mac node state to ACTIVE.
+                         */
+                        new_node->state = HCLGE_MAC_ACTIVE;
+                        list_del(&mac_node->node);
+                        kfree(mac_node);
+                } else {
+                        list_del(&mac_node->node);
+                        list_add_tail(&mac_node->node, mac_list);
+                }
+        }
+}
 
-        uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
-        mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
+static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
+                                       enum HCLGE_MAC_ADDR_TYPE mac_type)
+{
+        struct hclge_mac_node *mac_node, *tmp, *new_node;
+        struct list_head tmp_add_list, tmp_del_list;
+        struct list_head *list;
 
-        list_for_each_entry_safe(mac_cfg, tmp, list, node) {
-                if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
-                        if (uc_flag && mac_cfg->hd_tbl_status)
-                                hclge_rm_uc_addr_common(vport, mac_addr);
+        INIT_LIST_HEAD(&tmp_add_list);
+        INIT_LIST_HEAD(&tmp_del_list);
 
-                        if (mc_flag && mac_cfg->hd_tbl_status)
-                                hclge_rm_mc_addr_common(vport, mac_addr);
+        /* move the mac addr to the tmp_add_list and tmp_del_list, then
+         * we can add/delete these mac addr outside the spin lock
+         */
+        list = (mac_type == HCLGE_MAC_ADDR_UC) ?
+               &vport->uc_mac_list : &vport->mc_mac_list;
 
-                        list_del(&mac_cfg->node);
-                        kfree(mac_cfg);
+        spin_lock_bh(&vport->mac_list_lock);
+
+        list_for_each_entry_safe(mac_node, tmp, list, node) {
+                switch (mac_node->state) {
+                case HCLGE_MAC_TO_DEL:
+                        list_del(&mac_node->node);
+                        list_add_tail(&mac_node->node, &tmp_del_list);
+                        break;
+                case HCLGE_MAC_TO_ADD:
+                        new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
+                        if (!new_node)
+                                goto stop_traverse;
+                        ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
+                        new_node->state = mac_node->state;
+                        list_add_tail(&new_node->node, &tmp_add_list);
+                        break;
+                default:
                         break;
                 }
         }
+
+stop_traverse:
+        spin_unlock_bh(&vport->mac_list_lock);
+
+        /* delete first, in order to get max mac table space for adding */
+        if (mac_type == HCLGE_MAC_ADDR_UC) {
+                hclge_unsync_vport_mac_list(vport, &tmp_del_list,
+                                            hclge_rm_uc_addr_common);
+                hclge_sync_vport_mac_list(vport, &tmp_add_list,
+                                          hclge_add_uc_addr_common);
+        } else {
+                hclge_unsync_vport_mac_list(vport, &tmp_del_list,
+                                            hclge_rm_mc_addr_common);
+                hclge_sync_vport_mac_list(vport, &tmp_add_list,
+                                          hclge_add_mc_addr_common);
+        }
+
+        /* if adding/deleting some mac addresses failed, move them back to
+         * the mac_list and retry next time.
+         */
+        spin_lock_bh(&vport->mac_list_lock);
+
+        hclge_sync_from_del_list(&tmp_del_list, list);
+        hclge_sync_from_add_list(&tmp_add_list, list);
+
+        spin_unlock_bh(&vport->mac_list_lock);
+}
+
+static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
+{
+        struct hclge_dev *hdev = vport->back;
+
+        if (test_bit(vport->vport_id, hdev->vport_config_block))
+                return false;
+
+        if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
+                return true;
+
+        return false;
+}
+
+static void hclge_sync_mac_table(struct hclge_dev *hdev)
+{
+        int i;
+
+        for (i = 0; i < hdev->num_alloc_vport; i++) {
+                struct hclge_vport *vport = &hdev->vport[i];
+
+                if (!hclge_need_sync_mac_table(vport))
+                        continue;
+
+                hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
+                hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
+        }
 }
 
 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
 {
-        struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
-        struct list_head *list;
+        int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
+        struct hclge_mac_node *mac_cfg, *tmp;
+        struct hclge_dev *hdev = vport->back;
+        struct list_head tmp_del_list, *list;
+        int ret;
 
-        list = (mac_type == HCLGE_MAC_ADDR_UC) ?
-               &vport->uc_mac_list : &vport->mc_mac_list;
+        if (mac_type == HCLGE_MAC_ADDR_UC) {
+                list = &vport->uc_mac_list;
+                unsync = hclge_rm_uc_addr_common;
+        } else {
+                list = &vport->mc_mac_list;
+                unsync = hclge_rm_mc_addr_common;
+        }
 
-        list_for_each_entry_safe(mac_cfg, tmp, list, node) {
-                if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
-                        hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
+        INIT_LIST_HEAD(&tmp_del_list);
 
-                if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
-                        hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
+        if (!is_del_list)
+                set_bit(vport->vport_id, hdev->vport_config_block);
 
-                mac_cfg->hd_tbl_status = false;
-                if (is_del_list) {
+        spin_lock_bh(&vport->mac_list_lock);
+
+        list_for_each_entry_safe(mac_cfg, tmp, list, node) {
+                switch (mac_cfg->state) {
+                case HCLGE_MAC_TO_DEL:
+                case HCLGE_MAC_ACTIVE:
                         list_del(&mac_cfg->node);
-                        kfree(mac_cfg);
+                        list_add_tail(&mac_cfg->node, &tmp_del_list);
+                        break;
+                case HCLGE_MAC_TO_ADD:
+                        if (is_del_list) {
+                                list_del(&mac_cfg->node);
+                                kfree(mac_cfg);
+                        }
+                        break;
                 }
         }
+
+        spin_unlock_bh(&vport->mac_list_lock);
+
+        list_for_each_entry_safe(mac_cfg, tmp, &tmp_del_list, node) {
+                ret = unsync(vport, mac_cfg->mac_addr);
+                if (!ret || ret == -ENOENT) {
+                        /* clear all mac addr from hardware, but keep these
+                         * mac addr in the mac list, and restore them after
+                         * the vf reset finishes.
+                         */
+                        if (!is_del_list &&
+                            mac_cfg->state == HCLGE_MAC_ACTIVE) {
+                                mac_cfg->state = HCLGE_MAC_TO_ADD;
+                        } else {
+                                list_del(&mac_cfg->node);
+                                kfree(mac_cfg);
+                        }
+                } else if (is_del_list) {
+                        mac_cfg->state = HCLGE_MAC_TO_DEL;
+                }
+        }
+
+        spin_lock_bh(&vport->mac_list_lock);
+
+        hclge_sync_from_del_list(&tmp_del_list, list);
+
+        spin_unlock_bh(&vport->mac_list_lock);
+}
+
+/* remove all mac addresses when uninitializing */
+static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
+                                        enum HCLGE_MAC_ADDR_TYPE mac_type)
+{
+        struct hclge_mac_node *mac_node, *tmp;
+        struct hclge_dev *hdev = vport->back;
+        struct list_head tmp_del_list, *list;
+
+        INIT_LIST_HEAD(&tmp_del_list);
+
+        list = (mac_type == HCLGE_MAC_ADDR_UC) ?
+               &vport->uc_mac_list : &vport->mc_mac_list;
+
+        spin_lock_bh(&vport->mac_list_lock);
+
+        list_for_each_entry_safe(mac_node, tmp, list, node) {
+                switch (mac_node->state) {
+                case HCLGE_MAC_TO_DEL:
+                case HCLGE_MAC_ACTIVE:
+                        list_del(&mac_node->node);
+                        list_add_tail(&mac_node->node, &tmp_del_list);
+                        break;
+                case HCLGE_MAC_TO_ADD:
+                        list_del(&mac_node->node);
+                        kfree(mac_node);
+                        break;
+                }
+        }
+
+        spin_unlock_bh(&vport->mac_list_lock);
+
+        if (mac_type == HCLGE_MAC_ADDR_UC)
+                hclge_unsync_vport_mac_list(vport, &tmp_del_list,
+                                            hclge_rm_uc_addr_common);
+        else
+                hclge_unsync_vport_mac_list(vport, &tmp_del_list,
+                                            hclge_rm_mc_addr_common);
+
+        if (!list_empty(&tmp_del_list))
+                dev_warn(&hdev->pdev->dev,
+                         "failed to completely uninit %s mac list for vport %u\n",
+                         mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
+                         vport->vport_id);
+
+        list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
+                list_del(&mac_node->node);
+                kfree(mac_node);
+        }
 }
 
-void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
+static void hclge_uninit_mac_table(struct hclge_dev *hdev)
 {
-        struct hclge_vport_mac_addr_cfg *mac, *tmp;
         struct hclge_vport *vport;
         int i;
 
         for (i = 0; i < hdev->num_alloc_vport; i++) {
                 vport = &hdev->vport[i];
-                list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
-                        list_del(&mac->node);
-                        kfree(mac);
-                }
-
-                list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
-                        list_del(&mac->node);
-                        kfree(mac);
-                }
+                hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
+                hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
         }
 }
 
@@ -7747,12 +8075,57 @@ static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
         ether_addr_copy(p, hdev->hw.mac.mac_addr);
 }
 
+int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
+                                       const u8 *old_addr, const u8 *new_addr)
+{
+        struct list_head *list = &vport->uc_mac_list;
+        struct hclge_mac_node *old_node, *new_node;
+
+        new_node = hclge_find_mac_node(list, new_addr);
+        if (!new_node) {
+                new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
+                if (!new_node)
+                        return -ENOMEM;
+
+                new_node->state = HCLGE_MAC_TO_ADD;
+                ether_addr_copy(new_node->mac_addr, new_addr);
+                list_add(&new_node->node, list);
+        } else {
+                if (new_node->state == HCLGE_MAC_TO_DEL)
+                        new_node->state = HCLGE_MAC_ACTIVE;
+
+                /* make sure the new addr is at the head of the list, to avoid
+                 * the dev addr not being re-added into the mac table due to
+                 * the umv space limitation after a global/imp reset, which
+                 * clears the mac table in hardware.
+                 */
+                list_move(&new_node->node, list);
+        }
+
+        if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
+                old_node = hclge_find_mac_node(list, old_addr);
+                if (old_node) {
+                        if (old_node->state == HCLGE_MAC_TO_ADD) {
+                                list_del(&old_node->node);
+                                kfree(old_node);
+                        } else {
+                                old_node->state = HCLGE_MAC_TO_DEL;
+                        }
+                }
+        }
+
+        set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
+
+        return 0;
+}
+
 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
                               bool is_first)
 {
         const unsigned char *new_addr = (const unsigned char *)p;
         struct hclge_vport *vport = hclge_get_vport(handle);
         struct hclge_dev *hdev = vport->back;
+        unsigned char *old_addr = NULL;
         int ret;
 
         /* mac addr check */
@@ -7760,39 +8133,42 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
             is_broadcast_ether_addr(new_addr) ||
             is_multicast_ether_addr(new_addr)) {
                 dev_err(&hdev->pdev->dev,
-                        "Change uc mac err! invalid mac:%pM.\n",
+                        "change uc mac err! invalid mac: %pM.\n",
                         new_addr);
                 return -EINVAL;
         }
 
-        if ((!is_first || is_kdump_kernel()) &&
-            hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
-                dev_warn(&hdev->pdev->dev,
-                         "remove old uc mac address fail.\n");
-
-        ret = hclge_add_uc_addr(handle, new_addr);
+        ret = hclge_pause_addr_cfg(hdev, new_addr);
         if (ret) {
                 dev_err(&hdev->pdev->dev,
-                        "add uc mac address fail, ret =%d.\n",
+                        "failed to configure mac pause address, ret = %d\n",
                         ret);
-
-                if (!is_first &&
-                    hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
-                        dev_err(&hdev->pdev->dev,
-                                "restore uc mac address fail.\n");
-
-                return -EIO;
+                return ret;
         }
 
-        ret = hclge_pause_addr_cfg(hdev, new_addr);
+        if (!is_first)
+                old_addr = hdev->hw.mac.mac_addr;
+
+        spin_lock_bh(&vport->mac_list_lock);
+        ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
         if (ret) {
                 dev_err(&hdev->pdev->dev,
-                        "configure mac pause address fail, ret =%d.\n",
-                        ret);
-                return -EIO;
-        }
+                        "failed to change the mac addr:%pM, ret = %d\n",
+                        new_addr, ret);
+                spin_unlock_bh(&vport->mac_list_lock);
+
+                if (!is_first)
+                        hclge_pause_addr_cfg(hdev, old_addr);
+                return ret;
+        }
+        /* we must update the dev addr with the spin lock held, to prevent
+         * the dev addr from being removed by the set_rx_mode path.
+         */
         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
+        spin_unlock_bh(&vport->mac_list_lock);
+
+        hclge_task_schedule(hdev, 0);
 
         return 0;
 }
@@ -8408,6 +8784,37 @@ static void hclge_restore_vlan_table(struct hnae3_handle *handle)
         }
 }
 
+/* For global reset and imp reset, hardware will clear the mac table,
+ * so we change the mac address state from ACTIVE to TO_ADD, then they
+ * can be restored in the service task after the reset completes. Further,
+ * the mac addresses with state TO_DEL or DEL_FAIL do not need to be
+ * restored after reset, so just remove these mac nodes from the mac_list.
+ */
+static void hclge_mac_node_convert_for_reset(struct list_head *list)
+{
+        struct hclge_mac_node *mac_node, *tmp;
+
+        list_for_each_entry_safe(mac_node, tmp, list, node) {
+                if (mac_node->state == HCLGE_MAC_ACTIVE) {
+                        mac_node->state = HCLGE_MAC_TO_ADD;
+                } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
+                        list_del(&mac_node->node);
+                        kfree(mac_node);
+                }
+        }
+}
+
+void hclge_restore_mac_table_common(struct hclge_vport *vport)
+{
+        spin_lock_bh(&vport->mac_list_lock);
+
+        hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
+        hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
+        set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
+
+        spin_unlock_bh(&vport->mac_list_lock);
+}
+
 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
 {
         struct hclge_vport *vport = hclge_get_vport(handle);
@@ -9899,6 +10306,15 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
         set_bit(HCLGE_STATE_DOWN, &hdev->state);
         hclge_stats_clear(hdev);
 
+        /* NOTE: pf reset doesn't need to clear or restore the pf and vf
+         * table entries, so the tables in memory should not be cleaned here.
+         */
+        if (hdev->reset_type == HNAE3_IMP_RESET ||
+            hdev->reset_type == HNAE3_GLOBAL_RESET) {
+                bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
+                hclge_reset_umv_space(hdev);
+        }
+
         memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
         memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
@@ -9914,8 +10330,6 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
                 return ret;
         }
 
-        hclge_reset_umv_space(hdev);
-
         ret = hclge_mac_init(hdev);
         if (ret) {
                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
@@ -10011,6 +10425,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
         hclge_clear_vf_vlan(hdev);
         hclge_misc_affinity_teardown(hdev);
         hclge_state_uninit(hdev);
+        hclge_uninit_mac_table(hdev);
 
         if (mac->phydev)
                 mdiobus_unregister(mac->mdio_bus);
@@ -10028,7 +10443,6 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
         hclge_misc_irq_uninit(hdev);
         hclge_pci_uninit(hdev);
         mutex_destroy(&hdev->vport_lock);
-        hclge_uninit_vport_mac_table(hdev);
         hclge_uninit_vport_vlan_table(hdev);
         ae_dev->priv = NULL;
 }
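
Note: the heart of this patch is the three-state bookkeeping (TO_ADD, TO_DEL, ACTIVE) that set_rx_mode records under vport->mac_list_lock and the periodic service task later flushes to hardware. The stand-alone user-space sketch below models only that merge rule, in the spirit of hclge_update_mac_node; it is not driver code, and all names in it (mac_node_state, merge_request) are illustrative stand-ins rather than the driver's own definitions.

    /* Illustrative user-space model of the TO_ADD/TO_DEL/ACTIVE merge rule.
     * It is a minimal sketch, not the kernel implementation.
     */
    #include <stdio.h>

    enum mac_node_state { MAC_TO_ADD, MAC_TO_DEL, MAC_ACTIVE };

    /* Merge a new request into an existing node. Returns 1 when the node
     * should simply be dropped from the list: a TO_ADD entry that was never
     * written to hardware is cancelled outright by a TO_DEL request.
     */
    static int merge_request(enum mac_node_state *cur, enum mac_node_state req)
    {
            switch (req) {
            case MAC_TO_ADD:
                    if (*cur == MAC_TO_DEL)
                            *cur = MAC_ACTIVE;      /* still in hardware, keep it */
                    return 0;
            case MAC_TO_DEL:
                    if (*cur == MAC_TO_ADD)
                            return 1;               /* never written, just drop */
                    *cur = MAC_TO_DEL;
                    return 0;
            case MAC_ACTIVE:
                    if (*cur == MAC_TO_ADD)
                            *cur = MAC_ACTIVE;      /* hardware write succeeded */
                    return 0;
            }
            return 0;
    }

    static const char *name(enum mac_node_state s)
    {
            return s == MAC_TO_ADD ? "TO_ADD" :
                   s == MAC_TO_DEL ? "TO_DEL" : "ACTIVE";
    }

    int main(void)
    {
            enum mac_node_state st = MAC_TO_ADD;

            /* a delete request arrives before the service task ever ran */
            if (merge_request(&st, MAC_TO_DEL))
                    printf("node dropped without touching hardware\n");

            /* an ACTIVE entry that gets a delete request is only marked TO_DEL,
             * so the service task can remove it from hardware later
             */
            st = MAC_ACTIVE;
            merge_request(&st, MAC_TO_DEL);
            printf("active entry is now %s\n", name(st));
            return 0;
    }

The same rule explains why the patch can retry safely: a failed hardware add stays TO_ADD and a failed delete stays TO_DEL, so the next service-task pass simply replays the pending work.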