Diffstat (limited to 'drivers/net/ethernet/intel/iavf/i40evf_virtchnl.c')
-rw-r--r--  drivers/net/ethernet/intel/iavf/i40evf_virtchnl.c | 456
1 file changed, 223 insertions, 233 deletions
diff --git a/drivers/net/ethernet/intel/iavf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/iavf/i40evf_virtchnl.c index 6579dabab78c..aa8b2badbb52 100644 --- a/drivers/net/ethernet/intel/iavf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/i40evf_virtchnl.c @@ -6,11 +6,11 @@ #include "i40evf_client.h" /* busy wait delay in msec */ -#define I40EVF_BUSY_WAIT_DELAY 10 -#define I40EVF_BUSY_WAIT_COUNT 50 +#define IAVF_BUSY_WAIT_DELAY 10 +#define IAVF_BUSY_WAIT_COUNT 50 /** - * i40evf_send_pf_msg + * iavf_send_pf_msg * @adapter: adapter structure * @op: virtual channel opcode * @msg: pointer to message buffer @@ -18,44 +18,44 @@ * * Send message to PF and print status if failure. **/ -static int i40evf_send_pf_msg(struct i40evf_adapter *adapter, - enum virtchnl_ops op, u8 *msg, u16 len) +static int iavf_send_pf_msg(struct iavf_adapter *adapter, + enum virtchnl_ops op, u8 *msg, u16 len) { struct i40e_hw *hw = &adapter->hw; i40e_status err; - if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) + if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) return 0; /* nothing to see here, move along */ - err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL); + err = iavf_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL); if (err) dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n", - op, i40evf_stat_str(hw, err), - i40evf_aq_str(hw, hw->aq.asq_last_status)); + op, iavf_stat_str(hw, err), + iavf_aq_str(hw, hw->aq.asq_last_status)); return err; } /** - * i40evf_send_api_ver + * iavf_send_api_ver * @adapter: adapter structure * * Send API version admin queue message to the PF. The reply is not checked * in this function. Returns 0 if the message was successfully * sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not. **/ -int i40evf_send_api_ver(struct i40evf_adapter *adapter) +int iavf_send_api_ver(struct iavf_adapter *adapter) { struct virtchnl_version_info vvi; vvi.major = VIRTCHNL_VERSION_MAJOR; vvi.minor = VIRTCHNL_VERSION_MINOR; - return i40evf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi, - sizeof(vvi)); + return iavf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi, + sizeof(vvi)); } /** - * i40evf_verify_api_ver + * iavf_verify_api_ver * @adapter: adapter structure * * Compare API versions with the PF. Must be called after admin queue is @@ -63,7 +63,7 @@ int i40evf_send_api_ver(struct i40evf_adapter *adapter) * I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty, and any errors * from the firmware are propagated. **/ -int i40evf_verify_api_ver(struct i40evf_adapter *adapter) +int iavf_verify_api_ver(struct iavf_adapter *adapter) { struct virtchnl_version_info *pf_vvi; struct i40e_hw *hw = &adapter->hw; @@ -71,7 +71,7 @@ int i40evf_verify_api_ver(struct i40evf_adapter *adapter) enum virtchnl_ops op; i40e_status err; - event.buf_len = I40EVF_MAX_AQ_BUF_SIZE; + event.buf_len = IAVF_MAX_AQ_BUF_SIZE; event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); if (!event.msg_buf) { err = -ENOMEM; @@ -79,8 +79,8 @@ int i40evf_verify_api_ver(struct i40evf_adapter *adapter) } while (1) { - err = i40evf_clean_arq_element(hw, &event, NULL); - /* When the AQ is empty, i40evf_clean_arq_element will return + err = iavf_clean_arq_element(hw, &event, NULL); + /* When the AQ is empty, iavf_clean_arq_element will return * nonzero and this loop will terminate. */ if (err) @@ -118,14 +118,14 @@ out: } /** - * i40evf_send_vf_config_msg + * iavf_send_vf_config_msg * @adapter: adapter structure * * Send VF configuration request admin queue message to the PF. 
The reply * is not checked in this function. Returns 0 if the message was * successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not. **/ -int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter) +int iavf_send_vf_config_msg(struct iavf_adapter *adapter) { u32 caps; @@ -142,45 +142,45 @@ int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter) VIRTCHNL_VF_OFFLOAD_ADQ; adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES; - adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG; + adapter->aq_required &= ~IAVF_FLAG_AQ_GET_CONFIG; if (PF_IS_V11(adapter)) - return i40evf_send_pf_msg(adapter, + return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES, (u8 *)&caps, sizeof(caps)); else - return i40evf_send_pf_msg(adapter, + return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES, NULL, 0); } /** - * i40evf_validate_num_queues + * iavf_validate_num_queues * @adapter: adapter structure * * Validate that the number of queues the PF has sent in * VIRTCHNL_OP_GET_VF_RESOURCES is not larger than the VF can handle. **/ -static void i40evf_validate_num_queues(struct i40evf_adapter *adapter) +static void iavf_validate_num_queues(struct iavf_adapter *adapter) { - if (adapter->vf_res->num_queue_pairs > I40EVF_MAX_REQ_QUEUES) { + if (adapter->vf_res->num_queue_pairs > IAVF_MAX_REQ_QUEUES) { struct virtchnl_vsi_resource *vsi_res; int i; dev_info(&adapter->pdev->dev, "Received %d queues, but can only have a max of %d\n", adapter->vf_res->num_queue_pairs, - I40EVF_MAX_REQ_QUEUES); + IAVF_MAX_REQ_QUEUES); dev_info(&adapter->pdev->dev, "Fixing by reducing queues to %d\n", - I40EVF_MAX_REQ_QUEUES); - adapter->vf_res->num_queue_pairs = I40EVF_MAX_REQ_QUEUES; + IAVF_MAX_REQ_QUEUES); + adapter->vf_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES; for (i = 0; i < adapter->vf_res->num_vsis; i++) { vsi_res = &adapter->vf_res->vsi_res[i]; - vsi_res->num_queue_pairs = I40EVF_MAX_REQ_QUEUES; + vsi_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES; } } } /** - * i40evf_get_vf_config + * iavf_get_vf_config * @adapter: private adapter structure * * Get VF configuration from PF and populate hw structure. Must be called after @@ -188,7 +188,7 @@ static void i40evf_validate_num_queues(struct i40evf_adapter *adapter) * with maximum timeout. Response from PF is returned in the buffer for further * processing by the caller. **/ -int i40evf_get_vf_config(struct i40evf_adapter *adapter) +int iavf_get_vf_config(struct iavf_adapter *adapter) { struct i40e_hw *hw = &adapter->hw; struct i40e_arq_event_info event; @@ -206,10 +206,10 @@ int i40evf_get_vf_config(struct i40evf_adapter *adapter) } while (1) { - /* When the AQ is empty, i40evf_clean_arq_element will return + /* When the AQ is empty, iavf_clean_arq_element will return * nonzero and this loop will terminate. */ - err = i40evf_clean_arq_element(hw, &event, NULL); + err = iavf_clean_arq_element(hw, &event, NULL); if (err) goto out_alloc; op = @@ -225,8 +225,8 @@ int i40evf_get_vf_config(struct i40evf_adapter *adapter) * we aren't getting too many queues */ if (!err) - i40evf_validate_num_queues(adapter); - i40e_vf_parse_hw_config(hw, adapter->vf_res); + iavf_validate_num_queues(adapter); + iavf_vf_parse_hw_config(hw, adapter->vf_res); out_alloc: kfree(event.msg_buf); out: @@ -234,12 +234,12 @@ out: } /** - * i40evf_configure_queues + * iavf_configure_queues * @adapter: adapter structure * * Request that the PF set up our (previously allocated) queues. 
**/ -void i40evf_configure_queues(struct i40evf_adapter *adapter) +void iavf_configure_queues(struct iavf_adapter *adapter) { struct virtchnl_vsi_queue_config_info *vqci; struct virtchnl_queue_pair_info *vqpi; @@ -260,7 +260,7 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter) return; /* Limit maximum frame size when jumbo frames is not enabled */ - if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX) && + if (!(adapter->flags & IAVF_FLAG_LEGACY_RX) && (adapter->netdev->mtu <= ETH_DATA_LEN)) max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN; @@ -286,19 +286,19 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter) vqpi++; } - adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_VSI_QUEUES, - (u8 *)vqci, len); + adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_QUEUES; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_VSI_QUEUES, + (u8 *)vqci, len); kfree(vqci); } /** - * i40evf_enable_queues + * iavf_enable_queues * @adapter: adapter structure * * Request that the PF enable all of our queues. **/ -void i40evf_enable_queues(struct i40evf_adapter *adapter) +void iavf_enable_queues(struct iavf_adapter *adapter) { struct virtchnl_queue_select vqs; @@ -312,18 +312,18 @@ void i40evf_enable_queues(struct i40evf_adapter *adapter) vqs.vsi_id = adapter->vsi_res->vsi_id; vqs.tx_queues = BIT(adapter->num_active_queues) - 1; vqs.rx_queues = vqs.tx_queues; - adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES, - (u8 *)&vqs, sizeof(vqs)); + adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_QUEUES; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES, + (u8 *)&vqs, sizeof(vqs)); } /** - * i40evf_disable_queues + * iavf_disable_queues * @adapter: adapter structure * * Request that the PF disable all of our queues. **/ -void i40evf_disable_queues(struct i40evf_adapter *adapter) +void iavf_disable_queues(struct iavf_adapter *adapter) { struct virtchnl_queue_select vqs; @@ -337,19 +337,19 @@ void i40evf_disable_queues(struct i40evf_adapter *adapter) vqs.vsi_id = adapter->vsi_res->vsi_id; vqs.tx_queues = BIT(adapter->num_active_queues) - 1; vqs.rx_queues = vqs.tx_queues; - adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_QUEUES, - (u8 *)&vqs, sizeof(vqs)); + adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_QUEUES; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_QUEUES, + (u8 *)&vqs, sizeof(vqs)); } /** - * i40evf_map_queues + * iavf_map_queues * @adapter: adapter structure * * Request that the PF map queues to interrupt vectors. Misc causes, including * admin queue, are always mapped to vector 0. **/ -void i40evf_map_queues(struct i40evf_adapter *adapter) +void iavf_map_queues(struct iavf_adapter *adapter) { struct virtchnl_irq_map_info *vimi; struct virtchnl_vector_map *vecmap; @@ -393,21 +393,21 @@ void i40evf_map_queues(struct i40evf_adapter *adapter) vecmap->txq_map = 0; vecmap->rxq_map = 0; - adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_IRQ_MAP, - (u8 *)vimi, len); + adapter->aq_required &= ~IAVF_FLAG_AQ_MAP_VECTORS; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_IRQ_MAP, + (u8 *)vimi, len); kfree(vimi); } /** - * i40evf_request_queues + * iavf_request_queues * @adapter: adapter structure * @num: number of requested queues * * We get a default number of queues from the PF. This enables us to request a * different number. 
Returns 0 on success, negative on failure **/ -int i40evf_request_queues(struct i40evf_adapter *adapter, int num) +int iavf_request_queues(struct iavf_adapter *adapter, int num) { struct virtchnl_vf_res_request vfres; @@ -421,22 +421,22 @@ int i40evf_request_queues(struct i40evf_adapter *adapter, int num) vfres.num_queue_pairs = num; adapter->current_op = VIRTCHNL_OP_REQUEST_QUEUES; - adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED; - return i40evf_send_pf_msg(adapter, VIRTCHNL_OP_REQUEST_QUEUES, - (u8 *)&vfres, sizeof(vfres)); + adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; + return iavf_send_pf_msg(adapter, VIRTCHNL_OP_REQUEST_QUEUES, + (u8 *)&vfres, sizeof(vfres)); } /** - * i40evf_add_ether_addrs + * iavf_add_ether_addrs * @adapter: adapter structure * * Request that the PF add one or more addresses to our filters. **/ -void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) +void iavf_add_ether_addrs(struct iavf_adapter *adapter) { struct virtchnl_ether_addr_list *veal; int len, i = 0, count = 0; - struct i40evf_mac_filter *f; + struct iavf_mac_filter *f; bool more = false; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { @@ -453,7 +453,7 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) count++; } if (!count) { - adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER; + adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); return; } @@ -461,9 +461,9 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) len = sizeof(struct virtchnl_ether_addr_list) + (count * sizeof(struct virtchnl_ether_addr)); - if (len > I40EVF_MAX_AQ_BUF_SIZE) { + if (len > IAVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n"); - count = (I40EVF_MAX_AQ_BUF_SIZE - + count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(struct virtchnl_ether_addr_list)) / sizeof(struct virtchnl_ether_addr); len = sizeof(struct virtchnl_ether_addr_list) + @@ -489,25 +489,25 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) } } if (!more) - adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER; + adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR, - (u8 *)veal, len); + iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR, + (u8 *)veal, len); kfree(veal); } /** - * i40evf_del_ether_addrs + * iavf_del_ether_addrs * @adapter: adapter structure * * Request that the PF remove one or more addresses from our filters. 
**/ -void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) +void iavf_del_ether_addrs(struct iavf_adapter *adapter) { struct virtchnl_ether_addr_list *veal; - struct i40evf_mac_filter *f, *ftmp; + struct iavf_mac_filter *f, *ftmp; int len, i = 0, count = 0; bool more = false; @@ -525,7 +525,7 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) count++; } if (!count) { - adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER; + adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); return; } @@ -533,9 +533,9 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) len = sizeof(struct virtchnl_ether_addr_list) + (count * sizeof(struct virtchnl_ether_addr)); - if (len > I40EVF_MAX_AQ_BUF_SIZE) { + if (len > IAVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n"); - count = (I40EVF_MAX_AQ_BUF_SIZE - + count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(struct virtchnl_ether_addr_list)) / sizeof(struct virtchnl_ether_addr); len = sizeof(struct virtchnl_ether_addr_list) + @@ -561,26 +561,26 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) } } if (!more) - adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER; + adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR, - (u8 *)veal, len); + iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR, + (u8 *)veal, len); kfree(veal); } /** - * i40evf_add_vlans + * iavf_add_vlans * @adapter: adapter structure * * Request that the PF add one or more VLAN filters to our VSI. **/ -void i40evf_add_vlans(struct i40evf_adapter *adapter) +void iavf_add_vlans(struct iavf_adapter *adapter) { struct virtchnl_vlan_filter_list *vvfl; int len, i = 0, count = 0; - struct i40evf_vlan_filter *f; + struct iavf_vlan_filter *f; bool more = false; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { @@ -597,7 +597,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) count++; } if (!count) { - adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER; + adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); return; } @@ -605,9 +605,9 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) len = sizeof(struct virtchnl_vlan_filter_list) + (count * sizeof(u16)); - if (len > I40EVF_MAX_AQ_BUF_SIZE) { + if (len > IAVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n"); - count = (I40EVF_MAX_AQ_BUF_SIZE - + count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(struct virtchnl_vlan_filter_list)) / sizeof(u16); len = sizeof(struct virtchnl_vlan_filter_list) + @@ -632,24 +632,24 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) } } if (!more) - adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER; + adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len); + iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len); kfree(vvfl); } /** - * i40evf_del_vlans + * iavf_del_vlans * @adapter: adapter structure * * Request that the PF remove one or more VLAN filters from our VSI. 
**/ -void i40evf_del_vlans(struct i40evf_adapter *adapter) +void iavf_del_vlans(struct iavf_adapter *adapter) { struct virtchnl_vlan_filter_list *vvfl; - struct i40evf_vlan_filter *f, *ftmp; + struct iavf_vlan_filter *f, *ftmp; int len, i = 0, count = 0; bool more = false; @@ -667,7 +667,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) count++; } if (!count) { - adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER; + adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); return; } @@ -675,9 +675,9 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) len = sizeof(struct virtchnl_vlan_filter_list) + (count * sizeof(u16)); - if (len > I40EVF_MAX_AQ_BUF_SIZE) { + if (len > IAVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n"); - count = (I40EVF_MAX_AQ_BUF_SIZE - + count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(struct virtchnl_vlan_filter_list)) / sizeof(u16); len = sizeof(struct virtchnl_vlan_filter_list) + @@ -703,22 +703,22 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) } } if (!more) - adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER; + adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len); + iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len); kfree(vvfl); } /** - * i40evf_set_promiscuous + * iavf_set_promiscuous * @adapter: adapter structure * @flags: bitmask to control unicast/multicast promiscuous. * * Request that the PF enable promiscuous mode for our VSI. **/ -void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags) +void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags) { struct virtchnl_promisc_info vpi; int promisc_all; @@ -733,39 +733,39 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags) promisc_all = FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC; if ((flags & promisc_all) == promisc_all) { - adapter->flags |= I40EVF_FLAG_PROMISC_ON; - adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_PROMISC; + adapter->flags |= IAVF_FLAG_PROMISC_ON; + adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_PROMISC; dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n"); } if (flags & FLAG_VF_MULTICAST_PROMISC) { - adapter->flags |= I40EVF_FLAG_ALLMULTI_ON; - adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_ALLMULTI; + adapter->flags |= IAVF_FLAG_ALLMULTI_ON; + adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_ALLMULTI; dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n"); } if (!flags) { - adapter->flags &= ~(I40EVF_FLAG_PROMISC_ON | - I40EVF_FLAG_ALLMULTI_ON); - adapter->aq_required &= ~(I40EVF_FLAG_AQ_RELEASE_PROMISC | - I40EVF_FLAG_AQ_RELEASE_ALLMULTI); + adapter->flags &= ~(IAVF_FLAG_PROMISC_ON | + IAVF_FLAG_ALLMULTI_ON); + adapter->aq_required &= ~(IAVF_FLAG_AQ_RELEASE_PROMISC | + IAVF_FLAG_AQ_RELEASE_ALLMULTI); dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n"); } adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE; vpi.vsi_id = adapter->vsi_res->vsi_id; vpi.flags = flags; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, - (u8 *)&vpi, sizeof(vpi)); + iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, + (u8 *)&vpi, sizeof(vpi)); } /** - * i40evf_request_stats + * iavf_request_stats * @adapter: adapter structure * * Request VSI statistics from PF. 
**/ -void i40evf_request_stats(struct i40evf_adapter *adapter) +void iavf_request_stats(struct iavf_adapter *adapter) { struct virtchnl_queue_select vqs; @@ -776,19 +776,19 @@ void i40evf_request_stats(struct i40evf_adapter *adapter) adapter->current_op = VIRTCHNL_OP_GET_STATS; vqs.vsi_id = adapter->vsi_res->vsi_id; /* queue maps are ignored for this message - only the vsi is used */ - if (i40evf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS, - (u8 *)&vqs, sizeof(vqs))) + if (iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS, + (u8 *)&vqs, sizeof(vqs))) /* if the request failed, don't lock out others */ adapter->current_op = VIRTCHNL_OP_UNKNOWN; } /** - * i40evf_get_hena + * iavf_get_hena * @adapter: adapter structure * * Request hash enable capabilities from PF **/ -void i40evf_get_hena(struct i40evf_adapter *adapter) +void iavf_get_hena(struct iavf_adapter *adapter) { if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -797,18 +797,17 @@ void i40evf_get_hena(struct i40evf_adapter *adapter) return; } adapter->current_op = VIRTCHNL_OP_GET_RSS_HENA_CAPS; - adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_HENA; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HENA_CAPS, - NULL, 0); + adapter->aq_required &= ~IAVF_FLAG_AQ_GET_HENA; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HENA_CAPS, NULL, 0); } /** - * i40evf_set_hena + * iavf_set_hena * @adapter: adapter structure * * Request the PF to set our RSS hash capabilities **/ -void i40evf_set_hena(struct i40evf_adapter *adapter) +void iavf_set_hena(struct iavf_adapter *adapter) { struct virtchnl_rss_hena vrh; @@ -820,18 +819,18 @@ void i40evf_set_hena(struct i40evf_adapter *adapter) } vrh.hena = adapter->hena; adapter->current_op = VIRTCHNL_OP_SET_RSS_HENA; - adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_HENA; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HENA, - (u8 *)&vrh, sizeof(vrh)); + adapter->aq_required &= ~IAVF_FLAG_AQ_SET_HENA; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HENA, (u8 *)&vrh, + sizeof(vrh)); } /** - * i40evf_set_rss_key + * iavf_set_rss_key * @adapter: adapter structure * * Request the PF to set our RSS hash key **/ -void i40evf_set_rss_key(struct i40evf_adapter *adapter) +void iavf_set_rss_key(struct iavf_adapter *adapter) { struct virtchnl_rss_key *vrk; int len; @@ -852,19 +851,18 @@ void i40evf_set_rss_key(struct i40evf_adapter *adapter) memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size); adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_KEY; - adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_KEY; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_KEY, - (u8 *)vrk, len); + adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_KEY; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_KEY, (u8 *)vrk, len); kfree(vrk); } /** - * i40evf_set_rss_lut + * iavf_set_rss_lut * @adapter: adapter structure * * Request the PF to set our RSS lookup table **/ -void i40evf_set_rss_lut(struct i40evf_adapter *adapter) +void iavf_set_rss_lut(struct iavf_adapter *adapter) { struct virtchnl_rss_lut *vrl; int len; @@ -884,19 +882,18 @@ void i40evf_set_rss_lut(struct i40evf_adapter *adapter) vrl->lut_entries = adapter->rss_lut_size; memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size); adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_LUT; - adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_LUT; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_LUT, - (u8 *)vrl, len); + adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_LUT; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_LUT, (u8 *)vrl, len); 
kfree(vrl); } /** - * i40evf_enable_vlan_stripping + * iavf_enable_vlan_stripping * @adapter: adapter structure * * Request VLAN header stripping to be enabled **/ -void i40evf_enable_vlan_stripping(struct i40evf_adapter *adapter) +void iavf_enable_vlan_stripping(struct iavf_adapter *adapter) { if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -905,18 +902,17 @@ void i40evf_enable_vlan_stripping(struct i40evf_adapter *adapter) return; } adapter->current_op = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING; - adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, - NULL, 0); + adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, NULL, 0); } /** - * i40evf_disable_vlan_stripping + * iavf_disable_vlan_stripping * @adapter: adapter structure * * Request VLAN header stripping to be disabled **/ -void i40evf_disable_vlan_stripping(struct i40evf_adapter *adapter) +void iavf_disable_vlan_stripping(struct iavf_adapter *adapter) { if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -925,18 +921,17 @@ void i40evf_disable_vlan_stripping(struct i40evf_adapter *adapter) return; } adapter->current_op = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING; - adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, - NULL, 0); + adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, NULL, 0); } /** - * i40evf_print_link_message - print link up or down + * iavf_print_link_message - print link up or down * @adapter: adapter structure * * Log a message telling the world of our wonderous link status */ -static void i40evf_print_link_message(struct i40evf_adapter *adapter) +static void iavf_print_link_message(struct iavf_adapter *adapter) { struct net_device *netdev = adapter->netdev; char *speed = "Unknown "; @@ -973,13 +968,13 @@ static void i40evf_print_link_message(struct i40evf_adapter *adapter) } /** - * i40evf_enable_channel + * iavf_enable_channel * @adapter: adapter structure * * Request that the PF enable channels as specified by * the user via tc tool. 
**/ -void i40evf_enable_channels(struct i40evf_adapter *adapter) +void iavf_enable_channels(struct iavf_adapter *adapter) { struct virtchnl_tc_info *vti = NULL; u16 len; @@ -1007,22 +1002,21 @@ void i40evf_enable_channels(struct i40evf_adapter *adapter) adapter->ch_config.ch_info[i].max_tx_rate; } - adapter->ch_config.state = __I40EVF_TC_RUNNING; - adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED; + adapter->ch_config.state = __IAVF_TC_RUNNING; + adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; adapter->current_op = VIRTCHNL_OP_ENABLE_CHANNELS; - adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_CHANNELS; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_CHANNELS, - (u8 *)vti, len); + adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_CHANNELS; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_CHANNELS, (u8 *)vti, len); kfree(vti); } /** - * i40evf_disable_channel + * iavf_disable_channel * @adapter: adapter structure * * Request that the PF disable channels that are configured **/ -void i40evf_disable_channels(struct i40evf_adapter *adapter) +void iavf_disable_channels(struct iavf_adapter *adapter) { if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -1031,23 +1025,22 @@ void i40evf_disable_channels(struct i40evf_adapter *adapter) return; } - adapter->ch_config.state = __I40EVF_TC_INVALID; - adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED; + adapter->ch_config.state = __IAVF_TC_INVALID; + adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; adapter->current_op = VIRTCHNL_OP_DISABLE_CHANNELS; - adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_CHANNELS; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_CHANNELS, - NULL, 0); + adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_CHANNELS; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_CHANNELS, NULL, 0); } /** - * i40evf_print_cloud_filter + * iavf_print_cloud_filter * @adapter: adapter structure * @f: cloud filter to print * * Print the cloud filter **/ -static void i40evf_print_cloud_filter(struct i40evf_adapter *adapter, - struct virtchnl_filter *f) +static void iavf_print_cloud_filter(struct iavf_adapter *adapter, + struct virtchnl_filter *f) { switch (f->flow_type) { case VIRTCHNL_TCP_V4_FLOW: @@ -1074,15 +1067,15 @@ static void i40evf_print_cloud_filter(struct i40evf_adapter *adapter, } /** - * i40evf_add_cloud_filter + * iavf_add_cloud_filter * @adapter: adapter structure * * Request that the PF add cloud filters as specified * by the user via tc tool. 
**/ -void i40evf_add_cloud_filter(struct i40evf_adapter *adapter) +void iavf_add_cloud_filter(struct iavf_adapter *adapter) { - struct i40evf_cloud_filter *cf; + struct iavf_cloud_filter *cf; struct virtchnl_filter *f; int len = 0, count = 0; @@ -1099,7 +1092,7 @@ void i40evf_add_cloud_filter(struct i40evf_adapter *adapter) } } if (!count) { - adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_CLOUD_FILTER; + adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_CLOUD_FILTER; return; } adapter->current_op = VIRTCHNL_OP_ADD_CLOUD_FILTER; @@ -1113,25 +1106,24 @@ void i40evf_add_cloud_filter(struct i40evf_adapter *adapter) if (cf->add) { memcpy(f, &cf->f, sizeof(struct virtchnl_filter)); cf->add = false; - cf->state = __I40EVF_CF_ADD_PENDING; - i40evf_send_pf_msg(adapter, - VIRTCHNL_OP_ADD_CLOUD_FILTER, - (u8 *)f, len); + cf->state = __IAVF_CF_ADD_PENDING; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_CLOUD_FILTER, + (u8 *)f, len); } } kfree(f); } /** - * i40evf_del_cloud_filter + * iavf_del_cloud_filter * @adapter: adapter structure * * Request that the PF delete cloud filters as specified * by the user via tc tool. **/ -void i40evf_del_cloud_filter(struct i40evf_adapter *adapter) +void iavf_del_cloud_filter(struct iavf_adapter *adapter) { - struct i40evf_cloud_filter *cf, *cftmp; + struct iavf_cloud_filter *cf, *cftmp; struct virtchnl_filter *f; int len = 0, count = 0; @@ -1148,7 +1140,7 @@ void i40evf_del_cloud_filter(struct i40evf_adapter *adapter) } } if (!count) { - adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_CLOUD_FILTER; + adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_CLOUD_FILTER; return; } adapter->current_op = VIRTCHNL_OP_DEL_CLOUD_FILTER; @@ -1162,30 +1154,29 @@ void i40evf_del_cloud_filter(struct i40evf_adapter *adapter) if (cf->del) { memcpy(f, &cf->f, sizeof(struct virtchnl_filter)); cf->del = false; - cf->state = __I40EVF_CF_DEL_PENDING; - i40evf_send_pf_msg(adapter, - VIRTCHNL_OP_DEL_CLOUD_FILTER, - (u8 *)f, len); + cf->state = __IAVF_CF_DEL_PENDING; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_CLOUD_FILTER, + (u8 *)f, len); } } kfree(f); } /** - * i40evf_request_reset + * iavf_request_reset * @adapter: adapter structure * * Request that the PF reset this VF. No response is expected. **/ -void i40evf_request_reset(struct i40evf_adapter *adapter) +void iavf_request_reset(struct iavf_adapter *adapter) { /* Don't check CURRENT_OP - this is always higher priority */ - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0); + iavf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0); adapter->current_op = VIRTCHNL_OP_UNKNOWN; } /** - * i40evf_virtchnl_completion + * iavf_virtchnl_completion * @adapter: adapter structure * @v_opcode: opcode sent by PF * @v_retval: retval sent by PF @@ -1196,10 +1187,10 @@ void i40evf_request_reset(struct i40evf_adapter *adapter) * wait, we fire off our requests and assume that no errors will be returned. * This function handles the reply messages. **/ -void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, - enum virtchnl_ops v_opcode, - i40e_status v_retval, - u8 *msg, u16 msglen) +void iavf_virtchnl_completion(struct iavf_adapter *adapter, + enum virtchnl_ops v_opcode, + i40e_status v_retval, + u8 *msg, u16 msglen) { struct net_device *netdev = adapter->netdev; @@ -1224,7 +1215,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, * after we enable queues and actually prepared * to send traffic. 
*/ - if (adapter->state != __I40EVF_RUNNING) + if (adapter->state != __IAVF_RUNNING) break; /* For ADq enabled VF, we reconfigure VSIs and @@ -1232,7 +1223,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, * queues are enabled. */ if (adapter->flags & - I40EVF_FLAG_QUEUES_DISABLED) + IAVF_FLAG_QUEUES_DISABLED) break; } @@ -1244,12 +1235,12 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, netif_tx_stop_all_queues(netdev); netif_carrier_off(netdev); } - i40evf_print_link_message(adapter); + iavf_print_link_message(adapter); break; case VIRTCHNL_EVENT_RESET_IMPENDING: dev_info(&adapter->pdev->dev, "Reset warning received from the PF\n"); - if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) { - adapter->flags |= I40EVF_FLAG_RESET_PENDING; + if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) { + adapter->flags |= IAVF_FLAG_RESET_PENDING; dev_info(&adapter->pdev->dev, "Scheduling reset task\n"); schedule_work(&adapter->reset_task); } @@ -1265,48 +1256,48 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, switch (v_opcode) { case VIRTCHNL_OP_ADD_VLAN: dev_err(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n", - i40evf_stat_str(&adapter->hw, v_retval)); + iavf_stat_str(&adapter->hw, v_retval)); break; case VIRTCHNL_OP_ADD_ETH_ADDR: dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n", - i40evf_stat_str(&adapter->hw, v_retval)); + iavf_stat_str(&adapter->hw, v_retval)); break; case VIRTCHNL_OP_DEL_VLAN: dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n", - i40evf_stat_str(&adapter->hw, v_retval)); + iavf_stat_str(&adapter->hw, v_retval)); break; case VIRTCHNL_OP_DEL_ETH_ADDR: dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n", - i40evf_stat_str(&adapter->hw, v_retval)); + iavf_stat_str(&adapter->hw, v_retval)); break; case VIRTCHNL_OP_ENABLE_CHANNELS: dev_err(&adapter->pdev->dev, "Failed to configure queue channels, error %s\n", - i40evf_stat_str(&adapter->hw, v_retval)); - adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED; - adapter->ch_config.state = __I40EVF_TC_INVALID; + iavf_stat_str(&adapter->hw, v_retval)); + adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; + adapter->ch_config.state = __IAVF_TC_INVALID; netdev_reset_tc(netdev); netif_tx_start_all_queues(netdev); break; case VIRTCHNL_OP_DISABLE_CHANNELS: dev_err(&adapter->pdev->dev, "Failed to disable queue channels, error %s\n", - i40evf_stat_str(&adapter->hw, v_retval)); - adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED; - adapter->ch_config.state = __I40EVF_TC_RUNNING; + iavf_stat_str(&adapter->hw, v_retval)); + adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; + adapter->ch_config.state = __IAVF_TC_RUNNING; netif_tx_start_all_queues(netdev); break; case VIRTCHNL_OP_ADD_CLOUD_FILTER: { - struct i40evf_cloud_filter *cf, *cftmp; + struct iavf_cloud_filter *cf, *cftmp; list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { - if (cf->state == __I40EVF_CF_ADD_PENDING) { - cf->state = __I40EVF_CF_INVALID; + if (cf->state == __IAVF_CF_ADD_PENDING) { + cf->state = __IAVF_CF_INVALID; dev_info(&adapter->pdev->dev, "Failed to add cloud filter, error %s\n", - i40evf_stat_str(&adapter->hw, - v_retval)); - i40evf_print_cloud_filter(adapter, - &cf->f); + iavf_stat_str(&adapter->hw, + v_retval)); + iavf_print_cloud_filter(adapter, + &cf->f); list_del(&cf->list); kfree(cf); adapter->num_cloud_filters--; @@ -1315,17 +1306,17 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, } break; case 
VIRTCHNL_OP_DEL_CLOUD_FILTER: { - struct i40evf_cloud_filter *cf; + struct iavf_cloud_filter *cf; list_for_each_entry(cf, &adapter->cloud_filter_list, list) { - if (cf->state == __I40EVF_CF_DEL_PENDING) { - cf->state = __I40EVF_CF_ACTIVE; + if (cf->state == __IAVF_CF_DEL_PENDING) { + cf->state = __IAVF_CF_ACTIVE; dev_info(&adapter->pdev->dev, "Failed to del cloud filter, error %s\n", - i40evf_stat_str(&adapter->hw, - v_retval)); - i40evf_print_cloud_filter(adapter, - &cf->f); + iavf_stat_str(&adapter->hw, + v_retval)); + iavf_print_cloud_filter(adapter, + &cf->f); } } } @@ -1333,7 +1324,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, default: dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n", v_retval, - i40evf_stat_str(&adapter->hw, v_retval), + iavf_stat_str(&adapter->hw, v_retval), v_opcode); } } @@ -1360,8 +1351,8 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource); memcpy(adapter->vf_res, msg, min(msglen, len)); - i40evf_validate_num_queues(adapter); - i40e_vf_parse_hw_config(&adapter->hw, adapter->vf_res); + iavf_validate_num_queues(adapter); + iavf_vf_parse_hw_config(&adapter->hw, adapter->vf_res); if (is_zero_ether_addr(adapter->hw.mac.addr)) { /* restore current mac address */ ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); @@ -1371,19 +1362,19 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); } - i40evf_process_config(adapter); + iavf_process_config(adapter); } break; case VIRTCHNL_OP_ENABLE_QUEUES: /* enable transmits */ - i40evf_irq_enable(adapter, true); - adapter->flags &= ~I40EVF_FLAG_QUEUES_DISABLED; + iavf_irq_enable(adapter, true); + adapter->flags &= ~IAVF_FLAG_QUEUES_DISABLED; break; case VIRTCHNL_OP_DISABLE_QUEUES: - i40evf_free_all_tx_resources(adapter); - i40evf_free_all_rx_resources(adapter); - if (adapter->state == __I40EVF_DOWN_PENDING) { - adapter->state = __I40EVF_DOWN; + iavf_free_all_tx_resources(adapter); + iavf_free_all_rx_resources(adapter); + if (adapter->state == __IAVF_DOWN_PENDING) { + adapter->state = __IAVF_DOWN; wake_up(&adapter->down_waitqueue); } break; @@ -1402,8 +1393,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, * care about that. 
*/ if (msglen && CLIENT_ENABLED(adapter)) - i40evf_notify_client_message(&adapter->vsi, - msg, msglen); + iavf_notify_client_message(&adapter->vsi, msg, msglen); break; case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: @@ -1428,26 +1418,26 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, adapter->num_req_queues, vfres->num_queue_pairs); adapter->num_req_queues = 0; - adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED; + adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; } } break; case VIRTCHNL_OP_ADD_CLOUD_FILTER: { - struct i40evf_cloud_filter *cf; + struct iavf_cloud_filter *cf; list_for_each_entry(cf, &adapter->cloud_filter_list, list) { - if (cf->state == __I40EVF_CF_ADD_PENDING) - cf->state = __I40EVF_CF_ACTIVE; + if (cf->state == __IAVF_CF_ADD_PENDING) + cf->state = __IAVF_CF_ACTIVE; } } break; case VIRTCHNL_OP_DEL_CLOUD_FILTER: { - struct i40evf_cloud_filter *cf, *cftmp; + struct iavf_cloud_filter *cf, *cftmp; list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { - if (cf->state == __I40EVF_CF_DEL_PENDING) { - cf->state = __I40EVF_CF_INVALID; + if (cf->state == __IAVF_CF_DEL_PENDING) { + cf->state = __IAVF_CF_INVALID; list_del(&cf->list); kfree(cf); adapter->num_cloud_filters--; |