Diffstat (limited to 'drivers/net/ethernet/intel/idpf/idpf_lib.c')
 drivers/net/ethernet/intel/idpf/idpf_lib.c | 139 +++++++++++++++++++++------
 1 file changed, 113 insertions(+), 26 deletions(-)
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index 4eb20ec2accb..2c2a3e85d693 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -88,6 +88,8 @@ void idpf_intr_rel(struct idpf_adapter *adapter)
idpf_deinit_vector_stack(adapter);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
+ kfree(adapter->rdma_msix_entries);
+ adapter->rdma_msix_entries = NULL;
}
/**
@@ -299,13 +301,33 @@ rel_lock:
*/
int idpf_intr_req(struct idpf_adapter *adapter)
{
+ u16 num_lan_vecs, min_lan_vecs, num_rdma_vecs = 0, min_rdma_vecs = 0;
u16 default_vports = idpf_get_default_vports(adapter);
int num_q_vecs, total_vecs, num_vec_ids;
- int min_vectors, v_actual, err;
+ int min_vectors, actual_vecs, err;
unsigned int vector;
u16 *vecids;
+ int i;
total_vecs = idpf_get_reserved_vecs(adapter);
+ num_lan_vecs = total_vecs;
+ if (idpf_is_rdma_cap_ena(adapter)) {
+ num_rdma_vecs = idpf_get_reserved_rdma_vecs(adapter);
+ min_rdma_vecs = IDPF_MIN_RDMA_VEC;
+
+ if (!num_rdma_vecs) {
+ /* If idpf_get_reserved_rdma_vecs() returns 0, vectors are
+ * pulled from the LAN pool.
+ */
+ num_rdma_vecs = min_rdma_vecs;
+ } else if (num_rdma_vecs < min_rdma_vecs) {
+ dev_err(&adapter->pdev->dev,
+ "Not enough vectors reserved for RDMA (min: %u, current: %u)\n",
+ min_rdma_vecs, num_rdma_vecs);
+ return -EINVAL;
+ }
+ }
+
num_q_vecs = total_vecs - IDPF_MBX_Q_VEC;
err = idpf_send_alloc_vectors_msg(adapter, num_q_vecs);
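To make the budgeting in this hunk easier to follow, here is a minimal, standalone sketch of the same LAN/RDMA split. It is kernel-style C, not driver code: the SKETCH_* constants and the helper name are stand-ins for IDPF_MBX_Q_VEC, IDPF_MIN_Q_VEC, and IDPF_MIN_RDMA_VEC, whose real values live in the idpf headers.

#define SKETCH_MBX_Q_VEC    1   /* stand-in for IDPF_MBX_Q_VEC */
#define SKETCH_MIN_Q_VEC    1   /* stand-in for IDPF_MIN_Q_VEC */
#define SKETCH_MIN_RDMA_VEC 2   /* stand-in for IDPF_MIN_RDMA_VEC */

/* Compute the minimum MSI-X demand and the RDMA share of the pool. */
static int sketch_split_vecs(u16 reserved_rdma_vecs, bool rdma_ena,
                             u16 default_vports, u16 *min_vectors,
                             u16 *num_rdma_vecs)
{
        u16 min_rdma_vecs = 0;

        *num_rdma_vecs = 0;
        if (rdma_ena) {
                min_rdma_vecs = SKETCH_MIN_RDMA_VEC;
                /* Zero reserved vectors means RDMA borrows the minimum
                 * from the LAN pool; a nonzero value below the minimum
                 * is a configuration error.
                 */
                if (!reserved_rdma_vecs)
                        reserved_rdma_vecs = min_rdma_vecs;
                else if (reserved_rdma_vecs < min_rdma_vecs)
                        return -EINVAL;
                *num_rdma_vecs = reserved_rdma_vecs;
        }
        *min_vectors = SKETCH_MBX_Q_VEC +
                       SKETCH_MIN_Q_VEC * default_vports + min_rdma_vecs;
        return 0;
}

pci_alloc_irq_vectors() is then called with *min_vectors as the floor and the total reserved vectors as the ceiling, which is exactly what the next hunk does.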
@@ -316,52 +338,76 @@ int idpf_intr_req(struct idpf_adapter *adapter)
return -EAGAIN;
}
- min_vectors = IDPF_MBX_Q_VEC + IDPF_MIN_Q_VEC * default_vports;
- v_actual = pci_alloc_irq_vectors(adapter->pdev, min_vectors,
- total_vecs, PCI_IRQ_MSIX);
- if (v_actual < min_vectors) {
- dev_err(&adapter->pdev->dev, "Failed to allocate MSIX vectors: %d\n",
- v_actual);
- err = -EAGAIN;
+ min_lan_vecs = IDPF_MBX_Q_VEC + IDPF_MIN_Q_VEC * default_vports;
+ min_vectors = min_lan_vecs + min_rdma_vecs;
+ actual_vecs = pci_alloc_irq_vectors(adapter->pdev, min_vectors,
+ total_vecs, PCI_IRQ_MSIX);
+ if (actual_vecs < 0) {
+ dev_err(&adapter->pdev->dev, "Failed to allocate minimum MSIX vectors required: %d\n",
+ min_vectors);
+ err = actual_vecs;
goto send_dealloc_vecs;
}
- adapter->msix_entries = kcalloc(v_actual, sizeof(struct msix_entry),
- GFP_KERNEL);
+ if (idpf_is_rdma_cap_ena(adapter)) {
+ if (actual_vecs < total_vecs) {
+ dev_warn(&adapter->pdev->dev,
+ "Warning: %d vectors requested, only %d available. Defaulting to minimum (%d) for RDMA and remaining for LAN.\n",
+ total_vecs, actual_vecs, IDPF_MIN_RDMA_VEC);
+ num_rdma_vecs = IDPF_MIN_RDMA_VEC;
+ }
+ adapter->rdma_msix_entries = kcalloc(num_rdma_vecs,
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!adapter->rdma_msix_entries) {
+ err = -ENOMEM;
+ goto free_irq;
+ }
+ }
+
+ num_lan_vecs = actual_vecs - num_rdma_vecs;
+ adapter->msix_entries = kcalloc(num_lan_vecs, sizeof(struct msix_entry),
+ GFP_KERNEL);
if (!adapter->msix_entries) {
err = -ENOMEM;
- goto free_irq;
+ goto free_rdma_msix;
}
adapter->mb_vector.v_idx = le16_to_cpu(adapter->caps.mailbox_vector_id);
- vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL);
+ vecids = kcalloc(actual_vecs, sizeof(u16), GFP_KERNEL);
if (!vecids) {
err = -ENOMEM;
goto free_msix;
}
- num_vec_ids = idpf_get_vec_ids(adapter, vecids, total_vecs,
+ num_vec_ids = idpf_get_vec_ids(adapter, vecids, actual_vecs,
&adapter->req_vec_chunks->vchunks);
- if (num_vec_ids < v_actual) {
+ if (num_vec_ids < actual_vecs) {
err = -EINVAL;
goto free_vecids;
}
- for (vector = 0; vector < v_actual; vector++) {
+ for (vector = 0; vector < num_lan_vecs; vector++) {
adapter->msix_entries[vector].entry = vecids[vector];
adapter->msix_entries[vector].vector =
pci_irq_vector(adapter->pdev, vector);
}
+ for (i = 0; i < num_rdma_vecs; vector++, i++) {
+ adapter->rdma_msix_entries[i].entry = vecids[vector];
+ adapter->rdma_msix_entries[i].vector =
+ pci_irq_vector(adapter->pdev, vector);
+ }
- adapter->num_req_msix = total_vecs;
- adapter->num_msix_entries = v_actual;
/* 'num_avail_msix' is used to distribute excess vectors to the vports
* after considering the minimum vectors required for each default
* vport
*/
- adapter->num_avail_msix = v_actual - min_vectors;
+ adapter->num_avail_msix = num_lan_vecs - min_lan_vecs;
+ adapter->num_msix_entries = num_lan_vecs;
+ if (idpf_is_rdma_cap_ena(adapter))
+ adapter->num_rdma_msix_entries = num_rdma_vecs;
/* Fill MSIX vector lifo stack with vector indexes */
err = idpf_init_vector_stack(adapter);
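The two fill loops above hand out the MSI-X table back to back: LAN entries occupy pci_irq_vector() slots 0 through num_lan_vecs - 1, and the RDMA entries continue from where the first loop stopped. A hedged sketch of how an auxiliary RDMA driver could later claim those vectors; the handler, IRQ name, and helper are invented for illustration, and request_irq() comes from <linux/interrupt.h>.

static irqreturn_t sketch_rdma_irq(int irq, void *data)
{
        /* A real handler would service RDMA completion queues here. */
        return IRQ_HANDLED;
}

static int sketch_request_rdma_irqs(struct idpf_adapter *adapter)
{
        u16 i;
        int err;

        for (i = 0; i < adapter->num_rdma_msix_entries; i++) {
                /* .vector already holds the Linux IRQ number filled in
                 * by idpf_intr_req() above.
                 */
                err = request_irq(adapter->rdma_msix_entries[i].vector,
                                  sketch_rdma_irq, 0, "idpf-rdma-sketch",
                                  adapter);
                if (err)
                        return err;
        }
        return 0;
}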
@@ -383,6 +429,9 @@ free_vecids:
free_msix:
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
+free_rdma_msix:
+ kfree(adapter->rdma_msix_entries);
+ adapter->rdma_msix_entries = NULL;
free_irq:
pci_free_irq_vectors(adapter->pdev);
send_dealloc_vecs:
@@ -755,6 +804,10 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
if (idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
dflt_features |= NETIF_F_RXHASH;
+ if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS,
+ VIRTCHNL2_CAP_FLOW_STEER) &&
+ idpf_vport_is_cap_ena(vport, VIRTCHNL2_VPORT_SIDEBAND_FLOW_STEER))
+ dflt_features |= NETIF_F_NTUPLE;
if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V4))
csum_offloads |= NETIF_F_IP_CSUM;
if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V6))
@@ -972,6 +1025,8 @@ static void idpf_vport_dealloc(struct idpf_vport *vport)
struct idpf_adapter *adapter = vport->adapter;
unsigned int i = vport->idx;
+ idpf_idc_deinit_vport_aux_device(vport->vdev_info);
+
idpf_deinit_mac_addr(vport);
idpf_vport_stop(vport);
@@ -1079,8 +1134,10 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
if (!vport)
return vport;
+ num_max_q = max(max_q->max_txq, max_q->max_rxq);
if (!adapter->vport_config[idx]) {
struct idpf_vport_config *vport_config;
+ struct idpf_q_coalesce *q_coal;
vport_config = kzalloc(sizeof(*vport_config), GFP_KERNEL);
if (!vport_config) {
@@ -1089,6 +1146,21 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
return NULL;
}
+ q_coal = kcalloc(num_max_q, sizeof(*q_coal), GFP_KERNEL);
+ if (!q_coal) {
+ kfree(vport_config);
+ kfree(vport);
+
+ return NULL;
+ }
+ for (int i = 0; i < num_max_q; i++) {
+ q_coal[i].tx_intr_mode = IDPF_ITR_DYNAMIC;
+ q_coal[i].tx_coalesce_usecs = IDPF_ITR_TX_DEF;
+ q_coal[i].rx_intr_mode = IDPF_ITR_DYNAMIC;
+ q_coal[i].rx_coalesce_usecs = IDPF_ITR_RX_DEF;
+ }
+ vport_config->user_config.q_coalesce = q_coal;
+
adapter->vport_config[idx] = vport_config;
}
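The defaults written by this hunk are what a later ethtool get_coalesce handler would report back to userspace. A minimal sketch of that read-out, assuming only the idpf_q_coalesce fields visible in the hunk; the helper itself is illustrative, not the driver's real ethtool path.

static void sketch_fill_coalesce(const struct idpf_q_coalesce *q_coal,
                                 struct ethtool_coalesce *ec)
{
        /* IDPF_ITR_DYNAMIC corresponds to ethtool's adaptive mode. */
        ec->use_adaptive_tx_coalesce = q_coal->tx_intr_mode == IDPF_ITR_DYNAMIC;
        ec->tx_coalesce_usecs = q_coal->tx_coalesce_usecs;
        ec->use_adaptive_rx_coalesce = q_coal->rx_intr_mode == IDPF_ITR_DYNAMIC;
        ec->rx_coalesce_usecs = q_coal->rx_coalesce_usecs;
}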
@@ -1098,7 +1170,6 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
vport->default_vport = adapter->num_alloc_vports <
idpf_get_default_vports(adapter);
- num_max_q = max(max_q->max_txq, max_q->max_rxq);
vport->q_vector_idxs = kcalloc(num_max_q, sizeof(u16), GFP_KERNEL);
if (!vport->q_vector_idxs)
goto free_vport;
@@ -1481,6 +1552,7 @@ void idpf_init_task(struct work_struct *work)
spin_lock_init(&vport_config->mac_filter_list_lock);
INIT_LIST_HEAD(&vport_config->user_config.mac_filter_list);
+ INIT_LIST_HEAD(&vport_config->user_config.flow_steer_list);
err = idpf_check_supported_desc_ids(vport);
if (err) {
@@ -1738,6 +1810,8 @@ static int idpf_init_hard_reset(struct idpf_adapter *adapter)
} else if (test_and_clear_bit(IDPF_HR_FUNC_RESET, adapter->flags)) {
bool is_reset = idpf_is_reset_detected(adapter);
+ idpf_idc_issue_reset_event(adapter->cdev_info);
+
idpf_set_vport_state(adapter);
idpf_vc_core_deinit(adapter);
if (!is_reset)
@@ -1785,6 +1859,10 @@ static int idpf_init_hard_reset(struct idpf_adapter *adapter)
unlock_mutex:
mutex_unlock(&adapter->vport_ctrl_lock);
+ /* Wait until all vports are created to init the RDMA core aux device */
+ if (!err)
+ err = idpf_idc_init(adapter);
+
return err;
}
@@ -1868,6 +1946,9 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
idpf_vport_calc_num_q_desc(new_vport);
break;
case IDPF_SR_MTU_CHANGE:
+ idpf_idc_vdev_mtu_event(vport->vdev_info,
+ IIDC_RDMA_EVENT_BEFORE_MTU_CHANGE);
+ break;
case IDPF_SR_RSC_CHANGE:
break;
default:
@@ -1912,9 +1993,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
if (current_state == __IDPF_VPORT_UP)
err = idpf_vport_open(vport);
- kfree(new_vport);
-
- return err;
+ goto free_vport;
err_reset:
idpf_send_add_queues_msg(vport, vport->num_txq, vport->num_complq,
@@ -1927,6 +2006,10 @@ err_open:
free_vport:
kfree(new_vport);
+ if (reset_cause == IDPF_SR_MTU_CHANGE)
+ idpf_idc_vdev_mtu_event(vport->vdev_info,
+ IIDC_RDMA_EVENT_AFTER_MTU_CHANGE);
+
return err;
}
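Taken together, the two MTU hunks bracket the soft reset with RDMA notifications. Condensed control flow, not literal driver code:

if (reset_cause == IDPF_SR_MTU_CHANGE)
        idpf_idc_vdev_mtu_event(vport->vdev_info,
                                IIDC_RDMA_EVENT_BEFORE_MTU_CHANGE);

/* ... queues are torn down and re-created with the new MTU ... */

if (reset_cause == IDPF_SR_MTU_CHANGE)
        idpf_idc_vdev_mtu_event(vport->vdev_info,
                                IIDC_RDMA_EVENT_AFTER_MTU_CHANGE);

The AFTER event fires on both success and error paths, since the reworked exits now funnel through the free_vport label.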
@@ -2314,8 +2397,12 @@ void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem, u64 size)
struct idpf_adapter *adapter = hw->back;
size_t sz = ALIGN(size, 4096);
- mem->va = dma_alloc_coherent(&adapter->pdev->dev, sz,
- &mem->pa, GFP_KERNEL);
+ /* The control queue resources are freed under a spinlock; contiguous
+ * pages avoid IOMMU remapping and the use of vmap() (and vunmap()
+ * in the dma_free_*() path).
+ */
+ mem->va = dma_alloc_attrs(&adapter->pdev->dev, sz, &mem->pa,
+ GFP_KERNEL, DMA_ATTR_FORCE_CONTIGUOUS);
mem->size = sz;
return mem->va;
@@ -2330,8 +2417,8 @@ void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem)
{
struct idpf_adapter *adapter = hw->back;
- dma_free_coherent(&adapter->pdev->dev, mem->size,
- mem->va, mem->pa);
+ dma_free_attrs(&adapter->pdev->dev, mem->size,
+ mem->va, mem->pa, DMA_ATTR_FORCE_CONTIGUOUS);
mem->size = 0;
mem->va = NULL;
mem->pa = 0;
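Both hunks must agree on the attribute: a buffer allocated with DMA_ATTR_FORCE_CONTIGUOUS has to be freed with the same flag. A self-contained sketch of the pair; the function names are placeholders, while dma_alloc_attrs()/dma_free_attrs() are the real <linux/dma-mapping.h> API.

static void *sketch_cq_alloc(struct device *dev, size_t size, dma_addr_t *pa)
{
        /* A physically contiguous buffer needs no vmap(), so the
         * matching free never calls vunmap(), which can sleep and so
         * cannot run under the spinlock held while control queue
         * resources are freed.
         */
        return dma_alloc_attrs(dev, ALIGN(size, 4096), pa, GFP_KERNEL,
                               DMA_ATTR_FORCE_CONTIGUOUS);
}

static void sketch_cq_free(struct device *dev, size_t size, void *va,
                           dma_addr_t pa)
{
        /* The attrs argument must match the one used at allocation. */
        dma_free_attrs(dev, size, va, pa, DMA_ATTR_FORCE_CONTIGUOUS);
}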