Diffstat (limited to 'drivers/net/ethernet/amazon/ena/ena_netdev.c'):
 drivers/net/ethernet/amazon/ena/ena_netdev.c | 88 ++++++++++++++-----------
 1 file changed, 49 insertions(+), 39 deletions(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 664e3ed97ea9..c487d2a7d6dd 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -158,7 +158,6 @@ static void ena_init_io_rings_common(struct ena_adapter *adapter,
ring->adapter = adapter;
ring->ena_dev = adapter->ena_dev;
ring->per_napi_packets = 0;
- ring->per_napi_bytes = 0;
ring->cpu = 0;
ring->first_interrupt = false;
ring->no_interrupt_event_cnt = 0;
@@ -196,6 +195,7 @@ static void ena_init_io_rings(struct ena_adapter *adapter)
rxr->smoothed_interval =
ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
rxr->empty_rx_queue = 0;
+ adapter->ena_napi[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}
}
@@ -712,6 +712,7 @@ static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
for (i = 0; i < adapter->num_queues; i++) {
ena_qid = ENA_IO_RXQ_IDX(i);
+ cancel_work_sync(&adapter->ena_napi[i].dim.work);
ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
}
}
@@ -823,7 +824,8 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
above_thresh =
ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
ENA_TX_WAKEUP_THRESH);
- if (netif_tx_queue_stopped(txq) && above_thresh) {
+ if (netif_tx_queue_stopped(txq) && above_thresh &&
+ test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) {
netif_tx_wake_queue(txq);
u64_stats_update_begin(&tx_ring->syncp);
tx_ring->tx_stats.queue_wakeup++;
@@ -832,9 +834,6 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
__netif_tx_unlock(txq);
}
- tx_ring->per_napi_bytes += tx_bytes;
- tx_ring->per_napi_packets += tx_pkts;
-
return tx_pkts;
}
@@ -1118,7 +1117,6 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
} while (likely(res_budget));
work_done = budget - res_budget;
- rx_ring->per_napi_bytes += total_len;
rx_ring->per_napi_packets += work_done;
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->rx_stats.bytes += total_len;
@@ -1155,35 +1153,50 @@ error:
return 0;
}
-void ena_adjust_intr_moderation(struct ena_ring *rx_ring,
- struct ena_ring *tx_ring)
+static void ena_dim_work(struct work_struct *w)
{
- /* We apply adaptive moderation on Rx path only.
- * Tx uses static interrupt moderation.
- */
- ena_com_calculate_interrupt_delay(rx_ring->ena_dev,
- rx_ring->per_napi_packets,
- rx_ring->per_napi_bytes,
- &rx_ring->smoothed_interval,
- &rx_ring->moder_tbl_idx);
-
- /* Reset per napi packets/bytes */
- tx_ring->per_napi_packets = 0;
- tx_ring->per_napi_bytes = 0;
+ struct dim *dim = container_of(w, struct dim, work);
+ struct dim_cq_moder cur_moder =
+ net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+ struct ena_napi *ena_napi = container_of(dim, struct ena_napi, dim);
+
+ ena_napi->rx_ring->smoothed_interval = cur_moder.usec;
+ dim->state = DIM_START_MEASURE;
+}
+
+static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi)
+{
+ struct dim_sample dim_sample;
+ struct ena_ring *rx_ring = ena_napi->rx_ring;
+
+ if (!rx_ring->per_napi_packets)
+ return;
+
+ rx_ring->non_empty_napi_events++;
+
+ dim_update_sample(rx_ring->non_empty_napi_events,
+ rx_ring->rx_stats.cnt,
+ rx_ring->rx_stats.bytes,
+ &dim_sample);
+
+ net_dim(&ena_napi->dim, dim_sample);
+
rx_ring->per_napi_packets = 0;
- rx_ring->per_napi_bytes = 0;
}
static void ena_unmask_interrupt(struct ena_ring *tx_ring,
struct ena_ring *rx_ring)
{
struct ena_eth_io_intr_reg intr_reg;
+ u32 rx_interval = ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev) ?
+ rx_ring->smoothed_interval :
+ ena_com_get_nonadaptive_moderation_interval_rx(rx_ring->ena_dev);
/* Update intr register: rx intr delay,
* tx intr delay and interrupt unmask
*/
ena_com_update_intr_reg(&intr_reg,
- rx_ring->smoothed_interval,
+ rx_interval,
tx_ring->smoothed_interval,
true);
@@ -1260,9 +1273,11 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
* from the interrupt context (vs from sk_busy_loop)
*/
if (napi_complete_done(napi, rx_work_done)) {
- /* Tx and Rx share the same interrupt vector */
+ /* We apply adaptive moderation on Rx path only.
+ * Tx uses static interrupt moderation.
+ */
if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
- ena_adjust_intr_moderation(rx_ring, tx_ring);
+ ena_adjust_adaptive_rx_intr_moderation(ena_napi);
ena_unmask_interrupt(tx_ring, rx_ring);
}
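
Taken together, the hunks above replace the driver's homegrown Rx moderation with the kernel's generic dim algorithm: the napi poll path feeds an (events, packets, bytes) sample into net_dim(), which schedules dim.work when it decides to switch profiles; ena_dim_work() then translates the chosen profile into microseconds that ena_unmask_interrupt() programs on the next unmask. Below is a minimal consumer-side sketch of that flow, not part of the patch: struct my_queue and its fields are hypothetical stand-ins for struct ena_napi / struct ena_ring, and it assumes the by-value net_dim() signature this patch is written against.

#include <linux/dim.h>
#include <linux/workqueue.h>

/* Hypothetical per-queue context; only the fields the dim flow touches. */
struct my_queue {
	struct dim dim;		/* net_dim state machine + deferred work */
	u16 napi_events;	/* non-empty napi completions */
	u64 rx_packets;		/* cumulative Rx packet counter */
	u64 rx_bytes;		/* cumulative Rx byte counter */
	u16 rx_usecs;		/* interval programmed on the next unmask */
};

/* Scheduled by net_dim() when it decides to move to a new profile. */
static void my_dim_work(struct work_struct *w)
{
	struct dim *dim = container_of(w, struct dim, work);
	struct my_queue *q = container_of(dim, struct my_queue, dim);
	struct dim_cq_moder moder =
		net_dim_get_rx_moderation(dim->mode, dim->profile_ix);

	q->rx_usecs = moder.usec;	/* picked up at interrupt unmask time */
	dim->state = DIM_START_MEASURE;	/* restart the measurement window */
}

/* One-time setup when the queue is created. */
static void my_queue_init_dim(struct my_queue *q)
{
	q->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
	INIT_WORK(&q->dim.work, my_dim_work);
}

/* Called from napi poll after Rx work, before re-enabling the interrupt. */
static void my_queue_feed_dim(struct my_queue *q)
{
	struct dim_sample sample;

	dim_update_sample(q->napi_events, q->rx_packets, q->rx_bytes, &sample);
	net_dim(&q->dim, sample);	/* by-value signature, as in this patch */
}

Note that dim_update_sample() only snapshots cumulative counters; net_dim() diffs consecutive samples internally, which is why the patch can pass rx_stats.cnt and rx_stats.bytes straight from the ring statistics.
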
@@ -1552,14 +1567,6 @@ static void ena_napi_enable_all(struct ena_adapter *adapter)
napi_enable(&adapter->ena_napi[i].napi);
}
-static void ena_restore_ethtool_params(struct ena_adapter *adapter)
-{
- adapter->tx_usecs = 0;
- adapter->rx_usecs = 0;
- adapter->tx_frames = 1;
- adapter->rx_frames = 1;
-}
-
/* Configure the Rx forwarding */
static int ena_rss_configure(struct ena_adapter *adapter)
{
@@ -1609,8 +1616,6 @@ static int ena_up_complete(struct ena_adapter *adapter)
/* enable transmits */
netif_tx_start_all_queues(adapter->netdev);
- ena_restore_ethtool_params(adapter);
-
ena_napi_enable_all(adapter);
return 0;
@@ -1740,13 +1745,16 @@ static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
rc = ena_create_io_rx_queue(adapter, i);
if (rc)
goto create_err;
+ INIT_WORK(&adapter->ena_napi[i].dim.work, ena_dim_work);
}
return 0;
create_err:
- while (i--)
+ while (i--) {
+ cancel_work_sync(&adapter->ena_napi[i].dim.work);
ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
+ }
return rc;
}
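
The INIT_WORK added here pairs with the cancel_work_sync() added in ena_destroy_all_rx_queues() earlier in the patch: the dim worker is armed only after a queue has been created successfully, and it is always flushed before that queue is torn down, including on the error-unwind path. A compact sketch of that pairing follows; my_adapter, my_create_hw_rx_queue() and my_destroy_hw_rx_queue() are hypothetical stand-ins for the adapter layout and the ena_com queue calls, and my_dim_work is the handler from the sketch above.

struct my_adapter {
	struct my_queue *queues;	/* per-queue contexts from the sketch above */
	int num_queues;
};

int my_create_hw_rx_queue(struct my_adapter *adapter, int qid);
void my_destroy_hw_rx_queue(struct my_adapter *adapter, int qid);

static int my_create_all_rx_queues(struct my_adapter *adapter)
{
	int i, rc;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = my_create_hw_rx_queue(adapter, i);
		if (rc)
			goto err;
		/* Arm the dim worker only once the queue actually exists. */
		INIT_WORK(&adapter->queues[i].dim.work, my_dim_work);
	}
	return 0;

err:
	/* Unwind in reverse: quiesce the worker before its queue goes away. */
	while (i--) {
		cancel_work_sync(&adapter->queues[i].dim.work);
		my_destroy_hw_rx_queue(adapter, i);
	}
	return rc;
}
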
@@ -2419,6 +2427,9 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev,
("K"[0] << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT);
host_info->num_cpus = num_online_cpus();
+ host_info->driver_supported_features =
+ ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK;
+
rc = ena_com_set_host_attributes(ena_dev);
if (rc) {
if (rc == -EOPNOTSUPP)
@@ -3485,10 +3496,12 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
calc_queue_ctx.pdev = pdev;
- /* initial Tx interrupt delay, Assumes 1 usec granularity.
+ /* Initial Tx and RX interrupt delay. Assumes 1 usec granularity.
* Updated during device initialization with the real granularity
*/
ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
+ ena_dev->intr_moder_rx_interval = ENA_INTR_INITIAL_RX_INTERVAL_USECS;
+ ena_dev->intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
io_queue_num = ena_calc_io_queue_num(pdev, ena_dev, &get_feat_ctx);
rc = ena_calc_queue_size(&calc_queue_ctx);
if (rc || io_queue_num <= 0) {
@@ -3618,7 +3631,6 @@ err_free_msix:
ena_free_mgmnt_irq(adapter);
ena_disable_msix(adapter);
err_worker_destroy:
- ena_com_destroy_interrupt_moderation(ena_dev);
del_timer(&adapter->timer_service);
err_netdev_destroy:
free_netdev(netdev);
@@ -3679,8 +3691,6 @@ static void ena_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
- ena_com_destroy_interrupt_moderation(ena_dev);
-
vfree(ena_dev);
}