Diffstat (limited to 'drivers/net/ethernet/intel/idpf/idpf_txrx.c')
 drivers/net/ethernet/intel/idpf/idpf_txrx.c | 96
 1 file changed, 49 insertions(+), 47 deletions(-)
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 631679cdaa6f..66a1b040639d 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -383,12 +383,12 @@ err_out:
*/
static void idpf_rx_page_rel(struct libeth_fqe *rx_buf)
{
- if (unlikely(!rx_buf->page))
+ if (unlikely(!rx_buf->netmem))
return;
- page_pool_put_full_page(rx_buf->page->pp, rx_buf->page, false);
+ libeth_rx_recycle_slow(rx_buf->netmem);
- rx_buf->page = NULL;
+ rx_buf->netmem = 0;
rx_buf->offset = 0;
}
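This hunk swaps the raw struct page pointer in the Rx buffer for an opaque netmem_ref handle. A netmem_ref is an integer-like cookie that may encode either a real page or a net_iov, so the "empty" value is 0 rather than NULL, and recycling goes through the libeth helper instead of touching the page pool directly. A minimal sketch of the same release-once-then-clear pattern, assuming only the libeth_fqe fields visible in this hunk and the libeth Rx header the driver already uses:

#include <net/libeth/rx.h>

/* Hypothetical helper: release an Rx fill-queue element exactly once.
 * netmem_ref is an opaque handle, so an empty slot is 0, not NULL.
 */
static void example_fqe_release(struct libeth_fqe *fqe)
{
	if (!fqe->netmem)
		return;				/* already empty */

	libeth_rx_recycle_slow(fqe->netmem);	/* return buffer to its pool */
	fqe->netmem = 0;			/* mark the slot free */
	fqe->offset = 0;
}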
@@ -2184,6 +2184,19 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag);
}
+/* Global conditions to tell whether the txq (and related resources)
+ * has room to allow the use of "size" descriptors.
+ */
+static int idpf_txq_has_room(struct idpf_tx_queue *tx_q, u32 size)
+{
+ if (IDPF_DESC_UNUSED(tx_q) < size ||
+ IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) >
+ IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq) ||
+ IDPF_TX_BUF_RSV_LOW(tx_q))
+ return 0;
+ return 1;
+}
+
/**
* idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions
* @tx_q: the queue to be checked
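idpf_txq_has_room() folds three independent resources into one predicate: free descriptor slots, outstanding completions, and the reserved buffer stash. The descriptor half, IDPF_DESC_UNUSED(), is the classic ring-occupancy formula. A self-contained demonstration of that arithmetic (illustrative names, not the driver's macros):

#include <stdio.h>

/* Free slots in a ring of `count` entries between the consumer index
 * `ntc` (next to clean) and producer index `ntu` (next to use). One slot
 * is always left unused so a full ring is distinguishable from an empty
 * one.
 */
static unsigned int ring_unused(unsigned int ntc, unsigned int ntu,
				unsigned int count)
{
	return (ntc > ntu ? 0 : count) + ntc - ntu - 1;
}

int main(void)
{
	/* 512-slot ring, cleaner at 10, producer at 500: 21 slots free */
	printf("%u\n", ring_unused(10, 500, 512));
	/* producer one behind the cleaner: ring full, 0 slots free */
	printf("%u\n", ring_unused(10, 9, 512));
	return 0;
}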
@@ -2194,29 +2207,11 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q,
unsigned int descs_needed)
{
- if (idpf_tx_maybe_stop_common(tx_q, descs_needed))
- goto out;
-
- /* If there are too many outstanding completions expected on the
- * completion queue, stop the TX queue to give the device some time to
- * catch up
- */
- if (unlikely(IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) >
- IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq)))
- goto splitq_stop;
-
- /* Also check for available book keeping buffers; if we are low, stop
- * the queue to wait for more completions
- */
- if (unlikely(IDPF_TX_BUF_RSV_LOW(tx_q)))
- goto splitq_stop;
-
- return 0;
-
-splitq_stop:
- netif_stop_subqueue(tx_q->netdev, tx_q->idx);
+ if (netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
+ idpf_txq_has_room(tx_q, descs_needed),
+ 1, 1))
+ return 0;
-out:
u64_stats_update_begin(&tx_q->stats_sync);
u64_stats_inc(&tx_q->q_stats.q_busy);
u64_stats_update_end(&tx_q->stats_sync);
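The rewrite replaces the hand-rolled stop logic with netif_subqueue_maybe_stop() from include/net/netdev_queues.h, which also handles the stop/re-wake races the open-coded version ignored. Passing the boolean result of idpf_txq_has_room() as the "descriptor count" with stop and start thresholds of 1 is a small trick: the core helper stops the queue when the count drops below the stop threshold, so a 0/1 predicate with 1/1 thresholds turns it into a generic "stop unless every resource is available" check. A hedged sketch of the calling pattern, assuming the return convention documented in netdev_queues.h (0 once the queue was actually stopped, nonzero while it may keep running):

#include <linux/netdevice.h>
#include <net/netdev_queues.h>

/* Illustrative xmit-path check built on a composite room predicate. */
static int example_maybe_stop(struct net_device *netdev, u32 qidx,
			      bool has_room)
{
	if (netif_subqueue_maybe_stop(netdev, qidx, has_room, 1, 1))
		return 0;	/* queue still running (or re-woken) */

	return -EBUSY;		/* stopped: caller counts q_busy and bails */
}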
@@ -2242,12 +2237,6 @@ void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
tx_q->next_to_use = val;
- if (idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED)) {
- u64_stats_update_begin(&tx_q->stats_sync);
- u64_stats_inc(&tx_q->q_stats.q_busy);
- u64_stats_update_end(&tx_q->stats_sync);
- }
-
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
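With the busy-stat bookkeeping moved into idpf_tx_maybe_stop_splitq(), idpf_tx_buf_hw_update() shrinks to its essential job: publish next_to_use, order the descriptor writes, and ring the doorbell. A minimal sketch of that tail-bump pattern (illustrative names; the barrier mirrors the comment in the surrounding context):

#include <linux/io.h>

/* Publish new descriptors, then notify hardware. The write barrier keeps
 * weakly ordered CPUs from letting the tail write overtake the descriptor
 * stores the device is about to fetch.
 */
static void example_bump_tail(void __iomem *tail, u32 next_to_use)
{
	wmb();				/* descriptors visible before doorbell */
	writel(next_to_use, tail);	/* MMIO doorbell write */
}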
@@ -3251,10 +3240,10 @@ idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb,
unsigned int size)
{
- u32 hr = rx_buf->page->pp->p.offset;
+ u32 hr = netmem_get_pp(rx_buf->netmem)->p.offset;
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
- rx_buf->offset + hr, size, rx_buf->truesize);
+ skb_add_rx_frag_netmem(skb, skb_shinfo(skb)->nr_frags, rx_buf->netmem,
+ rx_buf->offset + hr, size, rx_buf->truesize);
}
/**
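idpf_rx_add_frag() makes the same page-to-netmem move on the fast path: the page pool's configured headroom is now read through netmem_get_pp(), which resolves the owning pool whether the handle wraps a real page or a net_iov, and the fragment is attached with skb_add_rx_frag_netmem() so an skb can carry buffers the CPU may never map. A hedged sketch of reading pool parameters through the handle:

#include <net/netmem.h>

/* Illustrative: compute a fragment's offset within a netmem buffer.
 * netmem_get_pp() works without requiring CPU-mappable memory.
 */
static u32 example_frag_offset(netmem_ref nm, u32 buf_offset)
{
	const struct page_pool *pp = netmem_get_pp(nm);

	return buf_offset + pp->p.offset;	/* skip reserved headroom */
}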
@@ -3277,16 +3266,22 @@ static u32 idpf_rx_hsplit_wa(const struct libeth_fqe *hdr,
struct libeth_fqe *buf, u32 data_len)
{
u32 copy = data_len <= L1_CACHE_BYTES ? data_len : ETH_HLEN;
+ struct page *hdr_page, *buf_page;
const void *src;
void *dst;
- if (!libeth_rx_sync_for_cpu(buf, copy))
+ if (unlikely(netmem_is_net_iov(buf->netmem)) ||
+ !libeth_rx_sync_for_cpu(buf, copy))
return 0;
- dst = page_address(hdr->page) + hdr->offset + hdr->page->pp->p.offset;
- src = page_address(buf->page) + buf->offset + buf->page->pp->p.offset;
- memcpy(dst, src, LARGEST_ALIGN(copy));
+ hdr_page = __netmem_to_page(hdr->netmem);
+ buf_page = __netmem_to_page(buf->netmem);
+ dst = page_address(hdr_page) + hdr->offset +
+ pp_page_to_nmdesc(hdr_page)->pp->p.offset;
+ src = page_address(buf_page) + buf->offset +
+ pp_page_to_nmdesc(buf_page)->pp->p.offset;
+ memcpy(dst, src, LARGEST_ALIGN(copy));
buf->offset += copy;
return copy;
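Two details in this hunk are worth spelling out. First, the new netmem_is_net_iov() test: the header-split workaround does a CPU memcpy(), which is impossible when the payload buffer is a net_iov (e.g. device memory with no kernel mapping), so that case must bail out before anything dereferences the buffer. Second, once net_iov is excluded, the unchecked __netmem_to_page() conversion is legitimate, and the pool pointer is reached via pp_page_to_nmdesc() because it now lives in the netmem descriptor rather than directly in struct page. A sketch of the underlying rule:

#include <linux/mm.h>
#include <net/netmem.h>

/* Illustrative: a netmem_ref may wrap either a real struct page or a
 * net_iov. Code that needs a CPU virtual address must reject net_iov
 * first; only then is the unchecked __netmem_to_page() conversion legal.
 */
static void *example_netmem_va(netmem_ref nm, u32 offset)
{
	if (netmem_is_net_iov(nm))
		return NULL;	/* not CPU-addressable; caller must bail */

	return page_address(__netmem_to_page(nm)) + offset;
}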
@@ -3302,11 +3297,12 @@ static u32 idpf_rx_hsplit_wa(const struct libeth_fqe *hdr,
*/
struct sk_buff *idpf_rx_build_skb(const struct libeth_fqe *buf, u32 size)
{
- u32 hr = buf->page->pp->p.offset;
+ struct page *buf_page = __netmem_to_page(buf->netmem);
+ u32 hr = pp_page_to_nmdesc(buf_page)->pp->p.offset;
struct sk_buff *skb;
void *va;
- va = page_address(buf->page) + buf->offset;
+ va = page_address(buf_page) + buf->offset;
prefetch(va + hr);
skb = napi_build_skb(va, buf->truesize);
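idpf_rx_build_skb() can use the unchecked __netmem_to_page() without a net_iov test because a buffer that becomes the skb head must be host memory: this path dereferences its virtual address. The hunk ends mid-function; the usual tail of the build-skb pattern, sketched under that assumption (the actual function body continues outside the hunk):

#include <linux/skbuff.h>

/* Illustrative completion of the pattern: wrap an skb around an existing
 * pool buffer (no copy), then claim the headroom and expose the payload.
 * `hr` is the page pool's p.offset headroom from the context above.
 */
static struct sk_buff *example_finish_skb(void *va, u32 truesize,
					  u32 hr, u32 size)
{
	struct sk_buff *skb = napi_build_skb(va, truesize);

	if (unlikely(!skb))
		return NULL;

	skb_mark_for_recycle(skb);	/* let the page pool recycle it */
	skb_reserve(skb, hr);		/* headroom before the payload */
	__skb_put(skb, size);		/* mark payload length */

	return skb;
}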
@@ -3440,7 +3436,8 @@ static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget)
if (unlikely(!hdr_len && !skb)) {
hdr_len = idpf_rx_hsplit_wa(hdr, rx_buf, pkt_len);
- pkt_len -= hdr_len;
+ /* If failed, drop both buffers by setting len to 0 */
+ pkt_len -= hdr_len ? : pkt_len;
u64_stats_update_begin(&rxq->stats_sync);
u64_stats_inc(&rxq->q_stats.hsplit_buf_ovf);
@@ -3457,7 +3454,7 @@ static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget)
u64_stats_update_end(&rxq->stats_sync);
}
- hdr->page = NULL;
+ hdr->netmem = 0;
payload:
if (!libeth_rx_sync_for_cpu(rx_buf, pkt_len))
@@ -3473,7 +3470,7 @@ payload:
break;
skip_data:
- rx_buf->page = NULL;
+ rx_buf->netmem = 0;
idpf_rx_post_buf_refill(refillq, buf_id);
IDPF_RX_BUMP_NTC(rxq, ntc);
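The drop logic above leans on a GNU C conditional with an omitted middle operand: `pkt_len -= hdr_len ? : pkt_len;` subtracts hdr_len when it is nonzero, and otherwise subtracts pkt_len from itself, zeroing the packet length so the rest of the loop discards both the header and payload buffers when the workaround copy fails. A standalone demonstration of the operator:

#include <stdio.h>

int main(void)
{
	unsigned int pkt_len = 1400, hdr_len = 0;

	pkt_len -= hdr_len ? : pkt_len;	/* hdr_len == 0 -> pkt_len = 0 */
	printf("%u\n", pkt_len);	/* prints 0 */

	pkt_len = 1400;
	hdr_len = 64;
	pkt_len -= hdr_len ? : pkt_len;	/* hdr_len != 0 -> 1336 */
	printf("%u\n", pkt_len);	/* prints 1336 */

	return 0;
}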
@@ -4360,9 +4357,13 @@ static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
int idpf_vport_intr_alloc(struct idpf_vport *vport)
{
u16 txqs_per_vector, rxqs_per_vector, bufqs_per_vector;
+ struct idpf_vport_user_config_data *user_config;
struct idpf_q_vector *q_vector;
+ struct idpf_q_coalesce *q_coal;
u32 complqs_per_vector, v_idx;
+ u16 idx = vport->idx;
+ user_config = &vport->adapter->vport_config[idx]->user_config;
vport->q_vectors = kcalloc(vport->num_q_vectors,
sizeof(struct idpf_q_vector), GFP_KERNEL);
if (!vport->q_vectors)
@@ -4380,14 +4381,15 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport)
for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
q_vector = &vport->q_vectors[v_idx];
+ q_coal = &user_config->q_coalesce[v_idx];
q_vector->vport = vport;
- q_vector->tx_itr_value = IDPF_ITR_TX_DEF;
- q_vector->tx_intr_mode = IDPF_ITR_DYNAMIC;
+ q_vector->tx_itr_value = q_coal->tx_coalesce_usecs;
+ q_vector->tx_intr_mode = q_coal->tx_intr_mode;
q_vector->tx_itr_idx = VIRTCHNL2_ITR_IDX_1;
- q_vector->rx_itr_value = IDPF_ITR_RX_DEF;
- q_vector->rx_intr_mode = IDPF_ITR_DYNAMIC;
+ q_vector->rx_itr_value = q_coal->rx_coalesce_usecs;
+ q_vector->rx_intr_mode = q_coal->rx_intr_mode;
q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0;
q_vector->tx = kcalloc(txqs_per_vector, sizeof(*q_vector->tx),
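The interrupt-allocation hunks change where ITR settings come from: instead of the compile-time defaults (IDPF_ITR_TX_DEF / IDPF_ITR_RX_DEF with dynamic moderation), each vector now inherits the per-queue coalescing values persisted in the vport's user_config, so values applied via `ethtool -C` survive a soft reset or queue reconfiguration. A hedged sketch of the shape of that persisted record and how it is applied, using only field names visible in the diff (the real layout lives in the driver's headers):

/* Illustrative mirror of the per-queue coalescing record this hunk reads. */
struct example_q_coalesce {
	u32 tx_coalesce_usecs;	/* Tx ITR interval */
	u32 tx_intr_mode;	/* dynamic vs. fixed moderation */
	u32 rx_coalesce_usecs;	/* Rx ITR interval */
	u32 rx_intr_mode;
};

/* Apply persisted settings to one interrupt vector. */
static void example_apply_coalesce(struct idpf_q_vector *qv,
				   const struct example_q_coalesce *qc)
{
	qv->tx_itr_value = qc->tx_coalesce_usecs;
	qv->tx_intr_mode = qc->tx_intr_mode;
	qv->rx_itr_value = qc->rx_coalesce_usecs;
	qv->rx_intr_mode = qc->rx_intr_mode;
}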