Diffstat (limited to 'drivers/net/ethernet/intel/i40e/i40e_xsk.c')
-rw-r--r--   drivers/net/ethernet/intel/i40e/i40e_xsk.c   113
1 file changed, 79 insertions, 34 deletions
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index cd7b52fb6b46..9f47388eaba5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -2,11 +2,8 @@
 /* Copyright(c) 2018 Intel Corporation. */
 
 #include <linux/bpf_trace.h>
-#include <linux/stringify.h>
+#include <linux/unroll.h>
 #include <net/xdp_sock_drv.h>
-#include <net/xdp.h>
-
-#include "i40e.h"
 #include "i40e_txrx_common.h"
 #include "i40e_xsk.h"
 
@@ -294,13 +291,18 @@ static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
 {
         unsigned int totalsize = xdp->data_end - xdp->data_meta;
         unsigned int metasize = xdp->data - xdp->data_meta;
+        struct skb_shared_info *sinfo = NULL;
         struct sk_buff *skb;
+        u32 nr_frags = 0;
 
+        if (unlikely(xdp_buff_has_frags(xdp))) {
+                sinfo = xdp_get_shared_info_from_buff(xdp);
+                nr_frags = sinfo->nr_frags;
+        }
         net_prefetch(xdp->data_meta);
 
         /* allocate a skb to store the frags */
-        skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
-                               GFP_ATOMIC | __GFP_NOWARN);
+        skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize);
         if (unlikely(!skb))
                 goto out;
 
@@ -312,6 +314,28 @@ static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
                 __skb_pull(skb, metasize);
         }
 
+        if (likely(!xdp_buff_has_frags(xdp)))
+                goto out;
+
+        for (int i = 0; i < nr_frags; i++) {
+                struct skb_shared_info *skinfo = skb_shinfo(skb);
+                skb_frag_t *frag = &sinfo->frags[i];
+                struct page *page;
+                void *addr;
+
+                page = dev_alloc_page();
+                if (!page) {
+                        dev_kfree_skb(skb);
+                        return NULL;
+                }
+                addr = page_to_virt(page);
+
+                memcpy(addr, skb_frag_page(frag), skb_frag_size(frag));
+
+                __skb_fill_page_desc_noacc(skinfo, skinfo->nr_frags++,
+                                           addr, 0, skb_frag_size(frag));
+        }
+
 out:
         xsk_buff_free(xdp);
         return skb;
@@ -322,14 +346,13 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
                                       union i40e_rx_desc *rx_desc,
                                       unsigned int *rx_packets,
                                       unsigned int *rx_bytes,
-                                      unsigned int size,
                                       unsigned int xdp_res,
                                       bool *failure)
 {
         struct sk_buff *skb;
 
         *rx_packets = 1;
-        *rx_bytes = size;
+        *rx_bytes = xdp_get_buff_len(xdp_buff);
 
         if (likely(xdp_res == I40E_XDP_REDIR) || xdp_res == I40E_XDP_TX)
                 return;
@@ -363,7 +386,6 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
                 return;
         }
 
-        *rx_bytes = skb->len;
         i40e_process_skb_fields(rx_ring, rx_desc, skb);
         napi_gro_receive(&rx_ring->q_vector->napi, skb);
         return;
@@ -384,12 +406,17 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
 int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
 {
         unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+        u16 next_to_process = rx_ring->next_to_process;
         u16 next_to_clean = rx_ring->next_to_clean;
-        u16 count_mask = rx_ring->count - 1;
         unsigned int xdp_res, xdp_xmit = 0;
+        struct xdp_buff *first = NULL;
+        u32 count = rx_ring->count;
         struct bpf_prog *xdp_prog;
+        u32 entries_to_alloc;
         bool failure = false;
-        u16 cleaned_count;
+
+        if (next_to_process != next_to_clean)
+                first = *i40e_rx_bi(rx_ring, next_to_clean);
 
         /* NB! xdp_prog will always be !NULL, due to the fact that
          * this path is enabled by setting an XDP program.
@@ -404,7 +431,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
                 unsigned int size;
                 u64 qword;
 
-                rx_desc = I40E_RX_DESC(rx_ring, next_to_clean);
+                rx_desc = I40E_RX_DESC(rx_ring, next_to_process);
                 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
 
                 /* This memory barrier is needed to keep us from reading
@@ -417,37 +444,52 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
                         i40e_clean_programming_status(rx_ring,
                                                       rx_desc->raw.qword[0],
                                                       qword);
-                        bi = *i40e_rx_bi(rx_ring, next_to_clean);
+                        bi = *i40e_rx_bi(rx_ring, next_to_process);
                         xsk_buff_free(bi);
-                        next_to_clean = (next_to_clean + 1) & count_mask;
+                        if (++next_to_process == count)
+                                next_to_process = 0;
                         continue;
                 }
 
-                size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
-                       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+                size = FIELD_GET(I40E_RXD_QW1_LENGTH_PBUF_MASK, qword);
                 if (!size)
                         break;
 
-                bi = *i40e_rx_bi(rx_ring, next_to_clean);
+                bi = *i40e_rx_bi(rx_ring, next_to_process);
                 xsk_buff_set_size(bi, size);
-                xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);
+                xsk_buff_dma_sync_for_cpu(bi);
 
-                xdp_res = i40e_run_xdp_zc(rx_ring, bi, xdp_prog);
-                i40e_handle_xdp_result_zc(rx_ring, bi, rx_desc, &rx_packets,
-                                          &rx_bytes, size, xdp_res, &failure);
+                if (!first)
+                        first = bi;
+                else if (!xsk_buff_add_frag(first, bi)) {
+                        xsk_buff_free(first);
+                        break;
+                }
+
+                if (++next_to_process == count)
+                        next_to_process = 0;
+
+                if (i40e_is_non_eop(rx_ring, rx_desc))
+                        continue;
+
+                xdp_res = i40e_run_xdp_zc(rx_ring, first, xdp_prog);
+                i40e_handle_xdp_result_zc(rx_ring, first, rx_desc, &rx_packets,
+                                          &rx_bytes, xdp_res, &failure);
+                next_to_clean = next_to_process;
                 if (failure)
                         break;
                 total_rx_packets += rx_packets;
                 total_rx_bytes += rx_bytes;
                 xdp_xmit |= xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR);
-                next_to_clean = (next_to_clean + 1) & count_mask;
+                first = NULL;
         }
 
         rx_ring->next_to_clean = next_to_clean;
-        cleaned_count = (next_to_clean - rx_ring->next_to_use - 1) & count_mask;
+        rx_ring->next_to_process = next_to_process;
 
-        if (cleaned_count >= I40E_RX_BUFFER_WRITE)
-                failure |= !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);
+        entries_to_alloc = I40E_DESC_UNUSED(rx_ring);
+        if (entries_to_alloc >= I40E_RX_BUFFER_WRITE)
+                failure |= !i40e_alloc_rx_buffers_zc(rx_ring, entries_to_alloc);
 
         i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
         i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
@@ -466,6 +508,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
 static void i40e_xmit_pkt(struct i40e_ring *xdp_ring, struct xdp_desc *desc,
                           unsigned int *total_bytes)
 {
+        u32 cmd = I40E_TX_DESC_CMD_ICRC | xsk_is_eop_desc(desc);
         struct i40e_tx_desc *tx_desc;
         dma_addr_t dma;
 
@@ -474,8 +517,7 @@ static void i40e_xmit_pkt(struct i40e_ring *xdp_ring, struct xdp_desc *desc,
 
         tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
         tx_desc->buffer_addr = cpu_to_le64(dma);
-        tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC | I40E_TX_DESC_CMD_EOP,
-                                                  0, desc->len, 0);
+        tx_desc->cmd_type_offset_bsz = build_ctob(cmd, 0, desc->len, 0);
 
         *total_bytes += desc->len;
 }
@@ -488,15 +530,16 @@ static void i40e_xmit_pkt_batch(struct i40e_ring *xdp_ring, struct xdp_desc *des
         dma_addr_t dma;
         u32 i;
 
-        loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
+        unrolled_count(PKTS_PER_BATCH)
+        for (i = 0; i < PKTS_PER_BATCH; i++) {
+                u32 cmd = I40E_TX_DESC_CMD_ICRC | xsk_is_eop_desc(&desc[i]);
+
                 dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc[i].addr);
                 xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc[i].len);
 
                 tx_desc = I40E_TX_DESC(xdp_ring, ntu++);
                 tx_desc->buffer_addr = cpu_to_le64(dma);
-                tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC |
-                                                          I40E_TX_DESC_CMD_EOP,
-                                                          0, desc[i].len, 0);
+                tx_desc->cmd_type_offset_bsz = build_ctob(cmd, 0, desc[i].len, 0);
 
                 *total_bytes += desc[i].len;
         }
@@ -582,7 +625,7 @@ static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
  * @vsi: Current VSI
  * @tx_ring: XDP Tx ring
  *
- * Returns true if cleanup/tranmission is done.
+ * Returns true if cleanup/transmission is done.
  **/
 bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring)
 {
@@ -683,14 +726,16 @@ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
 
 void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
 {
-        u16 count_mask = rx_ring->count - 1;
         u16 ntc = rx_ring->next_to_clean;
         u16 ntu = rx_ring->next_to_use;
 
-        for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) {
+        while (ntc != ntu) {
                 struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, ntc);
 
                 xsk_buff_free(rx_bi);
+                ntc++;
+                if (ntc >= rx_ring->count)
+                        ntc = 0;
         }
 }
