Diffstat (limited to 'include/net/libeth')
 include/net/libeth/cache.h |  66 +++++++
 include/net/libeth/rx.h    | 261 +++++++++++++++++++++++++
 include/net/libeth/tx.h    | 129 +++++++++++++
 include/net/libeth/types.h |  25 +++
 4 files changed, 481 insertions(+), 0 deletions(-)
diff --git a/include/net/libeth/cache.h b/include/net/libeth/cache.h
new file mode 100644
index 000000000000..bdb0c043ce61
--- /dev/null
+++ b/include/net/libeth/cache.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2024 Intel Corporation */
+
+#ifndef __LIBETH_CACHE_H
+#define __LIBETH_CACHE_H
+
+#include <linux/cache.h>
+
+/**
+ * libeth_cacheline_group_assert - make sure cacheline group size is expected
+ * @type: type of the structure containing the group
+ * @grp: group name inside the struct
+ * @sz: expected group size
+ */
+#if defined(CONFIG_64BIT) && SMP_CACHE_BYTES == 64
+#define libeth_cacheline_group_assert(type, grp, sz) \
+ static_assert(offsetof(type, __cacheline_group_end__##grp) - \
+ offsetofend(type, __cacheline_group_begin__##grp) == \
+ (sz))
+#define __libeth_cacheline_struct_assert(type, sz) \
+ static_assert(sizeof(type) == (sz))
+#else /* !CONFIG_64BIT || SMP_CACHE_BYTES != 64 */
+#define libeth_cacheline_group_assert(type, grp, sz) \
+ static_assert(offsetof(type, __cacheline_group_end__##grp) - \
+ offsetofend(type, __cacheline_group_begin__##grp) <= \
+ (sz))
+#define __libeth_cacheline_struct_assert(type, sz) \
+ static_assert(sizeof(type) <= (sz))
+#endif /* !CONFIG_64BIT || SMP_CACHE_BYTES != 64 */
+
+#define __libeth_cls1(sz1) SMP_CACHE_ALIGN(sz1)
+#define __libeth_cls2(sz1, sz2) (SMP_CACHE_ALIGN(sz1) + SMP_CACHE_ALIGN(sz2))
+#define __libeth_cls3(sz1, sz2, sz3) \
+ (SMP_CACHE_ALIGN(sz1) + SMP_CACHE_ALIGN(sz2) + SMP_CACHE_ALIGN(sz3))
+#define __libeth_cls(...) \
+ CONCATENATE(__libeth_cls, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__)
+
+/**
+ * libeth_cacheline_struct_assert - make sure CL-based struct size is expected
+ * @type: type of the struct
+ * @...: from 1 to 3 CL group sizes (read-mostly, read-write, cold)
+ *
+ * When a struct contains several CL groups, it's difficult to predict its
+ * size on different architectures. The macro instead takes the sizes of all
+ * the groups the structure contains and computes the expected struct size.
+ */
+#define libeth_cacheline_struct_assert(type, ...) \
+ __libeth_cacheline_struct_assert(type, __libeth_cls(__VA_ARGS__)); \
+ static_assert(__alignof(type) >= SMP_CACHE_BYTES)
+
+/**
+ * libeth_cacheline_set_assert - make sure CL-based struct layout is expected
+ * @type: type of the struct
+ * @ro: expected size of the read-mostly group
+ * @rw: expected size of the read-write group
+ * @c: expected size of the cold group
+ *
+ * Check that each group size is expected and then do final struct size check.
+ */
+#define libeth_cacheline_set_assert(type, ro, rw, c) \
+ libeth_cacheline_group_assert(type, read_mostly, ro); \
+ libeth_cacheline_group_assert(type, read_write, rw); \
+ libeth_cacheline_group_assert(type, cold, c); \
+ libeth_cacheline_struct_assert(type, ro, rw, c)
+
+#endif /* __LIBETH_CACHE_H */
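As a quick illustration of the assertion macros above, a hedged sketch with a hypothetical struct (the __cacheline_group_*_aligned() markers come from <linux/cache.h>; nothing below is part of the patch):

#include <linux/cache.h>
#include <net/libeth/cache.h>

struct example_queue {
	__cacheline_group_begin_aligned(read_mostly);
	void *desc_ring;		/* descriptor ring base */
	u32 ring_size;			/* number of descriptors */
	__cacheline_group_end_aligned(read_mostly);

	__cacheline_group_begin_aligned(read_write);
	u32 next_to_use;		/* producer index */
	u32 next_to_clean;		/* consumer index */
	__cacheline_group_end_aligned(read_write);

	__cacheline_group_begin_aligned(cold);
	struct device *dev;		/* slowpath only */
	__cacheline_group_end_aligned(cold);
};

/* Group sizes on 64-bit: 12 (pointer + u32), 8 and 8 bytes. The checks are
 * exact on 64-bit with 64-byte cachelines and upper bounds everywhere else,
 * matching the two static_assert() flavors above.
 */
libeth_cacheline_set_assert(struct example_queue, 12, 8, 8);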
diff --git a/include/net/libeth/rx.h b/include/net/libeth/rx.h
new file mode 100644
index 000000000000..43574bd6612f
--- /dev/null
+++ b/include/net/libeth/rx.h
@@ -0,0 +1,261 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2024 Intel Corporation */
+
+#ifndef __LIBETH_RX_H
+#define __LIBETH_RX_H
+
+#include <linux/if_vlan.h>
+
+#include <net/page_pool/helpers.h>
+#include <net/xdp.h>
+
+/* Rx buffer management */
+
+/* Space reserved in front of each frame */
+#define LIBETH_SKB_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
+/* Maximum headroom for worst-case calculations */
+#define LIBETH_MAX_HEADROOM LIBETH_SKB_HEADROOM
+/* Link layer / L2 overhead: Ethernet, 2 VLAN tags (C + S), FCS */
+#define LIBETH_RX_LL_LEN (ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN)
+/* Maximum supported L2-L4 header length */
+#define LIBETH_MAX_HEAD roundup_pow_of_two(max(MAX_HEADER, 256))
+
+/* Always use order-0 pages */
+#define LIBETH_RX_PAGE_ORDER 0
+/* Pick a sane buffer stride and align to a cacheline boundary */
+#define LIBETH_RX_BUF_STRIDE SKB_DATA_ALIGN(128)
+/* HW-writeable space in one buffer: truesize - headroom/tailroom, aligned */
+#define LIBETH_RX_PAGE_LEN(hr) \
+ ALIGN_DOWN(SKB_MAX_ORDER(hr, LIBETH_RX_PAGE_ORDER), \
+ LIBETH_RX_BUF_STRIDE)
+
+/**
+ * struct libeth_fqe - structure representing an Rx buffer (fill queue element)
+ * @page: page holding the buffer
+ * @offset: offset from the page start (to the headroom)
+ * @truesize: total space occupied by the buffer (w/ headroom and tailroom)
+ *
+ * Depending on the MTU, the API switches between one-page-per-frame and
+ * shared page model (to conserve memory on bigger-page platforms). In case
+ * of the former, @offset is always 0 and @truesize is always ``PAGE_SIZE``.
+ */
+struct libeth_fqe {
+ struct page *page;
+ u32 offset;
+ u32 truesize;
+} __aligned_largest;
+
+/**
+ * enum libeth_fqe_type - enum representing types of Rx buffers
+ * @LIBETH_FQE_MTU: buffer size is determined by MTU
+ * @LIBETH_FQE_SHORT: buffer size is smaller than MTU, for short frames
+ * @LIBETH_FQE_HDR: buffer is ``LIBETH_MAX_HEAD``-sized, for headers
+ */
+enum libeth_fqe_type {
+ LIBETH_FQE_MTU = 0U,
+ LIBETH_FQE_SHORT,
+ LIBETH_FQE_HDR,
+};
+
+/**
+ * struct libeth_fq - structure representing a buffer (fill) queue
+ * @fp: hotpath part of the structure
+ * @pp: &page_pool for buffer management
+ * @fqes: array of Rx buffers
+ * @truesize: size to allocate per buffer, w/overhead
+ * @count: number of descriptors/buffers the queue has
+ * @type: type of the buffers this queue has
+ * @hsplit: flag whether header split is enabled
+ * @buf_len: HW-writeable length of each buffer
+ * @nid: ID of the closest NUMA node with memory
+ */
+struct libeth_fq {
+ struct_group_tagged(libeth_fq_fp, fp,
+ struct page_pool *pp;
+ struct libeth_fqe *fqes;
+
+ u32 truesize;
+ u32 count;
+ );
+
+ /* Cold fields */
+ enum libeth_fqe_type type:2;
+ bool hsplit:1;
+
+ u32 buf_len;
+ int nid;
+};
+
+int libeth_rx_fq_create(struct libeth_fq *fq, struct napi_struct *napi);
+void libeth_rx_fq_destroy(struct libeth_fq *fq);
+
+/**
+ * libeth_rx_alloc - allocate a new Rx buffer
+ * @fq: fill queue to allocate for
+ * @i: index of the buffer within the queue
+ *
+ * Return: DMA address to be passed to HW for Rx on successful allocation,
+ * ``DMA_MAPPING_ERROR`` otherwise.
+ */
+static inline dma_addr_t libeth_rx_alloc(const struct libeth_fq_fp *fq, u32 i)
+{
+ struct libeth_fqe *buf = &fq->fqes[i];
+
+ buf->truesize = fq->truesize;
+ buf->page = page_pool_dev_alloc(fq->pp, &buf->offset, &buf->truesize);
+ if (unlikely(!buf->page))
+ return DMA_MAPPING_ERROR;
+
+ return page_pool_get_dma_addr(buf->page) + buf->offset +
+ fq->pp->p.offset;
+}
+
+void libeth_rx_recycle_slow(struct page *page);
+
+/**
+ * libeth_rx_sync_for_cpu - synchronize or recycle buffer post DMA
+ * @fqe: buffer to process
+ * @len: frame length from the descriptor
+ *
+ * Process the buffer after it's written by HW. The regular path is to
+ * synchronize the DMA for CPU; if the buffer carries no data, it is instead
+ * immediately recycled back to its PP.
+ *
+ * Return: true when there's data to process, false otherwise.
+ */
+static inline bool libeth_rx_sync_for_cpu(const struct libeth_fqe *fqe,
+ u32 len)
+{
+ struct page *page = fqe->page;
+
+ /* Very rare, but possible case. The most common reason:
+ * the last fragment contained FCS only, which was then
+ * stripped by the HW.
+ */
+ if (unlikely(!len)) {
+ libeth_rx_recycle_slow(page);
+ return false;
+ }
+
+ page_pool_dma_sync_for_cpu(page->pp, page, fqe->offset, len);
+
+ return true;
+}
+
+/* Converting abstract packet type numbers into a software structure with
+ * the packet parameters to do O(1) lookup on Rx.
+ */
+
+enum {
+ LIBETH_RX_PT_OUTER_L2 = 0U,
+ LIBETH_RX_PT_OUTER_IPV4,
+ LIBETH_RX_PT_OUTER_IPV6,
+};
+
+enum {
+ LIBETH_RX_PT_NOT_FRAG = 0U,
+ LIBETH_RX_PT_FRAG,
+};
+
+enum {
+ LIBETH_RX_PT_TUNNEL_IP_NONE = 0U,
+ LIBETH_RX_PT_TUNNEL_IP_IP,
+ LIBETH_RX_PT_TUNNEL_IP_GRENAT,
+ LIBETH_RX_PT_TUNNEL_IP_GRENAT_MAC,
+ LIBETH_RX_PT_TUNNEL_IP_GRENAT_MAC_VLAN,
+};
+
+enum {
+ LIBETH_RX_PT_TUNNEL_END_NONE = 0U,
+ LIBETH_RX_PT_TUNNEL_END_IPV4,
+ LIBETH_RX_PT_TUNNEL_END_IPV6,
+};
+
+enum {
+ LIBETH_RX_PT_INNER_NONE = 0U,
+ LIBETH_RX_PT_INNER_UDP,
+ LIBETH_RX_PT_INNER_TCP,
+ LIBETH_RX_PT_INNER_SCTP,
+ LIBETH_RX_PT_INNER_ICMP,
+ LIBETH_RX_PT_INNER_TIMESYNC,
+};
+
+#define LIBETH_RX_PT_PAYLOAD_NONE PKT_HASH_TYPE_NONE
+#define LIBETH_RX_PT_PAYLOAD_L2 PKT_HASH_TYPE_L2
+#define LIBETH_RX_PT_PAYLOAD_L3 PKT_HASH_TYPE_L3
+#define LIBETH_RX_PT_PAYLOAD_L4 PKT_HASH_TYPE_L4
+
+struct libeth_rx_pt {
+ u32 outer_ip:2;
+ u32 outer_frag:1;
+ u32 tunnel_type:3;
+ u32 tunnel_end_prot:2;
+ u32 tunnel_end_frag:1;
+ u32 inner_prot:3;
+ enum pkt_hash_types payload_layer:2;
+
+ u32 pad:2;
+ enum xdp_rss_hash_type hash_type:16;
+};
+
+void libeth_rx_pt_gen_hash_type(struct libeth_rx_pt *pt);
+
+/**
+ * libeth_rx_pt_get_ip_ver - get IP version from a packet type structure
+ * @pt: packet type params
+ *
+ * Wrapper to compile out the IPv6 code from the drivers when not supported
+ * by the kernel.
+ *
+ * Return: @pt.outer_ip, or a stub value when IPv6 is not compiled in.
+ */
+static inline u32 libeth_rx_pt_get_ip_ver(struct libeth_rx_pt pt)
+{
+#if !IS_ENABLED(CONFIG_IPV6)
+ switch (pt.outer_ip) {
+ case LIBETH_RX_PT_OUTER_IPV4:
+ return LIBETH_RX_PT_OUTER_IPV4;
+ default:
+ return LIBETH_RX_PT_OUTER_L2;
+ }
+#else
+ return pt.outer_ip;
+#endif
+}
+
+/* libeth_has_*() can be used to quickly check whether the HW metadata is
+ * available to avoid further expensive processing such as descriptor reads.
+ * They already check that the corresponding netdev feature is enabled, thus
+ * they can be used as drop-in replacements for open-coded feature checks.
+ */
+
+static inline bool libeth_rx_pt_has_checksum(const struct net_device *dev,
+ struct libeth_rx_pt pt)
+{
+ /* Non-zero _INNER* is only possible when _OUTER_IPV* is set,
+ * so it is enough to check only the L4 type.
+ */
+ return likely(pt.inner_prot > LIBETH_RX_PT_INNER_NONE &&
+ (dev->features & NETIF_F_RXCSUM));
+}
+
+static inline bool libeth_rx_pt_has_hash(const struct net_device *dev,
+ struct libeth_rx_pt pt)
+{
+ return likely(pt.payload_layer > LIBETH_RX_PT_PAYLOAD_NONE &&
+ (dev->features & NETIF_F_RXHASH));
+}
+
+/**
+ * libeth_rx_pt_set_hash - fill in the skb hash value based on the PT
+ * @skb: skb to fill the hash in
+ * @hash: 32-bit hash value from the descriptor
+ * @pt: packet type
+ */
+static inline void libeth_rx_pt_set_hash(struct sk_buff *skb, u32 hash,
+ struct libeth_rx_pt pt)
+{
+ skb_set_hash(skb, hash, pt.payload_layer);
+}
+
+#endif /* __LIBETH_RX_H */
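To show how the pieces above compose, a hedged sketch of a driver's refill and completion paths; the ring layout, function names and the descriptor write are hypothetical, only the libeth_* calls and &libeth_fq fields come from this file:

/* Refill up to @num buffers; returns how many were actually allocated */
static u32 example_rx_refill(struct libeth_fq *fq, u32 num)
{
	u32 done;

	for (done = 0; done < num; done++) {
		dma_addr_t dma = libeth_rx_alloc(&fq->fp, done);

		if (dma == DMA_MAPPING_ERROR)
			break;

		/* write @dma to the HW Rx descriptor at index @done here */
	}

	return done;
}

/* On completion: sync (or recycle) the buffer, then attach the data */
static void example_rx_add_frag(const struct libeth_fq *fq,
				struct sk_buff *skb, u32 idx, u32 len)
{
	const struct libeth_fqe *fqe = &fq->fqes[idx];

	if (!libeth_rx_sync_for_cpu(fqe, len))
		return;		/* no data, already recycled to its PP */

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, fqe->page,
			fqe->offset + fq->pp->p.offset, len, fqe->truesize);
}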
diff --git a/include/net/libeth/tx.h b/include/net/libeth/tx.h
new file mode 100644
index 000000000000..35614f9523f6
--- /dev/null
+++ b/include/net/libeth/tx.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2024 Intel Corporation */
+
+#ifndef __LIBETH_TX_H
+#define __LIBETH_TX_H
+
+#include <linux/skbuff.h>
+
+#include <net/libeth/types.h>
+
+/* Tx buffer completion */
+
+/**
+ * enum libeth_sqe_type - type of &libeth_sqe to act on Tx completion
+ * @LIBETH_SQE_EMPTY: unused/empty, no action required
+ * @LIBETH_SQE_CTX: context descriptor with empty SQE, no action required
+ * @LIBETH_SQE_SLAB: kmalloc-allocated buffer, unmap and kfree()
+ * @LIBETH_SQE_FRAG: mapped skb frag, only unmap DMA
+ * @LIBETH_SQE_SKB: &sk_buff, unmap and napi_consume_skb(), update stats
+ */
+enum libeth_sqe_type {
+ LIBETH_SQE_EMPTY = 0U,
+ LIBETH_SQE_CTX,
+ LIBETH_SQE_SLAB,
+ LIBETH_SQE_FRAG,
+ LIBETH_SQE_SKB,
+};
+
+/**
+ * struct libeth_sqe - represents a Send Queue Element / Tx buffer
+ * @type: type of the buffer, see the enum above
+ * @rs_idx: index of the last buffer from the batch this one was sent in
+ * @raw: slab buffer to free via kfree()
+ * @skb: &sk_buff to consume
+ * @dma: DMA address to unmap
+ * @len: length of the mapped region to unmap
+ * @nr_frags: number of frags in the frame this buffer belongs to
+ * @packets: number of physical packets sent for this frame
+ * @bytes: number of physical bytes sent for this frame
+ * @priv: driver-private scratchpad
+ */
+struct libeth_sqe {
+ enum libeth_sqe_type type:32;
+ u32 rs_idx;
+
+ union {
+ void *raw;
+ struct sk_buff *skb;
+ };
+
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(len);
+
+ u32 nr_frags;
+ u32 packets;
+ u32 bytes;
+
+ unsigned long priv;
+} __aligned_largest;
+
+/**
+ * LIBETH_SQE_CHECK_PRIV - check the driver's private SQE data
+ * @p: type or name of the object the driver wants to fit into &libeth_sqe
+ *
+ * Make sure the driver's private data fits into libeth_sqe::priv. To be used
+ * right after its declaration.
+ */
+#define LIBETH_SQE_CHECK_PRIV(p) \
+ static_assert(sizeof(p) <= sizeof_field(struct libeth_sqe, priv))
+
+/**
+ * struct libeth_cq_pp - completion queue poll params
+ * @dev: &device to perform DMA unmapping
+ * @ss: onstack NAPI stats to fill
+ * @napi: whether it's called from the NAPI context
+ *
+ * libeth uses this structure to access the objects needed to perform a full
+ * Tx completion without passing lots of arguments and without changing the
+ * prototypes each time a new one is added.
+ */
+struct libeth_cq_pp {
+ struct device *dev;
+ struct libeth_sq_napi_stats *ss;
+
+ bool napi;
+};
+
+/**
+ * libeth_tx_complete - perform Tx completion for one SQE
+ * @sqe: SQE to complete
+ * @cp: poll params
+ *
+ * Perform Tx completion for any type of buffer: freeing, unmapping,
+ * updating the stats, etc.
+ */
+static inline void libeth_tx_complete(struct libeth_sqe *sqe,
+ const struct libeth_cq_pp *cp)
+{
+ switch (sqe->type) {
+ case LIBETH_SQE_EMPTY:
+ return;
+ case LIBETH_SQE_SKB:
+ case LIBETH_SQE_FRAG:
+ case LIBETH_SQE_SLAB:
+ dma_unmap_page(cp->dev, dma_unmap_addr(sqe, dma),
+ dma_unmap_len(sqe, len), DMA_TO_DEVICE);
+ break;
+ default:
+ break;
+ }
+
+ switch (sqe->type) {
+ case LIBETH_SQE_SKB:
+ cp->ss->packets += sqe->packets;
+ cp->ss->bytes += sqe->bytes;
+
+ napi_consume_skb(sqe->skb, cp->napi);
+ break;
+ case LIBETH_SQE_SLAB:
+ kfree(sqe->raw);
+ break;
+ default:
+ break;
+ }
+
+ sqe->type = LIBETH_SQE_EMPTY;
+}
+
+#endif /* __LIBETH_TX_H */
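A hedged sketch of the cleaning loop this helper is meant for; the SQ layout (the sqes array, ring size, next-to-clean index) and the stats folding are hypothetical:

static void example_tx_clean(struct libeth_sqe *sqes, u32 cnt, u32 *ntc,
			     u32 done, struct device *dev, bool napi)
{
	struct libeth_sq_napi_stats ss = { };
	const struct libeth_cq_pp cp = {
		.dev	= dev,
		.ss	= &ss,
		.napi	= napi,
	};
	u32 i = *ntc;

	/* Complete every SQE up to the last one reported done by HW */
	while (i != done) {
		libeth_tx_complete(&sqes[i], &cp);

		if (unlikely(++i == cnt))
			i = 0;
	}

	*ntc = i;

	/* fold @ss into the persistent per-queue stats here */
}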
diff --git a/include/net/libeth/types.h b/include/net/libeth/types.h
new file mode 100644
index 000000000000..603825e45133
--- /dev/null
+++ b/include/net/libeth/types.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2024 Intel Corporation */
+
+#ifndef __LIBETH_TYPES_H
+#define __LIBETH_TYPES_H
+
+#include <linux/types.h>
+
+/**
+ * struct libeth_sq_napi_stats - "hot" counters to update in Tx completion loop
+ * @packets: completed frames counter
+ * @bytes: sum of bytes of completed frames above
+ * @raw: alias to access all the fields as an array
+ */
+struct libeth_sq_napi_stats {
+ union {
+ struct {
+ u32 packets;
+ u32 bytes;
+ };
+ DECLARE_FLEX_ARRAY(u32, raw);
+ };
+};
+
+#endif /* __LIBETH_TYPES_H */
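The ::raw alias lets drivers fold these onstack counters into persistent per-queue stats with one loop instead of per-field copies. A hedged sketch, assuming a u64 counter array laid out in the same order:

static void example_fold_stats(u64 *qstats,
			       const struct libeth_sq_napi_stats *ss)
{
	u32 i;

	for (i = 0; i < sizeof(*ss) / sizeof(ss->raw[0]); i++)
		qstats[i] += ss->raw[i];
}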