/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019, Intel Corporation. */

#ifndef _ICE_TXRX_LIB_H_
#define _ICE_TXRX_LIB_H_
#include "ice.h"

/**
 * ice_set_rx_bufs_act - propagate Rx buffer action to frags
 * @xdp: XDP buffer representing frame (linear and frags part)
 * @rx_ring: Rx ring struct
 * @act: action to store onto Rx buffers related to XDP buffer parts
 *
 * Set the action that should be taken before putting each Rx buffer, from
 * the first frag up to the one before the last. The last one is handled by
 * the caller of this function, as it is the EOP frag that is currently
 * being processed. This function is supposed to be called only when the
 * XDP buffer contains frags.
 */
static inline void
ice_set_rx_bufs_act(struct xdp_buff *xdp, const struct ice_rx_ring *rx_ring,
		    const unsigned int act)
{
	const struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
	u32 first = rx_ring->first_desc;
	u32 nr_frags = sinfo->nr_frags;
	u32 cnt = rx_ring->count;
	struct ice_rx_buf *buf;

	for (int i = 0; i < nr_frags; i++) {
		buf = &rx_ring->rx_buf[first];
		buf->act = act;

		if (++first == cnt)
			first = 0;
	}
}
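
/* Usage sketch (hypothetical call site, not part of the upstream file):
 * after an XDP verdict is reached for a multi-frag frame, propagate it to
 * the non-EOP buffers; ICE_XDP_CONSUMED comes from ice_txrx.h:
 *
 *	if (unlikely(xdp_buff_has_frags(xdp)))
 *		ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED);
 */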

/**
 * ice_test_staterr - tests bits in Rx descriptor status and error fields
 * @status_err_n: Rx descriptor status_error0 or status_error1 bits
 * @stat_err_bits: value to mask
 *
 * This function does some fast chicanery in order to return the
 * value of the mask, which is really only used for boolean tests.
 * The @stat_err_bits mask doesn't need to be shifted because the
 * status/error bits begin at offset zero.
 */
static inline bool
ice_test_staterr(__le16 status_err_n, const u16 stat_err_bits)
{
	return !!(status_err_n & cpu_to_le16(stat_err_bits));
}
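
/* Example (illustrative only): testing the "L2 tag 1 present" bit of
 * status_error0; the BIT() mask needs no shift because the status bits
 * start at offset zero:
 *
 *	bool tagged =
 *		ice_test_staterr(rx_desc->wb.status_error0,
 *				 BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S));
 */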

/**
 * ice_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * If the buffer is an EOP buffer, this function exits returning false;
 * otherwise it returns true, indicating that this is in fact a non-EOP
 * buffer.
 */
static inline bool
ice_is_non_eop(const struct ice_rx_ring *rx_ring,
	       const union ice_32b_rx_flex_desc *rx_desc)
{
	/* if we are the last buffer then there is nothing else to do */
#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
	if (likely(ice_test_staterr(rx_desc->wb.status_error0, ICE_RXD_EOF)))
		return false;

	rx_ring->ring_stats->rx_stats.non_eop_descs++;

	return true;
}
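
/* Typical use in an Rx clean loop (sketch, not the exact driver code):
 * keep fetching descriptors until the EOP fragment shows up:
 *
 *	if (ice_is_non_eop(rx_ring, rx_desc))
 *		continue;
 */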

/**
 * ice_build_ctob - build the Tx data descriptor cmd_type_offset_bsz field
 * @td_cmd: command bits
 * @td_offset: header offset bits
 * @size: Tx buffer size in bytes
 * @td_tag: L2 tag 1 (VLAN tag)
 */
static inline __le64
ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
{
	return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
			   (td_cmd    << ICE_TXD_QW1_CMD_S) |
			   (td_offset << ICE_TXD_QW1_OFFSET_S) |
			   ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
			   (td_tag    << ICE_TXD_QW1_L2TAG1_S));
}
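
/* Example (sketch): quadword for a single-buffer frame with no offloads
 * and no VLAN tag, where size is the buffer length in bytes:
 *
 *	tx_desc->cmd_type_offset_bsz =
 *		ice_build_ctob(ICE_TX_DESC_CMD_EOP, 0, size, 0);
 */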

/**
 * ice_get_vlan_tag_from_rx_desc - get VLAN from Rx flex descriptor
 * @rx_desc: Rx 32b flex descriptor with RXDID=2
 *
 * The OS and current PF implementation only support stripping a single VLAN tag
 * at a time, so there should only ever be 0 or 1 tags in the l2tag* fields. If
 * one is found return the tag, else return 0 to mean no VLAN tag was found.
 */
static inline u16
ice_get_vlan_tag_from_rx_desc(union ice_32b_rx_flex_desc *rx_desc)
{
	u16 stat_err_bits;

	stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
	if (ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
		return le16_to_cpu(rx_desc->wb.l2tag1);

	stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S);
	if (ice_test_staterr(rx_desc->wb.status_error1, stat_err_bits))
		return le16_to_cpu(rx_desc->wb.l2tag2_2nd);

	return 0;
}
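
/* Usage sketch (hypothetical call site): in this driver the tag is handed
 * to ice_receive_skb(), declared below; a consumer could equally attach
 * it to the skb directly:
 *
 *	u16 vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc);
 *
 *	if (vlan_tag)
 *		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
 */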

/**
 * ice_xdp_ring_update_tail - Updates the XDP Tx ring tail register
 * @xdp_ring: XDP Tx ring
 *
 * This function updates the XDP Tx ring tail register.
 */
static inline void ice_xdp_ring_update_tail(struct ice_tx_ring *xdp_ring)
{
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
	 */
	wmb();
	writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
}

/**
 * ice_set_rs_bit - set RS bit on last produced descriptor (one behind current NTU)
 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 *
 * Return: index of the descriptor on which the RS bit was set
 */
static inline u32 ice_set_rs_bit(const struct ice_tx_ring *xdp_ring)
{
	u32 rs_idx = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1;
	struct ice_tx_desc *tx_desc;

	tx_desc = ICE_TX_DESC(xdp_ring, rs_idx);
	tx_desc->cmd_type_offset_bsz |=
		cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);

	return rs_idx;
}
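
/* Typical finalize sequence after producing a batch of XDP Tx descriptors
 * (a simplified sketch; ice_finalize_xdp_rx() in ice_txrx_lib.c adds
 * locking and bookkeeping): request a completion report for the last
 * descriptor, then bump the tail:
 *
 *	ice_set_rs_bit(xdp_ring);
 *	ice_xdp_ring_update_tail(xdp_ring);
 */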

void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res, u32 first_idx);
int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring);
int ice_xmit_xdp_ring(struct xdp_frame *xdpf, struct ice_tx_ring *xdp_ring);
int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring);
void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val);
void
ice_process_skb_fields(struct ice_rx_ring *rx_ring,
		       union ice_32b_rx_flex_desc *rx_desc,
		       struct sk_buff *skb, u16 ptype);
void
ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag);
#endif /* !_ICE_TXRX_LIB_H_ */