path: root/drivers/net/ethernet/sfc/ef100_tx.c
Diffstat (limited to 'drivers/net/ethernet/sfc/ef100_tx.c')
-rw-r--r--  drivers/net/ethernet/sfc/ef100_tx.c  114
1 file changed, 100 insertions(+), 14 deletions(-)
diff --git a/drivers/net/ethernet/sfc/ef100_tx.c b/drivers/net/ethernet/sfc/ef100_tx.c
index 26ef51d6b542..03005757c060 100644
--- a/drivers/net/ethernet/sfc/ef100_tx.c
+++ b/drivers/net/ethernet/sfc/ef100_tx.c
@@ -23,7 +23,7 @@
int ef100_tx_probe(struct efx_tx_queue *tx_queue)
{
/* Allocate an extra descriptor for the QMDA status completion entry */
- return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,
+ return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd,
(tx_queue->ptr_mask + 2) *
sizeof(efx_oword_t),
GFP_KERNEL);
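
The sizing is unchanged by this hunk: the ring holds ptr_mask + 1 descriptors and one extra slot is reserved for the status completion entry, hence ptr_mask + 2. A minimal user-space sketch of that arithmetic, assuming a 16-byte descriptor as a stand-in for efx_oword_t:

#include <stdio.h>
#include <stddef.h>

#define DESC_SIZE 16	/* stand-in for sizeof(efx_oword_t) */

/* Ring of ptr_mask + 1 entries, plus one extra slot for the
 * status completion entry => ptr_mask + 2 descriptors allocated.
 */
static size_t txq_buf_bytes(unsigned int ptr_mask)
{
	return (size_t)(ptr_mask + 2) * DESC_SIZE;
}

int main(void)
{
	unsigned int ptr_mask = 512 - 1;	/* 512-entry ring */

	printf("%zu bytes for a %u-entry ring plus one status slot\n",
	       txq_buf_bytes(ptr_mask), ptr_mask + 1);
	return 0;
}
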
@@ -101,8 +101,8 @@ static bool ef100_tx_can_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
static efx_oword_t *ef100_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
- if (likely(tx_queue->txd.buf.addr))
- return ((efx_oword_t *)tx_queue->txd.buf.addr) + index;
+ if (likely(tx_queue->txd.addr))
+ return ((efx_oword_t *)tx_queue->txd.addr) + index;
else
return NULL;
}
@@ -189,6 +189,7 @@ static void ef100_make_tso_desc(struct efx_nic *efx,
{
bool gso_partial = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
unsigned int len, ip_offset, tcp_offset, payload_segs;
+ u32 mangleid_outer = ESE_GZ_TX_DESC_IP4_ID_INC_MOD16;
u32 mangleid = ESE_GZ_TX_DESC_IP4_ID_INC_MOD16;
unsigned int outer_ip_offset, outer_l4_offset;
u16 vlan_tci = skb_vlan_tag_get(skb);
@@ -200,8 +201,17 @@ static void ef100_make_tso_desc(struct efx_nic *efx,
bool outer_csum;
u32 paylen;
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID)
- mangleid = ESE_GZ_TX_DESC_IP4_ID_NO_OP;
+ if (encap) {
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID_INNER)
+ mangleid = ESE_GZ_TX_DESC_IP4_ID_NO_OP;
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID)
+ mangleid_outer = ESE_GZ_TX_DESC_IP4_ID_NO_OP;
+ } else {
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID)
+ mangleid = ESE_GZ_TX_DESC_IP4_ID_NO_OP;
+ mangleid_outer = ESE_GZ_TX_DESC_IP4_ID_NO_OP;
+ }
+
if (efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_TX)
vlan_enable = skb_vlan_tag_present(skb);
@@ -245,8 +255,7 @@ static void ef100_make_tso_desc(struct efx_nic *efx,
ESF_GZ_TX_TSO_OUTER_L4_OFF_W, outer_l4_offset >> 1,
ESF_GZ_TX_TSO_ED_OUTER_UDP_LEN, udp_encap && !gso_partial,
ESF_GZ_TX_TSO_ED_OUTER_IP_LEN, encap && !gso_partial,
- ESF_GZ_TX_TSO_ED_OUTER_IP4_ID, encap ? mangleid :
- ESE_GZ_TX_DESC_IP4_ID_NO_OP,
+ ESF_GZ_TX_TSO_ED_OUTER_IP4_ID, mangleid_outer,
ESF_GZ_TX_TSO_VLAN_INSERT_EN, vlan_enable,
ESF_GZ_TX_TSO_VLAN_INSERT_TCI, vlan_tci
);
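
A minimal user-space sketch of the inner/outer IPv4 ID mangling selection introduced above; the enum values and GSO flag bits are stand-ins for the driver's ESE_GZ_TX_DESC_IP4_ID_* constants and the SKB_GSO_TCP_FIXEDID / SKB_GSO_TCP_FIXEDID_INNER bits, not the real definitions:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the ESE_GZ_TX_DESC_IP4_ID_* constants. */
enum ip4_id_mode { IP4_ID_INC_MOD16, IP4_ID_NO_OP };

#define GSO_TCP_FIXEDID		0x1	/* stands in for SKB_GSO_TCP_FIXEDID */
#define GSO_TCP_FIXEDID_INNER	0x2	/* stands in for SKB_GSO_TCP_FIXEDID_INNER */

/* Mirrors the selection in the hunk above: with encapsulation, the inner
 * header follows the _INNER flag and the outer header follows the plain
 * flag; without encapsulation there is no outer header to edit, so the
 * outer field is always a no-op and the single header follows the plain flag.
 */
static void pick_mangleid(bool encap, unsigned int gso_type,
			  enum ip4_id_mode *inner, enum ip4_id_mode *outer)
{
	*inner = IP4_ID_INC_MOD16;
	*outer = IP4_ID_INC_MOD16;

	if (encap) {
		if (gso_type & GSO_TCP_FIXEDID_INNER)
			*inner = IP4_ID_NO_OP;
		if (gso_type & GSO_TCP_FIXEDID)
			*outer = IP4_ID_NO_OP;
	} else {
		if (gso_type & GSO_TCP_FIXEDID)
			*inner = IP4_ID_NO_OP;
		*outer = IP4_ID_NO_OP;
	}
}

int main(void)
{
	enum ip4_id_mode inner, outer;

	pick_mangleid(true, GSO_TCP_FIXEDID, &inner, &outer);
	printf("encap with fixed outer ID: inner=%d outer=%d\n", inner, outer);
	return 0;
}
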
@@ -254,7 +263,8 @@ static void ef100_make_tso_desc(struct efx_nic *efx,
static void ef100_tx_make_descriptors(struct efx_tx_queue *tx_queue,
const struct sk_buff *skb,
- unsigned int segment_count)
+ unsigned int segment_count,
+ struct efx_rep *efv)
{
unsigned int old_write_count = tx_queue->write_count;
unsigned int new_write_count = old_write_count;
@@ -272,6 +282,20 @@ static void ef100_tx_make_descriptors(struct efx_tx_queue *tx_queue,
else
next_desc_type = ESE_GZ_TX_DESC_TYPE_SEND;
+ if (unlikely(efv)) {
+ /* Create TX override descriptor */
+ write_ptr = new_write_count & tx_queue->ptr_mask;
+ txd = ef100_tx_desc(tx_queue, write_ptr);
+ ++new_write_count;
+
+ tx_queue->packet_write_count = new_write_count;
+ EFX_POPULATE_OWORD_3(*txd,
+ ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_PREFIX,
+ ESF_GZ_TX_PREFIX_EGRESS_MPORT, efv->mport,
+ ESF_GZ_TX_PREFIX_EGRESS_MPORT_EN, 1);
+ nr_descs--;
+ }
+
/* if it's a raw write (such as XDP) then always SEND single frames */
if (!skb)
nr_descs = 1;
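
A toy model of the ring accounting around the new override (prefix) descriptor, assuming the enqueue path reserved one extra slot for it, as the later hunk in __ef100_enqueue_skb does; all names are illustrative, not the driver's:

#include <stdio.h>

struct toy_txq {
	unsigned int ptr_mask;      /* ring size - 1 (power-of-two ring) */
	unsigned int insert_count;  /* slots reserved by the enqueue path */
	unsigned int write_count;   /* descriptors actually written */
};

static void write_descriptors(struct toy_txq *txq, int has_prefix)
{
	unsigned int nr_descs = txq->insert_count - txq->write_count;

	if (has_prefix) {
		unsigned int slot = txq->write_count & txq->ptr_mask;

		printf("prefix (override) descriptor at slot %u\n", slot);
		txq->write_count++;
		nr_descs--;	/* the reserved slot is consumed by the prefix */
	}
	while (nr_descs--) {
		unsigned int slot = txq->write_count & txq->ptr_mask;

		printf("data descriptor at slot %u\n", slot);
		txq->write_count++;
	}
}

int main(void)
{
	struct toy_txq txq = { .ptr_mask = 511 };

	txq.insert_count = 3;	/* one reserved prefix slot + two data buffers */
	write_descriptors(&txq, 1);
	return 0;
}
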
@@ -306,6 +330,9 @@ static void ef100_tx_make_descriptors(struct efx_tx_queue *tx_queue,
/* if it's a raw write (such as XDP) then always SEND */
next_desc_type = skb ? ESE_GZ_TX_DESC_TYPE_SEG :
ESE_GZ_TX_DESC_TYPE_SEND;
+ /* mark as an EFV buffer if applicable */
+ if (unlikely(efv))
+ buffer->flags |= EFX_TX_BUF_EFV;
} while (new_write_count != tx_queue->insert_count);
@@ -324,11 +351,11 @@ static void ef100_tx_make_descriptors(struct efx_tx_queue *tx_queue,
void ef100_tx_write(struct efx_tx_queue *tx_queue)
{
- ef100_tx_make_descriptors(tx_queue, NULL, 0);
+ ef100_tx_make_descriptors(tx_queue, NULL, 0, NULL);
ef100_tx_push_buffers(tx_queue);
}
-void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event)
+int ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event)
{
unsigned int tx_done =
EFX_QWORD_FIELD(*p_event, ESF_GZ_EV_TXCMPL_NUM_DESC);
@@ -339,7 +366,7 @@ void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event)
unsigned int tx_index = (tx_queue->read_count + tx_done - 1) &
tx_queue->ptr_mask;
- efx_xmit_done(tx_queue, tx_index);
+ return efx_xmit_done(tx_queue, tx_index);
}
/* Add a socket buffer to a TX queue
@@ -349,7 +376,14 @@ void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event)
* Returns 0 on success, error code otherwise. In case of an error this
* function will free the SKB.
*/
-int ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
+netdev_tx_t ef100_enqueue_skb(struct efx_tx_queue *tx_queue,
+ struct sk_buff *skb)
+{
+ return __ef100_enqueue_skb(tx_queue, skb, NULL);
+}
+
+int __ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
+ struct efx_rep *efv)
{
unsigned int old_insert_count = tx_queue->insert_count;
struct efx_nic *efx = tx_queue->efx;
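
For context, a hypothetical caller sketch (not part of this patch) of how a representor's transmit handler might use the new __ef100_enqueue_skb() entry point; the helper example_rep_select_txq() and the netdev_priv() layout are assumptions made up for illustration:

/* Hypothetical sketch only: a representor's ndo_start_xmit could hand its
 * struct efx_rep to __ef100_enqueue_skb() so the frame gets the egress
 * m-port override descriptor.
 */
static netdev_tx_t example_rep_start_xmit(struct sk_buff *skb,
					  struct net_device *dev)
{
	struct efx_rep *efv = netdev_priv(dev);			/* assumed layout */
	struct efx_tx_queue *tx_queue = example_rep_select_txq(efv); /* hypothetical helper */

	/* On error __ef100_enqueue_skb() frees the skb and, for efv traffic,
	 * bumps efv->stats.tx_errors, so there is nothing more to do here.
	 */
	__ef100_enqueue_skb(tx_queue, skb, efv);
	return NETDEV_TX_OK;
}
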
@@ -376,16 +410,64 @@ int ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
return 0;
}
+ if (unlikely(efv)) {
+ struct efx_tx_buffer *buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
+
+ /* Drop representor packets if the queue is stopped.
+ * We currently don't assert backoff to representors so this is
+ * to make sure representor traffic can't starve the main
+ * net device.
+ * And, of course, if there are no TX descriptors left.
+ */
+ if (netif_tx_queue_stopped(tx_queue->core_txq) ||
+ unlikely(efx_tx_buffer_in_use(buffer))) {
+ atomic64_inc(&efv->stats.tx_errors);
+ rc = -ENOSPC;
+ goto err;
+ }
+
+ /* Also drop representor traffic if it could cause us to
+ * stop the queue. If we assert backoff and we haven't
+ * received traffic on the main net device recently then the
+ * TX watchdog can go off erroneously.
+ */
+ fill_level = efx_channel_tx_old_fill_level(tx_queue->channel);
+ fill_level += efx_tx_max_skb_descs(efx);
+ if (fill_level > efx->txq_stop_thresh) {
+ struct efx_tx_queue *txq2;
+
+ /* Refresh cached fill level and re-check */
+ efx_for_each_channel_tx_queue(txq2, tx_queue->channel)
+ txq2->old_read_count = READ_ONCE(txq2->read_count);
+
+ fill_level = efx_channel_tx_old_fill_level(tx_queue->channel);
+ fill_level += efx_tx_max_skb_descs(efx);
+ if (fill_level > efx->txq_stop_thresh) {
+ atomic64_inc(&efv->stats.tx_errors);
+ rc = -ENOSPC;
+ goto err;
+ }
+ }
+
+ buffer->flags = EFX_TX_BUF_OPTION | EFX_TX_BUF_EFV;
+ tx_queue->insert_count++;
+ }
+
/* Map for DMA and create descriptors */
rc = efx_tx_map_data(tx_queue, skb, segments);
if (rc)
goto err;
- ef100_tx_make_descriptors(tx_queue, skb, segments);
+ ef100_tx_make_descriptors(tx_queue, skb, segments, efv);
fill_level = efx_channel_tx_old_fill_level(tx_queue->channel);
if (fill_level > efx->txq_stop_thresh) {
struct efx_tx_queue *txq2;
+ /* Because of checks above, representor traffic should
+ * not be able to stop the queue.
+ */
+ WARN_ON(efv);
+
netif_tx_stop_queue(tx_queue->core_txq);
/* Re-read after a memory barrier in case we've raced with
* the completion path. Otherwise there's a danger we'll never
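
A user-space sketch of the drop decision this hunk adds for representor traffic, assuming a cached read pointer that is refreshed once before the final check; all names are illustrative stand-ins for the efx helpers:

#include <stdbool.h>
#include <stdio.h>

struct toy_txq {
	unsigned int insert_count;
	unsigned int read_count;       /* updated by the completion path */
	unsigned int old_read_count;   /* cached copy used by the xmit path */
};

static unsigned int old_fill_level(const struct toy_txq *q)
{
	return q->insert_count - q->old_read_count;
}

/* Returns true if accepting one more skb could push the queue past the
 * stop threshold; representor traffic is dropped rather than ever
 * stopping the shared PF queue.
 */
static bool rep_would_overfill(struct toy_txq *q, unsigned int max_skb_descs,
			       unsigned int stop_thresh)
{
	if (old_fill_level(q) + max_skb_descs <= stop_thresh)
		return false;

	/* Refresh the cached read pointer and re-check once. */
	q->old_read_count = q->read_count;
	return old_fill_level(q) + max_skb_descs > stop_thresh;
}

int main(void)
{
	struct toy_txq q = { .insert_count = 500, .read_count = 300, .old_read_count = 0 };

	printf("drop: %d\n", rep_would_overfill(&q, 20, 480));
	return 0;
}
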
@@ -404,8 +486,12 @@ int ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
/* If xmit_more then we don't need to push the doorbell, unless there
* are 256 descriptors already queued in which case we have to push to
* ensure we never push more than 256 at once.
+ *
+ * Always push for representor traffic, and don't account it to parent
+ * PF netdevice's BQL.
*/
- if (__netdev_tx_sent_queue(tx_queue->core_txq, skb->len, xmit_more) ||
+ if (unlikely(efv) ||
+ __netdev_tx_sent_queue(tx_queue->core_txq, skb->len, xmit_more) ||
tx_queue->write_count - tx_queue->notify_count > 255)
ef100_tx_push_buffers(tx_queue);
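
A toy model of the doorbell decision above, assuming __netdev_tx_sent_queue() is approximated by a simple flush-on-last-skb check; names are illustrative:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for __netdev_tx_sent_queue(): returns true when the doorbell
 * should be rung, e.g. because this is the last skb of a batch.
 */
static bool bql_wants_push(bool xmit_more)
{
	return !xmit_more;
}

/* Representor traffic always pushes (and skips BQL); otherwise push when
 * BQL asks for a flush or when more than 255 descriptors are pending
 * since the last notify.
 */
static bool should_push(bool is_rep, bool xmit_more,
			unsigned int write_count, unsigned int notify_count)
{
	return is_rep ||
	       bql_wants_push(xmit_more) ||
	       write_count - notify_count > 255;
}

int main(void)
{
	printf("%d %d %d\n",
	       should_push(true,  true, 10, 10),    /* representor: always push */
	       should_push(false, true, 300, 10),   /* 290 pending: push */
	       should_push(false, true, 20, 10));   /* batching, few pending: no push */
	return 0;
}
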