Diffstat (limited to 'drivers/net/ethernet/intel/idpf/idpf_virtchnl.c')
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_virtchnl.c  376
1 file changed, 208 insertions(+), 168 deletions(-)
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index a5f9b7a5effe..07a9f5ae34fd 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -1,90 +1,11 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */
+#include <net/libeth/rx.h>
+
#include "idpf.h"
#include "idpf_virtchnl.h"
-
-#define IDPF_VC_XN_MIN_TIMEOUT_MSEC 2000
-#define IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC (60 * 1000)
-#define IDPF_VC_XN_IDX_M GENMASK(7, 0)
-#define IDPF_VC_XN_SALT_M GENMASK(15, 8)
-#define IDPF_VC_XN_RING_LEN U8_MAX
-
-/**
- * enum idpf_vc_xn_state - Virtchnl transaction status
- * @IDPF_VC_XN_IDLE: not expecting a reply, ready to be used
- * @IDPF_VC_XN_WAITING: expecting a reply, not yet received
- * @IDPF_VC_XN_COMPLETED_SUCCESS: a reply was expected and received,
- * buffer updated
- * @IDPF_VC_XN_COMPLETED_FAILED: a reply was expected and received, but there
- * was an error, buffer not updated
- * @IDPF_VC_XN_SHUTDOWN: transaction object cannot be used, VC torn down
- * @IDPF_VC_XN_ASYNC: transaction sent asynchronously and doesn't have the
- * return context; a callback may be provided to handle
- * return
- */
-enum idpf_vc_xn_state {
- IDPF_VC_XN_IDLE = 1,
- IDPF_VC_XN_WAITING,
- IDPF_VC_XN_COMPLETED_SUCCESS,
- IDPF_VC_XN_COMPLETED_FAILED,
- IDPF_VC_XN_SHUTDOWN,
- IDPF_VC_XN_ASYNC,
-};
-
-struct idpf_vc_xn;
-/* Callback for asynchronous messages */
-typedef int (*async_vc_cb) (struct idpf_adapter *, struct idpf_vc_xn *,
- const struct idpf_ctlq_msg *);
-
-/**
- * struct idpf_vc_xn - Data structure representing virtchnl transactions
- * @completed: virtchnl event loop uses that to signal when a reply is
- * available, uses kernel completion API
- * @state: virtchnl event loop stores the data below, protected by the
- * completion's lock.
- * @reply_sz: Original size of reply, may be > reply_buf.iov_len; it will be
- * truncated on its way to the receiver thread according to
- * reply_buf.iov_len.
- * @reply: Reference to the buffer(s) where the reply data should be written
- * to. May be 0-length (then NULL address permitted) if the reply data
- * should be ignored.
- * @async_handler: if sent asynchronously, a callback can be provided to handle
- * the reply when it's received
- * @vc_op: corresponding opcode sent with this transaction
- * @idx: index used as retrieval on reply receive, used for cookie
- * @salt: changed every message to make unique, used for cookie
- */
-struct idpf_vc_xn {
- struct completion completed;
- enum idpf_vc_xn_state state;
- size_t reply_sz;
- struct kvec reply;
- async_vc_cb async_handler;
- u32 vc_op;
- u8 idx;
- u8 salt;
-};
-
-/**
- * struct idpf_vc_xn_params - Parameters for executing transaction
- * @send_buf: kvec for send buffer
- * @recv_buf: kvec for recv buffer, may be NULL, must then have zero length
- * @timeout_ms: timeout to wait for reply
- * @async: send message asynchronously, will not wait on completion
- * @async_handler: If sent asynchronously, optional callback handler. The user
- * must be careful when using async handlers as the memory for
- * the recv_buf _cannot_ be on stack if this is async.
- * @vc_op: virtchnl op to send
- */
-struct idpf_vc_xn_params {
- struct kvec send_buf;
- struct kvec recv_buf;
- int timeout_ms;
- bool async;
- async_vc_cb async_handler;
- u32 vc_op;
-};
+#include "idpf_ptp.h"
/**
* struct idpf_vc_xn_manager - Manager for tracking transactions
@@ -139,7 +60,7 @@ static void idpf_handle_event_link(struct idpf_adapter *adapter,
}
np = netdev_priv(vport->netdev);
- vport->link_speed_mbps = le32_to_cpu(v2e->link_speed);
+ np->link_speed_mbps = le32_to_cpu(v2e->link_speed);
if (vport->link_up == v2e->link_status)
return;
@@ -233,6 +154,55 @@ err_kfree:
return err;
}
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+/**
+ * idpf_ptp_is_mb_msg - Check if the message is PTP-related
+ * @op: virtchnl opcode
+ *
+ * Return: true if msg is PTP-related, false otherwise.
+ */
+static bool idpf_ptp_is_mb_msg(u32 op)
+{
+ switch (op) {
+ case VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME:
+ case VIRTCHNL2_OP_PTP_GET_CROSS_TIME:
+ case VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME:
+ case VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE:
+ case VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME:
+ case VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS:
+ case VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * idpf_prepare_ptp_mb_msg - Prepare PTP related message
+ *
+ * @adapter: Driver specific private structure
+ * @op: virtchnl opcode
+ * @ctlq_msg: Corresponding control queue message
+ */
+static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op,
+ struct idpf_ctlq_msg *ctlq_msg)
+{
+ /* If the message is PTP-related and the secondary mailbox is available,
+ * send the message through the secondary mailbox.
+ */
+ if (!idpf_ptp_is_mb_msg(op) || !adapter->ptp->secondary_mbx.valid)
+ return;
+
+ ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_peer_drv;
+ ctlq_msg->func_id = adapter->ptp->secondary_mbx.peer_mbx_q_id;
+ ctlq_msg->host_id = adapter->ptp->secondary_mbx.peer_id;
+}
+#else /* !CONFIG_PTP_1588_CLOCK */
+static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op,
+ struct idpf_ctlq_msg *ctlq_msg)
+{ }
+#endif /* CONFIG_PTP_1588_CLOCK */
+
/**
* idpf_send_mb_msg - Send message over mailbox
* @adapter: Driver specific private structure
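Two things are worth noting in the hunk above: PTP opcodes are diverted to the peer's secondary mailbox only when that mailbox was negotiated as valid, and the empty #else stub lets idpf_send_mb_msg call the helper unconditionally when PTP support is compiled out. A self-contained sketch of the routing rule (names here are local to the sketch, not driver API):

#include <stdbool.h>
#include <stdio.h>

enum mb_route { MB_ROUTE_CP, MB_ROUTE_PEER_DRV };

static enum mb_route pick_mb_route(bool is_ptp_op, bool secondary_mbx_valid)
{
	/* Mirrors idpf_prepare_ptp_mb_msg(): only PTP traffic with a valid
	 * secondary mailbox leaves the default control-plane route.
	 */
	if (is_ptp_op && secondary_mbx_valid)
		return MB_ROUTE_PEER_DRV;
	return MB_ROUTE_CP;
}

int main(void)
{
	printf("ptp, secondary valid   -> %d\n", pick_mb_route(true, true));
	printf("ptp, secondary missing -> %d\n", pick_mb_route(true, false));
	printf("non-ptp                -> %d\n", pick_mb_route(false, true));
	return 0;
}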
@@ -276,6 +246,9 @@ int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_cp;
ctlq_msg->func_id = 0;
+
+ idpf_prepare_ptp_mb_msg(adapter, op, ctlq_msg);
+
ctlq_msg->data_len = msg_size;
ctlq_msg->cookie.mbx.chnl_opcode = op;
ctlq_msg->cookie.mbx.chnl_retval = 0;
@@ -447,8 +420,8 @@ static void idpf_vc_xn_push_free(struct idpf_vc_xn_manager *vcxn_mngr,
* >= @recv_buf.iov_len, but we never overflow @@recv_buf_iov_base). < 0 for
* error.
*/
-static ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
- const struct idpf_vc_xn_params *params)
+ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
+ const struct idpf_vc_xn_params *params)
{
const struct kvec *send_buf = &params->send_buf;
struct idpf_vc_xn *xn;
@@ -515,8 +488,10 @@ static ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
retval = -ENXIO;
goto only_unlock;
case IDPF_VC_XN_WAITING:
- dev_notice_ratelimited(&adapter->pdev->dev, "Transaction timed-out (op %d, %dms)\n",
- params->vc_op, params->timeout_ms);
+ dev_notice_ratelimited(&adapter->pdev->dev,
+ "Transaction timed-out (op:%d cookie:%04x vc_op:%d salt:%02x timeout:%dms)\n",
+ params->vc_op, cookie, xn->vc_op,
+ xn->salt, params->timeout_ms);
retval = -ETIME;
break;
case IDPF_VC_XN_COMPLETED_SUCCESS:
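The richer timeout message now prints the 16-bit cookie that pairs a request with its reply. Per the masks this file defined before this change (IDPF_VC_XN_IDX_M = GENMASK(7, 0), IDPF_VC_XN_SALT_M = GENMASK(15, 8)), the ring index lives in the low byte and the per-message salt in the high byte. A standalone illustration of that layout:

#include <stdint.h>
#include <stdio.h>

/* Low byte: ring index used to look the transaction back up.
 * High byte: salt, changed every message so a stale reply is rejected.
 */
static uint16_t xn_cookie_pack(uint8_t idx, uint8_t salt)
{
	return (uint16_t)salt << 8 | idx;
}

int main(void)
{
	uint16_t cookie = xn_cookie_pack(0x2a, 0x5c);

	printf("cookie=%04x idx=%02x salt=%02x\n",
	       cookie, cookie & 0xff, cookie >> 8);
	return 0;
}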
@@ -610,14 +585,16 @@ idpf_vc_xn_forward_reply(struct idpf_adapter *adapter,
return -EINVAL;
}
xn = &adapter->vcxn_mngr->ring[xn_idx];
+ idpf_vc_xn_lock(xn);
salt = FIELD_GET(IDPF_VC_XN_SALT_M, msg_info);
if (xn->salt != salt) {
- dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (%02x != %02x)\n",
- xn->salt, salt);
+ dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (exp:%d@%02x(%d) != got:%d@%02x)\n",
+ xn->vc_op, xn->salt, xn->state,
+ ctlq_msg->cookie.mbx.chnl_opcode, salt);
+ idpf_vc_xn_unlock(xn);
return -EINVAL;
}
- idpf_vc_xn_lock(xn);
switch (xn->state) {
case IDPF_VC_XN_WAITING:
/* success */
@@ -664,7 +641,7 @@ idpf_vc_xn_forward_reply(struct idpf_adapter *adapter,
if (ctlq_msg->data_len) {
payload = ctlq_msg->ctx.indirect.payload->va;
- payload_size = ctlq_msg->ctx.indirect.payload->size;
+ payload_size = ctlq_msg->data_len;
}
xn->reply_sz = payload_size;
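Using data_len rather than the payload buffer's capacity means reply_sz reports how many bytes the peer actually wrote. Combined with the doc comment earlier in the file (reply_sz may exceed recv_buf.iov_len, but the copy never overflows the caller's buffer), the contract is snprintf-like; a sketch, with all names local to the sketch:

#include <stdio.h>
#include <string.h>

/* Copy at most dst_len bytes but report the full reply size, so the
 * caller can detect truncation by comparing the return value.
 */
static size_t copy_reply(void *dst, size_t dst_len,
			 const void *src, size_t src_len)
{
	memcpy(dst, src, src_len < dst_len ? src_len : dst_len);
	return src_len;
}

int main(void)
{
	char buf[4];
	size_t reply_sz = copy_reply(buf, sizeof(buf), "virtchnl", 8);

	if (reply_sz > sizeof(buf))
		printf("truncated: got %zu of %zu bytes\n",
		       sizeof(buf), reply_sz);
	return 0;
}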
@@ -750,7 +727,7 @@ static int idpf_wait_for_marker_event(struct idpf_vport *vport)
int i;
for (i = 0; i < vport->num_txq; i++)
- set_bit(__IDPF_Q_SW_MARKER, vport->txqs[i]->flags);
+ idpf_queue_set(SW_MARKER, vport->txqs[i]);
event = wait_event_timeout(vport->sw_marker_wq,
test_and_clear_bit(IDPF_VPORT_SW_MARKER,
@@ -758,7 +735,7 @@ static int idpf_wait_for_marker_event(struct idpf_vport *vport)
msecs_to_jiffies(500));
for (i = 0; i < vport->num_txq; i++)
- clear_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags);
+ idpf_queue_clear(POLL_MODE, vport->txqs[i]);
if (event)
return 0;
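The marker handshake above is a plain wait_event_timeout() pattern: flag every Tx queue, then sleep until the completion path clears IDPF_VPORT_SW_MARKER or 500 ms pass. A rough userspace analogue with pthreads (structure only, not driver code):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool marker_seen;

/* Stands in for the Tx completion path observing the last marker */
static void *completion_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	marker_seen = true;		/* ~ set_bit(IDPF_VPORT_SW_MARKER) */
	pthread_cond_signal(&cond);	/* ~ wake_up(&vport->sw_marker_wq) */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	struct timespec deadline;
	pthread_t thr;
	int ret = 0;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_nsec += 500L * 1000000L;	/* 500 ms, as in the driver */
	deadline.tv_sec += deadline.tv_nsec / 1000000000L;
	deadline.tv_nsec %= 1000000000L;

	pthread_create(&thr, NULL, completion_path, NULL);

	pthread_mutex_lock(&lock);
	while (!marker_seen && ret == 0)
		ret = pthread_cond_timedwait(&cond, &lock, &deadline);
	pthread_mutex_unlock(&lock);
	pthread_join(thr, NULL);

	puts(marker_seen ? "marker received" : "timed out");
	return 0;
}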
@@ -894,7 +871,8 @@ static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
VIRTCHNL2_CAP_MACFILTER |
VIRTCHNL2_CAP_SPLITQ_QSCHED |
VIRTCHNL2_CAP_PROMISC |
- VIRTCHNL2_CAP_LOOPBACK);
+ VIRTCHNL2_CAP_LOOPBACK |
+ VIRTCHNL2_CAP_PTP);
xn_params.vc_op = VIRTCHNL2_OP_GET_CAPS;
xn_params.send_buf.iov_base = &caps;
@@ -1092,7 +1070,6 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
int num_regs, u32 q_type)
{
struct idpf_adapter *adapter = vport->adapter;
- struct idpf_queue *q;
int i, j, k = 0;
switch (q_type) {
@@ -1111,6 +1088,8 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
u16 num_rxq = rx_qgrp->singleq.num_rxq;
for (j = 0; j < num_rxq && k < num_regs; j++, k++) {
+ struct idpf_rx_queue *q;
+
q = rx_qgrp->singleq.rxqs[j];
q->tail = idpf_get_reg_addr(adapter,
reg_vals[k]);
@@ -1123,6 +1102,8 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
u8 num_bufqs = vport->num_bufqs_per_qgrp;
for (j = 0; j < num_bufqs && k < num_regs; j++, k++) {
+ struct idpf_buf_queue *q;
+
q = &rx_qgrp->splitq.bufq_sets[j].bufq;
q->tail = idpf_get_reg_addr(adapter,
reg_vals[k]);
@@ -1253,12 +1234,12 @@ int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
vport_msg->vport_type = cpu_to_le16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
vport_msg->vport_index = cpu_to_le16(idx);
- if (adapter->req_tx_splitq)
+ if (adapter->req_tx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ))
vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
else
vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
- if (adapter->req_rx_splitq)
+ if (adapter->req_rx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ))
vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
else
vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
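Since IS_ENABLED() expands to a constant 0 or 1, the added "|| !IS_ENABLED(CONFIG_IDPF_SINGLEQ)" makes the condition compile-time true in kernels built without singleq support, and the else branch is eliminated as dead code. A minimal demonstration with a stand-in macro:

#include <stdio.h>

#define IS_ENABLED(cfg) (cfg)		/* stand-in for the kconfig macro */
#define CONFIG_DEMO_SINGLEQ 0		/* build with singleq support removed */

int main(void)
{
	int req_tx_splitq = 0;		/* user did not ask for splitq... */

	/* ...but the queue model is forced to split anyway */
	if (req_tx_splitq || !IS_ENABLED(CONFIG_DEMO_SINGLEQ))
		puts("txq_model = SPLIT");
	else
		puts("txq_model = SINGLE");
	return 0;
}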
@@ -1290,10 +1271,6 @@ int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
err = reply_sz;
goto free_vport_params;
}
- if (reply_sz < IDPF_CTLQ_MAX_BUF_LEN) {
- err = -EIO;
- goto free_vport_params;
- }
return 0;
@@ -1320,10 +1297,17 @@ int idpf_check_supported_desc_ids(struct idpf_vport *vport)
vport_msg = adapter->vport_params_recvd[vport->idx];
+ if (!IS_ENABLED(CONFIG_IDPF_SINGLEQ) &&
+ (vport_msg->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE ||
+ vport_msg->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)) {
+ pci_err(adapter->pdev, "singleq mode requested, but not compiled-in\n");
+ return -EOPNOTSUPP;
+ }
+
rx_desc_ids = le64_to_cpu(vport_msg->rx_desc_ids);
tx_desc_ids = le64_to_cpu(vport_msg->tx_desc_ids);
- if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+ if (idpf_is_queue_model_split(vport->rxq_model)) {
if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M)) {
dev_info(&adapter->pdev->dev, "Minimum RX descriptor support not provided, using the default\n");
vport_msg->rx_desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
@@ -1333,7 +1317,7 @@ int idpf_check_supported_desc_ids(struct idpf_vport *vport)
vport->base_rxd = true;
}
- if (vport->txq_model != VIRTCHNL2_QUEUE_MODEL_SPLIT)
+ if (!idpf_is_queue_model_split(vport->txq_model))
return 0;
if ((tx_desc_ids & MIN_SUPPORT_TXDID) != MIN_SUPPORT_TXDID) {
@@ -1449,19 +1433,19 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
qi[k].model =
cpu_to_le16(vport->txq_model);
qi[k].type =
- cpu_to_le32(tx_qgrp->txqs[j]->q_type);
+ cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
qi[k].ring_len =
cpu_to_le16(tx_qgrp->txqs[j]->desc_count);
qi[k].dma_ring_addr =
cpu_to_le64(tx_qgrp->txqs[j]->dma);
if (idpf_is_queue_model_split(vport->txq_model)) {
- struct idpf_queue *q = tx_qgrp->txqs[j];
+ struct idpf_tx_queue *q = tx_qgrp->txqs[j];
qi[k].tx_compl_queue_id =
cpu_to_le16(tx_qgrp->complq->q_id);
qi[k].relative_queue_id = cpu_to_le16(j);
- if (test_bit(__IDPF_Q_FLOW_SCH_EN, q->flags))
+ if (idpf_queue_has(FLOW_SCH_EN, q))
qi[k].sched_mode =
cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_FLOW);
else
@@ -1478,11 +1462,11 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
qi[k].queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
qi[k].model = cpu_to_le16(vport->txq_model);
- qi[k].type = cpu_to_le32(tx_qgrp->complq->q_type);
+ qi[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
qi[k].ring_len = cpu_to_le16(tx_qgrp->complq->desc_count);
qi[k].dma_ring_addr = cpu_to_le64(tx_qgrp->complq->dma);
- if (test_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags))
+ if (idpf_queue_has(FLOW_SCH_EN, tx_qgrp->complq))
sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
else
sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
@@ -1567,17 +1551,18 @@ static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
goto setup_rxqs;
for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) {
- struct idpf_queue *bufq =
+ struct idpf_buf_queue *bufq =
&rx_qgrp->splitq.bufq_sets[j].bufq;
qi[k].queue_id = cpu_to_le32(bufq->q_id);
qi[k].model = cpu_to_le16(vport->rxq_model);
- qi[k].type = cpu_to_le32(bufq->q_type);
+ qi[k].type =
+ cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
qi[k].desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
qi[k].ring_len = cpu_to_le16(bufq->desc_count);
qi[k].dma_ring_addr = cpu_to_le64(bufq->dma);
qi[k].data_buffer_size = cpu_to_le32(bufq->rx_buf_size);
- qi[k].buffer_notif_stride = bufq->rx_buf_stride;
+ qi[k].buffer_notif_stride = IDPF_RX_BUF_STRIDE;
qi[k].rx_buffer_low_watermark =
cpu_to_le16(bufq->rx_buffer_low_watermark);
if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
@@ -1591,35 +1576,47 @@ setup_rxqs:
num_rxq = rx_qgrp->singleq.num_rxq;
for (j = 0; j < num_rxq; j++, k++) {
- struct idpf_queue *rxq;
+ const struct idpf_bufq_set *sets;
+ struct idpf_rx_queue *rxq;
if (!idpf_is_queue_model_split(vport->rxq_model)) {
rxq = rx_qgrp->singleq.rxqs[j];
goto common_qi_fields;
}
+
rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq;
- qi[k].rx_bufq1_id =
- cpu_to_le16(rxq->rxq_grp->splitq.bufq_sets[0].bufq.q_id);
+ sets = rxq->bufq_sets;
+
+ /* In splitq mode, RXQ buffer size should be
+ * set to that of the first buffer queue
+ * associated with this RXQ.
+ */
+ rxq->rx_buf_size = sets[0].bufq.rx_buf_size;
+
+ qi[k].rx_bufq1_id = cpu_to_le16(sets[0].bufq.q_id);
if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) {
qi[k].bufq2_ena = IDPF_BUFQ2_ENA;
qi[k].rx_bufq2_id =
- cpu_to_le16(rxq->rxq_grp->splitq.bufq_sets[1].bufq.q_id);
+ cpu_to_le16(sets[1].bufq.q_id);
}
qi[k].rx_buffer_low_watermark =
cpu_to_le16(rxq->rx_buffer_low_watermark);
if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC);
-common_qi_fields:
- if (rxq->rx_hsplit_en) {
+ rxq->rx_hbuf_size = sets[0].bufq.rx_hbuf_size;
+
+ if (idpf_queue_has(HSPLIT_EN, rxq)) {
qi[k].qflags |=
cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT);
qi[k].hdr_buffer_size =
cpu_to_le16(rxq->rx_hbuf_size);
}
+
+common_qi_fields:
qi[k].queue_id = cpu_to_le32(rxq->q_id);
qi[k].model = cpu_to_le16(vport->rxq_model);
- qi[k].type = cpu_to_le32(rxq->q_type);
+ qi[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
qi[k].ring_len = cpu_to_le16(rxq->desc_count);
qi[k].dma_ring_addr = cpu_to_le64(rxq->dma);
qi[k].max_pkt_size = cpu_to_le32(rxq->rx_max_pkt_size);
@@ -1706,7 +1703,7 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool ena)
struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
- qc[k].type = cpu_to_le32(tx_qgrp->txqs[j]->q_type);
+ qc[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
qc[k].start_queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id);
qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
}
@@ -1720,7 +1717,7 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool ena)
for (i = 0; i < vport->num_txq_grp; i++, k++) {
struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
- qc[k].type = cpu_to_le32(tx_qgrp->complq->q_type);
+ qc[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
qc[k].start_queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
}
@@ -1741,12 +1738,12 @@ setup_rx:
qc[k].start_queue_id =
cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_id);
qc[k].type =
- cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_type);
+ cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
} else {
qc[k].start_queue_id =
cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_id);
qc[k].type =
- cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_type);
+ cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
}
qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
}
@@ -1761,10 +1758,11 @@ setup_rx:
struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) {
- struct idpf_queue *q;
+ const struct idpf_buf_queue *q;
q = &rx_qgrp->splitq.bufq_sets[j].bufq;
- qc[k].type = cpu_to_le32(q->q_type);
+ qc[k].type =
+ cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
qc[k].start_queue_id = cpu_to_le32(q->q_id);
qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
}
@@ -1849,7 +1847,8 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
- vqv[k].queue_type = cpu_to_le32(tx_qgrp->txqs[j]->q_type);
+ vqv[k].queue_type =
+ cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
vqv[k].queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id);
if (idpf_is_queue_model_split(vport->txq_model)) {
@@ -1879,14 +1878,15 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
num_rxq = rx_qgrp->singleq.num_rxq;
for (j = 0; j < num_rxq; j++, k++) {
- struct idpf_queue *rxq;
+ struct idpf_rx_queue *rxq;
if (idpf_is_queue_model_split(vport->rxq_model))
rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq;
else
rxq = rx_qgrp->singleq.rxqs[j];
- vqv[k].queue_type = cpu_to_le32(rxq->q_type);
+ vqv[k].queue_type =
+ cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
vqv[k].queue_id = cpu_to_le32(rxq->q_id);
vqv[k].vector_id = cpu_to_le16(rxq->q_vector->v_idx);
vqv[k].itr_idx = cpu_to_le32(rxq->q_vector->rx_itr_idx);
@@ -1975,7 +1975,7 @@ int idpf_send_disable_queues_msg(struct idpf_vport *vport)
* queues virtchnl message is sent
*/
for (i = 0; i < vport->num_txq; i++)
- set_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags);
+ idpf_queue_set(POLL_MODE, vport->txqs[i]);
/* schedule the napi to receive all the marker packets */
local_bh_disable();
@@ -2469,39 +2469,52 @@ do_memcpy:
* @frag: fragmentation allowed
*
*/
-static void idpf_fill_ptype_lookup(struct idpf_rx_ptype_decoded *ptype,
+static void idpf_fill_ptype_lookup(struct libeth_rx_pt *ptype,
struct idpf_ptype_state *pstate,
bool ipv4, bool frag)
{
if (!pstate->outer_ip || !pstate->outer_frag) {
- ptype->outer_ip = IDPF_RX_PTYPE_OUTER_IP;
pstate->outer_ip = true;
if (ipv4)
- ptype->outer_ip_ver = IDPF_RX_PTYPE_OUTER_IPV4;
+ ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV4;
else
- ptype->outer_ip_ver = IDPF_RX_PTYPE_OUTER_IPV6;
+ ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV6;
if (frag) {
- ptype->outer_frag = IDPF_RX_PTYPE_FRAG;
+ ptype->outer_frag = LIBETH_RX_PT_FRAG;
pstate->outer_frag = true;
}
} else {
- ptype->tunnel_type = IDPF_RX_PTYPE_TUNNEL_IP_IP;
+ ptype->tunnel_type = LIBETH_RX_PT_TUNNEL_IP_IP;
pstate->tunnel_state = IDPF_PTYPE_TUNNEL_IP;
if (ipv4)
- ptype->tunnel_end_prot =
- IDPF_RX_PTYPE_TUNNEL_END_IPV4;
+ ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV4;
else
- ptype->tunnel_end_prot =
- IDPF_RX_PTYPE_TUNNEL_END_IPV6;
+ ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV6;
if (frag)
- ptype->tunnel_end_frag = IDPF_RX_PTYPE_FRAG;
+ ptype->tunnel_end_frag = LIBETH_RX_PT_FRAG;
}
}
+static void idpf_finalize_ptype_lookup(struct libeth_rx_pt *ptype)
+{
+ if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 &&
+ ptype->inner_prot)
+ ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L4;
+ else if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 &&
+ ptype->outer_ip)
+ ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L3;
+ else if (ptype->outer_ip == LIBETH_RX_PT_OUTER_L2)
+ ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L2;
+ else
+ ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_NONE;
+
+ libeth_rx_pt_gen_hash_type(ptype);
+}
+
/**
* idpf_send_get_rx_ptype_msg - Send virtchnl for ptype info
* @vport: virtual port data structure
@@ -2512,7 +2525,7 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
{
struct virtchnl2_get_ptype_info *get_ptype_info __free(kfree) = NULL;
struct virtchnl2_get_ptype_info *ptype_info __free(kfree) = NULL;
- struct idpf_rx_ptype_decoded *ptype_lkup = vport->rx_ptype_lkup;
+ struct libeth_rx_pt *ptype_lkup __free(kfree) = NULL;
int max_ptype, ptypes_recvd = 0, ptype_offset;
struct idpf_adapter *adapter = vport->adapter;
struct idpf_vc_xn_params xn_params = {};
@@ -2520,12 +2533,17 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
ssize_t reply_sz;
int i, j, k;
+ if (vport->rx_ptype_lkup)
+ return 0;
+
if (idpf_is_queue_model_split(vport->rxq_model))
max_ptype = IDPF_RX_MAX_PTYPE;
else
max_ptype = IDPF_RX_MAX_BASE_PTYPE;
- memset(vport->rx_ptype_lkup, 0, sizeof(vport->rx_ptype_lkup));
+ ptype_lkup = kcalloc(max_ptype, sizeof(*ptype_lkup), GFP_KERNEL);
+ if (!ptype_lkup)
+ return -ENOMEM;
get_ptype_info = kzalloc(sizeof(*get_ptype_info), GFP_KERNEL);
if (!get_ptype_info)
@@ -2556,9 +2574,6 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
if (reply_sz < 0)
return reply_sz;
- if (reply_sz < IDPF_CTLQ_MAX_BUF_LEN)
- return -EIO;
-
ptypes_recvd += le16_to_cpu(ptype_info->num_ptypes);
if (ptypes_recvd > max_ptype)
return -EINVAL;
@@ -2583,16 +2598,13 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
/* 0xFFFF indicates end of ptypes */
if (le16_to_cpu(ptype->ptype_id_10) ==
IDPF_INVALID_PTYPE_ID)
- return 0;
+ goto out;
if (idpf_is_queue_model_split(vport->rxq_model))
k = le16_to_cpu(ptype->ptype_id_10);
else
k = ptype->ptype_id_8;
- if (ptype->proto_id_count)
- ptype_lkup[k].known = 1;
-
for (j = 0; j < ptype->proto_id_count; j++) {
id = le16_to_cpu(ptype->proto_id[j]);
switch (id) {
@@ -2600,18 +2612,18 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
if (pstate.tunnel_state ==
IDPF_PTYPE_TUNNEL_IP) {
ptype_lkup[k].tunnel_type =
- IDPF_RX_PTYPE_TUNNEL_IP_GRENAT;
+ LIBETH_RX_PT_TUNNEL_IP_GRENAT;
pstate.tunnel_state |=
IDPF_PTYPE_TUNNEL_IP_GRENAT;
}
break;
case VIRTCHNL2_PROTO_HDR_MAC:
ptype_lkup[k].outer_ip =
- IDPF_RX_PTYPE_OUTER_L2;
+ LIBETH_RX_PT_OUTER_L2;
if (pstate.tunnel_state ==
IDPF_TUN_IP_GRE) {
ptype_lkup[k].tunnel_type =
- IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC;
+ LIBETH_RX_PT_TUNNEL_IP_GRENAT_MAC;
pstate.tunnel_state |=
IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC;
}
@@ -2638,23 +2650,23 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
break;
case VIRTCHNL2_PROTO_HDR_UDP:
ptype_lkup[k].inner_prot =
- IDPF_RX_PTYPE_INNER_PROT_UDP;
+ LIBETH_RX_PT_INNER_UDP;
break;
case VIRTCHNL2_PROTO_HDR_TCP:
ptype_lkup[k].inner_prot =
- IDPF_RX_PTYPE_INNER_PROT_TCP;
+ LIBETH_RX_PT_INNER_TCP;
break;
case VIRTCHNL2_PROTO_HDR_SCTP:
ptype_lkup[k].inner_prot =
- IDPF_RX_PTYPE_INNER_PROT_SCTP;
+ LIBETH_RX_PT_INNER_SCTP;
break;
case VIRTCHNL2_PROTO_HDR_ICMP:
ptype_lkup[k].inner_prot =
- IDPF_RX_PTYPE_INNER_PROT_ICMP;
+ LIBETH_RX_PT_INNER_ICMP;
break;
case VIRTCHNL2_PROTO_HDR_PAY:
ptype_lkup[k].payload_layer =
- IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2;
+ LIBETH_RX_PT_PAYLOAD_L2;
break;
case VIRTCHNL2_PROTO_HDR_ICMPV6:
case VIRTCHNL2_PROTO_HDR_IPV6_EH:
@@ -2708,9 +2720,14 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
break;
}
}
+
+ idpf_finalize_ptype_lookup(&ptype_lkup[k]);
}
}
+out:
+ vport->rx_ptype_lkup = no_free_ptr(ptype_lkup);
+
return 0;
}
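Here ptype_lkup is declared with __free(kfree), so every early error return in the function above frees the table automatically; no_free_ptr() then disarms the cleanup once ownership moves to vport->rx_ptype_lkup. A compiler-level sketch of the same pattern using the raw cleanup attribute that the kernel's <linux/cleanup.h> macros wrap:

#include <stdio.h>
#include <stdlib.h>

static void freep(void *p)
{
	free(*(void **)p);
}

static int build_table(int **out, int fail)
{
	/* Freed automatically on every return path while still armed */
	int *tbl __attribute__((cleanup(freep))) = calloc(16, sizeof(*tbl));

	if (!tbl)
		return -1;
	if (fail)
		return -1;	/* early return: cleanup frees tbl here */

	*out = tbl;
	tbl = NULL;		/* ~ no_free_ptr(): disarm, ownership moved */
	return 0;
}

int main(void)
{
	int *tbl = NULL;

	if (build_table(&tbl, 0) == 0)
		printf("table handed over: %p\n", (void *)tbl);
	free(tbl);
	return 0;
}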
@@ -2984,6 +3001,11 @@ restart:
goto err_intr_req;
}
+ err = idpf_ptp_init(adapter);
+ if (err)
+ pci_err(adapter->pdev, "PTP init failed, err=%pe\n",
+ ERR_PTR(err));
+
idpf_init_avail_queues(adapter);
/* Skew the delay for init tasks for each function based on fn number
@@ -3022,7 +3044,6 @@ init_failed:
adapter->state = __IDPF_VER_CHECK;
if (adapter->vcxn_mngr)
idpf_vc_xn_shutdown(adapter->vcxn_mngr);
- idpf_deinit_dflt_mbx(adapter);
set_bit(IDPF_HR_DRV_LOAD, adapter->flags);
queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task,
msecs_to_jiffies(task_delay));
@@ -3037,13 +3058,23 @@ init_failed:
*/
void idpf_vc_core_deinit(struct idpf_adapter *adapter)
{
+ bool remove_in_prog;
+
if (!test_bit(IDPF_VC_CORE_INIT, adapter->flags))
return;
- idpf_vc_xn_shutdown(adapter->vcxn_mngr);
+ /* Avoid transaction timeouts when called during reset */
+ remove_in_prog = test_bit(IDPF_REMOVE_IN_PROG, adapter->flags);
+ if (!remove_in_prog)
+ idpf_vc_xn_shutdown(adapter->vcxn_mngr);
+
+ idpf_ptp_release(adapter);
idpf_deinit_task(adapter);
idpf_intr_rel(adapter);
+ if (remove_in_prog)
+ idpf_vc_xn_shutdown(adapter->vcxn_mngr);
+
cancel_delayed_work_sync(&adapter->serv_task);
cancel_delayed_work_sync(&adapter->mbx_task);
@@ -3105,6 +3136,7 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
u16 rx_itr[] = {2, 8, 32, 96, 128};
struct idpf_rss_data *rss_data;
u16 idx = vport->idx;
+ int err;
vport_config = adapter->vport_config[idx];
rss_data = &vport_config->user_config.rss_data;
@@ -3125,7 +3157,7 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
rss_data->rss_lut_size = le16_to_cpu(vport_msg->rss_lut_size);
ether_addr_copy(vport->default_mac_addr, vport_msg->default_mac_addr);
- vport->max_mtu = le16_to_cpu(vport_msg->max_mtu) - IDPF_PACKET_HDR_PAD;
+ vport->max_mtu = le16_to_cpu(vport_msg->max_mtu) - LIBETH_RX_LL_LEN;
/* Initialize Tx and Rx profiles for Dynamic Interrupt Moderation */
memcpy(vport->rx_itr_profile, rx_itr, IDPF_DIM_PROFILE_SLOTS);
@@ -3139,6 +3171,18 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
idpf_vport_alloc_vec_indexes(vport);
vport->crc_enable = adapter->crc_enable;
+
+ if (!(vport_msg->vport_flags &
+ cpu_to_le16(VIRTCHNL2_VPORT_UPLINK_PORT)))
+ return;
+
+ err = idpf_ptp_get_vport_tstamps_caps(vport);
+ if (err) {
+ pci_dbg(vport->adapter->pdev, "Tx timestamping not supported\n");
+ return;
+ }
+
+ INIT_WORK(&vport->tstamp_task, idpf_tstamp_task);
}
/**
@@ -3242,7 +3286,6 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
int num_qids,
u32 q_type)
{
- struct idpf_queue *q;
int i, j, k = 0;
switch (q_type) {
@@ -3250,11 +3293,8 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
for (i = 0; i < vport->num_txq_grp; i++) {
struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
- for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++) {
+ for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++)
tx_qgrp->txqs[j]->q_id = qids[k];
- tx_qgrp->txqs[j]->q_type =
- VIRTCHNL2_QUEUE_TYPE_TX;
- }
}
break;
case VIRTCHNL2_QUEUE_TYPE_RX:
@@ -3268,12 +3308,13 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
num_rxq = rx_qgrp->singleq.num_rxq;
for (j = 0; j < num_rxq && k < num_qids; j++, k++) {
+ struct idpf_rx_queue *q;
+
if (idpf_is_queue_model_split(vport->rxq_model))
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
else
q = rx_qgrp->singleq.rxqs[j];
q->q_id = qids[k];
- q->q_type = VIRTCHNL2_QUEUE_TYPE_RX;
}
}
break;
@@ -3282,8 +3323,6 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
tx_qgrp->complq->q_id = qids[k];
- tx_qgrp->complq->q_type =
- VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
}
break;
case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
@@ -3292,9 +3331,10 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
u8 num_bufqs = vport->num_bufqs_per_qgrp;
for (j = 0; j < num_bufqs && k < num_qids; j++, k++) {
+ struct idpf_buf_queue *q;
+
q = &rx_qgrp->splitq.bufq_sets[j].bufq;
q->q_id = qids[k];
- q->q_type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
}
}
break;