author    David S. Miller <davem@davemloft.net>  2021-04-20 17:03:53 -0700
committer David S. Miller <davem@davemloft.net>  2021-04-20 17:03:53 -0700
commit    eeddfd8e8d392bc94968d87e7a408ba9e9be4722 (patch)
tree      5c267d126e1c9d81b4b8b1a69b0aa6415dd9c4b8
parent    4acd47644ef1e1c8f8f5bc40b7cf1c5b9bcbbc4e (diff)
parent    172e269edfce34bac7c61c15551816bda4b0f140 (diff)
Merge branch 'sfc-txq-lookups'
Edward Cree says:

====================
sfc: fix TXQ lookups

The TXQ handling changes in 12804793b17c ("sfc: decouple TXQ type from
label"), made as part of the support for encap offloads on EF10, broke
TXQ lookups on Siena (5000- and 6000-series) NICs, leading to
null-dereference kernel panics. This series fixes those issues, and
also a similarly incorrect code path on EF10 that had only worked by
chance.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c    3
-rw-r--r--  drivers/net/ethernet/sfc/farch.c  16
2 files changed, 9 insertions, 10 deletions
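To make the shape of the fix concrete, here is a minimal, stand-alone C sketch of the per-channel lookup the patches switch to. The struct layouts, the tx_queue_from_label() helper name, and the EFX_MAX_TXQ_PER_CHANNEL value below are simplified illustrations, not the sfc driver's real definitions; only the indexing expression mirrors the hunks in the diff that follows.

/*
 * Sketch only: simplified stand-ins for the driver's structures.
 */
#include <stdio.h>

#define EFX_MAX_TXQ_PER_CHANNEL 4	/* assumed value, for illustration */

struct efx_tx_queue {
	unsigned int label;	/* label reported by hardware in TX events */
	unsigned int queue;	/* some per-queue identifier */
};

struct efx_channel {
	/* A fixed-size per-channel array, so indexing by label stays in
	 * bounds once reduced modulo EFX_MAX_TXQ_PER_CHANNEL.
	 */
	struct efx_tx_queue tx_queue[EFX_MAX_TXQ_PER_CHANNEL];
};

/* The hunks below replace calls to efx_channel_get_tx_queue() with a
 * direct index into the channel's tx_queue[] array, keyed by the label
 * reported in the TX event; this hypothetical helper mirrors that
 * expression.
 */
static struct efx_tx_queue *
tx_queue_from_label(struct efx_channel *channel, unsigned int tx_ev_q_label)
{
	return channel->tx_queue + (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
}

int main(void)
{
	struct efx_channel channel;
	unsigned int i;

	for (i = 0; i < EFX_MAX_TXQ_PER_CHANNEL; i++) {
		channel.tx_queue[i].label = i;
		channel.tx_queue[i].queue = 100 + i;	/* arbitrary ids */
	}

	/* A TX completion event carrying label 2 maps to the third slot. */
	printf("label 2 -> queue %u\n", tx_queue_from_label(&channel, 2)->queue);
	return 0;
}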
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index da6886dcac37..4fa72b573c17 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -2928,8 +2928,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
 	/* Get the transmit queue */
 	tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
-	tx_queue = efx_channel_get_tx_queue(channel,
-					    tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
+	tx_queue = channel->tx_queue + (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
 	if (!tx_queue->timestamping) {
 		/* Transmit completion */
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index d75cf5ff5686..49df02ecee91 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -835,14 +835,14 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
 		/* Transmit completion */
 		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
 		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
-		tx_queue = efx_channel_get_tx_queue(
-			channel, tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
+		tx_queue = channel->tx_queue +
+			   (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
 		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
 	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
 		/* Rewrite the FIFO write pointer */
 		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
-		tx_queue = efx_channel_get_tx_queue(
-			channel, tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
+		tx_queue = channel->tx_queue +
+			   (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
 		netif_tx_lock(efx->net_dev);
 		efx_farch_notify_tx_desc(tx_queue);
@@ -1081,16 +1081,16 @@ static void
 efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
 {
 	struct efx_tx_queue *tx_queue;
+	struct efx_channel *channel;
 	int qid;
 	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
 	if (qid < EFX_MAX_TXQ_PER_CHANNEL * (efx->n_tx_channels + efx->n_extra_tx_channels)) {
-		tx_queue = efx_get_tx_queue(efx, qid / EFX_MAX_TXQ_PER_CHANNEL,
-					    qid % EFX_MAX_TXQ_PER_CHANNEL);
-		if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
+		channel = efx_get_tx_channel(efx, qid / EFX_MAX_TXQ_PER_CHANNEL);
+		tx_queue = channel->tx_queue + (qid % EFX_MAX_TXQ_PER_CHANNEL);
+		if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0))
 			efx_farch_magic_event(tx_queue->channel,
 					      EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
-		}
 	}
 }
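As a quick reference for the flush-done hunk above, the following stand-alone sketch shows how a driver-event qid splits into a TX channel index and a per-channel queue slot via division and modulo by EFX_MAX_TXQ_PER_CHANNEL; the constant's value here is assumed purely for illustration and is not the driver's real definition.

/* Sketch of the qid -> (channel, per-channel queue) decomposition used
 * by efx_farch_handle_tx_flush_done() above, under the assumption that
 * qid = channel * EFX_MAX_TXQ_PER_CHANNEL + queue.
 */
#include <stdio.h>

#define EFX_MAX_TXQ_PER_CHANNEL 4	/* assumed value, for illustration */

int main(void)
{
	unsigned int qid;

	for (qid = 0; qid < 12; qid++) {
		unsigned int channel = qid / EFX_MAX_TXQ_PER_CHANNEL;
		unsigned int queue   = qid % EFX_MAX_TXQ_PER_CHANNEL;

		printf("qid %2u -> channel %u, tx_queue[%u]\n",
		       qid, channel, queue);
	}
	return 0;
}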