author      Hariprasad Shenai <hariprasad@chelsio.com>    2016-04-11 11:07:58 +0530
committer   David S. Miller <davem@davemloft.net>         2016-04-11 12:21:39 -0400
commit      ebf4dc2b1b2b9c8c7797f01a952bce3cf0247a4f (patch)
tree        16a4d0a8f98413ada10b25d77d7d5c036edba015 /drivers/net/ethernet/chelsio/cxgb4/sge.c
parent      734e00fa02eff5003827abc06a7ebf9449349109 (diff)
cxgb4: Stop Rx Queues before freeing them up
Stop all Ethernet RX Queues before freeing up the various Ingress/Egress Queues, etc. We were seeing cases of Ingress Queues not getting serviced during the shutdown process, leading to Ingress Paths jamming up through the chip and blocking the shutdown effort itself.

One such case involved the Firmware sending a "Flush Token" through the ULP-TX -> ULP-RX path for an Ethernet TX Queue being freed, in order to make sure there weren't any remaining TX Work Requests in the pipeline. But the return path was stalled by Ingress Data that could not be delivered to the Host because those Ingress Queues were no longer being serviced.

Based on original work by Casey Leedom <leedom@chelsio.com>

Signed-off-by: Hariprasad Shenai <hariprasad@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
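The commit message describes a classic two-phase teardown: quiesce every ingress queue first so in-flight traffic can drain, and only then release the queue resources. Below is a minimal userspace C sketch of that ordering; the struct queue, queue_stop(), and queue_free() names are hypothetical stand-ins for illustration, not cxgb4 or firmware API.

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdbool.h>

    /* Hypothetical queue object standing in for an SGE ingress/egress pair. */
    struct queue {
            bool active;    /* still being serviced? */
            int *ring;      /* descriptor ring memory */
    };

    /* Phase 1: stop servicing, but keep memory so in-flight work can drain. */
    static void queue_stop(struct queue *q)
    {
            q->active = false;
    }

    /* Phase 2: release resources; safe only once nothing can land in the ring. */
    static void queue_free(struct queue *q)
    {
            free(q->ring);
            q->ring = NULL;
    }

    int main(void)
    {
            enum { NQUEUES = 4 };
            struct queue qs[NQUEUES];

            for (int i = 0; i < NQUEUES; i++) {
                    qs[i].active = true;
                    qs[i].ring = calloc(64, sizeof(int));
            }

            /* Stop ALL queues first; freeing one queue while another can
             * still push work toward it is exactly the stall scenario the
             * commit message describes. */
            for (int i = 0; i < NQUEUES; i++)
                    queue_stop(&qs[i]);

            for (int i = 0; i < NQUEUES; i++)
                    queue_free(&qs[i]);

            puts("teardown complete");
            return 0;
    }

The important property is the two separate loops: a combined stop-and-free loop would tear down some queues while others still feed traffic toward them.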
Diffstat (limited to 'drivers/net/ethernet/chelsio/cxgb4/sge.c')
-rw-r--r--   drivers/net/ethernet/chelsio/cxgb4/sge.c   20
1 file changed, 17 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 13b144bcf725..6278e5a74b74 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2981,14 +2981,28 @@ void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
 void t4_free_sge_resources(struct adapter *adap)
 {
         int i;
-        struct sge_eth_rxq *eq = adap->sge.ethrxq;
-        struct sge_eth_txq *etq = adap->sge.ethtxq;
+        struct sge_eth_rxq *eq;
+        struct sge_eth_txq *etq;
+
+        /* stop all Rx queues in order to start them draining */
+        for (i = 0; i < adap->sge.ethqsets; i++) {
+                eq = &adap->sge.ethrxq[i];
+                if (eq->rspq.desc)
+                        t4_iq_stop(adap, adap->mbox, adap->pf, 0,
+                                   FW_IQ_TYPE_FL_INT_CAP,
+                                   eq->rspq.cntxt_id,
+                                   eq->fl.size ? eq->fl.cntxt_id : 0xffff,
+                                   0xffff);
+        }
 
         /* clean up Ethernet Tx/Rx queues */
-        for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) {
+        for (i = 0; i < adap->sge.ethqsets; i++) {
+                eq = &adap->sge.ethrxq[i];
                 if (eq->rspq.desc)
                         free_rspq_fl(adap, &eq->rspq,
                                      eq->fl.size ? &eq->fl : NULL);
+
+                etq = &adap->sge.ethtxq[i];
                 if (etq->q.desc) {
                         t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
                                        etq->q.cntxt_id);
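Two details of the new code are worth noting. The reworked loop derives eq and etq from the queue arrays on each iteration instead of post-incrementing pointers in the loop header, which keeps the two passes (stop, then free) symmetric and the pointers valid in each. And in the t4_iq_stop() call, the 0xffff values appear to act as "no such queue" identifiers when a free list is absent; that reading is inferred from the eq->fl.size ? eq->fl.cntxt_id : 0xffff expression in the diff itself, not from firmware interface documentation.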