author     Ivan Khoronzhuk <ivan.khoronzhuk@linaro.org>   2019-06-15 14:01:32 +0300
committer  David S. Miller <davem@davemloft.net>           2019-06-16 14:03:25 -0700
commit     871e846585919adf727f21398f433fd424b6f0e1 (patch)
tree       19da3891f6595a817ddb9f1c8895a1f71aa83ec7 /drivers/net/ethernet/ti/cpsw_ethtool.c
parent     4e18a8a149d5577a171ddc5fe80cc715041b193d (diff)
net: ethernet: ti: davinci_cpdma: use idled submit
While the data pass is suspended, reuse of rx descriptors can be disabled by relying on the channel state and lock in the cpdma layer. For this, submission to a channel is rejected under the channel lock when the channel is not active, which is what this patch does. The same submit path is used to fill the rx channel in ndo_open, while the channel is still idle, so add an idled submit routine that allows descriptors to be prepared for an idle channel. All this simplifies the code, helps avoid dormant mode usage and sends packets only to active channels, avoiding a potential race in later changes. Also add the missed sync barrier after stopping the tx queues, analogous to other places.

Signed-off-by: Ivan Khoronzhuk <ivan.khoronzhuk@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
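For illustration, a minimal sketch of the submit scheme described above: a normal submit rejects descriptors under the channel lock unless the channel is active, while an "idled" submit variant accepts descriptors on an idle channel so rx can be pre-filled before the channel is started. All names here (sketch_chan, sketch_chan_submit, sketch_chan_idle_submit, the state enum) are hypothetical and are not the actual davinci_cpdma code.

/* Hypothetical sketch only; names, fields and error codes are illustrative
 * and do not mirror the real davinci_cpdma implementation.
 */
#include <linux/spinlock.h>
#include <linux/errno.h>

enum sketch_chan_state { CHAN_IDLE, CHAN_ACTIVE, CHAN_TEARDOWN };

struct sketch_chan {
	spinlock_t		lock;
	enum sketch_chan_state	state;
};

/* Queue 'desc' to the hardware channel; details omitted in this sketch. */
static int __sketch_chan_submit(struct sketch_chan *chan, void *desc)
{
	return 0;
}

/* Normal submit: only an active channel accepts new descriptors, checked
 * under the same lock that guards state transitions.
 */
static int sketch_chan_submit(struct sketch_chan *chan, void *desc)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CHAN_ACTIVE) {
		ret = -EINVAL;
		goto unlock;
	}
	ret = __sketch_chan_submit(chan, desc);
unlock:
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}

/* Idled submit: used while the channel has not been started yet, e.g. when
 * pre-filling rx descriptors from ndo_open, so only an idle channel is
 * accepted here.
 */
static int sketch_chan_idle_submit(struct sketch_chan *chan, void *desc)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CHAN_IDLE) {
		ret = -EINVAL;
		goto unlock;
	}
	ret = __sketch_chan_submit(chan, desc);
unlock:
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}

Because the state check and the state transitions share one lock, a channel that is being stopped cannot race with a late submission, which is why the dormant-mode workaround in the cpsw ethtool path below is no longer needed.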
Diffstat (limited to 'drivers/net/ethernet/ti/cpsw_ethtool.c')
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ethtool.c  12
1 file changed, 3 insertions(+), 9 deletions(-)
diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c
index 86697b32194d..f60dc1dfc443 100644
--- a/drivers/net/ethernet/ti/cpsw_ethtool.c
+++ b/drivers/net/ethernet/ti/cpsw_ethtool.c
@@ -464,7 +464,6 @@ static void cpsw_suspend_data_pass(struct net_device *ndev)
cpsw_intr_disable(cpsw);
/* Stop all transmit queues for every network device.
- * Disable re-using rx descriptors with dormant_on.
*/
for (i = 0; i < cpsw->data.slaves; i++) {
ndev = cpsw->slaves[i].ndev;
@@ -472,7 +471,9 @@ static void cpsw_suspend_data_pass(struct net_device *ndev)
continue;
netif_tx_stop_all_queues(ndev);
- netif_dormant_on(ndev);
+
+ /* Barrier, so that stop_queue visible to other cpus */
+ smp_mb__after_atomic();
}
/* Handle rest of tx packets and stop cpdma channels */
@@ -485,13 +486,6 @@ static int cpsw_resume_data_pass(struct net_device *ndev)
struct cpsw_common *cpsw = priv->cpsw;
int i, ret;
- /* Allow rx packets handling */
- for (i = 0; i < cpsw->data.slaves; i++) {
- ndev = cpsw->slaves[i].ndev;
- if (ndev && netif_running(ndev))
- netif_dormant_off(ndev);
- }
-
/* After this receive is started */
if (cpsw->usage_count) {
ret = cpsw_fill_rx_channels(priv);