path: root/drivers/net/ethernet/ti/cpsw_ethtool.c
Diffstat (limited to 'drivers/net/ethernet/ti/cpsw_ethtool.c')
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ethtool.c | 97
1 file changed, 62 insertions(+), 35 deletions(-)
diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c
index 6d1c9ebae7cc..31248a6cc642 100644
--- a/drivers/net/ethernet/ti/cpsw_ethtool.c
+++ b/drivers/net/ethernet/ti/cpsw_ethtool.c
@@ -458,21 +458,22 @@ int cpsw_nway_reset(struct net_device *ndev)
static void cpsw_suspend_data_pass(struct net_device *ndev)
{
struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
- struct cpsw_slave *slave;
int i;
/* Disable NAPI scheduling */
cpsw_intr_disable(cpsw);
/* Stop all transmit queues for every network device.
- * Disable re-using rx descriptors with dormant_on.
*/
- for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
- if (!(slave->ndev && netif_running(slave->ndev)))
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ ndev = cpsw->slaves[i].ndev;
+ if (!(ndev && netif_running(ndev)))
continue;
- netif_tx_stop_all_queues(slave->ndev);
- netif_dormant_on(slave->ndev);
+ netif_tx_stop_all_queues(ndev);
+
+ /* Barrier, so that stop_queue visible to other cpus */
+ smp_mb__after_atomic();
}
/* Handle rest of tx packets and stop cpdma channels */
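
For context on the smp_mb__after_atomic() added above: netif_tx_stop_all_queues() only sets the per-queue XOFF bits with atomic set_bit(), which does not by itself order later accesses, so the patch fences before the cpdma channels are torn down. Below is a minimal sketch of the same stop-then-fence pattern; foo_stop_data_path() and foo_drain_tx_and_stop_dma() are hypothetical names, not part of this driver.

#include <linux/netdevice.h>
#include <linux/atomic.h>

/* Hypothetical teardown helper, standing in for cpdma_ctlr_stop()-style
 * draining of the DMA channels in a real driver.
 */
static void foo_drain_tx_and_stop_dma(struct net_device *ndev)
{
}

static void foo_stop_data_path(struct net_device *ndev)
{
	/* Sets __QUEUE_STATE_DRV_XOFF on every tx queue via atomic
	 * set_bit(); this implies no memory barrier on its own.
	 */
	netif_tx_stop_all_queues(ndev);

	/* Make the stopped state visible to the xmit path on other CPUs
	 * before the DMA channels disappear underneath it.
	 */
	smp_mb__after_atomic();

	foo_drain_tx_and_stop_dma(ndev);
}
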
@@ -483,14 +484,8 @@ static int cpsw_resume_data_pass(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
struct cpsw_common *cpsw = priv->cpsw;
- struct cpsw_slave *slave;
int i, ret;
- /* Allow rx packets handling */
- for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++)
- if (slave->ndev && netif_running(slave->ndev))
- netif_dormant_off(slave->ndev);
-
/* After this receive is started */
if (cpsw->usage_count) {
ret = cpsw_fill_rx_channels(priv);
@@ -502,9 +497,11 @@ static int cpsw_resume_data_pass(struct net_device *ndev)
}
/* Resume transmit for every affected interface */
- for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++)
- if (slave->ndev && netif_running(slave->ndev))
- netif_tx_start_all_queues(slave->ndev);
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ ndev = cpsw->slaves[i].ndev;
+ if (ndev && netif_running(ndev))
+ netif_tx_start_all_queues(ndev);
+ }
return 0;
}
@@ -581,14 +578,26 @@ static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx,
return 0;
}
+static void cpsw_fail(struct cpsw_common *cpsw)
+{
+ struct net_device *ndev;
+ int i;
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ ndev = cpsw->slaves[i].ndev;
+ if (ndev)
+ dev_close(ndev);
+ }
+}
+
int cpsw_set_channels_common(struct net_device *ndev,
struct ethtool_channels *chs,
cpdma_handler_fn rx_handler)
{
struct cpsw_priv *priv = netdev_priv(ndev);
struct cpsw_common *cpsw = priv->cpsw;
- struct cpsw_slave *slave;
- int i, ret;
+ struct net_device *sl_ndev;
+ int i, new_pools, ret;
ret = cpsw_check_ch_settings(cpsw, chs);
if (ret < 0)
@@ -596,6 +605,8 @@ int cpsw_set_channels_common(struct net_device *ndev,
cpsw_suspend_data_pass(ndev);
+ new_pools = (chs->rx_count != cpsw->rx_ch_num) && cpsw->usage_count;
+
ret = cpsw_update_channels_res(priv, chs->rx_count, 1, rx_handler);
if (ret)
goto err;
@@ -604,35 +615,40 @@ int cpsw_set_channels_common(struct net_device *ndev,
if (ret)
goto err;
- for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
- if (!(slave->ndev && netif_running(slave->ndev)))
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ sl_ndev = cpsw->slaves[i].ndev;
+ if (!(sl_ndev && netif_running(sl_ndev)))
continue;
/* Inform stack about new count of queues */
- ret = netif_set_real_num_tx_queues(slave->ndev,
- cpsw->tx_ch_num);
+ ret = netif_set_real_num_tx_queues(sl_ndev, cpsw->tx_ch_num);
if (ret) {
dev_err(priv->dev, "cannot set real number of tx queues\n");
goto err;
}
- ret = netif_set_real_num_rx_queues(slave->ndev,
- cpsw->rx_ch_num);
+ ret = netif_set_real_num_rx_queues(sl_ndev, cpsw->rx_ch_num);
if (ret) {
dev_err(priv->dev, "cannot set real number of rx queues\n");
goto err;
}
}
- if (cpsw->usage_count)
- cpsw_split_res(cpsw);
+ cpsw_split_res(cpsw);
+
+ if (new_pools) {
+ cpsw_destroy_xdp_rxqs(cpsw);
+ ret = cpsw_create_xdp_rxqs(cpsw);
+ if (ret)
+ goto err;
+ }
ret = cpsw_resume_data_pass(ndev);
if (!ret)
return 0;
err:
dev_err(priv->dev, "cannot update channels number, closing device\n");
- dev_close(ndev);
+ cpsw_fail(cpsw);
return ret;
}
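
A condensed view of the ordering in the hunk above (illustrative only, not the actual code): new_pools has to be evaluated before cpsw_update_channels_res() runs, since that call appears to be what brings cpsw->rx_ch_num up to date, and only a real change of the rx channel count on a device that is in use forces the XDP rx queues to be re-registered.

	/* sampled before the resize: "rx count changes while the device is up" */
	new_pools = (chs->rx_count != cpsw->rx_ch_num) && cpsw->usage_count;

	ret = cpsw_update_channels_res(priv, chs->rx_count, 1, rx_handler);
	if (ret)
		goto err;

	if (new_pools) {
		cpsw_destroy_xdp_rxqs(cpsw);		/* rxqs sized for the old rx count */
		ret = cpsw_create_xdp_rxqs(cpsw);	/* re-register for the new one */
		if (ret)
			goto err;
	}
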
@@ -652,9 +668,8 @@ void cpsw_get_ringparam(struct net_device *ndev,
int cpsw_set_ringparam(struct net_device *ndev,
struct ethtool_ringparam *ering)
{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- int ret;
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ int descs_num, ret;
/* ignore ering->tx_pending - only rx_pending adjustment is supported */
@@ -663,22 +678,34 @@ int cpsw_set_ringparam(struct net_device *ndev,
ering->rx_pending > (cpsw->descs_pool_size - CPSW_MAX_QUEUES))
return -EINVAL;
- if (ering->rx_pending == cpdma_get_num_rx_descs(cpsw->dma))
+ descs_num = cpdma_get_num_rx_descs(cpsw->dma);
+ if (ering->rx_pending == descs_num)
return 0;
cpsw_suspend_data_pass(ndev);
- cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending);
+ ret = cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending);
+ if (ret) {
+ if (cpsw_resume_data_pass(ndev))
+ goto err;
+
+ return ret;
+ }
- if (cpsw->usage_count)
- cpdma_chan_split_pool(cpsw->dma);
+ if (cpsw->usage_count) {
+ cpsw_destroy_xdp_rxqs(cpsw);
+ ret = cpsw_create_xdp_rxqs(cpsw);
+ if (ret)
+ goto err;
+ }
ret = cpsw_resume_data_pass(ndev);
if (!ret)
return 0;
-
+err:
+ cpdma_set_num_rx_descs(cpsw->dma, descs_num);
dev_err(cpsw->dev, "cannot set ring params, closing device\n");
- dev_close(ndev);
+ cpsw_fail(cpsw);
return ret;
}
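
For reference, a minimal userspace sketch of how this path gets exercised: the classic SIOCETHTOOL ioctl with ETHTOOL_SRINGPARAM ends up in cpsw_set_ringparam() above, and only rx_pending is honoured, per the comment in the driver. "eth0" and the ring size of 256 are example values, not anything mandated by the driver.

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_ringparam ering = { .cmd = ETHTOOL_GRINGPARAM };
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ering;

	if (ioctl(fd, SIOCETHTOOL, &ifr))	/* read current ring sizes */
		return 1;

	ering.cmd = ETHTOOL_SRINGPARAM;
	ering.rx_pending = 256;			/* new rx descriptor count */

	if (ioctl(fd, SIOCETHTOOL, &ifr))	/* dispatched to cpsw_set_ringparam() */
		return 1;

	close(fd);
	return 0;
}
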