summaryrefslogtreecommitdiff
path: root/net/core/net-sysfs.c
diff options
context:
space:
mode:
authorAntoine Tenart <atenart@kernel.org>2021-03-18 19:37:44 +0100
committerDavid S. Miller <davem@davemloft.net>2021-03-18 14:56:22 -0700
commit5478fcd0f48322e04ae6c173ad3a1959e066dc83 (patch)
tree3228f7adfc6ee6a37638d546d563a628a3c7ec2f /net/core/net-sysfs.c
parent255c04a87f4381849fce9ed81e5efabf78a71a30 (diff)
net: embed nr_ids in the xps maps
Embed nr_ids (the number of CPUs for the xps cpus map, and the number of rxqs for the xps rxqs map) in dev_maps. That will help not accessing out-of-bounds memory if those values change after dev_maps was allocated. Suggested-by: Alexander Duyck <alexander.duyck@gmail.com> Signed-off-by: Antoine Tenart <atenart@kernel.org> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core/net-sysfs.c')
-rw-r--r--net/core/net-sysfs.c38
1 file changed, 22 insertions, 16 deletions
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 1364d0f39cb0..bb08bdc88fa9 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -1366,9 +1366,9 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue,
{
struct net_device *dev = queue->dev;
struct xps_dev_maps *dev_maps;
+ unsigned int index, nr_ids;
int j, len, ret, tc = 0;
unsigned long *mask;
- unsigned int index;
if (!netif_is_multiqueue(dev))
return -ENOENT;
@@ -1387,19 +1387,20 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue,
goto err_rtnl_unlock;
}
- mask = bitmap_zalloc(nr_cpu_ids, GFP_KERNEL);
+ rcu_read_lock();
+ dev_maps = rcu_dereference(dev->xps_cpus_map);
+ nr_ids = dev_maps ? dev_maps->nr_ids : nr_cpu_ids;
+
+ mask = bitmap_zalloc(nr_ids, GFP_KERNEL);
if (!mask) {
ret = -ENOMEM;
- goto err_rtnl_unlock;
+ goto err_rcu_unlock;
}
- rcu_read_lock();
- dev_maps = rcu_dereference(dev->xps_cpus_map);
if (!dev_maps || tc >= dev_maps->num_tc)
goto out_no_maps;
- for (j = -1; j = netif_attrmask_next(j, NULL, nr_cpu_ids),
- j < nr_cpu_ids;) {
+ for (j = -1; j = netif_attrmask_next(j, NULL, nr_ids), j < nr_ids;) {
int i, tci = j * dev_maps->num_tc + tc;
struct xps_map *map;
@@ -1419,10 +1420,12 @@ out_no_maps:
rtnl_unlock();
- len = bitmap_print_to_pagebuf(false, buf, mask, nr_cpu_ids);
+ len = bitmap_print_to_pagebuf(false, buf, mask, nr_ids);
bitmap_free(mask);
return len < PAGE_SIZE ? len : -EINVAL;
+err_rcu_unlock:
+ rcu_read_unlock();
err_rtnl_unlock:
rtnl_unlock();
return ret;
@@ -1473,9 +1476,9 @@ static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf)
{
struct net_device *dev = queue->dev;
struct xps_dev_maps *dev_maps;
+ unsigned int index, nr_ids;
int j, len, ret, tc = 0;
unsigned long *mask;
- unsigned int index;
index = get_netdev_queue_index(queue);
@@ -1488,19 +1491,20 @@ static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf)
goto err_rtnl_unlock;
}
- mask = bitmap_zalloc(dev->num_rx_queues, GFP_KERNEL);
+ rcu_read_lock();
+ dev_maps = rcu_dereference(dev->xps_rxqs_map);
+ nr_ids = dev_maps ? dev_maps->nr_ids : dev->num_rx_queues;
+
+ mask = bitmap_zalloc(nr_ids, GFP_KERNEL);
if (!mask) {
ret = -ENOMEM;
- goto err_rtnl_unlock;
+ goto err_rcu_unlock;
}
- rcu_read_lock();
- dev_maps = rcu_dereference(dev->xps_rxqs_map);
if (!dev_maps || tc >= dev_maps->num_tc)
goto out_no_maps;
- for (j = -1; j = netif_attrmask_next(j, NULL, dev->num_rx_queues),
- j < dev->num_rx_queues;) {
+ for (j = -1; j = netif_attrmask_next(j, NULL, nr_ids), j < nr_ids;) {
int i, tci = j * dev_maps->num_tc + tc;
struct xps_map *map;
@@ -1520,11 +1524,13 @@ out_no_maps:
rtnl_unlock();
- len = bitmap_print_to_pagebuf(false, buf, mask, dev->num_rx_queues);
+ len = bitmap_print_to_pagebuf(false, buf, mask, nr_ids);
bitmap_free(mask);
return len < PAGE_SIZE ? len : -EINVAL;
+err_rcu_unlock:
+ rcu_read_unlock();
err_rtnl_unlock:
rtnl_unlock();
return ret;