path: root/kernel/bpf
author	Toke Høiland-Jørgensen <toke@redhat.com>	2019-11-21 14:36:12 +0100
committer	Alexei Starovoitov <ast@kernel.org>	2019-11-24 16:58:46 -0800
commit	071cdecec57fb5d5df78e6a12114ad7bccea5b0e (patch)
tree	6280415744ee6ce2e137d8d90262b20de7f6184f /kernel/bpf
parent	1f60750401c7672419e1b606e2b73727c1f8626c (diff)
xdp: Fix cleanup on map free for devmap_hash map type
Tetsuo pointed out that it was not only the device unregister hook that was
broken for devmap_hash types, it was also cleanup on map free. So better fix
this as well.

While we're at it, there's no reason to allocate the netdev_map array for
DEVMAP_HASH, so skip that and adjust the cost accordingly.

Fixes: 6f9d451ab1a3 ("xdp: Add devmap_hash map type for looking up devices by hashed index")
Reported-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Link: https://lore.kernel.org/bpf/20191121133612.430414-1-toke@redhat.com
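The map-free path touched by this patch runs once the last reference to a
devmap_hash map is dropped. A minimal userspace sketch that exercises it
(create, populate, close), assuming the bpf_create_map() and
bpf_map_update_elem() wrappers from libbpf; the interface name "eth0" and the
map size of 64 entries are illustrative only:

	/* Sketch only: create a BPF_MAP_TYPE_DEVMAP_HASH map, add one entry
	 * keyed by ifindex, then close it; dropping the last reference
	 * eventually reaches dev_map_free(), the path fixed here. */
	#include <bpf/bpf.h>
	#include <linux/bpf.h>
	#include <net/if.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		__u32 key, val;
		int map_fd;

		/* key and value are both a 4-byte ifindex for devmap/devmap_hash */
		map_fd = bpf_create_map(BPF_MAP_TYPE_DEVMAP_HASH, sizeof(__u32),
					sizeof(__u32), 64, 0);
		if (map_fd < 0) {
			perror("bpf_create_map");
			return 1;
		}

		/* the key is an arbitrary u32; reusing the ifindex keeps it simple */
		key = val = if_nametoindex("eth0");
		if (bpf_map_update_elem(map_fd, &key, &val, BPF_ANY))
			perror("bpf_map_update_elem");

		close(map_fd);	/* last reference gone -> map is freed */
		return 0;
	}

Because entries are keyed by an arbitrary u32 and stored in hash buckets
rather than indexed directly, the flat netdev_map array is only needed for the
plain DEVMAP type, which is what the patch below exploits.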
Diffstat (limited to 'kernel/bpf')
-rw-r--r--	kernel/bpf/devmap.c | 74
1 file changed, 46 insertions(+), 28 deletions(-)
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index 3867864cdc2f..3d3d61b5985b 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -74,7 +74,7 @@ struct bpf_dtab_netdev {
 
 struct bpf_dtab {
 	struct bpf_map map;
-	struct bpf_dtab_netdev **netdev_map;
+	struct bpf_dtab_netdev **netdev_map; /* DEVMAP type only */
 	struct list_head __percpu *flush_list;
 	struct list_head list;
 
@@ -101,6 +101,12 @@ static struct hlist_head *dev_map_create_hash(unsigned int entries)
 	return hash;
 }
 
+static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
+						    int idx)
+{
+	return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
+}
+
 static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
 {
 	int err, cpu;
@@ -120,8 +126,7 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
 	bpf_map_init_from_attr(&dtab->map, attr);
 
 	/* make sure page count doesn't overflow */
-	cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
-	cost += sizeof(struct list_head) * num_possible_cpus();
+	cost = (u64) sizeof(struct list_head) * num_possible_cpus();
 
 	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
 		dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);
@@ -129,6 +134,8 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
 		if (!dtab->n_buckets) /* Overflow check */
 			return -EINVAL;
 		cost += (u64) sizeof(struct hlist_head) * dtab->n_buckets;
+	} else {
+		cost += (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
 	}
 
 	/* if map size is larger than memlock limit, reject it */
@@ -143,24 +150,22 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
 	for_each_possible_cpu(cpu)
 		INIT_LIST_HEAD(per_cpu_ptr(dtab->flush_list, cpu));
 
-	dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
-					      sizeof(struct bpf_dtab_netdev *),
-					      dtab->map.numa_node);
-	if (!dtab->netdev_map)
-		goto free_percpu;
-
 	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
 		dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets);
 		if (!dtab->dev_index_head)
-			goto free_map_area;
+			goto free_percpu;
 
 		spin_lock_init(&dtab->index_lock);
+	} else {
+		dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
+						      sizeof(struct bpf_dtab_netdev *),
+						      dtab->map.numa_node);
+		if (!dtab->netdev_map)
+			goto free_percpu;
 	}
 
 	return 0;
 
-free_map_area:
-	bpf_map_area_free(dtab->netdev_map);
 free_percpu:
 	free_percpu(dtab->flush_list);
 free_charge:
@@ -228,21 +233,40 @@ static void dev_map_free(struct bpf_map *map)
 		cond_resched();
 	}
 
-	for (i = 0; i < dtab->map.max_entries; i++) {
-		struct bpf_dtab_netdev *dev;
+	if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
+		for (i = 0; i < dtab->n_buckets; i++) {
+			struct bpf_dtab_netdev *dev;
+			struct hlist_head *head;
+			struct hlist_node *next;
 
-		dev = dtab->netdev_map[i];
-		if (!dev)
-			continue;
+			head = dev_map_index_hash(dtab, i);
 
-		free_percpu(dev->bulkq);
-		dev_put(dev->dev);
-		kfree(dev);
+			hlist_for_each_entry_safe(dev, next, head, index_hlist) {
+				hlist_del_rcu(&dev->index_hlist);
+				free_percpu(dev->bulkq);
+				dev_put(dev->dev);
+				kfree(dev);
+			}
+		}
+
+		kfree(dtab->dev_index_head);
+	} else {
+		for (i = 0; i < dtab->map.max_entries; i++) {
+			struct bpf_dtab_netdev *dev;
+
+			dev = dtab->netdev_map[i];
+			if (!dev)
+				continue;
+
+			free_percpu(dev->bulkq);
+			dev_put(dev->dev);
+			kfree(dev);
+		}
+
+		bpf_map_area_free(dtab->netdev_map);
 	}
 
 	free_percpu(dtab->flush_list);
-	bpf_map_area_free(dtab->netdev_map);
-	kfree(dtab->dev_index_head);
 	kfree(dtab);
 }
 
@@ -263,12 +287,6 @@ static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
 	return 0;
 }
 
-static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
-						    int idx)
-{
-	return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
-}
-
 struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
 {
 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
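
The hash lookup shown in the last hunk is what an XDP program exercises when it
redirects through a devmap_hash map. A minimal sketch of such a program,
assuming libbpf's BTF-style map definitions; the map name tx_devs_hash and the
use of the ingress ifindex as the lookup key are illustrative only:

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	struct {
		__uint(type, BPF_MAP_TYPE_DEVMAP_HASH);
		__uint(key_size, sizeof(__u32));
		__uint(value_size, sizeof(__u32));
		__uint(max_entries, 64);
	} tx_devs_hash SEC(".maps");

	SEC("xdp")
	int xdp_redirect_hash(struct xdp_md *ctx)
	{
		/* the helper looks the key up in tx_devs_hash (reaching
		 * __dev_map_hash_lookup_elem()) and returns XDP_REDIRECT
		 * when an entry exists; flags 0 aborts on a miss */
		return bpf_redirect_map(&tx_devs_hash, ctx->ingress_ifindex, 0);
	}

	char _license[] SEC("license") = "GPL";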