author		Alexei Starovoitov <ast@kernel.org>	2019-05-31 16:52:56 -0700
committer	Alexei Starovoitov <ast@kernel.org>	2019-05-31 16:52:57 -0700
commit		d168286d773ca7d5f9e8de8765216557839579d8 (patch)
tree		edd6ec707ebbf68a89fc1c3fc2b2d06364978ad3	/kernel/bpf/devmap.c
parent		576240cfaf206ea7d08ae7421088a788d861212d (diff)
parent		c85d69135a9175c50a823d04d62d932312d037b3 (diff)
Merge branch 'map-charge-cleanup'
Roman Gushchin says:

====================
During my work on memcg-based memory accounting for bpf maps I've done
some cleanups and refactorings of the existing memlock rlimit-based
code. It makes it more robust, unifies size to pages conversion, size
checks and corresponding error codes. Also it adds coverage for cgroup
local storage and socket local storage maps.

It looks like some preliminary work on the mm side might be required
to start working on the memcg-based accounting, so I'm sending these
patches as a separate patchset.
====================

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
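For readers skimming the series, here is a minimal sketch of the allocation
pattern the cleanup converges map types on. Everything except the
bpf_map_charge_init()/bpf_map_charge_finish() calls and the map.memory field
(both visible in the devmap hunks below) is an illustrative assumption:
example_map, its data field, and the cost formula are made up for the sketch.

#include <linux/bpf.h>
#include <linux/slab.h>

/* hypothetical map type, used only to illustrate the charge pattern */
struct example_map {
	struct bpf_map map;
	void *data;
};

static struct bpf_map *example_map_alloc(union bpf_attr *attr)
{
	struct example_map *emap;
	u64 cost;
	int err;

	emap = kzalloc(sizeof(*emap), GFP_USER);
	if (!emap)
		return ERR_PTR(-ENOMEM);

	/* one u64 byte count; page conversion, overflow check and the
	 * memlock rlimit check are left to bpf_map_charge_init() */
	cost = (u64) attr->max_entries * sizeof(void *);

	err = bpf_map_charge_init(&emap->map.memory, cost);
	if (err)
		goto free_map;

	err = -ENOMEM;
	emap->data = bpf_map_area_alloc(cost, NUMA_NO_NODE);
	if (!emap->data)
		goto free_charge;

	return &emap->map;

free_charge:
	/* undo the charge if a later allocation fails */
	bpf_map_charge_finish(&emap->map.memory);
free_map:
	kfree(emap);
	return ERR_PTR(err);
}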
Diffstat (limited to 'kernel/bpf/devmap.c')
-rw-r--r--	kernel/bpf/devmap.c	14	++++++--------
1 file changed, 6 insertions(+), 8 deletions(-)
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index 1e525d70f833..5ae7cce5ef16 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -108,13 +108,9 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
 	/* make sure page count doesn't overflow */
 	cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
 	cost += dev_map_bitmap_size(attr) * num_possible_cpus();
-	if (cost >= U32_MAX - PAGE_SIZE)
-		goto free_dtab;
-
-	dtab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
-	/* if map size is larger than memlock limit, reject it early */
-	err = bpf_map_precharge_memlock(dtab->map.pages);
+	/* if map size is larger than memlock limit, reject it */
+	err = bpf_map_charge_init(&dtab->map.memory, cost);
 	if (err)
 		goto free_dtab;
@@ -125,19 +121,21 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
 						__alignof__(unsigned long),
 						GFP_KERNEL | __GFP_NOWARN);
 	if (!dtab->flush_needed)
-		goto free_dtab;
+		goto free_charge;
 	dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
 					      sizeof(struct bpf_dtab_netdev *),
 					      dtab->map.numa_node);
 	if (!dtab->netdev_map)
-		goto free_dtab;
+		goto free_charge;
 	spin_lock(&dev_map_lock);
 	list_add_tail_rcu(&dtab->list, &dev_map_list);
 	spin_unlock(&dev_map_lock);
 	return &dtab->map;
+free_charge:
+	bpf_map_charge_finish(&dtab->map.memory);
 free_dtab:
 	free_percpu(dtab->flush_needed);
 	kfree(dtab);
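The hunks above only show the caller side of the new API. As a rough idea of
what the commit message means by unifying size-to-pages conversion, size
checks and error codes, the removed per-map checks are assumed to now live
behind bpf_map_charge_init(), roughly as below. This body is a guess
reconstructed from the description and the removed devmap lines, not the
diffed source; charge_user_memlock() is a hypothetical stand-in for the
actual rlimit accounting step, and the bpf_map_memory layout is assumed.

/* approximate sketch, not the real implementation */
int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size)
{
	u32 pages;
	int err;

	/* the overflow check each map type used to carry itself */
	if (size >= U32_MAX - PAGE_SIZE)
		return -E2BIG;	/* assumed unified error code */

	pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT;

	err = charge_user_memlock(pages);	/* hypothetical helper */
	if (err)
		return err;

	mem->pages = pages;	/* assumed field holding the charged pages */
	return 0;
}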