author    Alexei Starovoitov <ast@kernel.org>    2019-05-31 16:52:56 -0700
committer Alexei Starovoitov <ast@kernel.org>    2019-05-31 16:52:57 -0700
commit    d168286d773ca7d5f9e8de8765216557839579d8 (patch)
tree      edd6ec707ebbf68a89fc1c3fc2b2d06364978ad3 /kernel/bpf/reuseport_array.c
parent    576240cfaf206ea7d08ae7421088a788d861212d (diff)
parent    c85d69135a9175c50a823d04d62d932312d037b3 (diff)
Merge branch 'map-charge-cleanup'
Roman Gushchin says:

====================
During my work on memcg-based memory accounting for bpf maps I've done some cleanups and refactoring of the existing memlock rlimit-based code. It makes the code more robust, and unifies the size-to-pages conversion, the size checks and the corresponding error codes. It also adds coverage for the cgroup local storage and socket local storage maps.

It looks like some preliminary work on the mm side might be required to start working on the memcg-based accounting, so I'm sending these patches as a separate patchset.
====================

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
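The conversion this series applies to each map type follows one small pattern: charge the memlock rlimit before allocating, uncharge on allocation failure, and transfer the charge to the map on success. Below is a minimal sketch of that pattern for a hypothetical map type; struct example_map and example_map_alloc are illustrative names only, while the bpf_map_charge_* helpers are the ones introduced by this series. It is kernel code and compiles only in-tree against a post-series kernel.

/* Sketch only: a hypothetical map type converted to the new charge API. */
#include <linux/bpf.h>
#include <linux/err.h>

struct example_map {
	struct bpf_map map;
	/* map-type-specific members would follow */
};

static struct bpf_map *example_map_alloc(union bpf_attr *attr)
{
	int err, numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_map_memory mem;
	struct example_map *emap;
	u64 size;

	size = sizeof(*emap) + (u64)attr->max_entries * attr->value_size;

	/* Charge the memlock rlimit up front; the overflow check and the
	 * bytes-to-pages conversion now live inside the helper.
	 */
	err = bpf_map_charge_init(&mem, size);
	if (err)
		return ERR_PTR(err);

	emap = bpf_map_area_alloc(size, numa_node);
	if (!emap) {
		/* Allocation failed: hand the charge back. */
		bpf_map_charge_finish(&mem);
		return ERR_PTR(-ENOMEM);
	}

	bpf_map_init_from_attr(&emap->map, attr);
	/* Transfer ownership of the charge to the map itself; it is
	 * released when the map is destroyed.
	 */
	bpf_map_charge_move(&emap->map.memory, &mem);
	return &emap->map;
}

The reuseport_array conversion in the diff below is an instance of exactly this pattern.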
Diffstat (limited to 'kernel/bpf/reuseport_array.c')
-rw-r--r--   kernel/bpf/reuseport_array.c   17

1 file changed, 7 insertions(+), 10 deletions(-)
diff --git a/kernel/bpf/reuseport_array.c b/kernel/bpf/reuseport_array.c
index 18e225de80ff..50c083ba978c 100644
--- a/kernel/bpf/reuseport_array.c
+++ b/kernel/bpf/reuseport_array.c
@@ -151,7 +151,8 @@ static struct bpf_map *reuseport_array_alloc(union bpf_attr *attr)
 {
 	int err, numa_node = bpf_map_attr_numa_node(attr);
 	struct reuseport_array *array;
-	u64 cost, array_size;
+	struct bpf_map_memory mem;
+	u64 array_size;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return ERR_PTR(-EPERM);
@@ -159,24 +160,20 @@ static struct bpf_map *reuseport_array_alloc(union bpf_attr *attr)
 	array_size = sizeof(*array);
 	array_size += (u64)attr->max_entries * sizeof(struct sock *);
 
-	/* make sure there is no u32 overflow later in round_up() */
-	cost = array_size;
-	if (cost >= U32_MAX - PAGE_SIZE)
-		return ERR_PTR(-ENOMEM);
-	cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
-
-	err = bpf_map_precharge_memlock(cost);
+	err = bpf_map_charge_init(&mem, array_size);
 	if (err)
 		return ERR_PTR(err);
 
 	/* allocate all map elements and zero-initialize them */
 	array = bpf_map_area_alloc(array_size, numa_node);
-	if (!array)
+	if (!array) {
+		bpf_map_charge_finish(&mem);
 		return ERR_PTR(-ENOMEM);
+	}
 
 	/* copy mandatory map attributes */
 	bpf_map_init_from_attr(&array->map, attr);
-	array->map.pages = cost;
+	bpf_map_charge_move(&array->map.memory, &mem);
 
 	return &array->map;
 }
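A note on the error path added above: the old bpf_map_precharge_memlock() only verified that the rlimit would not be exceeded, with the actual charge applied later from the page count stashed in map->pages. With bpf_map_charge_init() the charge is taken immediately and recorded in a stack-local struct bpf_map_memory, which is why a failed bpf_map_area_alloc() must now return it with bpf_map_charge_finish(), while the success path transfers ownership to the map with bpf_map_charge_move().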