Diffstat (limited to 'kernel/bpf/local_storage.c')
-rw-r--r-- | kernel/bpf/local_storage.c | 61
1 file changed, 37 insertions(+), 24 deletions(-)
diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c
index 2d4f9ac12377..3969eb0382af 100644
--- a/kernel/bpf/local_storage.c
+++ b/kernel/bpf/local_storage.c
@@ -1,6 +1,7 @@
-//SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0
 #include <linux/bpf-cgroup.h>
 #include <linux/bpf.h>
+#include <linux/bpf_local_storage.h>
 #include <linux/btf.h>
 #include <linux/bug.h>
 #include <linux/filter.h>
@@ -8,8 +9,7 @@
 #include <linux/rbtree.h>
 #include <linux/slab.h>
 #include <uapi/linux/btf.h>
-
-DEFINE_PER_CPU(struct bpf_cgroup_storage*, bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
+#include <linux/btf_ids.h>
 
 #ifdef CONFIG_CGROUP_BPF
 
@@ -141,8 +141,8 @@ static void *cgroup_storage_lookup_elem(struct bpf_map *_map, void *key)
 	return &READ_ONCE(storage->buf)->data[0];
 }
 
-static int cgroup_storage_update_elem(struct bpf_map *map, void *key,
-				      void *value, u64 flags)
+static long cgroup_storage_update_elem(struct bpf_map *map, void *key,
+				       void *value, u64 flags)
 {
 	struct bpf_cgroup_storage *storage;
 	struct bpf_storage_buffer *new;
@@ -151,7 +151,7 @@ static int cgroup_storage_update_elem(struct bpf_map *map, void *key,
 		return -EINVAL;
 
 	if (unlikely((flags & BPF_F_LOCK) &&
-		     !map_value_has_spin_lock(map)))
+		     !btf_record_has_field(map->record, BPF_SPIN_LOCK)))
 		return -EINVAL;
 
 	storage = cgroup_storage_lookup((struct bpf_cgroup_storage_map *)map,
@@ -164,15 +164,14 @@ static int cgroup_storage_update_elem(struct bpf_map *map, void *key,
 		return 0;
 	}
 
-	new = bpf_map_kmalloc_node(map, sizeof(struct bpf_storage_buffer) +
-				   map->value_size,
-				   __GFP_ZERO | GFP_ATOMIC | __GFP_NOWARN,
+	new = bpf_map_kmalloc_node(map, struct_size(new, data, map->value_size),
+				   __GFP_ZERO | GFP_NOWAIT | __GFP_NOWARN,
 				   map->numa_node);
 	if (!new)
 		return -ENOMEM;
 
 	memcpy(&new->data[0], value, map->value_size);
-	check_and_init_map_lock(map, new->data);
+	check_and_init_map_value(map, new->data);
 
 	new = xchg(&storage->buf, new);
 	kfree_rcu(new, rcu);
@@ -285,9 +284,17 @@ enoent:
 
 static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
 {
+	__u32 max_value_size = BPF_LOCAL_STORAGE_MAX_VALUE_SIZE;
 	int numa_node = bpf_map_attr_numa_node(attr);
 	struct bpf_cgroup_storage_map *map;
 
+	/* percpu is bound by PCPU_MIN_UNIT_SIZE, non-percpu
+	 * is the same as other local storages.
+	 */
+	if (attr->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
+		max_value_size = min_t(__u32, max_value_size,
+				       PCPU_MIN_UNIT_SIZE);
+
 	if (attr->key_size != sizeof(struct bpf_cgroup_storage_key) &&
 	    attr->key_size != sizeof(__u64))
 		return ERR_PTR(-EINVAL);
@@ -295,7 +302,7 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
 	if (attr->value_size == 0)
 		return ERR_PTR(-EINVAL);
 
-	if (attr->value_size > PAGE_SIZE)
+	if (attr->value_size > max_value_size)
 		return ERR_PTR(-E2BIG);
 
 	if (attr->map_flags & ~LOCAL_STORAGE_CREATE_FLAG_MASK ||
@@ -306,8 +313,7 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
 		/* max_entries is not used and enforced to be 0 */
 		return ERR_PTR(-EINVAL);
 
-	map = kmalloc_node(sizeof(struct bpf_cgroup_storage_map),
-			   __GFP_ZERO | GFP_USER | __GFP_ACCOUNT, numa_node);
+	map = bpf_map_area_alloc(sizeof(struct bpf_cgroup_storage_map), numa_node);
 	if (!map)
 		return ERR_PTR(-ENOMEM);
 
@@ -327,22 +333,22 @@ static void cgroup_storage_map_free(struct bpf_map *_map)
 	struct list_head *storages = &map->list;
 	struct bpf_cgroup_storage *storage, *stmp;
 
-	mutex_lock(&cgroup_mutex);
+	cgroup_lock();
 	list_for_each_entry_safe(storage, stmp, storages, list_map) {
 		bpf_cgroup_storage_unlink(storage);
 		bpf_cgroup_storage_free(storage);
 	}
-	mutex_unlock(&cgroup_mutex);
+	cgroup_unlock();
 
 	WARN_ON(!RB_EMPTY_ROOT(&map->root));
 	WARN_ON(!list_empty(&map->list));
 
-	kfree(map);
+	bpf_map_area_free(map);
 }
 
-static int cgroup_storage_delete_elem(struct bpf_map *map, void *key)
+static long cgroup_storage_delete_elem(struct bpf_map *map, void *key)
 {
 	return -EINVAL;
 }
@@ -408,7 +414,7 @@ static int cgroup_storage_check_btf(const struct bpf_map *map,
 static void cgroup_storage_seq_show_elem(struct bpf_map *map, void *key,
 					 struct seq_file *m)
 {
-	enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
+	enum bpf_cgroup_storage_type stype;
 	struct bpf_cgroup_storage *storage;
 	int cpu;
 
@@ -425,7 +431,7 @@ static void cgroup_storage_seq_show_elem(struct bpf_map *map, void *key,
 		seq_puts(m, ": ");
 		btf_type_seq_show(map->btf, map->btf_value_type_id,
 				  &READ_ONCE(storage->buf)->data[0], m);
-		seq_puts(m, "\n");
+		seq_putc(m, '\n');
 	} else {
 		seq_puts(m, ": {\n");
 		for_each_possible_cpu(cpu) {
@@ -433,14 +439,21 @@ static void cgroup_storage_seq_show_elem(struct bpf_map *map, void *key,
 			btf_type_seq_show(map->btf, map->btf_value_type_id,
 					  per_cpu_ptr(storage->percpu_buf, cpu),
 					  m);
-			seq_puts(m, "\n");
+			seq_putc(m, '\n');
 		}
 		seq_puts(m, "}\n");
 	}
 	rcu_read_unlock();
 }
 
-static int cgroup_storage_map_btf_id;
+static u64 cgroup_storage_map_usage(const struct bpf_map *map)
+{
+	/* Currently the dynamically allocated elements are not counted. */
+	return sizeof(struct bpf_cgroup_storage_map);
+}
+
+BTF_ID_LIST_SINGLE(cgroup_storage_map_btf_ids, struct,
+		   bpf_cgroup_storage_map)
 const struct bpf_map_ops cgroup_storage_map_ops = {
 	.map_alloc = cgroup_storage_map_alloc,
 	.map_free = cgroup_storage_map_free,
@@ -450,8 +463,8 @@ const struct bpf_map_ops cgroup_storage_map_ops = {
 	.map_delete_elem = cgroup_storage_delete_elem,
 	.map_check_btf = cgroup_storage_check_btf,
 	.map_seq_show_elem = cgroup_storage_seq_show_elem,
-	.map_btf_name = "bpf_cgroup_storage_map",
-	.map_btf_id = &cgroup_storage_map_btf_id,
+	.map_mem_usage = cgroup_storage_map_usage,
+	.map_btf_id = &cgroup_storage_map_btf_ids[0],
};
 
 int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *_map)
@@ -508,7 +521,7 @@ struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
 					map->numa_node);
 		if (!storage->buf)
 			goto enomem;
-		check_and_init_map_lock(map, storage->buf->data);
+		check_and_init_map_value(map, storage->buf->data);
 	} else {
 		storage->percpu_buf = bpf_map_alloc_percpu(map, size, 8, gfp);
 		if (!storage->percpu_buf)
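Note on the BPF_F_LOCK hunk in cgroup_storage_update_elem() above: the old map_value_has_spin_lock() test is replaced by the generic btf_record_has_field(map->record, BPF_SPIN_LOCK), where the record is built from the map's BTF value type. A hedged BPF-program-side sketch of a cgroup storage value that would satisfy that check (map name, value layout, and the counter logic are illustrative, not part of this diff):

/* Illustrative only: a BTF-defined cgroup storage map whose value
 * embeds struct bpf_spin_lock, so map->record carries BPF_SPIN_LOCK
 * and BPF_F_LOCK updates pass the check above.
 * Build with: clang -O2 -target bpf -c prog.c -o prog.o */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct counter_val {
	struct bpf_spin_lock lock;	/* found by btf_record_has_field() */
	__u64 packets;
};

struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
	__type(key, struct bpf_cgroup_storage_key);
	__type(value, struct counter_val);
} pkt_cnt SEC(".maps");

SEC("cgroup_skb/egress")
int count_egress(struct __sk_buff *skb)
{
	struct counter_val *val = bpf_get_local_storage(&pkt_cnt, 0);

	bpf_spin_lock(&val->lock);
	val->packets++;
	bpf_spin_unlock(&val->lock);
	return 1;	/* allow the packet */
}

char _license[] SEC("license") = "GPL";

With a value layout like this, userspace can pass BPF_F_LOCK to bpf_map_update_elem() and the kernel takes the embedded lock around the copy; without a bpf_spin_lock member, the unlikely() branch above returns -EINVAL.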
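The allocation in the same function switches from the open-coded sizeof(struct bpf_storage_buffer) + map->value_size to struct_size(new, data, map->value_size), which saturates instead of wrapping if the arithmetic overflows. A minimal userspace sketch of the pattern, with storage_buffer_size() as a hypothetical stand-in for the kernel's struct_size() macro from <linux/overflow.h>:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct storage_buffer {
	size_t len;
	char data[];	/* flexible array member, as in bpf_storage_buffer */
};

/* Hypothetical stand-in for struct_size(): header size plus n array
 * elements, saturating to SIZE_MAX on overflow so the allocation
 * below fails outright instead of being silently undersized. */
static size_t storage_buffer_size(size_t n)
{
	size_t bytes;

	if (__builtin_mul_overflow(n, sizeof(char), &bytes) ||
	    __builtin_add_overflow(bytes, sizeof(struct storage_buffer), &bytes))
		return SIZE_MAX;
	return bytes;
}

int main(void)
{
	struct storage_buffer *buf = calloc(1, storage_buffer_size(64));

	if (!buf)
		return 1;
	buf->len = 64;
	memset(buf->data, 0xff, buf->len);
	printf("allocated %zu bytes of payload\n", buf->len);
	free(buf);
	return 0;
}

The point of saturating is that a huge count makes the allocator return NULL, rather than wrapping to a small size and leaving the flexible array shorter than the code believes.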
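Finally, cgroup_storage_map_alloc() now bounds value_size by BPF_LOCAL_STORAGE_MAX_VALUE_SIZE rather than PAGE_SIZE, with the percpu flavor further clamped to PCPU_MIN_UNIT_SIZE. A hedged userspace probe of the percpu clamp (assumes libbpf >= 0.7 for bpf_map_create() and a kernel carrying this change; PCPU_MIN_UNIT_SIZE is typically 32KB):

/* Illustrative probe, not part of the diff: creating a percpu cgroup
 * storage map with a 64KB value should now fail with E2BIG at
 * map-create time. Link with -lbpf. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <bpf/bpf.h>

int main(void)
{
	/* max_entries must be 0 for cgroup storage maps; an __u64
	 * (cgroup id) key is one of the two accepted key sizes. */
	int fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, NULL,
				sizeof(__u64), 64 * 1024, 0, NULL);

	if (fd < 0)
		printf("create failed: %s (E2BIG expected)\n",
		       strerror(errno));
	else
		close(fd);
	return 0;
}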