Diffstat (limited to 'kernel/bpf/map_in_map.c')
| -rw-r--r-- | kernel/bpf/map_in_map.c | 59 |
1 files changed, 26 insertions, 33 deletions
diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c
index cd5eafaba97e..645bd30bc9a9 100644
--- a/kernel/bpf/map_in_map.c
+++ b/kernel/bpf/map_in_map.c
@@ -11,35 +11,27 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
 {
        struct bpf_map *inner_map, *inner_map_meta;
        u32 inner_map_meta_size;
-       struct fd f;
-       int ret;
+       CLASS(fd, f)(inner_map_ufd);
 
-       f = fdget(inner_map_ufd);
        inner_map = __bpf_map_get(f);
        if (IS_ERR(inner_map))
                return inner_map;
 
        /* Does not support >1 level map-in-map */
-       if (inner_map->inner_map_meta) {
-               ret = -EINVAL;
-               goto put;
-       }
+       if (inner_map->inner_map_meta)
+               return ERR_PTR(-EINVAL);
 
-       if (!inner_map->ops->map_meta_equal) {
-               ret = -ENOTSUPP;
-               goto put;
-       }
+       if (!inner_map->ops->map_meta_equal)
+               return ERR_PTR(-ENOTSUPP);
 
        inner_map_meta_size = sizeof(*inner_map_meta);
        /* In some cases verifier needs to access beyond just base map. */
-       if (inner_map->ops == &array_map_ops)
+       if (inner_map->ops == &array_map_ops || inner_map->ops == &percpu_array_map_ops)
                inner_map_meta_size = sizeof(struct bpf_array);
 
        inner_map_meta = kzalloc(inner_map_meta_size, GFP_USER);
-       if (!inner_map_meta) {
-               ret = -ENOMEM;
-               goto put;
-       }
+       if (!inner_map_meta)
+               return ERR_PTR(-ENOMEM);
 
        inner_map_meta->map_type = inner_map->map_type;
        inner_map_meta->key_size = inner_map->key_size;
@@ -53,8 +45,9 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
         * invalid/empty/valid, but ERR_PTR in case of errors. During
         * equality NULL or IS_ERR is equivalent.
         */
-               ret = PTR_ERR(inner_map_meta->record);
-               goto free;
+               struct bpf_map *ret = ERR_CAST(inner_map_meta->record);
+               kfree(inner_map_meta);
+               return ret;
        }
        /* Note: We must use the same BTF, as we also used btf_record_dup above
         * which relies on BTF being same for both maps, as some members like
@@ -68,7 +61,7 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
 
        /* Misc members not needed in bpf_map_meta_equal() check. */
        inner_map_meta->ops = inner_map->ops;
-       if (inner_map->ops == &array_map_ops) {
+       if (inner_map->ops == &array_map_ops || inner_map->ops == &percpu_array_map_ops) {
                struct bpf_array *inner_array_meta =
                        container_of(inner_map_meta, struct bpf_array, map);
                struct bpf_array *inner_array = container_of(inner_map, struct bpf_array, map);
@@ -77,14 +70,7 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
                inner_array_meta->elem_size = inner_array->elem_size;
                inner_map_meta->bypass_spec_v1 = inner_map->bypass_spec_v1;
        }
-
-       fdput(f);
        return inner_map_meta;
-free:
-       kfree(inner_map_meta);
-put:
-       fdput(f);
-       return ERR_PTR(ret);
 }
 
 void bpf_map_meta_free(struct bpf_map *map_meta)
@@ -110,9 +96,8 @@ void *bpf_map_fd_get_ptr(struct bpf_map *map,
                         int ufd)
 {
        struct bpf_map *inner_map, *inner_map_meta;
-       struct fd f;
+       CLASS(fd, f)(ufd);
 
-       f = fdget(ufd);
        inner_map = __bpf_map_get(f);
        if (IS_ERR(inner_map))
                return inner_map;
@@ -123,16 +108,24 @@ void *bpf_map_fd_get_ptr(struct bpf_map *map,
        else
                inner_map = ERR_PTR(-EINVAL);
 
-       fdput(f);
        return inner_map;
 }
 
-void bpf_map_fd_put_ptr(void *ptr)
+void bpf_map_fd_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
 {
-       /* ptr->ops->map_free() has to go through one
-        * rcu grace period by itself.
+       struct bpf_map *inner_map = ptr;
+
+       /* Defer the freeing of inner map according to the sleepable attribute
+        * of bpf program which owns the outer map, so unnecessary waiting for
+        * RCU tasks trace grace period can be avoided.
         */
-       bpf_map_put(ptr);
+       if (need_defer) {
+               if (atomic64_read(&map->sleepable_refcnt))
+                       WRITE_ONCE(inner_map->free_after_mult_rcu_gp, true);
+               else
+                       WRITE_ONCE(inner_map->free_after_rcu_gp, true);
+       }
+       bpf_map_put(inner_map);
 }
 
 u32 bpf_map_fd_sys_lookup_elem(void *ptr)
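
The bulk of the diff converts both bpf_map_meta_alloc() and bpf_map_fd_get_ptr() from manual fdget()/fdput() pairs with goto-based unwinding to the scope-based CLASS(fd, ...) guard from the kernel's cleanup helpers, which runs fdput() automatically when f leaves scope; that is why the error paths can now return ERR_PTR(...) directly instead of jumping to the free/put labels. A minimal sketch of the pattern, assuming a hypothetical helper example_get_inner() modelled on bpf_map_fd_get_ptr() (not code from this patch):

static struct bpf_map *example_get_inner(int ufd)
{
        struct bpf_map *inner_map;
        CLASS(fd, f)(ufd);              /* fdput(f) runs automatically on every return path */

        inner_map = __bpf_map_get(f);
        if (IS_ERR(inner_map))
                return inner_map;       /* early return: no goto label or explicit fdput() */

        bpf_map_inc(inner_map);
        return inner_map;
}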

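The other functional change is the new need_defer argument to bpf_map_fd_put_ptr(): when an inner map is removed from an outer map that running programs may still traverse, its final free is deferred behind an RCU grace period, and, when the outer map is also held by sleepable programs (sleepable_refcnt != 0), behind an RCU tasks trace grace period as well. The sketch below only illustrates how such flags are typically honoured on the release side; the real consumer is bpf_map_put() in kernel/bpf/syscall.c and differs in detail, and example_free_cb()/example_release() are hypothetical names (it is assumed here that struct bpf_map embeds an rcu_head, which it does for this deferral):

static void example_free_cb(struct rcu_head *rcu)
{
        struct bpf_map *map = container_of(rcu, struct bpf_map, rcu);

        map->ops->map_free(map);
}

static void example_release(struct bpf_map *map)
{
        if (READ_ONCE(map->free_after_mult_rcu_gp))
                /* sleepable outer map: wait for an RCU tasks trace grace period
                 * (the real code chains a regular RCU grace period after it)
                 */
                call_rcu_tasks_trace(&map->rcu, example_free_cb);
        else if (READ_ONCE(map->free_after_rcu_gp))
                /* non-sleepable outer map: one regular RCU grace period */
                call_rcu(&map->rcu, example_free_cb);
        else
                example_free_cb(&map->rcu);     /* no deferral requested */
}

Sleepable programs run under RCU tasks trace, so their inner maps must outlive that grace period; non-sleepable ones only need regular RCU, which is the unnecessary waiting the new comment says can be avoided.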