Diffstat (limited to 'kernel/bpf')
-rw-r--r--   kernel/bpf/memalloc.c | 44
-rw-r--r--   kernel/bpf/mprog.c    | 13
-rw-r--r--   kernel/bpf/syscall.c  | 21
-rw-r--r--   kernel/bpf/tcx.c      |  8
-rw-r--r--   kernel/bpf/verifier.c | 14
5 files changed, 50 insertions(+), 50 deletions(-)
diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
index cf1941516643..d93ddac283d4 100644
--- a/kernel/bpf/memalloc.c
+++ b/kernel/bpf/memalloc.c
@@ -965,37 +965,31 @@ void notrace *bpf_mem_cache_alloc_flags(struct bpf_mem_alloc *ma, gfp_t flags)
 	return !ret ? NULL : ret + LLIST_NODE_SZ;
 }
 
-/* Most of the logic is taken from setup_kmalloc_cache_index_table() */
 static __init int bpf_mem_cache_adjust_size(void)
 {
-	unsigned int size, index;
+	unsigned int size;
 
-	/* Normally KMALLOC_MIN_SIZE is 8-bytes, but it can be
-	 * up-to 256-bytes.
+	/* Adjusting the indexes in size_index() according to the object_size
+	 * of underlying slab cache, so bpf_mem_alloc() will select a
+	 * bpf_mem_cache with unit_size equal to the object_size of
+	 * the underlying slab cache.
+	 *
+	 * The maximal value of KMALLOC_MIN_SIZE and __kmalloc_minalign() is
+	 * 256-bytes, so only do adjustment for [8-bytes, 192-bytes].
 	 */
-	size = KMALLOC_MIN_SIZE;
-	if (size <= 192)
-		index = size_index[(size - 1) / 8];
-	else
-		index = fls(size - 1) - 1;
-	for (size = 8; size < KMALLOC_MIN_SIZE && size <= 192; size += 8)
-		size_index[(size - 1) / 8] = index;
+	for (size = 192; size >= 8; size -= 8) {
+		unsigned int kmalloc_size, index;
 
-	/* The minimal alignment is 64-bytes, so disable 96-bytes cache and
-	 * use 128-bytes cache instead.
-	 */
-	if (KMALLOC_MIN_SIZE >= 64) {
-		index = size_index[(128 - 1) / 8];
-		for (size = 64 + 8; size <= 96; size += 8)
-			size_index[(size - 1) / 8] = index;
-	}
+		kmalloc_size = kmalloc_size_roundup(size);
+		if (kmalloc_size == size)
+			continue;
 
-	/* The minimal alignment is 128-bytes, so disable 192-bytes cache and
-	 * use 256-bytes cache instead.
-	 */
-	if (KMALLOC_MIN_SIZE >= 128) {
-		index = fls(256 - 1) - 1;
-		for (size = 128 + 8; size <= 192; size += 8)
+		if (kmalloc_size <= 192)
+			index = size_index[(kmalloc_size - 1) / 8];
+		else
+			index = fls(kmalloc_size - 1) - 1;
+		/* Only overwrite if necessary */
+		if (size_index[(size - 1) / 8] != index)
 			size_index[(size - 1) / 8] = index;
 	}
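
The new loop replaces three hand-coded special cases with one rule: round each request up to what the slab allocator would actually hand out, then reuse the index of that rounded size. The userspace sketch below replays the loop. The cache list, the initial size_index[] values and kmalloc_size_roundup() are illustrative stand-ins (hypothetically assuming a 64-byte minimum alignment, so objects pad to the next power of two >= 64); only the loop body mirrors the patch.

#include <stdio.h>

static const unsigned int cache_size[] = { 8, 16, 32, 64, 96, 128, 192, 256 };

/* Toy table: each 8-byte bucket starts at the smallest fitting cache. */
static unsigned char size_index[24] = {
	0, 1, 2, 2, 3, 3, 3, 3,		/* 8 .. 64	*/
	4, 4, 4, 4,			/* 72 .. 96	*/
	5, 5, 5, 5,			/* 104 .. 128	*/
	6, 6, 6, 6, 6, 6, 6, 6,		/* 136 .. 192	*/
};

/* Userspace fls(): index of the highest set bit, fls(1) == 1. */
static unsigned int fls(unsigned int x)
{
	unsigned int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

/* Stand-in for kmalloc_size_roundup(): with an assumed 64-byte minimum
 * alignment every object pads to the next power of two >= 64.
 */
static unsigned int kmalloc_size_roundup(unsigned int size)
{
	unsigned int n = 64;

	while (n < size)
		n <<= 1;
	return n;
}

int main(void)
{
	unsigned int size;

	/* Walk downwards so the slot a rounded-up size lands in has
	 * already been adjusted before any smaller size consults it.
	 */
	for (size = 192; size >= 8; size -= 8) {
		unsigned int kmalloc_size, index;

		kmalloc_size = kmalloc_size_roundup(size);
		if (kmalloc_size == size)
			continue;

		if (kmalloc_size <= 192)
			index = size_index[(kmalloc_size - 1) / 8];
		else
			index = fls(kmalloc_size - 1) - 1;
		if (size_index[(size - 1) / 8] != index)
			size_index[(size - 1) / 8] = index;
	}

	for (size = 8; size <= 192; size += 8)
		printf("request %3u -> cache %3u\n", size,
		       cache_size[size_index[(size - 1) / 8]]);
	return 0;
}

Under these toy assumptions the printout shows the same collapsing the removed branches implemented by hand: small buckets fold into the 64-byte cache, 96 folds into 128, and 136..192 fold into 256.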
diff --git a/kernel/bpf/mprog.c b/kernel/bpf/mprog.c
index 32d2c4829eb8..1394168062e8 100644
--- a/kernel/bpf/mprog.c
+++ b/kernel/bpf/mprog.c
@@ -253,6 +253,9 @@ int bpf_mprog_attach(struct bpf_mprog_entry *entry,
 			goto out;
 		}
 		idx = tidx;
+	} else if (bpf_mprog_total(entry) == bpf_mprog_max()) {
+		ret = -ERANGE;
+		goto out;
 	}
 	if (flags & BPF_F_BEFORE) {
 		tidx = bpf_mprog_pos_before(entry, &rtuple);
@@ -398,14 +401,16 @@ int bpf_mprog_query(const union bpf_attr *attr, union bpf_attr __user *uattr,
 	struct bpf_mprog_cp *cp;
 	struct bpf_prog *prog;
 	const u32 flags = 0;
+	u32 id, count = 0;
+	u64 revision = 1;
 	int i, ret = 0;
-	u32 id, count;
-	u64 revision;
 
 	if (attr->query.query_flags || attr->query.attach_flags)
 		return -EINVAL;
-	revision = bpf_mprog_revision(entry);
-	count = bpf_mprog_total(entry);
+	if (entry) {
+		revision = bpf_mprog_revision(entry);
+		count = bpf_mprog_total(entry);
+	}
 	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
 		return -EFAULT;
 	if (copy_to_user(&uattr->query.revision, &revision, sizeof(revision)))
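
With the defaults hoisted into the initializers, a query against an attach point that has no entry yet reports an empty state instead of forcing callers to special-case NULL (the tcx change below relies on exactly this). A minimal model of that contract, with simplified stand-in types rather than the kernel structures:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct mprog_entry {
	uint64_t revision;
	uint32_t count;
};

static void mprog_query(const struct mprog_entry *entry)
{
	uint64_t revision = 1;	/* what an empty attach point reports */
	uint32_t count = 0;

	if (entry) {
		revision = entry->revision;
		count = entry->count;
	}
	printf("revision=%llu count=%u\n",
	       (unsigned long long)revision, count);
}

int main(void)
{
	struct mprog_entry populated = { .revision = 5, .count = 2 };

	mprog_query(NULL);		/* nothing attached yet */
	mprog_query(&populated);
	return 0;
}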
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index eb01c31ed591..d77b2f8b9364 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -3796,7 +3796,6 @@ static int bpf_prog_attach(const union bpf_attr *attr)
 {
 	enum bpf_prog_type ptype;
 	struct bpf_prog *prog;
-	u32 mask;
 	int ret;
 
 	if (CHECK_ATTR(BPF_PROG_ATTACH))
@@ -3805,10 +3804,16 @@ static int bpf_prog_attach(const union bpf_attr *attr)
 	ptype = attach_type_to_prog_type(attr->attach_type);
 	if (ptype == BPF_PROG_TYPE_UNSPEC)
 		return -EINVAL;
-	mask = bpf_mprog_supported(ptype) ?
-	       BPF_F_ATTACH_MASK_MPROG : BPF_F_ATTACH_MASK_BASE;
-	if (attr->attach_flags & ~mask)
-		return -EINVAL;
+	if (bpf_mprog_supported(ptype)) {
+		if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG)
+			return -EINVAL;
+	} else {
+		if (attr->attach_flags & ~BPF_F_ATTACH_MASK_BASE)
+			return -EINVAL;
+		if (attr->relative_fd ||
+		    attr->expected_revision)
+			return -EINVAL;
+	}
 
 	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
 	if (IS_ERR(prog))
@@ -3878,6 +3883,10 @@ static int bpf_prog_detach(const union bpf_attr *attr)
 			if (IS_ERR(prog))
 				return PTR_ERR(prog);
 		}
+	} else if (attr->attach_flags ||
+		   attr->relative_fd ||
+		   attr->expected_revision) {
+		return -EINVAL;
 	}
 
 	switch (ptype) {
@@ -3913,7 +3922,7 @@ static int bpf_prog_detach(const union bpf_attr *attr)
 	return ret;
 }
 
-#define BPF_PROG_QUERY_LAST_FIELD query.link_attach_flags
+#define BPF_PROG_QUERY_LAST_FIELD query.revision
 
 static int bpf_prog_query(const union bpf_attr *attr,
 			  union bpf_attr __user *uattr)
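
The rewrite trades the computed mask for two explicit branches so that non-mprog program types can also reject the mprog-only attributes. A compact sketch of that validation shape; the mask values and the attr layout are simplified stand-ins for the uapi definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <errno.h>

#define ATTACH_MASK_BASE	0x7	/* stand-in for BPF_F_ATTACH_MASK_BASE */
#define ATTACH_MASK_MPROG	0x7f	/* stand-in for BPF_F_ATTACH_MASK_MPROG */

struct attach_attr {
	uint32_t attach_flags;
	uint32_t relative_fd;
	uint64_t expected_revision;
};

static int validate_attach(const struct attach_attr *attr, bool mprog)
{
	if (mprog) {
		if (attr->attach_flags & ~ATTACH_MASK_MPROG)
			return -EINVAL;
	} else {
		if (attr->attach_flags & ~ATTACH_MASK_BASE)
			return -EINVAL;
		/* mprog-only attributes must stay unused here */
		if (attr->relative_fd || attr->expected_revision)
			return -EINVAL;
	}
	return 0;
}

int main(void)
{
	struct attach_attr legacy = { .expected_revision = 3 };

	/* Rejected: a non-mprog type must not carry mprog attributes. */
	printf("%d\n", validate_attach(&legacy, false));
	/* Accepted once the type supports mprog. */
	printf("%d\n", validate_attach(&legacy, true));
	return 0;
}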
diff --git a/kernel/bpf/tcx.c b/kernel/bpf/tcx.c
index 13f0b5dc8262..1338a13a8b64 100644
--- a/kernel/bpf/tcx.c
+++ b/kernel/bpf/tcx.c
@@ -123,7 +123,6 @@ int tcx_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr)
 {
 	bool ingress = attr->query.attach_type == BPF_TCX_INGRESS;
 	struct net *net = current->nsproxy->net_ns;
-	struct bpf_mprog_entry *entry;
 	struct net_device *dev;
 	int ret;
 
@@ -133,12 +132,7 @@ int tcx_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr)
 		ret = -ENODEV;
 		goto out;
 	}
-	entry = tcx_entry_fetch(dev, ingress);
-	if (!entry) {
-		ret = -ENOENT;
-		goto out;
-	}
-	ret = bpf_mprog_query(attr, uattr, entry);
+	ret = bpf_mprog_query(attr, uattr, tcx_entry_fetch(dev, ingress));
 out:
 	rtnl_unlock();
 	return ret;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index bb78212fa5b2..873ade146f3d 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -4047,11 +4047,9 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno)
 				bitmap_from_u64(mask, bt_reg_mask(bt));
 				for_each_set_bit(i, mask, 32) {
 					reg = &st->frame[0]->regs[i];
-					if (reg->type != SCALAR_VALUE) {
-						bt_clear_reg(bt, i);
-						continue;
-					}
-					reg->precise = true;
+					bt_clear_reg(bt, i);
+					if (reg->type == SCALAR_VALUE)
+						reg->precise = true;
 				}
 				return 0;
 			}
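
The reordering makes the mask bookkeeping unconditional: every requested register bit is dropped once backtracking reaches frame 0, and only scalar registers additionally get the precise flag. With the old early-continue shape, scalar bits were never cleared and lingered in the mask after the walk. A toy model of the pattern, with a plain u32 and simplified register state standing in for the bt mask helpers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum reg_type { NOT_INIT, SCALAR_VALUE, PTR_TO_CTX };

struct reg_state {
	enum reg_type type;
	bool precise;
};

/* Stand-in for the bt_reg_mask()/bt_clear_reg() walk over frame 0. */
static void mark_frame0_precise(struct reg_state *regs, uint32_t *mask)
{
	for (int i = 0; i < 32; i++) {
		if (!(*mask & (1u << i)))
			continue;
		/* Drop the bit first: whatever the register is, there is
		 * nothing further back to track once we are in frame 0.
		 */
		*mask &= ~(1u << i);
		if (regs[i].type == SCALAR_VALUE)
			regs[i].precise = true;
	}
}

int main(void)
{
	struct reg_state regs[32] = {
		[0] = { .type = SCALAR_VALUE },
		[1] = { .type = PTR_TO_CTX },
	};
	uint32_t mask = (1u << 0) | (1u << 1);

	mark_frame0_precise(regs, &mask);
	/* mask is now 0; with the old ordering the scalar's bit (bit 0)
	 * would still be set here.
	 */
	printf("mask=%#x r0.precise=%d\n", (unsigned)mask, regs[0].precise);
	return 0;
}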
@@ -14481,7 +14479,7 @@ static int check_return_code(struct bpf_verifier_env *env)
 	struct tnum enforce_attach_type_range = tnum_unknown;
 	const struct bpf_prog *prog = env->prog;
 	struct bpf_reg_state *reg;
-	struct tnum range = tnum_range(0, 1);
+	struct tnum range = tnum_range(0, 1), const_0 = tnum_const(0);
 	enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
 	int err;
 	struct bpf_func_state *frame = env->cur_state->frame[0];
@@ -14529,8 +14527,8 @@ static int check_return_code(struct bpf_verifier_env *env)
 			return -EINVAL;
 		}
 
-		if (!tnum_in(tnum_const(0), reg->var_off)) {
-			verbose_invalid_scalar(env, reg, &range, "async callback", "R0");
+		if (!tnum_in(const_0, reg->var_off)) {
+			verbose_invalid_scalar(env, reg, &const_0, "async callback", "R0");
 			return -EINVAL;
 		}
 		return 0;
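
An async callback must return exactly 0, so passing const_0 to verbose_invalid_scalar() makes the error report the [0, 0] expectation that is actually enforced, rather than the generic [0, 1] range that was printed before. The sketch below shows the check in isolation; struct tnum mirrors the kernel's (value, mask) pair, and tnum_const()/tnum_in() are small re-implementations of the documented helpers, not the kernel code itself:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct tnum {
	uint64_t value;	/* bits known to be set */
	uint64_t mask;	/* bits whose value is unknown */
};

static struct tnum tnum_const(uint64_t v)
{
	return (struct tnum){ .value = v, .mask = 0 };
}

/* a contains b: every value b can take is representable in a. */
static bool tnum_in(struct tnum a, struct tnum b)
{
	if (b.mask & ~a.mask)
		return false;
	b.value &= ~a.mask;
	return a.value == b.value;
}

int main(void)
{
	struct tnum const_0 = tnum_const(0);
	struct tnum r0 = tnum_const(1);	/* callback returned 1 */

	if (!tnum_in(const_0, r0))
		printf("R0 has value (%#llx; %#llx), expected (%#llx; %#llx)\n",
		       (unsigned long long)r0.value,
		       (unsigned long long)r0.mask,
		       (unsigned long long)const_0.value,
		       (unsigned long long)const_0.mask);
	return 0;
}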