Diffstat (limited to 'tools')
243 files changed, 8389 insertions, 2144 deletions
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h index d0ce5cfd3ac1..d5b5f2ab87a0 100644 --- a/tools/arch/x86/include/asm/cpufeatures.h +++ b/tools/arch/x86/include/asm/cpufeatures.h @@ -277,6 +277,7 @@ #define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC instruction */ #define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 instruction */ #define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS instructions */ +#define X86_FEATURE_XFD (10*32+ 4) /* "" eXtended Feature Disabling */ /* * Extended auxiliary flags: Linux defined - for features scattered in various @@ -298,6 +299,7 @@ /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */ #define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */ #define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */ +#define X86_FEATURE_AMX_TILE (18*32+24) /* AMX tile Support */ /* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */ #define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */ diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h index 2ef1f6513c68..5a776a08f78c 100644 --- a/tools/arch/x86/include/uapi/asm/kvm.h +++ b/tools/arch/x86/include/uapi/asm/kvm.h @@ -504,4 +504,8 @@ struct kvm_pmu_event_filter { #define KVM_PMU_EVENT_ALLOW 0 #define KVM_PMU_EVENT_DENY 1 +/* for KVM_{GET,SET,HAS}_DEVICE_ATTR */ +#define KVM_VCPU_TSC_CTRL 0 /* control group for the timestamp counter (TSC) */ +#define KVM_VCPU_TSC_OFFSET 0 /* attribute for the TSC offset */ + #endif /* _ASM_X86_KVM_H */ diff --git a/tools/bpf/bpftool/Documentation/Makefile b/tools/bpf/bpftool/Documentation/Makefile index 692e1b947490..ac8487dcff1d 100644 --- a/tools/bpf/bpftool/Documentation/Makefile +++ b/tools/bpf/bpftool/Documentation/Makefile @@ -24,7 +24,7 @@ man: man8 man8: $(DOC_MAN8) RST2MAN_DEP := $(shell command -v rst2man 2>/dev/null) -RST2MAN_OPTS += --verbose +RST2MAN_OPTS += --verbose --strip-comments list_pages = $(sort $(basename $(filter-out $(1),$(MAN8_RST)))) see_also = $(subst " ",, \ diff --git a/tools/bpf/bpftool/Documentation/bpftool-btf.rst b/tools/bpf/bpftool/Documentation/bpftool-btf.rst index 4425d942dd39..342716f74ec4 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-btf.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-btf.rst @@ -1,3 +1,5 @@ +.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + ================ bpftool-btf ================ @@ -7,13 +9,14 @@ tool for inspection of BTF data :Manual section: 8 +.. include:: substitutions.rst + SYNOPSIS ======== **bpftool** [*OPTIONS*] **btf** *COMMAND* - *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | {**-d** | **--debug** } | - { **-B** | **--base-btf** } } + *OPTIONS* := { |COMMON_OPTIONS| | { **-B** | **--base-btf** } } *COMMANDS* := { **dump** | **help** } diff --git a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst index 8069d37dd991..a17e9aa314fd 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst @@ -1,3 +1,5 @@ +.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + ================ bpftool-cgroup ================ @@ -7,13 +9,14 @@ tool for inspection and simple manipulation of eBPF progs :Manual section: 8 +.. 
include:: substitutions.rst + SYNOPSIS ======== **bpftool** [*OPTIONS*] **cgroup** *COMMAND* - *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } | - { **-f** | **--bpffs** } } + *OPTIONS* := { |COMMON_OPTIONS| | { **-f** | **--bpffs** } } *COMMANDS* := { **show** | **list** | **tree** | **attach** | **detach** | **help** } diff --git a/tools/bpf/bpftool/Documentation/bpftool-feature.rst b/tools/bpf/bpftool/Documentation/bpftool-feature.rst index ab9f57ee4c3a..4ce9a77bc1e0 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-feature.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-feature.rst @@ -1,3 +1,5 @@ +.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + =============== bpftool-feature =============== @@ -7,12 +9,14 @@ tool for inspection of eBPF-related parameters for Linux kernel or net device :Manual section: 8 +.. include:: substitutions.rst + SYNOPSIS ======== **bpftool** [*OPTIONS*] **feature** *COMMAND* - *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } } + *OPTIONS* := { |COMMON_OPTIONS| } *COMMANDS* := { **probe** | **help** } diff --git a/tools/bpf/bpftool/Documentation/bpftool-gen.rst b/tools/bpf/bpftool/Documentation/bpftool-gen.rst index 2a137f8a4cea..bc276388f432 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-gen.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-gen.rst @@ -1,3 +1,5 @@ +.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + ================ bpftool-gen ================ @@ -7,13 +9,14 @@ tool for BPF code-generation :Manual section: 8 +.. include:: substitutions.rst + SYNOPSIS ======== **bpftool** [*OPTIONS*] **gen** *COMMAND* - *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } | - { **-L** | **--use-loader** } } + *OPTIONS* := { |COMMON_OPTIONS| | { **-L** | **--use-loader** } } *COMMAND* := { **object** | **skeleton** | **help** } diff --git a/tools/bpf/bpftool/Documentation/bpftool-iter.rst b/tools/bpf/bpftool/Documentation/bpftool-iter.rst index 471f363a725a..84839d488621 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-iter.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-iter.rst @@ -1,3 +1,5 @@ +.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + ============ bpftool-iter ============ @@ -7,12 +9,14 @@ tool to create BPF iterators :Manual section: 8 +.. include:: substitutions.rst + SYNOPSIS ======== **bpftool** [*OPTIONS*] **iter** *COMMAND* - *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } } + *OPTIONS* := { |COMMON_OPTIONS| } *COMMANDS* := { **pin** | **help** } diff --git a/tools/bpf/bpftool/Documentation/bpftool-link.rst b/tools/bpf/bpftool/Documentation/bpftool-link.rst index 9434349636a5..52a4eee4af54 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-link.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-link.rst @@ -1,3 +1,5 @@ +.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + ================ bpftool-link ================ @@ -7,13 +9,14 @@ tool for inspection and simple manipulation of eBPF links :Manual section: 8 +.. 
include:: substitutions.rst + SYNOPSIS ======== **bpftool** [*OPTIONS*] **link** *COMMAND* - *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } | - { **-f** | **--bpffs** } | { **-n** | **--nomount** } } + *OPTIONS* := { |COMMON_OPTIONS| | { **-f** | **--bpffs** } | { **-n** | **--nomount** } } *COMMANDS* := { **show** | **list** | **pin** | **help** } diff --git a/tools/bpf/bpftool/Documentation/bpftool-map.rst b/tools/bpf/bpftool/Documentation/bpftool-map.rst index 991d18fd84f2..7c188a598444 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-map.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-map.rst @@ -1,3 +1,5 @@ +.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + ================ bpftool-map ================ @@ -7,13 +9,14 @@ tool for inspection and simple manipulation of eBPF maps :Manual section: 8 +.. include:: substitutions.rst + SYNOPSIS ======== **bpftool** [*OPTIONS*] **map** *COMMAND* - *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } | - { **-f** | **--bpffs** } | { **-n** | **--nomount** } } + *OPTIONS* := { |COMMON_OPTIONS| | { **-f** | **--bpffs** } | { **-n** | **--nomount** } } *COMMANDS* := { **show** | **list** | **create** | **dump** | **update** | **lookup** | **getnext** | diff --git a/tools/bpf/bpftool/Documentation/bpftool-net.rst b/tools/bpf/bpftool/Documentation/bpftool-net.rst index 7ec57535a7c1..f4e0a516335a 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-net.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-net.rst @@ -1,3 +1,5 @@ +.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + ================ bpftool-net ================ @@ -7,12 +9,14 @@ tool for inspection of netdev/tc related bpf prog attachments :Manual section: 8 +.. include:: substitutions.rst + SYNOPSIS ======== **bpftool** [*OPTIONS*] **net** *COMMAND* - *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } } + *OPTIONS* := { |COMMON_OPTIONS| } *COMMANDS* := { **show** | **list** | **attach** | **detach** | **help** } diff --git a/tools/bpf/bpftool/Documentation/bpftool-perf.rst b/tools/bpf/bpftool/Documentation/bpftool-perf.rst index ce52798a917d..5fea633a82f1 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-perf.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-perf.rst @@ -1,3 +1,5 @@ +.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + ================ bpftool-perf ================ @@ -7,12 +9,14 @@ tool for inspection of perf related bpf prog attachments :Manual section: 8 +.. include:: substitutions.rst + SYNOPSIS ======== **bpftool** [*OPTIONS*] **perf** *COMMAND* - *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } } + *OPTIONS* := { |COMMON_OPTIONS| } *COMMANDS* := { **show** | **list** | **help** } diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst index f27265bd589b..a2e9359e554c 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst @@ -1,3 +1,5 @@ +.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + ================ bpftool-prog ================ @@ -7,12 +9,14 @@ tool for inspection and simple manipulation of eBPF progs :Manual section: 8 +.. 
include:: substitutions.rst + SYNOPSIS ======== **bpftool** [*OPTIONS*] **prog** *COMMAND* - *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } | + *OPTIONS* := { |COMMON_OPTIONS| | { **-f** | **--bpffs** } | { **-m** | **--mapcompat** } | { **-n** | **--nomount** } | { **-L** | **--use-loader** } } diff --git a/tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst b/tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst index 02afc0fc14cb..ee53a122c0c7 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst @@ -1,3 +1,5 @@ +.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + ================== bpftool-struct_ops ================== @@ -7,12 +9,14 @@ tool to register/unregister/introspect BPF struct_ops :Manual section: 8 +.. include:: substitutions.rst + SYNOPSIS ======== **bpftool** [*OPTIONS*] **struct_ops** *COMMAND* - *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } } + *OPTIONS* := { |COMMON_OPTIONS| } *COMMANDS* := { **show** | **list** | **dump** | **register** | **unregister** | **help** } diff --git a/tools/bpf/bpftool/Documentation/bpftool.rst b/tools/bpf/bpftool/Documentation/bpftool.rst index 8ac86565c501..7084dd9fa2f8 100644 --- a/tools/bpf/bpftool/Documentation/bpftool.rst +++ b/tools/bpf/bpftool/Documentation/bpftool.rst @@ -1,3 +1,5 @@ +.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + ================ BPFTOOL ================ @@ -7,6 +9,8 @@ tool for inspection and simple manipulation of eBPF programs and maps :Manual section: 8 +.. include:: substitutions.rst + SYNOPSIS ======== @@ -18,8 +22,7 @@ SYNOPSIS *OBJECT* := { **map** | **program** | **cgroup** | **perf** | **net** | **feature** } - *OPTIONS* := { { **-V** | **--version** } | - { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } } + *OPTIONS* := { { **-V** | **--version** } | |COMMON_OPTIONS| } *MAP-COMMANDS* := { **show** | **list** | **create** | **dump** | **update** | **lookup** | **getnext** | diff --git a/tools/bpf/bpftool/Documentation/common_options.rst b/tools/bpf/bpftool/Documentation/common_options.rst index 75adf23202d8..908487b9c2ad 100644 --- a/tools/bpf/bpftool/Documentation/common_options.rst +++ b/tools/bpf/bpftool/Documentation/common_options.rst @@ -1,3 +1,5 @@ +.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + -h, --help Print short help message (similar to **bpftool help**). diff --git a/tools/bpf/bpftool/Documentation/substitutions.rst b/tools/bpf/bpftool/Documentation/substitutions.rst new file mode 100644 index 000000000000..ccf1ffa0686c --- /dev/null +++ b/tools/bpf/bpftool/Documentation/substitutions.rst @@ -0,0 +1,3 @@ +.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + +.. 
|COMMON_OPTIONS| replace:: { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } | { **-l** | **--legacy** } diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c index 997a2865e04a..b4695df2ea3d 100644 --- a/tools/bpf/bpftool/gen.c +++ b/tools/bpf/bpftool/gen.c @@ -486,7 +486,6 @@ static void codegen_destroy(struct bpf_object *obj, const char *obj_name) static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *header_guard) { - struct bpf_object_load_attr load_attr = {}; DECLARE_LIBBPF_OPTS(gen_loader_opts, opts); struct bpf_map *map; char ident[256]; @@ -496,12 +495,7 @@ static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *h if (err) return err; - load_attr.obj = obj; - if (verifier_logs) - /* log_level1 + log_level2 + stats, but not stable UAPI */ - load_attr.log_level = 1 + 2 + 4; - - err = bpf_object__load_xattr(&load_attr); + err = bpf_object__load(obj); if (err) { p_err("failed to load object file"); goto out; @@ -719,6 +713,9 @@ static int do_skeleton(int argc, char **argv) if (obj_name[0] == '\0') get_obj_name(obj_name, file); opts.object_name = obj_name; + if (verifier_logs) + /* log_level1 + log_level2 + stats, but not stable UAPI */ + opts.kernel_log_level = 1 + 2 + 4; obj = bpf_object__open_mem(obj_data, file_sz, &opts); err = libbpf_get_error(obj); if (err) { diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c index 473791e87f7d..8b71500e7cb2 100644 --- a/tools/bpf/bpftool/main.c +++ b/tools/bpf/bpftool/main.c @@ -93,6 +93,7 @@ static int do_version(int argc, char **argv) jsonw_name(json_wtr, "features"); jsonw_start_object(json_wtr); /* features */ jsonw_bool_field(json_wtr, "libbfd", has_libbfd); + jsonw_bool_field(json_wtr, "libbpf_strict", !legacy_libbpf); jsonw_bool_field(json_wtr, "skeletons", has_skeletons); jsonw_end_object(json_wtr); /* features */ @@ -106,6 +107,10 @@ static int do_version(int argc, char **argv) printf(" libbfd"); nb_features++; } + if (!legacy_libbpf) { + printf("%s libbpf_strict", nb_features++ ? "," : ""); + nb_features++; + } if (has_skeletons) printf("%s skeletons", nb_features++ ? 
"," : ""); printf("\n"); @@ -400,6 +405,7 @@ int main(int argc, char **argv) { "legacy", no_argument, NULL, 'l' }, { 0 } }; + bool version_requested = false; int opt, ret; last_do_help = do_help; @@ -414,7 +420,8 @@ int main(int argc, char **argv) options, NULL)) >= 0) { switch (opt) { case 'V': - return do_version(argc, argv); + version_requested = true; + break; case 'h': return do_help(argc, argv); case 'p': @@ -479,6 +486,9 @@ int main(int argc, char **argv) if (argc < 0) usage(); + if (version_requested) + return do_version(argc, argv); + ret = cmd_select(cmds, argc, argv, do_help); if (json_output) diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c index 25b258804f11..cc530a229812 100644 --- a/tools/bpf/bpftool/map.c +++ b/tools/bpf/bpftool/map.c @@ -1261,7 +1261,10 @@ static int do_pin(int argc, char **argv) static int do_create(int argc, char **argv) { - struct bpf_create_map_attr attr = { NULL, }; + LIBBPF_OPTS(bpf_map_create_opts, attr); + enum bpf_map_type map_type = BPF_MAP_TYPE_UNSPEC; + __u32 key_size = 0, value_size = 0, max_entries = 0; + const char *map_name = NULL; const char *pinfile; int err = -1, fd; @@ -1276,30 +1279,30 @@ static int do_create(int argc, char **argv) if (is_prefix(*argv, "type")) { NEXT_ARG(); - if (attr.map_type) { + if (map_type) { p_err("map type already specified"); goto exit; } - attr.map_type = map_type_from_str(*argv); - if ((int)attr.map_type < 0) { + map_type = map_type_from_str(*argv); + if ((int)map_type < 0) { p_err("unrecognized map type: %s", *argv); goto exit; } NEXT_ARG(); } else if (is_prefix(*argv, "name")) { NEXT_ARG(); - attr.name = GET_ARG(); + map_name = GET_ARG(); } else if (is_prefix(*argv, "key")) { - if (parse_u32_arg(&argc, &argv, &attr.key_size, + if (parse_u32_arg(&argc, &argv, &key_size, "key size")) goto exit; } else if (is_prefix(*argv, "value")) { - if (parse_u32_arg(&argc, &argv, &attr.value_size, + if (parse_u32_arg(&argc, &argv, &value_size, "value size")) goto exit; } else if (is_prefix(*argv, "entries")) { - if (parse_u32_arg(&argc, &argv, &attr.max_entries, + if (parse_u32_arg(&argc, &argv, &max_entries, "max entries")) goto exit; } else if (is_prefix(*argv, "flags")) { @@ -1340,14 +1343,14 @@ static int do_create(int argc, char **argv) } } - if (!attr.name) { + if (!map_name) { p_err("map name not specified"); goto exit; } set_max_rlimit(); - fd = bpf_create_map_xattr(&attr); + fd = bpf_map_create(map_type, map_name, key_size, value_size, max_entries, &attr); if (fd < 0) { p_err("map create failed: %s", strerror(errno)); goto exit; diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index e47e8b06cc3d..f874896c4154 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -1464,7 +1464,6 @@ static int load_with_options(int argc, char **argv, bool first_prog_only) DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts, .relaxed_maps = relaxed_maps, ); - struct bpf_object_load_attr load_attr = { 0 }; enum bpf_attach_type expected_attach_type; struct map_replace *map_replace = NULL; struct bpf_program *prog = NULL, *pos; @@ -1598,6 +1597,10 @@ static int load_with_options(int argc, char **argv, bool first_prog_only) set_max_rlimit(); + if (verifier_logs) + /* log_level1 + log_level2 + stats, but not stable UAPI */ + open_opts.kernel_log_level = 1 + 2 + 4; + obj = bpf_object__open_file(file, &open_opts); if (libbpf_get_error(obj)) { p_err("failed to open object file"); @@ -1677,12 +1680,7 @@ static int load_with_options(int argc, char **argv, bool first_prog_only) goto 
err_close_obj; } - load_attr.obj = obj; - if (verifier_logs) - /* log_level1 + log_level2 + stats, but not stable UAPI */ - load_attr.log_level = 1 + 2 + 4; - - err = bpf_object__load_xattr(&load_attr); + err = bpf_object__load(obj); if (err) { p_err("failed to load object file"); goto err_close_obj; @@ -1774,17 +1772,19 @@ static int try_loader(struct gen_loader_opts *gen) sizeof(struct bpf_prog_desc)); int log_buf_sz = (1u << 24) - 1; int err, fds_before, fd_delta; - char *log_buf; + char *log_buf = NULL; ctx = alloca(ctx_sz); memset(ctx, 0, ctx_sz); ctx->sz = ctx_sz; - ctx->log_level = 1; - ctx->log_size = log_buf_sz; - log_buf = malloc(log_buf_sz); - if (!log_buf) - return -ENOMEM; - ctx->log_buf = (long) log_buf; + if (verifier_logs) { + ctx->log_level = 1 + 2 + 4; + ctx->log_size = log_buf_sz; + log_buf = malloc(log_buf_sz); + if (!log_buf) + return -ENOMEM; + ctx->log_buf = (long) log_buf; + } opts.ctx = ctx; opts.data = gen->data; opts.data_sz = gen->data_sz; @@ -1793,9 +1793,9 @@ static int try_loader(struct gen_loader_opts *gen) fds_before = count_open_fds(); err = bpf_load_and_run(&opts); fd_delta = count_open_fds() - fds_before; - if (err < 0) { + if (err < 0 || verifier_logs) { fprintf(stderr, "err %d\n%s\n%s", err, opts.errstr, log_buf); - if (fd_delta) + if (fd_delta && err < 0) fprintf(stderr, "loader prog leaked %d FDs\n", fd_delta); } @@ -1807,7 +1807,6 @@ static int do_loader(int argc, char **argv) { DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts); DECLARE_LIBBPF_OPTS(gen_loader_opts, gen); - struct bpf_object_load_attr load_attr = {}; struct bpf_object *obj; const char *file; int err = 0; @@ -1816,6 +1815,10 @@ static int do_loader(int argc, char **argv) return -1; file = GET_ARG(); + if (verifier_logs) + /* log_level1 + log_level2 + stats, but not stable UAPI */ + open_opts.kernel_log_level = 1 + 2 + 4; + obj = bpf_object__open_file(file, &open_opts); if (libbpf_get_error(obj)) { p_err("failed to open object file"); @@ -1826,12 +1829,7 @@ static int do_loader(int argc, char **argv) if (err) goto err_close_obj; - load_attr.obj = obj; - if (verifier_logs) - /* log_level1 + log_level2 + stats, but not stable UAPI */ - load_attr.log_level = 1 + 2 + 4; - - err = bpf_object__load_xattr(&load_attr); + err = bpf_object__load(obj); if (err) { p_err("failed to load object file"); goto err_close_obj; diff --git a/tools/bpf/bpftool/struct_ops.c b/tools/bpf/bpftool/struct_ops.c index cbdca37a53f0..2f693b082bdb 100644 --- a/tools/bpf/bpftool/struct_ops.c +++ b/tools/bpf/bpftool/struct_ops.c @@ -479,7 +479,7 @@ static int do_unregister(int argc, char **argv) static int do_register(int argc, char **argv) { - struct bpf_object_load_attr load_attr = {}; + LIBBPF_OPTS(bpf_object_open_opts, open_opts); const struct bpf_map_def *def; struct bpf_map_info info = {}; __u32 info_len = sizeof(info); @@ -494,18 +494,17 @@ static int do_register(int argc, char **argv) file = GET_ARG(); - obj = bpf_object__open(file); + if (verifier_logs) + /* log_level1 + log_level2 + stats, but not stable UAPI */ + open_opts.kernel_log_level = 1 + 2 + 4; + + obj = bpf_object__open_file(file, &open_opts); if (libbpf_get_error(obj)) return -1; set_max_rlimit(); - load_attr.obj = obj; - if (verifier_logs) - /* log_level1 + log_level2 + stats, but not stable UAPI */ - load_attr.log_level = 1 + 2 + 4; - - if (bpf_object__load_xattr(&load_attr)) { + if (bpf_object__load(obj)) { bpf_object__close(obj); return -1; } diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c index 
a59cb0ee609c..5d26f3c6f918 100644 --- a/tools/bpf/resolve_btfids/main.c +++ b/tools/bpf/resolve_btfids/main.c @@ -83,6 +83,7 @@ struct btf_id { int cnt; }; int addr_cnt; + bool is_set; Elf64_Addr addr[ADDR_CNT]; }; @@ -167,7 +168,7 @@ static struct btf_id *btf_id__find(struct rb_root *root, const char *name) return NULL; } -static struct btf_id* +static struct btf_id * btf_id__add(struct rb_root *root, char *name, bool unique) { struct rb_node **p = &root->rb_node; @@ -451,8 +452,10 @@ static int symbols_collect(struct object *obj) * in symbol's size, together with 'cnt' field hence * that - 1. */ - if (id) + if (id) { id->cnt = sym.st_size / sizeof(int) - 1; + id->is_set = true; + } } else { pr_err("FAILED unsupported prefix %s\n", prefix); return -1; @@ -568,9 +571,8 @@ static int id_patch(struct object *obj, struct btf_id *id) int *ptr = data->d_buf; int i; - if (!id->id) { + if (!id->id && !id->is_set) pr_err("WARN: resolve_btfids: unresolved symbol %s\n", id->name); - } for (i = 0; i < id->addr_cnt; i++) { unsigned long addr = id->addr[i]; @@ -730,7 +732,8 @@ int main(int argc, const char **argv) if (obj.efile.idlist_shndx == -1 || obj.efile.symbols_shndx == -1) { pr_debug("Cannot find .BTF_ids or symbols sections, nothing to do\n"); - return 0; + err = 0; + goto out; } if (symbols_collect(&obj)) diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature index 45a9a59828c3..ae61f464043a 100644 --- a/tools/build/Makefile.feature +++ b/tools/build/Makefile.feature @@ -48,7 +48,6 @@ FEATURE_TESTS_BASIC := \ numa_num_possible_cpus \ libperl \ libpython \ - libpython-version \ libslang \ libslang-include-subdir \ libtraceevent \ diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile index 0a3244ad9673..1480910c792e 100644 --- a/tools/build/feature/Makefile +++ b/tools/build/feature/Makefile @@ -32,7 +32,6 @@ FILES= \ test-numa_num_possible_cpus.bin \ test-libperl.bin \ test-libpython.bin \ - test-libpython-version.bin \ test-libslang.bin \ test-libslang-include-subdir.bin \ test-libtraceevent.bin \ @@ -227,9 +226,6 @@ $(OUTPUT)test-libperl.bin: $(OUTPUT)test-libpython.bin: $(BUILD) $(FLAGS_PYTHON_EMBED) -$(OUTPUT)test-libpython-version.bin: - $(BUILD) - $(OUTPUT)test-libbfd.bin: $(BUILD) -DPACKAGE='"perf"' -lbfd -ldl diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c index 920439527291..5ffafb967b6e 100644 --- a/tools/build/feature/test-all.c +++ b/tools/build/feature/test-all.c @@ -14,10 +14,6 @@ # include "test-libpython.c" #undef main -#define main main_test_libpython_version -# include "test-libpython-version.c" -#undef main - #define main main_test_libperl # include "test-libperl.c" #undef main @@ -177,7 +173,6 @@ int main(int argc, char *argv[]) { main_test_libpython(); - main_test_libpython_version(); main_test_libperl(); main_test_hello(); main_test_libelf(); @@ -200,7 +195,6 @@ int main(int argc, char *argv[]) main_test_timerfd(); main_test_stackprotector_all(); main_test_libdw_dwarf_unwind(); - main_test_sync_compare_and_swap(argc, argv); main_test_zlib(); main_test_pthread_attr_setaffinity_np(); main_test_pthread_barrier(); diff --git a/tools/build/feature/test-bpf.c b/tools/build/feature/test-bpf.c index 82070eadfc07..727d22e34a6e 100644 --- a/tools/build/feature/test-bpf.c +++ b/tools/build/feature/test-bpf.c @@ -14,6 +14,12 @@ # define __NR_bpf 349 # elif defined(__s390__) # define __NR_bpf 351 +# elif defined(__mips__) && defined(_ABIO32) +# define __NR_bpf 4355 +# elif defined(__mips__) && defined(_ABIN32) +# define 
__NR_bpf 6319 +# elif defined(__mips__) && defined(_ABI64) +# define __NR_bpf 5315 # else # error __NR_bpf not defined. libbpf does not support your arch. # endif diff --git a/tools/build/feature/test-libpython-version.c b/tools/build/feature/test-libpython-version.c deleted file mode 100644 index 47714b942d4d..000000000000 --- a/tools/build/feature/test-libpython-version.c +++ /dev/null @@ -1,11 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include <Python.h> - -#if PY_VERSION_HEX >= 0x03000000 - #error -#endif - -int main(void) -{ - return 0; -} diff --git a/tools/include/linux/debug_locks.h b/tools/include/linux/debug_locks.h deleted file mode 100644 index 72d595ce764a..000000000000 --- a/tools/include/linux/debug_locks.h +++ /dev/null @@ -1,14 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LIBLOCKDEP_DEBUG_LOCKS_H_ -#define _LIBLOCKDEP_DEBUG_LOCKS_H_ - -#include <stddef.h> -#include <linux/compiler.h> -#include <asm/bug.h> - -#define DEBUG_LOCKS_WARN_ON(x) WARN_ON(x) - -extern bool debug_locks; -extern bool debug_locks_silent; - -#endif diff --git a/tools/include/linux/hardirq.h b/tools/include/linux/hardirq.h deleted file mode 100644 index b25580b6a9be..000000000000 --- a/tools/include/linux/hardirq.h +++ /dev/null @@ -1,12 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LIBLOCKDEP_LINUX_HARDIRQ_H_ -#define _LIBLOCKDEP_LINUX_HARDIRQ_H_ - -#define SOFTIRQ_BITS 0UL -#define HARDIRQ_BITS 0UL -#define SOFTIRQ_SHIFT 0UL -#define HARDIRQ_SHIFT 0UL -#define hardirq_count() 0UL -#define softirq_count() 0UL - -#endif diff --git a/tools/include/linux/irqflags.h b/tools/include/linux/irqflags.h deleted file mode 100644 index 501262aee8ff..000000000000 --- a/tools/include/linux/irqflags.h +++ /dev/null @@ -1,39 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LIBLOCKDEP_LINUX_TRACE_IRQFLAGS_H_ -#define _LIBLOCKDEP_LINUX_TRACE_IRQFLAGS_H_ - -# define lockdep_hardirq_context() 0 -# define lockdep_softirq_context(p) 0 -# define lockdep_hardirqs_enabled() 0 -# define lockdep_softirqs_enabled(p) 0 -# define lockdep_hardirq_enter() do { } while (0) -# define lockdep_hardirq_exit() do { } while (0) -# define lockdep_softirq_enter() do { } while (0) -# define lockdep_softirq_exit() do { } while (0) -# define INIT_TRACE_IRQFLAGS - -# define stop_critical_timings() do { } while (0) -# define start_critical_timings() do { } while (0) - -#define raw_local_irq_disable() do { } while (0) -#define raw_local_irq_enable() do { } while (0) -#define raw_local_irq_save(flags) ((flags) = 0) -#define raw_local_irq_restore(flags) ((void)(flags)) -#define raw_local_save_flags(flags) ((flags) = 0) -#define raw_irqs_disabled_flags(flags) ((void)(flags)) -#define raw_irqs_disabled() 0 -#define raw_safe_halt() - -#define local_irq_enable() do { } while (0) -#define local_irq_disable() do { } while (0) -#define local_irq_save(flags) ((flags) = 0) -#define local_irq_restore(flags) ((void)(flags)) -#define local_save_flags(flags) ((flags) = 0) -#define irqs_disabled() (1) -#define irqs_disabled_flags(flags) ((void)(flags), 0) -#define safe_halt() do { } while (0) - -#define trace_lock_release(x, y) -#define trace_lock_acquire(a, b, c, d, e, f, g) - -#endif diff --git a/tools/include/linux/kernel.h b/tools/include/linux/kernel.h index a7e54a08fb54..3e8df500cfbd 100644 --- a/tools/include/linux/kernel.h +++ b/tools/include/linux/kernel.h @@ -7,6 +7,7 @@ #include <assert.h> #include <linux/build_bug.h> #include <linux/compiler.h> +#include <linux/math.h> #include <endian.h> #include <byteswap.h> @@ 
-14,8 +15,6 @@ #define UINT_MAX (~0U) #endif -#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) - #define PERF_ALIGN(x, a) __PERF_ALIGN_MASK(x, (typeof(x))(a)-1) #define __PERF_ALIGN_MASK(x, mask) (((x)+(mask))&~(mask)) @@ -52,15 +51,6 @@ _min1 < _min2 ? _min1 : _min2; }) #endif -#ifndef roundup -#define roundup(x, y) ( \ -{ \ - const typeof(y) __y = y; \ - (((x) + (__y - 1)) / __y) * __y; \ -} \ -) -#endif - #ifndef BUG_ON #ifdef NDEBUG #define BUG_ON(cond) do { if (cond) {} } while (0) @@ -104,16 +94,6 @@ int scnprintf_pad(char * buf, size_t size, const char * fmt, ...); #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) -/* - * This looks more complex than it should be. But we need to - * get the type for the ~ right in round_down (it needs to be - * as wide as the result!), and we want to evaluate the macro - * arguments just once each. - */ -#define __round_mask(x, y) ((__typeof__(x))((y)-1)) -#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1) -#define round_down(x, y) ((x) & ~__round_mask(x, y)) - #define current_gfp_context(k) 0 #define synchronize_rcu() diff --git a/tools/include/linux/lockdep.h b/tools/include/linux/lockdep.h deleted file mode 100644 index e56997288f2b..000000000000 --- a/tools/include/linux/lockdep.h +++ /dev/null @@ -1,72 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LIBLOCKDEP_LOCKDEP_H_ -#define _LIBLOCKDEP_LOCKDEP_H_ - -#include <sys/prctl.h> -#include <sys/syscall.h> -#include <string.h> -#include <limits.h> -#include <linux/utsname.h> -#include <linux/compiler.h> -#include <linux/export.h> -#include <linux/kern_levels.h> -#include <linux/err.h> -#include <linux/rcu.h> -#include <linux/list.h> -#include <linux/hardirq.h> -#include <unistd.h> - -#define MAX_LOCK_DEPTH 63UL - -#define asmlinkage -#define __visible - -#include "../../../include/linux/lockdep.h" - -struct task_struct { - u64 curr_chain_key; - int lockdep_depth; - unsigned int lockdep_recursion; - struct held_lock held_locks[MAX_LOCK_DEPTH]; - gfp_t lockdep_reclaim_gfp; - int pid; - int state; - char comm[17]; -}; - -#define TASK_RUNNING 0 - -extern struct task_struct *__curr(void); - -#define current (__curr()) - -static inline int debug_locks_off(void) -{ - return 1; -} - -#define task_pid_nr(tsk) ((tsk)->pid) - -#define KSYM_NAME_LEN 128 -#define printk(...) dprintf(STDOUT_FILENO, __VA_ARGS__) -#define pr_err(format, ...) fprintf (stderr, format, ## __VA_ARGS__) -#define pr_warn pr_err -#define pr_cont pr_err - -#define list_del_rcu list_del - -#define atomic_t unsigned long -#define atomic_inc(x) ((*(x))++) - -#define print_tainted() "" -#define static_obj(x) 1 - -#define debug_show_all_locks() -extern void debug_check_no_locks_held(void); - -static __used bool __is_kernel_percpu_address(unsigned long addr, void *can_addr) -{ - return false; -} - -#endif diff --git a/tools/include/linux/math.h b/tools/include/linux/math.h new file mode 100644 index 000000000000..4e7af99ec9eb --- /dev/null +++ b/tools/include/linux/math.h @@ -0,0 +1,25 @@ +#ifndef _TOOLS_MATH_H +#define _TOOLS_MATH_H + +/* + * This looks more complex than it should be. But we need to + * get the type for the ~ right in round_down (it needs to be + * as wide as the result!), and we want to evaluate the macro + * arguments just once each. 
+ */ +#define __round_mask(x, y) ((__typeof__(x))((y)-1)) +#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1) +#define round_down(x, y) ((x) & ~__round_mask(x, y)) + +#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) + +#ifndef roundup +#define roundup(x, y) ( \ +{ \ + const typeof(y) __y = y; \ + (((x) + (__y - 1)) / __y) * __y; \ +} \ +) +#endif + +#endif diff --git a/tools/include/linux/proc_fs.h b/tools/include/linux/proc_fs.h deleted file mode 100644 index 8b3b03b64fda..000000000000 --- a/tools/include/linux/proc_fs.h +++ /dev/null @@ -1,4 +0,0 @@ -#ifndef _TOOLS_INCLUDE_LINUX_PROC_FS_H -#define _TOOLS_INCLUDE_LINUX_PROC_FS_H - -#endif /* _TOOLS_INCLUDE_LINUX_PROC_FS_H */ diff --git a/tools/include/linux/spinlock.h b/tools/include/linux/spinlock.h index c934572d935c..622266b197d0 100644 --- a/tools/include/linux/spinlock.h +++ b/tools/include/linux/spinlock.h @@ -37,6 +37,4 @@ static inline bool arch_spin_is_locked(arch_spinlock_t *mutex) return true; } -#include <linux/lockdep.h> - #endif diff --git a/tools/include/linux/stacktrace.h b/tools/include/linux/stacktrace.h deleted file mode 100644 index ae343ac35bfa..000000000000 --- a/tools/include/linux/stacktrace.h +++ /dev/null @@ -1,33 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LIBLOCKDEP_LINUX_STACKTRACE_H_ -#define _LIBLOCKDEP_LINUX_STACKTRACE_H_ - -#include <execinfo.h> - -struct stack_trace { - unsigned int nr_entries, max_entries; - unsigned long *entries; - int skip; -}; - -static inline void print_stack_trace(struct stack_trace *trace, int spaces) -{ - backtrace_symbols_fd((void **)trace->entries, trace->nr_entries, 1); -} - -#define save_stack_trace(trace) \ - ((trace)->nr_entries = \ - backtrace((void **)(trace)->entries, (trace)->max_entries)) - -static inline int dump_stack(void) -{ - void *array[64]; - size_t size; - - size = backtrace(array, 64); - backtrace_symbols_fd(array, size, 1); - - return 0; -} - -#endif diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 6297eafdc40f..c26871263f1f 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1342,8 +1342,10 @@ union bpf_attr { /* or valid module BTF object fd or 0 to attach to vmlinux */ __u32 attach_btf_obj_fd; }; - __u32 :32; /* pad */ + __u32 core_relo_cnt; /* number of bpf_core_relo */ __aligned_u64 fd_array; /* array of FDs */ + __aligned_u64 core_relos; + __u32 core_relo_rec_size; /* sizeof(struct bpf_core_relo) */ }; struct { /* anonymous struct used by BPF_OBJ_* commands */ @@ -1744,7 +1746,7 @@ union bpf_attr { * if the maximum number of tail calls has been reached for this * chain of programs. This limit is defined in the kernel by the * macro **MAX_TAIL_CALL_CNT** (not accessible to user space), - * which is currently set to 32. + * which is currently set to 33. * Return * 0 on success, or a negative error in case of failure. * @@ -4957,6 +4959,30 @@ union bpf_attr { * **-ENOENT** if *task->mm* is NULL, or no vma contains *addr*. * **-EBUSY** if failed to try lock mmap_lock. * **-EINVAL** for invalid **flags**. + * + * long bpf_loop(u32 nr_loops, void *callback_fn, void *callback_ctx, u64 flags) + * Description + * For **nr_loops**, call **callback_fn** function + * with **callback_ctx** as the context parameter. + * The **callback_fn** should be a static function and + * the **callback_ctx** should be a pointer to the stack. + * The **flags** is used to control certain aspects of the helper. + * Currently, the **flags** must be 0. 
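As an aside on the rounding helpers consolidated into tools/include/linux/math.h above: round_up() and round_down() work by bit masking, so the multiple must be a power of two, while roundup() and DIV_ROUND_UP() divide and accept any divisor. A minimal standalone sketch of the difference (the sample values are arbitrary):

#include <stdio.h>

/* copied from the math.h hunk above */
#define __round_mask(x, y) ((__typeof__(x))((y)-1))
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
#define round_down(x, y) ((x) & ~__round_mask(x, y))
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	printf("%lu\n", round_up(1000UL, 64));    /* 1024 - 64 is a power of two */
	printf("%lu\n", round_down(1000UL, 64));  /* 960 */
	printf("%lu\n", DIV_ROUND_UP(1000UL, 3)); /* 334 - any divisor works here */
	return 0;
}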
Currently, nr_loops is + * limited to 1 << 23 (~8 million) loops. + * + * long (\*callback_fn)(u32 index, void \*ctx); + * + * where **index** is the current index in the loop. The index + * is zero-indexed. + * + * If **callback_fn** returns 0, the helper will continue to the next + * loop. If return value is 1, the helper will skip the rest of + * the loops and return. Other return values are not used now, + * and will be rejected by the verifier. + * + * Return + * The number of loops performed, **-EINVAL** for invalid **flags**, + * **-E2BIG** if **nr_loops** exceeds the maximum number of loops. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5140,6 +5166,7 @@ union bpf_attr { FN(skc_to_unix_sock), \ FN(kallsyms_lookup_name), \ FN(find_vma), \ + FN(loop), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper @@ -6349,4 +6376,78 @@ enum { BTF_F_ZERO = (1ULL << 3), }; +/* bpf_core_relo_kind encodes which aspect of captured field/type/enum value + * has to be adjusted by relocations. It is emitted by llvm and passed to + * libbpf and later to the kernel. + */ +enum bpf_core_relo_kind { + BPF_CORE_FIELD_BYTE_OFFSET = 0, /* field byte offset */ + BPF_CORE_FIELD_BYTE_SIZE = 1, /* field size in bytes */ + BPF_CORE_FIELD_EXISTS = 2, /* field existence in target kernel */ + BPF_CORE_FIELD_SIGNED = 3, /* field signedness (0 - unsigned, 1 - signed) */ + BPF_CORE_FIELD_LSHIFT_U64 = 4, /* bitfield-specific left bitshift */ + BPF_CORE_FIELD_RSHIFT_U64 = 5, /* bitfield-specific right bitshift */ + BPF_CORE_TYPE_ID_LOCAL = 6, /* type ID in local BPF object */ + BPF_CORE_TYPE_ID_TARGET = 7, /* type ID in target kernel */ + BPF_CORE_TYPE_EXISTS = 8, /* type existence in target kernel */ + BPF_CORE_TYPE_SIZE = 9, /* type size in bytes */ + BPF_CORE_ENUMVAL_EXISTS = 10, /* enum value existence in target kernel */ + BPF_CORE_ENUMVAL_VALUE = 11, /* enum value integer value */ +}; + +/* + * "struct bpf_core_relo" is used to pass relocation data form LLVM to libbpf + * and from libbpf to the kernel. + * + * CO-RE relocation captures the following data: + * - insn_off - instruction offset (in bytes) within a BPF program that needs + * its insn->imm field to be relocated with actual field info; + * - type_id - BTF type ID of the "root" (containing) entity of a relocatable + * type or field; + * - access_str_off - offset into corresponding .BTF string section. String + * interpretation depends on specific relocation kind: + * - for field-based relocations, string encodes an accessed field using + * a sequence of field and array indices, separated by colon (:). It's + * conceptually very close to LLVM's getelementptr ([0]) instruction's + * arguments for identifying offset to a field. + * - for type-based relocations, strings is expected to be just "0"; + * - for enum value-based relocations, string contains an index of enum + * value within its enum type; + * - kind - one of enum bpf_core_relo_kind; + * + * Example: + * struct sample { + * int a; + * struct { + * int b[10]; + * }; + * }; + * + * struct sample *s = ...; + * int *x = &s->a; // encoded as "0:0" (a is field #0) + * int *y = &s->b[5]; // encoded as "0:1:0:5" (anon struct is field #1, + * // b is field #0 inside anon struct, accessing elem #5) + * int *z = &s[10]->b; // encoded as "10:1" (ptr is used as an array) + * + * type_id for all relocs in this example will capture BTF type id of + * `struct sample`. 
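To make the bpf_loop() documentation above concrete, here is a minimal sketch of a BPF program using the helper, assuming a kernel new enough to provide it; the section name and the sum_ctx/add_index identifiers are illustrative, not taken from the patch:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct sum_ctx {
	__u64 sum;
};

/* matches the documented callback signature: long (*)(u32 index, void *ctx);
 * returning 0 continues the loop, returning 1 breaks out early */
static long add_index(__u32 index, void *ctx)
{
	struct sum_ctx *c = ctx;

	c->sum += index;
	return 0;
}

SEC("tracepoint/syscalls/sys_enter_getpid")
int sum_indices(void *ctx)
{
	struct sum_ctx c = {};

	/* flags must currently be 0; nr_loops is capped at 1 << 23 */
	bpf_loop(100, add_index, &c, 0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";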
+ * + * Such relocation is emitted when using __builtin_preserve_access_index() + * Clang built-in, passing expression that captures field address, e.g.: + * + * bpf_probe_read(&dst, sizeof(dst), + * __builtin_preserve_access_index(&src->a.b.c)); + * + * In this case Clang will emit field relocation recording necessary data to + * be able to find offset of embedded `a.b.c` field within `src` struct. + * + * [0] https://llvm.org/docs/LangRef.html#getelementptr-instruction + */ +struct bpf_core_relo { + __u32 insn_off; + __u32 type_id; + __u32 access_str_off; + enum bpf_core_relo_kind kind; +}; + #endif /* _UAPI__LINUX_BPF_H__ */ diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h index b3610fdd1fee..4ac53b30b6dc 100644 --- a/tools/include/uapi/linux/if_link.h +++ b/tools/include/uapi/linux/if_link.h @@ -7,24 +7,23 @@ /* This struct should be in sync with struct rtnl_link_stats64 */ struct rtnl_link_stats { - __u32 rx_packets; /* total packets received */ - __u32 tx_packets; /* total packets transmitted */ - __u32 rx_bytes; /* total bytes received */ - __u32 tx_bytes; /* total bytes transmitted */ - __u32 rx_errors; /* bad packets received */ - __u32 tx_errors; /* packet transmit problems */ - __u32 rx_dropped; /* no space in linux buffers */ - __u32 tx_dropped; /* no space available in linux */ - __u32 multicast; /* multicast packets received */ + __u32 rx_packets; + __u32 tx_packets; + __u32 rx_bytes; + __u32 tx_bytes; + __u32 rx_errors; + __u32 tx_errors; + __u32 rx_dropped; + __u32 tx_dropped; + __u32 multicast; __u32 collisions; - /* detailed rx_errors: */ __u32 rx_length_errors; - __u32 rx_over_errors; /* receiver ring buff overflow */ - __u32 rx_crc_errors; /* recved pkt with crc error */ - __u32 rx_frame_errors; /* recv'd frame alignment error */ - __u32 rx_fifo_errors; /* recv'r fifo overrun */ - __u32 rx_missed_errors; /* receiver missed packet */ + __u32 rx_over_errors; + __u32 rx_crc_errors; + __u32 rx_frame_errors; + __u32 rx_fifo_errors; + __u32 rx_missed_errors; /* detailed tx_errors */ __u32 tx_aborted_errors; @@ -37,29 +36,201 @@ struct rtnl_link_stats { __u32 rx_compressed; __u32 tx_compressed; - __u32 rx_nohandler; /* dropped, no handler found */ + __u32 rx_nohandler; }; -/* The main device statistics structure */ +/** + * struct rtnl_link_stats64 - The main device statistics structure. + * + * @rx_packets: Number of good packets received by the interface. + * For hardware interfaces counts all good packets received from the device + * by the host, including packets which host had to drop at various stages + * of processing (even in the driver). + * + * @tx_packets: Number of packets successfully transmitted. + * For hardware interfaces counts packets which host was able to successfully + * hand over to the device, which does not necessarily mean that packets + * had been successfully transmitted out of the device, only that device + * acknowledged it copied them out of host memory. + * + * @rx_bytes: Number of good received bytes, corresponding to @rx_packets. + * + * For IEEE 802.3 devices should count the length of Ethernet Frames + * excluding the FCS. + * + * @tx_bytes: Number of good transmitted bytes, corresponding to @tx_packets. + * + * For IEEE 802.3 devices should count the length of Ethernet Frames + * excluding the FCS. + * + * @rx_errors: Total number of bad packets received on this network device. 
+ * This counter must include events counted by @rx_length_errors, + * @rx_crc_errors, @rx_frame_errors and other errors not otherwise + * counted. + * + * @tx_errors: Total number of transmit problems. + * This counter must include events counter by @tx_aborted_errors, + * @tx_carrier_errors, @tx_fifo_errors, @tx_heartbeat_errors, + * @tx_window_errors and other errors not otherwise counted. + * + * @rx_dropped: Number of packets received but not processed, + * e.g. due to lack of resources or unsupported protocol. + * For hardware interfaces this counter may include packets discarded + * due to L2 address filtering but should not include packets dropped + * by the device due to buffer exhaustion which are counted separately in + * @rx_missed_errors (since procfs folds those two counters together). + * + * @tx_dropped: Number of packets dropped on their way to transmission, + * e.g. due to lack of resources. + * + * @multicast: Multicast packets received. + * For hardware interfaces this statistic is commonly calculated + * at the device level (unlike @rx_packets) and therefore may include + * packets which did not reach the host. + * + * For IEEE 802.3 devices this counter may be equivalent to: + * + * - 30.3.1.1.21 aMulticastFramesReceivedOK + * + * @collisions: Number of collisions during packet transmissions. + * + * @rx_length_errors: Number of packets dropped due to invalid length. + * Part of aggregate "frame" errors in `/proc/net/dev`. + * + * For IEEE 802.3 devices this counter should be equivalent to a sum + * of the following attributes: + * + * - 30.3.1.1.23 aInRangeLengthErrors + * - 30.3.1.1.24 aOutOfRangeLengthField + * - 30.3.1.1.25 aFrameTooLongErrors + * + * @rx_over_errors: Receiver FIFO overflow event counter. + * + * Historically the count of overflow events. Such events may be + * reported in the receive descriptors or via interrupts, and may + * not correspond one-to-one with dropped packets. + * + * The recommended interpretation for high speed interfaces is - + * number of packets dropped because they did not fit into buffers + * provided by the host, e.g. packets larger than MTU or next buffer + * in the ring was not available for a scatter transfer. + * + * Part of aggregate "frame" errors in `/proc/net/dev`. + * + * This statistics was historically used interchangeably with + * @rx_fifo_errors. + * + * This statistic corresponds to hardware events and is not commonly used + * on software devices. + * + * @rx_crc_errors: Number of packets received with a CRC error. + * Part of aggregate "frame" errors in `/proc/net/dev`. + * + * For IEEE 802.3 devices this counter must be equivalent to: + * + * - 30.3.1.1.6 aFrameCheckSequenceErrors + * + * @rx_frame_errors: Receiver frame alignment errors. + * Part of aggregate "frame" errors in `/proc/net/dev`. + * + * For IEEE 802.3 devices this counter should be equivalent to: + * + * - 30.3.1.1.7 aAlignmentErrors + * + * @rx_fifo_errors: Receiver FIFO error counter. + * + * Historically the count of overflow events. Those events may be + * reported in the receive descriptors or via interrupts, and may + * not correspond one-to-one with dropped packets. + * + * This statistics was used interchangeably with @rx_over_errors. + * Not recommended for use in drivers for high speed interfaces. + * + * This statistic is used on software devices, e.g. to count software + * packet queue overflow (can) or sequencing errors (GRE). + * + * @rx_missed_errors: Count of packets missed by the host. 
+ * Folded into the "drop" counter in `/proc/net/dev`. + * + * Counts number of packets dropped by the device due to lack + * of buffer space. This usually indicates that the host interface + * is slower than the network interface, or host is not keeping up + * with the receive packet rate. + * + * This statistic corresponds to hardware events and is not used + * on software devices. + * + * @tx_aborted_errors: + * Part of aggregate "carrier" errors in `/proc/net/dev`. + * For IEEE 802.3 devices capable of half-duplex operation this counter + * must be equivalent to: + * + * - 30.3.1.1.11 aFramesAbortedDueToXSColls + * + * High speed interfaces may use this counter as a general device + * discard counter. + * + * @tx_carrier_errors: Number of frame transmission errors due to loss + * of carrier during transmission. + * Part of aggregate "carrier" errors in `/proc/net/dev`. + * + * For IEEE 802.3 devices this counter must be equivalent to: + * + * - 30.3.1.1.13 aCarrierSenseErrors + * + * @tx_fifo_errors: Number of frame transmission errors due to device + * FIFO underrun / underflow. This condition occurs when the device + * begins transmission of a frame but is unable to deliver the + * entire frame to the transmitter in time for transmission. + * Part of aggregate "carrier" errors in `/proc/net/dev`. + * + * @tx_heartbeat_errors: Number of Heartbeat / SQE Test errors for + * old half-duplex Ethernet. + * Part of aggregate "carrier" errors in `/proc/net/dev`. + * + * For IEEE 802.3 devices possibly equivalent to: + * + * - 30.3.2.1.4 aSQETestErrors + * + * @tx_window_errors: Number of frame transmission errors due + * to late collisions (for Ethernet - after the first 64B of transmission). + * Part of aggregate "carrier" errors in `/proc/net/dev`. + * + * For IEEE 802.3 devices this counter must be equivalent to: + * + * - 30.3.1.1.10 aLateCollisions + * + * @rx_compressed: Number of correctly received compressed packets. + * This counters is only meaningful for interfaces which support + * packet compression (e.g. CSLIP, PPP). + * + * @tx_compressed: Number of transmitted compressed packets. + * This counters is only meaningful for interfaces which support + * packet compression (e.g. CSLIP, PPP). + * + * @rx_nohandler: Number of packets received on the interface + * but dropped by the networking stack because the device is + * not designated to receive packets (e.g. backup link in a bond). 
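One place these counters are visible from userspace, besides /proc/net/dev, is getifaddrs(3): on Linux, AF_PACKET entries carry a struct rtnl_link_stats in ifa_data. A minimal reader, with error handling trimmed for brevity:

#include <stdio.h>
#include <ifaddrs.h>
#include <sys/socket.h>
#include <linux/if_link.h>

int main(void)
{
	struct ifaddrs *ifaddr, *ifa;

	if (getifaddrs(&ifaddr) == -1)
		return 1;

	for (ifa = ifaddr; ifa; ifa = ifa->ifa_next) {
		struct rtnl_link_stats *stats;

		if (!ifa->ifa_addr || ifa->ifa_addr->sa_family != AF_PACKET ||
		    !ifa->ifa_data)
			continue;
		stats = ifa->ifa_data;
		printf("%-8s rx_packets=%u rx_dropped=%u rx_missed=%u\n",
		       ifa->ifa_name, stats->rx_packets,
		       stats->rx_dropped, stats->rx_missed_errors);
	}
	freeifaddrs(ifaddr);
	return 0;
}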
+ */ struct rtnl_link_stats64 { - __u64 rx_packets; /* total packets received */ - __u64 tx_packets; /* total packets transmitted */ - __u64 rx_bytes; /* total bytes received */ - __u64 tx_bytes; /* total bytes transmitted */ - __u64 rx_errors; /* bad packets received */ - __u64 tx_errors; /* packet transmit problems */ - __u64 rx_dropped; /* no space in linux buffers */ - __u64 tx_dropped; /* no space available in linux */ - __u64 multicast; /* multicast packets received */ + __u64 rx_packets; + __u64 tx_packets; + __u64 rx_bytes; + __u64 tx_bytes; + __u64 rx_errors; + __u64 tx_errors; + __u64 rx_dropped; + __u64 tx_dropped; + __u64 multicast; __u64 collisions; /* detailed rx_errors: */ __u64 rx_length_errors; - __u64 rx_over_errors; /* receiver ring buff overflow */ - __u64 rx_crc_errors; /* recved pkt with crc error */ - __u64 rx_frame_errors; /* recv'd frame alignment error */ - __u64 rx_fifo_errors; /* recv'r fifo overrun */ - __u64 rx_missed_errors; /* receiver missed packet */ + __u64 rx_over_errors; + __u64 rx_crc_errors; + __u64 rx_frame_errors; + __u64 rx_fifo_errors; + __u64 rx_missed_errors; /* detailed tx_errors */ __u64 tx_aborted_errors; @@ -71,8 +242,7 @@ struct rtnl_link_stats64 { /* for cslip etc */ __u64 rx_compressed; __u64 tx_compressed; - - __u64 rx_nohandler; /* dropped, no handler found */ + __u64 rx_nohandler; }; /* The struct should be in sync with struct ifmap */ @@ -170,12 +340,29 @@ enum { IFLA_PROP_LIST, IFLA_ALT_IFNAME, /* Alternative ifname */ IFLA_PERM_ADDRESS, + IFLA_PROTO_DOWN_REASON, + + /* device (sysfs) name as parent, used instead + * of IFLA_LINK where there's no parent netdev + */ + IFLA_PARENT_DEV_NAME, + IFLA_PARENT_DEV_BUS_NAME, + __IFLA_MAX }; #define IFLA_MAX (__IFLA_MAX - 1) +enum { + IFLA_PROTO_DOWN_REASON_UNSPEC, + IFLA_PROTO_DOWN_REASON_MASK, /* u32, mask for reason bits */ + IFLA_PROTO_DOWN_REASON_VALUE, /* u32, reason bit value */ + + __IFLA_PROTO_DOWN_REASON_CNT, + IFLA_PROTO_DOWN_REASON_MAX = __IFLA_PROTO_DOWN_REASON_CNT - 1 +}; + /* backwards compatibility for userspace */ #ifndef __KERNEL__ #define IFLA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct ifinfomsg)))) @@ -293,6 +480,7 @@ enum { IFLA_BR_MCAST_MLD_VERSION, IFLA_BR_VLAN_STATS_PER_PORT, IFLA_BR_MULTI_BOOLOPT, + IFLA_BR_MCAST_QUERIER_STATE, __IFLA_BR_MAX, }; @@ -346,6 +534,8 @@ enum { IFLA_BRPORT_BACKUP_PORT, IFLA_BRPORT_MRP_RING_OPEN, IFLA_BRPORT_MRP_IN_OPEN, + IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT, + IFLA_BRPORT_MCAST_EHT_HOSTS_CNT, __IFLA_BRPORT_MAX }; #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) @@ -433,6 +623,7 @@ enum macvlan_macaddr_mode { }; #define MACVLAN_FLAG_NOPROMISC 1 +#define MACVLAN_FLAG_NODST 2 /* skip dst macvlan if matching src macvlan */ /* VRF section */ enum { @@ -597,6 +788,18 @@ enum ifla_geneve_df { GENEVE_DF_MAX = __GENEVE_DF_END - 1, }; +/* Bareudp section */ +enum { + IFLA_BAREUDP_UNSPEC, + IFLA_BAREUDP_PORT, + IFLA_BAREUDP_ETHERTYPE, + IFLA_BAREUDP_SRCPORT_MIN, + IFLA_BAREUDP_MULTIPROTO_MODE, + __IFLA_BAREUDP_MAX +}; + +#define IFLA_BAREUDP_MAX (__IFLA_BAREUDP_MAX - 1) + /* PPP section */ enum { IFLA_PPP_UNSPEC, @@ -655,6 +858,7 @@ enum { IFLA_BOND_TLB_DYNAMIC_LB, IFLA_BOND_PEER_NOTIF_DELAY, IFLA_BOND_AD_LACP_ACTIVE, + IFLA_BOND_MISSED_MAX, __IFLA_BOND_MAX, }; @@ -899,7 +1103,14 @@ enum { #define IFLA_IPOIB_MAX (__IFLA_IPOIB_MAX - 1) -/* HSR section */ +/* HSR/PRP section, both uses same interface */ + +/* Different redundancy protocols for hsr device */ +enum { + HSR_PROTOCOL_HSR, + HSR_PROTOCOL_PRP, + HSR_PROTOCOL_MAX, +}; enum { 
IFLA_HSR_UNSPEC, @@ -909,6 +1120,9 @@ enum { IFLA_HSR_SUPERVISION_ADDR, /* Supervision frame multicast addr */ IFLA_HSR_SEQ_NR, IFLA_HSR_VERSION, /* HSR version */ + IFLA_HSR_PROTOCOL, /* Indicate different protocol than + * HSR. For example PRP. + */ __IFLA_HSR_MAX, }; @@ -1033,6 +1247,8 @@ enum { #define RMNET_FLAGS_INGRESS_MAP_COMMANDS (1U << 1) #define RMNET_FLAGS_INGRESS_MAP_CKSUMV4 (1U << 2) #define RMNET_FLAGS_EGRESS_MAP_CKSUMV4 (1U << 3) +#define RMNET_FLAGS_INGRESS_MAP_CKSUMV5 (1U << 4) +#define RMNET_FLAGS_EGRESS_MAP_CKSUMV5 (1U << 5) enum { IFLA_RMNET_UNSPEC, @@ -1048,4 +1264,14 @@ struct ifla_rmnet_flags { __u32 mask; }; +/* MCTP section */ + +enum { + IFLA_MCTP_UNSPEC, + IFLA_MCTP_NET, + __IFLA_MCTP_MAX, +}; + +#define IFLA_MCTP_MAX (__IFLA_MCTP_MAX - 1) + #endif /* _UAPI_LINUX_IF_LINK_H */ diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h index a067410ebea5..1daa45268de2 100644 --- a/tools/include/uapi/linux/kvm.h +++ b/tools/include/uapi/linux/kvm.h @@ -269,6 +269,7 @@ struct kvm_xen_exit { #define KVM_EXIT_AP_RESET_HOLD 32 #define KVM_EXIT_X86_BUS_LOCK 33 #define KVM_EXIT_XEN 34 +#define KVM_EXIT_RISCV_SBI 35 /* For KVM_EXIT_INTERNAL_ERROR */ /* Emulate instruction failed. */ @@ -397,13 +398,23 @@ struct kvm_run { * "ndata" is correct, that new fields are enumerated in "flags", * and that each flag enumerates fields that are 64-bit aligned * and sized (so that ndata+internal.data[] is valid/accurate). + * + * Space beyond the defined fields may be used to store arbitrary + * debug information relating to the emulation failure. It is + * accounted for in "ndata" but the format is unspecified and is + * not represented in "flags". Any such information is *not* ABI! */ struct { __u32 suberror; __u32 ndata; __u64 flags; - __u8 insn_size; - __u8 insn_bytes[15]; + union { + struct { + __u8 insn_size; + __u8 insn_bytes[15]; + }; + }; + /* Arbitrary debug data may follow. */ } emulation_failure; /* KVM_EXIT_OSI */ struct { @@ -469,6 +480,13 @@ struct kvm_run { } msr; /* KVM_EXIT_XEN */ struct kvm_xen_exit xen; + /* KVM_EXIT_RISCV_SBI */ + struct { + unsigned long extension_id; + unsigned long function_id; + unsigned long args[6]; + unsigned long ret[2]; + } riscv_sbi; /* Fix the size of the union. */ char padding[256]; }; @@ -1112,6 +1130,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_BINARY_STATS_FD 203 #define KVM_CAP_EXIT_ON_EMULATION_FAILURE 204 #define KVM_CAP_ARM_MTE 205 +#define KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM 206 #ifdef KVM_CAP_IRQ_ROUTING @@ -1223,11 +1242,16 @@ struct kvm_irqfd { /* Do not use 1, KVM_CHECK_EXTENSION returned it before we had flags. */ #define KVM_CLOCK_TSC_STABLE 2 +#define KVM_CLOCK_REALTIME (1 << 2) +#define KVM_CLOCK_HOST_TSC (1 << 3) struct kvm_clock_data { __u64 clock; __u32 flags; - __u32 pad[9]; + __u32 pad0; + __u64 realtime; + __u64 host_tsc; + __u32 pad[4]; }; /* For KVM_CAP_SW_TLB */ diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index 94560ba31724..6b2407e12060 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -50,6 +50,12 @@ # define __NR_bpf 351 # elif defined(__arc__) # define __NR_bpf 280 +# elif defined(__mips__) && defined(_ABIO32) +# define __NR_bpf 4355 +# elif defined(__mips__) && defined(_ABIN32) +# define __NR_bpf 6319 +# elif defined(__mips__) && defined(_ABI64) +# define __NR_bpf 5315 # else # error __NR_bpf not defined. libbpf does not support your arch. 
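The __NR_bpf definitions above matter because libc ships no bpf(2) wrapper; the feature test and libbpf alike invoke the syscall directly. A sketch of the idiom (the BPF_MAP_GET_FD_BY_ID command is chosen arbitrarily):

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}

int map_fd_by_id(unsigned int id)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_id = id;
	return sys_bpf(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
}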
# endif @@ -88,146 +94,122 @@ static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int return fd; } -int libbpf__bpf_create_map_xattr(const struct bpf_create_map_params *create_attr) +int bpf_map_create(enum bpf_map_type map_type, + const char *map_name, + __u32 key_size, + __u32 value_size, + __u32 max_entries, + const struct bpf_map_create_opts *opts) { + const size_t attr_sz = offsetofend(union bpf_attr, map_extra); union bpf_attr attr; int fd; - memset(&attr, '\0', sizeof(attr)); - - attr.map_type = create_attr->map_type; - attr.key_size = create_attr->key_size; - attr.value_size = create_attr->value_size; - attr.max_entries = create_attr->max_entries; - attr.map_flags = create_attr->map_flags; - if (create_attr->name) - memcpy(attr.map_name, create_attr->name, - min(strlen(create_attr->name), BPF_OBJ_NAME_LEN - 1)); - attr.numa_node = create_attr->numa_node; - attr.btf_fd = create_attr->btf_fd; - attr.btf_key_type_id = create_attr->btf_key_type_id; - attr.btf_value_type_id = create_attr->btf_value_type_id; - attr.map_ifindex = create_attr->map_ifindex; - if (attr.map_type == BPF_MAP_TYPE_STRUCT_OPS) - attr.btf_vmlinux_value_type_id = - create_attr->btf_vmlinux_value_type_id; - else - attr.inner_map_fd = create_attr->inner_map_fd; - attr.map_extra = create_attr->map_extra; + memset(&attr, 0, attr_sz); + + if (!OPTS_VALID(opts, bpf_map_create_opts)) + return libbpf_err(-EINVAL); + + attr.map_type = map_type; + if (map_name) + strncat(attr.map_name, map_name, sizeof(attr.map_name) - 1); + attr.key_size = key_size; + attr.value_size = value_size; + attr.max_entries = max_entries; + + attr.btf_fd = OPTS_GET(opts, btf_fd, 0); + attr.btf_key_type_id = OPTS_GET(opts, btf_key_type_id, 0); + attr.btf_value_type_id = OPTS_GET(opts, btf_value_type_id, 0); + attr.btf_vmlinux_value_type_id = OPTS_GET(opts, btf_vmlinux_value_type_id, 0); - fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, sizeof(attr)); + attr.inner_map_fd = OPTS_GET(opts, inner_map_fd, 0); + attr.map_flags = OPTS_GET(opts, map_flags, 0); + attr.map_extra = OPTS_GET(opts, map_extra, 0); + attr.numa_node = OPTS_GET(opts, numa_node, 0); + attr.map_ifindex = OPTS_GET(opts, map_ifindex, 0); + + fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, attr_sz); return libbpf_err_errno(fd); } int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr) { - struct bpf_create_map_params p = {}; + LIBBPF_OPTS(bpf_map_create_opts, p); - p.map_type = create_attr->map_type; - p.key_size = create_attr->key_size; - p.value_size = create_attr->value_size; - p.max_entries = create_attr->max_entries; p.map_flags = create_attr->map_flags; - p.name = create_attr->name; p.numa_node = create_attr->numa_node; p.btf_fd = create_attr->btf_fd; p.btf_key_type_id = create_attr->btf_key_type_id; p.btf_value_type_id = create_attr->btf_value_type_id; p.map_ifindex = create_attr->map_ifindex; - if (p.map_type == BPF_MAP_TYPE_STRUCT_OPS) - p.btf_vmlinux_value_type_id = - create_attr->btf_vmlinux_value_type_id; + if (create_attr->map_type == BPF_MAP_TYPE_STRUCT_OPS) + p.btf_vmlinux_value_type_id = create_attr->btf_vmlinux_value_type_id; else p.inner_map_fd = create_attr->inner_map_fd; - return libbpf__bpf_create_map_xattr(&p); + return bpf_map_create(create_attr->map_type, create_attr->name, + create_attr->key_size, create_attr->value_size, + create_attr->max_entries, &p); } int bpf_create_map_node(enum bpf_map_type map_type, const char *name, int key_size, int value_size, int max_entries, __u32 map_flags, int node) { - struct bpf_create_map_attr map_attr = 
{}; - - map_attr.name = name; - map_attr.map_type = map_type; - map_attr.map_flags = map_flags; - map_attr.key_size = key_size; - map_attr.value_size = value_size; - map_attr.max_entries = max_entries; + LIBBPF_OPTS(bpf_map_create_opts, opts); + + opts.map_flags = map_flags; if (node >= 0) { - map_attr.numa_node = node; - map_attr.map_flags |= BPF_F_NUMA_NODE; + opts.numa_node = node; + opts.map_flags |= BPF_F_NUMA_NODE; } - return bpf_create_map_xattr(&map_attr); + return bpf_map_create(map_type, name, key_size, value_size, max_entries, &opts); } int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size, int max_entries, __u32 map_flags) { - struct bpf_create_map_attr map_attr = {}; + LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = map_flags); - map_attr.map_type = map_type; - map_attr.map_flags = map_flags; - map_attr.key_size = key_size; - map_attr.value_size = value_size; - map_attr.max_entries = max_entries; - - return bpf_create_map_xattr(&map_attr); + return bpf_map_create(map_type, NULL, key_size, value_size, max_entries, &opts); } int bpf_create_map_name(enum bpf_map_type map_type, const char *name, int key_size, int value_size, int max_entries, __u32 map_flags) { - struct bpf_create_map_attr map_attr = {}; - - map_attr.name = name; - map_attr.map_type = map_type; - map_attr.map_flags = map_flags; - map_attr.key_size = key_size; - map_attr.value_size = value_size; - map_attr.max_entries = max_entries; + LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = map_flags); - return bpf_create_map_xattr(&map_attr); + return bpf_map_create(map_type, name, key_size, value_size, max_entries, &opts); } int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name, int key_size, int inner_map_fd, int max_entries, __u32 map_flags, int node) { - union bpf_attr attr; - int fd; - - memset(&attr, '\0', sizeof(attr)); - - attr.map_type = map_type; - attr.key_size = key_size; - attr.value_size = 4; - attr.inner_map_fd = inner_map_fd; - attr.max_entries = max_entries; - attr.map_flags = map_flags; - if (name) - memcpy(attr.map_name, name, - min(strlen(name), BPF_OBJ_NAME_LEN - 1)); + LIBBPF_OPTS(bpf_map_create_opts, opts); + opts.inner_map_fd = inner_map_fd; + opts.map_flags = map_flags; if (node >= 0) { - attr.map_flags |= BPF_F_NUMA_NODE; - attr.numa_node = node; + opts.map_flags |= BPF_F_NUMA_NODE; + opts.numa_node = node; } - fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, sizeof(attr)); - return libbpf_err_errno(fd); + return bpf_map_create(map_type, name, key_size, 4, max_entries, &opts); } int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name, int key_size, int inner_map_fd, int max_entries, __u32 map_flags) { - return bpf_create_map_in_map_node(map_type, name, key_size, - inner_map_fd, max_entries, map_flags, - -1); + LIBBPF_OPTS(bpf_map_create_opts, opts, + .inner_map_fd = inner_map_fd, + .map_flags = map_flags, + ); + + return bpf_map_create(map_type, name, key_size, 4, max_entries, &opts); } static void * @@ -321,10 +303,6 @@ int bpf_prog_load_v0_6_0(enum bpf_prog_type prog_type, if (log_level && !log_buf) return libbpf_err(-EINVAL); - attr.log_level = log_level; - attr.log_buf = ptr_to_u64(log_buf); - attr.log_size = log_size; - func_info_rec_size = OPTS_GET(opts, func_info_rec_size, 0); func_info = OPTS_GET(opts, func_info, NULL); attr.func_info_rec_size = func_info_rec_size; @@ -339,6 +317,12 @@ int bpf_prog_load_v0_6_0(enum bpf_prog_type prog_type, attr.fd_array = ptr_to_u64(OPTS_GET(opts, fd_array, NULL)); + if (log_level) { + attr.log_buf = 
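All of the legacy bpf_create_map*() wrappers above now funnel into the new OPTS-based bpf_map_create(), which takes the four mandatory map parameters positionally and everything optional through bpf_map_create_opts. A minimal usage sketch; the map name, sizes, and flag are illustrative:

#include <bpf/bpf.h>

int create_demo_map(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts,
		.map_flags = BPF_F_NO_PREALLOC,	/* optional attributes go via opts */
	);

	/* returns a map fd on success, a negative error on failure */
	return bpf_map_create(BPF_MAP_TYPE_HASH, "demo_map",
			      sizeof(__u32), sizeof(__u64), 1024, &opts);
}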
ptr_to_u64(log_buf); + attr.log_size = log_size; + attr.log_level = log_level; + } + fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts); if (fd >= 0) return fd; @@ -384,16 +368,17 @@ int bpf_prog_load_v0_6_0(enum bpf_prog_type prog_type, goto done; } - if (log_level || !log_buf) - goto done; + if (log_level == 0 && log_buf) { + /* log_level == 0 with non-NULL log_buf requires retrying on error + * with log_level == 1 and log_buf/log_buf_size set, to get details of + * failure + */ + attr.log_buf = ptr_to_u64(log_buf); + attr.log_size = log_size; + attr.log_level = 1; - /* Try again with log */ - log_buf[0] = 0; - attr.log_buf = ptr_to_u64(log_buf); - attr.log_size = log_size; - attr.log_level = 1; - - fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts); + fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts); + } done: /* free() doesn't affect errno, so we don't need to restore it */ free(finfo); @@ -1062,24 +1047,65 @@ int bpf_raw_tracepoint_open(const char *name, int prog_fd) return libbpf_err_errno(fd); } -int bpf_load_btf(const void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_size, - bool do_log) +int bpf_btf_load(const void *btf_data, size_t btf_size, const struct bpf_btf_load_opts *opts) { - union bpf_attr attr = {}; + const size_t attr_sz = offsetofend(union bpf_attr, btf_log_level); + union bpf_attr attr; + char *log_buf; + size_t log_size; + __u32 log_level; int fd; - attr.btf = ptr_to_u64(btf); + memset(&attr, 0, attr_sz); + + if (!OPTS_VALID(opts, bpf_btf_load_opts)) + return libbpf_err(-EINVAL); + + log_buf = OPTS_GET(opts, log_buf, NULL); + log_size = OPTS_GET(opts, log_size, 0); + log_level = OPTS_GET(opts, log_level, 0); + + if (log_size > UINT_MAX) + return libbpf_err(-EINVAL); + if (log_size && !log_buf) + return libbpf_err(-EINVAL); + + attr.btf = ptr_to_u64(btf_data); attr.btf_size = btf_size; + /* log_level == 0 and log_buf != NULL means "try loading without + * log_buf, but retry with log_buf and log_level=1 on error", which is + * consistent across low-level and high-level BTF and program loading + * APIs within libbpf and provides a sensible behavior in practice + */ + if (log_level) { + attr.btf_log_buf = ptr_to_u64(log_buf); + attr.btf_log_size = (__u32)log_size; + attr.btf_log_level = log_level; + } -retry: - if (do_log && log_buf && log_buf_size) { - attr.btf_log_level = 1; - attr.btf_log_size = log_buf_size; + fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz); + if (fd < 0 && log_buf && log_level == 0) { attr.btf_log_buf = ptr_to_u64(log_buf); + attr.btf_log_size = (__u32)log_size; + attr.btf_log_level = 1; + fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz); } + return libbpf_err_errno(fd); +} + +int bpf_load_btf(const void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_size, bool do_log) +{ + LIBBPF_OPTS(bpf_btf_load_opts, opts); + int fd; - fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, sizeof(attr)); +retry: + if (do_log && log_buf && log_buf_size) { + opts.log_buf = log_buf; + opts.log_size = log_buf_size; + opts.log_level = 1; + } + fd = bpf_btf_load(btf, btf_size, &opts); if (fd < 0 && !do_log && log_buf && log_buf_size) { do_log = true; goto retry; diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index 079cc81ac51e..94e553a0ff9d 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -35,6 +35,30 @@ extern "C" { #endif +struct bpf_map_create_opts { + size_t sz; /* size of this struct for forward/backward compatibility */ + + __u32 btf_fd; + __u32 btf_key_type_id; + __u32 btf_value_type_id; + __u32 btf_vmlinux_value_type_id; + + __u32 
inner_map_fd; + __u32 map_flags; + __u64 map_extra; + + __u32 numa_node; + __u32 map_ifindex; +}; +#define bpf_map_create_opts__last_field map_ifindex + +LIBBPF_API int bpf_map_create(enum bpf_map_type map_type, + const char *map_name, + __u32 key_size, + __u32 value_size, + __u32 max_entries, + const struct bpf_map_create_opts *opts); + struct bpf_create_map_attr { const char *name; enum bpf_map_type map_type; @@ -53,20 +77,25 @@ struct bpf_create_map_attr { }; }; -LIBBPF_API int -bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr); +LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_map_create() instead") +LIBBPF_API int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr); +LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_map_create() instead") LIBBPF_API int bpf_create_map_node(enum bpf_map_type map_type, const char *name, int key_size, int value_size, int max_entries, __u32 map_flags, int node); +LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_map_create() instead") LIBBPF_API int bpf_create_map_name(enum bpf_map_type map_type, const char *name, int key_size, int value_size, int max_entries, __u32 map_flags); +LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_map_create() instead") LIBBPF_API int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size, int max_entries, __u32 map_flags); +LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_map_create() instead") LIBBPF_API int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name, int key_size, int inner_map_fd, int max_entries, __u32 map_flags, int node); +LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_map_create() instead") LIBBPF_API int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name, int key_size, int inner_map_fd, int max_entries, @@ -166,8 +195,9 @@ struct bpf_load_program_attr { /* Flags to direct loading requirements */ #define MAPS_RELAX_COMPAT 0x01 -/* Recommend log buffer size */ +/* Recommended log buffer size */ #define BPF_LOG_BUF_SIZE (UINT32_MAX >> 8) /* verifier maximum in kernels <= 5.1 */ + LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_prog_load() instead") LIBBPF_API int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr, char *log_buf, size_t log_buf_sz); @@ -184,6 +214,23 @@ LIBBPF_API int bpf_verify_program(enum bpf_prog_type type, char *log_buf, size_t log_buf_sz, int log_level); +struct bpf_btf_load_opts { + size_t sz; /* size of this struct for forward/backward compatibility */ + + /* kernel log options */ + char *log_buf; + __u32 log_level; + __u32 log_size; +}; +#define bpf_btf_load_opts__last_field log_size + +LIBBPF_API int bpf_btf_load(const void *btf_data, size_t btf_size, + const struct bpf_btf_load_opts *opts); + +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_btf_load() instead") +LIBBPF_API int bpf_load_btf(const void *btf, __u32 btf_size, char *log_buf, + __u32 log_buf_size, bool do_log); + LIBBPF_API int bpf_map_update_elem(int fd, const void *key, const void *value, __u64 flags); @@ -311,8 +358,6 @@ LIBBPF_API int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags, __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt); LIBBPF_API int bpf_raw_tracepoint_open(const char *name, int prog_fd); -LIBBPF_API int bpf_load_btf(const void *btf, __u32 btf_size, char *log_buf, - __u32 log_buf_size, bool do_log); LIBBPF_API int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len, __u32 *prog_id, __u32 *fd_type, __u64 *probe_offset, __u64 *probe_addr); diff --git a/tools/lib/bpf/bpf_gen_internal.h b/tools/lib/bpf/bpf_gen_internal.h index 
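The bpf_btf_load() declared above follows the same OPTS pattern for BTF. With log_level 0 and a non-NULL log_buf the implementation retries with log_level 1 only on failure, per the comment in bpf.c; callers who want the kernel's verification log up front set log_level explicitly. A hedged sketch, with an arbitrary buffer size:

#include <bpf/bpf.h>

int load_btf_with_log(const void *btf_data, size_t btf_size)
{
	static char log[64 * 1024];
	LIBBPF_OPTS(bpf_btf_load_opts, opts,
		.log_buf = log,
		.log_size = sizeof(log),
		.log_level = 1,	/* request the kernel log on the first attempt */
	);

	return bpf_btf_load(btf_data, btf_size, &opts);
}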
cc486a77db65..223308931d55 100644 --- a/tools/lib/bpf/bpf_gen_internal.h +++ b/tools/lib/bpf/bpf_gen_internal.h @@ -39,6 +39,8 @@ struct bpf_gen { int error; struct ksym_relo_desc *relos; int relo_cnt; + struct bpf_core_relo *core_relos; + int core_relo_cnt; char attach_target[128]; int attach_kind; struct ksym_desc *ksyms; @@ -51,7 +53,10 @@ void bpf_gen__init(struct bpf_gen *gen, int log_level, int nr_progs, int nr_maps int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps); void bpf_gen__free(struct bpf_gen *gen); void bpf_gen__load_btf(struct bpf_gen *gen, const void *raw_data, __u32 raw_size); -void bpf_gen__map_create(struct bpf_gen *gen, struct bpf_create_map_params *map_attr, int map_idx); +void bpf_gen__map_create(struct bpf_gen *gen, + enum bpf_map_type map_type, const char *map_name, + __u32 key_size, __u32 value_size, __u32 max_entries, + struct bpf_map_create_opts *map_attr, int map_idx); void bpf_gen__prog_load(struct bpf_gen *gen, enum bpf_prog_type prog_type, const char *prog_name, const char *license, struct bpf_insn *insns, size_t insn_cnt, @@ -61,5 +66,7 @@ void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx); void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *name, enum bpf_attach_type type); void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak, bool is_typeless, int kind, int insn_idx); +void bpf_gen__record_relo_core(struct bpf_gen *gen, const struct bpf_core_relo *core_relo); +void bpf_gen__populate_outer_map(struct bpf_gen *gen, int outer_map_idx, int key, int inner_map_idx); #endif diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index fadf089ae8fe..9aa19c89f758 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -454,7 +454,7 @@ const struct btf *btf__base_btf(const struct btf *btf) } /* internal helper returning non-const pointer to a type */ -struct btf_type *btf_type_by_id(struct btf *btf, __u32 type_id) +struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id) { if (type_id == 0) return &btf_void; @@ -610,6 +610,7 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id) case BTF_KIND_RESTRICT: case BTF_KIND_VAR: case BTF_KIND_DECL_TAG: + case BTF_KIND_TYPE_TAG: type_id = t->type; break; case BTF_KIND_ARRAY: @@ -1123,54 +1124,86 @@ struct btf *btf__parse_split(const char *path, struct btf *base_btf) static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian); -int btf__load_into_kernel(struct btf *btf) +int btf_load_into_kernel(struct btf *btf, char *log_buf, size_t log_sz, __u32 log_level) { - __u32 log_buf_size = 0, raw_size; - char *log_buf = NULL; + LIBBPF_OPTS(bpf_btf_load_opts, opts); + __u32 buf_sz = 0, raw_size; + char *buf = NULL, *tmp; void *raw_data; int err = 0; if (btf->fd >= 0) return libbpf_err(-EEXIST); + if (log_sz && !log_buf) + return libbpf_err(-EINVAL); -retry_load: - if (log_buf_size) { - log_buf = malloc(log_buf_size); - if (!log_buf) - return libbpf_err(-ENOMEM); - - *log_buf = 0; - } - + /* cache native raw data representation */ raw_data = btf_get_raw_data(btf, &raw_size, false); if (!raw_data) { err = -ENOMEM; goto done; } - /* cache native raw data representation */ btf->raw_size = raw_size; btf->raw_data = raw_data; - btf->fd = bpf_load_btf(raw_data, raw_size, log_buf, log_buf_size, false); +retry_load: + /* if log_level is 0, we won't provide log_buf/log_size to the kernel, + * initially. Only if BTF loading fails, we bump log_level to 1 and + * retry, using either auto-allocated or custom log_buf. 
This way + * non-NULL custom log_buf provides a buffer just in case, but hopes + * for successful load and no need for log_buf. + */ + if (log_level) { + /* if caller didn't provide custom log_buf, we'll keep + * allocating our own progressively bigger buffers for BTF + * verification log + */ + if (!log_buf) { + buf_sz = max((__u32)BPF_LOG_BUF_SIZE, buf_sz * 2); + tmp = realloc(buf, buf_sz); + if (!tmp) { + err = -ENOMEM; + goto done; + } + buf = tmp; + buf[0] = '\0'; + } + + opts.log_buf = log_buf ? log_buf : buf; + opts.log_size = log_buf ? log_sz : buf_sz; + opts.log_level = log_level; + } + + btf->fd = bpf_btf_load(raw_data, raw_size, &opts); if (btf->fd < 0) { - if (!log_buf || errno == ENOSPC) { - log_buf_size = max((__u32)BPF_LOG_BUF_SIZE, - log_buf_size << 1); - free(log_buf); + /* time to turn on verbose mode and try again */ + if (log_level == 0) { + log_level = 1; goto retry_load; } + /* only retry if caller didn't provide custom log_buf, but + * make sure we can never overflow buf_sz + */ + if (!log_buf && errno == ENOSPC && buf_sz <= UINT_MAX / 2) + goto retry_load; err = -errno; - pr_warn("Error loading BTF: %s(%d)\n", strerror(errno), errno); - if (*log_buf) - pr_warn("%s\n", log_buf); - goto done; + pr_warn("BTF loading error: %d\n", err); + /* don't print out contents of custom log_buf */ + if (!log_buf && buf[0]) + pr_warn("-- BEGIN BTF LOAD LOG ---\n%s\n-- END BTF LOAD LOG --\n", buf); } done: - free(log_buf); + free(buf); return libbpf_err(err); } + +int btf__load_into_kernel(struct btf *btf) +{ + return btf_load_into_kernel(btf, NULL, 0, 0); +} + int btf__load(struct btf *) __attribute__((alias("btf__load_into_kernel"))); int btf__fd(const struct btf *btf) @@ -2730,15 +2763,11 @@ void btf_ext__free(struct btf_ext *btf_ext) free(btf_ext); } -struct btf_ext *btf_ext__new(__u8 *data, __u32 size) +struct btf_ext *btf_ext__new(const __u8 *data, __u32 size) { struct btf_ext *btf_ext; int err; - err = btf_ext_parse_hdr(data, size); - if (err) - return libbpf_err_ptr(err); - btf_ext = calloc(1, sizeof(struct btf_ext)); if (!btf_ext) return libbpf_err_ptr(-ENOMEM); @@ -2751,6 +2780,10 @@ struct btf_ext *btf_ext__new(__u8 *data, __u32 size) } memcpy(btf_ext->data, data, size); + err = btf_ext_parse_hdr(btf_ext->data, size); + if (err) + goto done; + if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, line_info_len)) { err = -EINVAL; goto done; @@ -3074,7 +3107,7 @@ done: return libbpf_err(err); } -COMPAT_VERSION(bpf__dedup_deprecated, btf__dedup, LIBBPF_0.0.2) +COMPAT_VERSION(btf__dedup_deprecated, btf__dedup, LIBBPF_0.0.2) int btf__dedup_deprecated(struct btf *btf, struct btf_ext *btf_ext, const void *unused_opts) { LIBBPF_OPTS(btf_dedup_opts, opts, .btf_ext = btf_ext); @@ -3476,8 +3509,8 @@ static long btf_hash_struct(struct btf_type *t) } /* - * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type - * IDs. This check is performed during type graph equivalence check and + * Check structural compatibility of two STRUCTs/UNIONs, ignoring referenced + * type IDs. This check is performed during type graph equivalence check and * referenced types equivalence is checked separately. 
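btf__load_into_kernel() stays the public entry point and now delegates to the retrying btf_load_into_kernel() helper above. A small sketch of the typical parse-and-load flow; "prog.bpf.o" is a placeholder object file:

#include <bpf/btf.h>
#include <bpf/libbpf.h>

int parse_and_load_btf(void)
{
	struct btf *btf = btf__parse("prog.bpf.o", NULL);	/* placeholder path */
	long err = libbpf_get_error(btf);

	if (err)
		return (int)err;

	err = btf__load_into_kernel(btf);	/* resulting fd is kept in the btf object */
	btf__free(btf);
	return (int)err;
}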
*/ static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2) @@ -3850,6 +3883,31 @@ static int btf_dedup_identical_arrays(struct btf_dedup *d, __u32 id1, __u32 id2) return btf_equal_array(t1, t2); } +/* Check if given two types are identical STRUCT/UNION definitions */ +static bool btf_dedup_identical_structs(struct btf_dedup *d, __u32 id1, __u32 id2) +{ + const struct btf_member *m1, *m2; + struct btf_type *t1, *t2; + int n, i; + + t1 = btf_type_by_id(d->btf, id1); + t2 = btf_type_by_id(d->btf, id2); + + if (!btf_is_composite(t1) || btf_kind(t1) != btf_kind(t2)) + return false; + + if (!btf_shallow_equal_struct(t1, t2)) + return false; + + m1 = btf_members(t1); + m2 = btf_members(t2); + for (i = 0, n = btf_vlen(t1); i < n; i++, m1++, m2++) { + if (m1->type != m2->type) + return false; + } + return true; +} + /* * Check equivalence of BTF type graph formed by candidate struct/union (we'll * call it "candidate graph" in this description for brevity) to a type graph @@ -3961,6 +4019,8 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id, hypot_type_id = d->hypot_map[canon_id]; if (hypot_type_id <= BTF_MAX_NR_TYPES) { + if (hypot_type_id == cand_id) + return 1; /* In some cases compiler will generate different DWARF types * for *identical* array type definitions and use them for * different fields within the *same* struct. This breaks type @@ -3969,8 +4029,18 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id, * types within a single CU. So work around that by explicitly * allowing identical array types here. */ - return hypot_type_id == cand_id || - btf_dedup_identical_arrays(d, hypot_type_id, cand_id); + if (btf_dedup_identical_arrays(d, hypot_type_id, cand_id)) + return 1; + /* It turns out that similar situation can happen with + * struct/union sometimes, sigh... Handle the case where + * structs/unions are exactly the same, down to the referenced + * type IDs. Anything more complicated (e.g., if referenced + * types are different, but equivalent) is *way more* + * complicated and requires a many-to-many equivalence mapping. 
+ */ + if (btf_dedup_identical_structs(d, hypot_type_id, cand_id)) + return 1; + return 0; } if (btf_dedup_hypot_map_add(d, canon_id, cand_id)) @@ -4023,6 +4093,7 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id, case BTF_KIND_PTR: case BTF_KIND_TYPEDEF: case BTF_KIND_FUNC: + case BTF_KIND_TYPE_TAG: if (cand_type->info != canon_type->info) return 0; return btf_dedup_is_equiv(d, cand_type->type, canon_type->type); diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index 5c73a5b0a044..742a2bf71c5e 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -157,7 +157,7 @@ LIBBPF_API int btf__get_map_kv_tids(const struct btf *btf, const char *map_name, __u32 expected_value_size, __u32 *key_type_id, __u32 *value_type_id); -LIBBPF_API struct btf_ext *btf_ext__new(__u8 *data, __u32 size); +LIBBPF_API struct btf_ext *btf_ext__new(const __u8 *data, __u32 size); LIBBPF_API void btf_ext__free(struct btf_ext *btf_ext); LIBBPF_API const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size); diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c index 05f3e7dfec0a..f06a1d343c92 100644 --- a/tools/lib/bpf/btf_dump.c +++ b/tools/lib/bpf/btf_dump.c @@ -2216,7 +2216,7 @@ static int btf_dump_dump_type_data(struct btf_dump *d, __u8 bits_offset, __u8 bit_sz) { - int size, err; + int size, err = 0; size = btf_dump_type_data_check_overflow(d, t, id, data, bits_offset); if (size < 0) diff --git a/tools/lib/bpf/gen_loader.c b/tools/lib/bpf/gen_loader.c index c28ab24d1bff..8ed02e89c9a9 100644 --- a/tools/lib/bpf/gen_loader.c +++ b/tools/lib/bpf/gen_loader.c @@ -445,47 +445,33 @@ void bpf_gen__load_btf(struct bpf_gen *gen, const void *btf_raw_data, } void bpf_gen__map_create(struct bpf_gen *gen, - struct bpf_create_map_params *map_attr, int map_idx) + enum bpf_map_type map_type, + const char *map_name, + __u32 key_size, __u32 value_size, __u32 max_entries, + struct bpf_map_create_opts *map_attr, int map_idx) { - int attr_size = offsetofend(union bpf_attr, btf_vmlinux_value_type_id); + int attr_size = offsetofend(union bpf_attr, map_extra); bool close_inner_map_fd = false; int map_create_attr, idx; union bpf_attr attr; memset(&attr, 0, attr_size); - attr.map_type = map_attr->map_type; - attr.key_size = map_attr->key_size; - attr.value_size = map_attr->value_size; + attr.map_type = map_type; + attr.key_size = key_size; + attr.value_size = value_size; attr.map_flags = map_attr->map_flags; attr.map_extra = map_attr->map_extra; - memcpy(attr.map_name, map_attr->name, - min((unsigned)strlen(map_attr->name), BPF_OBJ_NAME_LEN - 1)); + if (map_name) + memcpy(attr.map_name, map_name, + min((unsigned)strlen(map_name), BPF_OBJ_NAME_LEN - 1)); attr.numa_node = map_attr->numa_node; attr.map_ifindex = map_attr->map_ifindex; - attr.max_entries = map_attr->max_entries; - switch (attr.map_type) { - case BPF_MAP_TYPE_PERF_EVENT_ARRAY: - case BPF_MAP_TYPE_CGROUP_ARRAY: - case BPF_MAP_TYPE_STACK_TRACE: - case BPF_MAP_TYPE_ARRAY_OF_MAPS: - case BPF_MAP_TYPE_HASH_OF_MAPS: - case BPF_MAP_TYPE_DEVMAP: - case BPF_MAP_TYPE_DEVMAP_HASH: - case BPF_MAP_TYPE_CPUMAP: - case BPF_MAP_TYPE_XSKMAP: - case BPF_MAP_TYPE_SOCKMAP: - case BPF_MAP_TYPE_SOCKHASH: - case BPF_MAP_TYPE_QUEUE: - case BPF_MAP_TYPE_STACK: - case BPF_MAP_TYPE_RINGBUF: - break; - default: - attr.btf_key_type_id = map_attr->btf_key_type_id; - attr.btf_value_type_id = map_attr->btf_value_type_id; - } + attr.max_entries = max_entries; + attr.btf_key_type_id = map_attr->btf_key_type_id; + attr.btf_value_type_id = 
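The deduplication pass exercised here is driven through the OPTS-based btf__dedup(), which this series makes the only non-deprecated variant (note the COMPAT_VERSION alias fix for btf__dedup_deprecated above). A minimal call, with default options, might look like:

#include <bpf/btf.h>

int dedup_btf(struct btf *btf)
{
	LIBBPF_OPTS(btf_dedup_opts, opts);	/* defaults: no .BTF.ext to rewrite */

	return btf__dedup(btf, &opts);
}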
map_attr->btf_value_type_id; pr_debug("gen: map_create: %s idx %d type %d value_type_id %d\n", - attr.map_name, map_idx, map_attr->map_type, attr.btf_value_type_id); + attr.map_name, map_idx, map_type, attr.btf_value_type_id); map_create_attr = add_data(gen, &attr, attr_size); if (attr.btf_value_type_id) @@ -512,7 +498,7 @@ void bpf_gen__map_create(struct bpf_gen *gen, /* emit MAP_CREATE command */ emit_sys_bpf(gen, BPF_MAP_CREATE, map_create_attr, attr_size); debug_ret(gen, "map_create %s idx %d type %d value_size %d value_btf_id %d", - attr.map_name, map_idx, map_attr->map_type, attr.value_size, + attr.map_name, map_idx, map_type, value_size, attr.btf_value_type_id); emit_check_err(gen); /* remember map_fd in the stack, if successful */ @@ -701,27 +687,29 @@ static void emit_relo_kfunc_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo return; } kdesc->off = btf_fd_idx; - /* set a default value for imm */ + /* jump to success case */ + emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3)); + /* set value for imm, off as 0 */ emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, offsetof(struct bpf_insn, imm), 0)); - /* skip success case store if ret < 0 */ - emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, 1)); + emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), 0)); + /* skip success case for ret < 0 */ + emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 10)); /* store btf_id into insn[insn_idx].imm */ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, offsetof(struct bpf_insn, imm))); + /* obtain fd in BPF_REG_9 */ + emit(gen, BPF_MOV64_REG(BPF_REG_9, BPF_REG_7)); + emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_9, 32)); + /* jump to fd_array store if fd denotes module BTF */ + emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_9, 0, 2)); + /* set the default value for off */ + emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), 0)); + /* skip BTF fd store for vmlinux BTF */ + emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 4)); /* load fd_array slot pointer */ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, blob_fd_array_off(gen, btf_fd_idx))); - /* skip store of BTF fd if ret < 0 */ - emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, 3)); /* store BTF fd in slot */ - emit(gen, BPF_MOV64_REG(BPF_REG_9, BPF_REG_7)); - emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_9, 32)); emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_9, 0)); - /* set a default value for off */ - emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), 0)); - /* skip insn->off store if ret < 0 */ - emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, 2)); - /* skip if vmlinux BTF */ - emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 0, 1)); /* store index into insn[insn_idx].off */ emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), btf_fd_idx)); log: @@ -820,9 +808,8 @@ static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, kdesc->insn + offsetof(struct bpf_insn, imm)); move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4, kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)); - emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_8, offsetof(struct bpf_insn, imm))); - /* jump over src_reg adjustment if imm is not 0 */ - emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_9, 0, 3)); + /* jump over src_reg adjustment if imm is not 0, reuse BPF_REG_0 from move_blob2blob */ + emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3)); goto clear_src_reg; } /* remember insn offset, so we can copy BTF ID and FD later */ @@ -830,17 +817,20 @@ static void 
emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, emit_bpf_find_by_name_kind(gen, relo); if (!relo->is_weak) emit_check_err(gen); - /* set default values as 0 */ + /* jump to success case */ + emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3)); + /* set values for insn[insn_idx].imm, insn[insn_idx + 1].imm as 0 */ emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, offsetof(struct bpf_insn, imm), 0)); emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 0)); - /* skip success case stores if ret < 0 */ - emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, 4)); + /* skip success case for ret < 0 */ + emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 4)); /* store btf_id into insn[insn_idx].imm */ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, offsetof(struct bpf_insn, imm))); /* store btf_obj_fd into insn[insn_idx + 1].imm */ emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32)); emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm))); + /* skip src_reg adjustment */ emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3)); clear_src_reg: /* clear bpf_object__relocate_data's src_reg assignment, otherwise we get a verifier failure */ @@ -852,6 +842,22 @@ clear_src_reg: emit_ksym_relo_log(gen, relo, kdesc->ref); } +void bpf_gen__record_relo_core(struct bpf_gen *gen, + const struct bpf_core_relo *core_relo) +{ + struct bpf_core_relo *relos; + + relos = libbpf_reallocarray(gen->core_relos, gen->core_relo_cnt + 1, sizeof(*relos)); + if (!relos) { + gen->error = -ENOMEM; + return; + } + gen->core_relos = relos; + relos += gen->core_relo_cnt; + memcpy(relos, core_relo, sizeof(*relos)); + gen->core_relo_cnt++; +} + static void emit_relo(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insns) { int insn; @@ -884,6 +890,15 @@ static void emit_relos(struct bpf_gen *gen, int insns) emit_relo(gen, gen->relos + i, insns); } +static void cleanup_core_relo(struct bpf_gen *gen) +{ + if (!gen->core_relo_cnt) + return; + free(gen->core_relos); + gen->core_relo_cnt = 0; + gen->core_relos = NULL; +} + static void cleanup_relos(struct bpf_gen *gen, int insns) { int i, insn; @@ -911,6 +926,7 @@ static void cleanup_relos(struct bpf_gen *gen, int insns) gen->relo_cnt = 0; gen->relos = NULL; } + cleanup_core_relo(gen); } void bpf_gen__prog_load(struct bpf_gen *gen, @@ -918,12 +934,13 @@ void bpf_gen__prog_load(struct bpf_gen *gen, const char *license, struct bpf_insn *insns, size_t insn_cnt, struct bpf_prog_load_opts *load_attr, int prog_idx) { - int attr_size = offsetofend(union bpf_attr, fd_array); - int prog_load_attr, license_off, insns_off, func_info, line_info; + int prog_load_attr, license_off, insns_off, func_info, line_info, core_relos; + int attr_size = offsetofend(union bpf_attr, core_relo_rec_size); union bpf_attr attr; memset(&attr, 0, attr_size); - pr_debug("gen: prog_load: type %d insns_cnt %zd\n", prog_type, insn_cnt); + pr_debug("gen: prog_load: type %d insns_cnt %zd prog_idx %d\n", + prog_type, insn_cnt, prog_idx); /* add license string to blob of bytes */ license_off = add_data(gen, license, strlen(license) + 1); /* add insns to blob of bytes */ @@ -947,6 +964,11 @@ void bpf_gen__prog_load(struct bpf_gen *gen, line_info = add_data(gen, load_attr->line_info, attr.line_info_cnt * attr.line_info_rec_size); + attr.core_relo_rec_size = sizeof(struct bpf_core_relo); + attr.core_relo_cnt = gen->core_relo_cnt; + core_relos = add_data(gen, gen->core_relos, + attr.core_relo_cnt * attr.core_relo_rec_size); + 
memcpy(attr.prog_name, prog_name, min((unsigned)strlen(prog_name), BPF_OBJ_NAME_LEN - 1)); prog_load_attr = add_data(gen, &attr, attr_size); @@ -963,6 +985,9 @@ void bpf_gen__prog_load(struct bpf_gen *gen, /* populate union bpf_attr with a pointer to line_info */ emit_rel_store(gen, attr_field(prog_load_attr, line_info), line_info); + /* populate union bpf_attr with a pointer to core_relos */ + emit_rel_store(gen, attr_field(prog_load_attr, core_relos), core_relos); + /* populate union bpf_attr fd_array with a pointer to data where map_fds are saved */ emit_rel_store(gen, attr_field(prog_load_attr, fd_array), gen->fd_array); @@ -993,9 +1018,11 @@ void bpf_gen__prog_load(struct bpf_gen *gen, debug_ret(gen, "prog_load %s insn_cnt %d", attr.prog_name, attr.insn_cnt); /* successful or not, close btf module FDs used in extern ksyms and attach_btf_obj_fd */ cleanup_relos(gen, insns_off); - if (gen->attach_kind) + if (gen->attach_kind) { emit_sys_close_blob(gen, attr_field(prog_load_attr, attach_btf_obj_fd)); + gen->attach_kind = 0; + } emit_check_err(gen); /* remember prog_fd in the stack, if successful */ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, @@ -1041,6 +1068,33 @@ void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *pvalue, emit_check_err(gen); } +void bpf_gen__populate_outer_map(struct bpf_gen *gen, int outer_map_idx, int slot, + int inner_map_idx) +{ + int attr_size = offsetofend(union bpf_attr, flags); + int map_update_attr, key; + union bpf_attr attr; + + memset(&attr, 0, attr_size); + pr_debug("gen: populate_outer_map: outer %d key %d inner %d\n", + outer_map_idx, slot, inner_map_idx); + + key = add_data(gen, &slot, sizeof(slot)); + + map_update_attr = add_data(gen, &attr, attr_size); + move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4, + blob_fd_array_off(gen, outer_map_idx)); + emit_rel_store(gen, attr_field(map_update_attr, key), key); + emit_rel_store(gen, attr_field(map_update_attr, value), + blob_fd_array_off(gen, inner_map_idx)); + + /* emit MAP_UPDATE_ELEM command */ + emit_sys_bpf(gen, BPF_MAP_UPDATE_ELEM, map_update_attr, attr_size); + debug_ret(gen, "populate_outer_map outer %d key %d inner %d", + outer_map_idx, slot, inner_map_idx); + emit_check_err(gen); +} + void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx) { int attr_size = offsetofend(union bpf_attr, map_fd); diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index f6faa33c80fa..902f1ad5b7e6 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -168,6 +168,25 @@ int libbpf_set_strict_mode(enum libbpf_strict_mode mode) return 0; } +__u32 libbpf_major_version(void) +{ + return LIBBPF_MAJOR_VERSION; +} + +__u32 libbpf_minor_version(void) +{ + return LIBBPF_MINOR_VERSION; +} + +const char *libbpf_version_string(void) +{ +#define __S(X) #X +#define _S(X) __S(X) + return "v" _S(LIBBPF_MAJOR_VERSION) "." _S(LIBBPF_MINOR_VERSION); +#undef _S +#undef __S +} + enum kern_feature_id { /* v4.14: kernel support for program & map names. 
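The three version accessors added at the top of libbpf.c let applications check at runtime which libbpf they are linked against, for example:

#include <stdio.h>
#include <bpf/libbpf.h>

void report_libbpf_version(void)
{
	printf("libbpf %s (%u.%u)\n", libbpf_version_string(),
	       libbpf_major_version(), libbpf_minor_version());
}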
*/ FEAT_PROG_NAME, @@ -211,13 +230,19 @@ enum reloc_type { RELO_EXTERN_VAR, RELO_EXTERN_FUNC, RELO_SUBPROG_ADDR, + RELO_CORE, }; struct reloc_desc { enum reloc_type type; int insn_idx; - int map_idx; - int sym_off; + union { + const struct bpf_core_relo *core_relo; /* used when type == RELO_CORE */ + struct { + int map_idx; + int sym_off; + }; + }; }; struct bpf_sec_def; @@ -306,7 +331,11 @@ struct bpf_program { struct reloc_desc *reloc_desc; int nr_reloc; - int log_level; + + /* BPF verifier log settings */ + char *log_buf; + size_t log_size; + __u32 log_level; struct { int nr; @@ -402,6 +431,7 @@ struct bpf_map { char *pin_path; bool pinned; bool reused; + bool skipped; __u64 map_extra; }; @@ -548,6 +578,11 @@ struct bpf_object { size_t btf_module_cnt; size_t btf_module_cap; + /* optional log settings passed to BPF_BTF_LOAD and BPF_PROG_LOAD commands */ + char *log_buf; + size_t log_size; + __u32 log_level; + void *priv; bpf_object_clear_priv_t clear_priv; @@ -683,6 +718,9 @@ bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog, prog->instances.fds = NULL; prog->instances.nr = -1; + /* inherit object's log_level */ + prog->log_level = obj->log_level; + prog->sec_name = strdup(sec_name); if (!prog->sec_name) goto errout; @@ -2258,6 +2296,9 @@ int parse_btf_map_def(const char *map_name, struct btf *btf, map_def->parts |= MAP_DEF_VALUE_SIZE | MAP_DEF_VALUE_TYPE; } else if (strcmp(name, "values") == 0) { + bool is_map_in_map = bpf_map_type__is_map_in_map(map_def->map_type); + bool is_prog_array = map_def->map_type == BPF_MAP_TYPE_PROG_ARRAY; + const char *desc = is_map_in_map ? "map-in-map inner" : "prog-array value"; char inner_map_name[128]; int err; @@ -2271,8 +2312,8 @@ int parse_btf_map_def(const char *map_name, struct btf *btf, map_name, name); return -EINVAL; } - if (!bpf_map_type__is_map_in_map(map_def->map_type)) { - pr_warn("map '%s': should be map-in-map.\n", + if (!is_map_in_map && !is_prog_array) { + pr_warn("map '%s': should be map-in-map or prog-array.\n", map_name); return -ENOTSUP; } @@ -2284,22 +2325,30 @@ int parse_btf_map_def(const char *map_name, struct btf *btf, map_def->value_size = 4; t = btf__type_by_id(btf, m->type); if (!t) { - pr_warn("map '%s': map-in-map inner type [%d] not found.\n", - map_name, m->type); + pr_warn("map '%s': %s type [%d] not found.\n", + map_name, desc, m->type); return -EINVAL; } if (!btf_is_array(t) || btf_array(t)->nelems) { - pr_warn("map '%s': map-in-map inner spec is not a zero-sized array.\n", - map_name); + pr_warn("map '%s': %s spec is not a zero-sized array.\n", + map_name, desc); return -EINVAL; } t = skip_mods_and_typedefs(btf, btf_array(t)->type, NULL); if (!btf_is_ptr(t)) { - pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n", - map_name, btf_kind_str(t)); + pr_warn("map '%s': %s def is of unexpected kind %s.\n", + map_name, desc, btf_kind_str(t)); return -EINVAL; } t = skip_mods_and_typedefs(btf, t->type, NULL); + if (is_prog_array) { + if (!btf_is_func_proto(t)) { + pr_warn("map '%s': prog-array value def is of unexpected kind %s.\n", + map_name, btf_kind_str(t)); + return -EINVAL; + } + continue; + } if (!btf_is_struct(t)) { pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n", map_name, btf_kind_str(t)); @@ -2981,7 +3030,9 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj) */ btf__set_fd(kern_btf, 0); } else { - err = btf__load_into_kernel(kern_btf); + /* currently BPF_BTF_LOAD only supports log_level 1 */ + err = btf_load_into_kernel(kern_btf, 
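parse_btf_map_def() now accepts a "values" section for both map-in-map and prog-array maps. The map-in-map flavor looks roughly like this on the BPF side (declarations modeled on the selftests pattern; names are illustrative), with slot 0 filled in by libbpf at load time, or by the generated bpf_gen__populate_outer_map() call in the light-skeleton case:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct inner_map {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, int);
} inner_map1 SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
	__uint(max_entries, 3);
	__uint(key_size, sizeof(int));
	__array(values, struct inner_map);	/* declares the inner map type */
} outer_arr SEC(".maps") = {
	.values = { [0] = &inner_map1 },	/* slot 0 populated at load time */
};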
obj->log_buf, obj->log_size, + obj->log_level ? 1 : 0); } if (sanitize) { if (!err) { @@ -3350,7 +3401,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj) /* sort BPF programs by section name and in-section instruction offset * for faster search */ - qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs); + if (obj->nr_programs) + qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs); return bpf_object__init_btf(obj, btf_data, btf_ext_data); } @@ -4342,7 +4394,6 @@ static int probe_kern_prog_name(void) static int probe_kern_global_data(void) { - struct bpf_create_map_attr map_attr; char *cp, errmsg[STRERR_BUFSIZE]; struct bpf_insn insns[] = { BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16), @@ -4352,13 +4403,7 @@ static int probe_kern_global_data(void) }; int ret, map, insn_cnt = ARRAY_SIZE(insns); - memset(&map_attr, 0, sizeof(map_attr)); - map_attr.map_type = BPF_MAP_TYPE_ARRAY; - map_attr.key_size = sizeof(int); - map_attr.value_size = 32; - map_attr.max_entries = 1; - - map = bpf_create_map_xattr(&map_attr); + map = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int), 32, 1, NULL); if (map < 0) { ret = -errno; cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg)); @@ -4488,15 +4533,11 @@ static int probe_kern_btf_type_tag(void) static int probe_kern_array_mmap(void) { - struct bpf_create_map_attr attr = { - .map_type = BPF_MAP_TYPE_ARRAY, - .map_flags = BPF_F_MMAPABLE, - .key_size = sizeof(int), - .value_size = sizeof(int), - .max_entries = 1, - }; + LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_MMAPABLE); + int fd; - return probe_fd(bpf_create_map_xattr(&attr)); + fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int), sizeof(int), 1, &opts); + return probe_fd(fd); } static int probe_kern_exp_attach_type(void) @@ -4535,7 +4576,6 @@ static int probe_kern_probe_read_kernel(void) static int probe_prog_bind_map(void) { - struct bpf_create_map_attr map_attr; char *cp, errmsg[STRERR_BUFSIZE]; struct bpf_insn insns[] = { BPF_MOV64_IMM(BPF_REG_0, 0), @@ -4543,13 +4583,7 @@ static int probe_prog_bind_map(void) }; int ret, map, prog, insn_cnt = ARRAY_SIZE(insns); - memset(&map_attr, 0, sizeof(map_attr)); - map_attr.map_type = BPF_MAP_TYPE_ARRAY; - map_attr.key_size = sizeof(int); - map_attr.value_size = 32; - map_attr.max_entries = 1; - - map = bpf_create_map_xattr(&map_attr); + map = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int), 32, 1, NULL); if (map < 0) { ret = -errno; cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg)); @@ -4820,19 +4854,16 @@ static void bpf_map__destroy(struct bpf_map *map); static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner) { - struct bpf_create_map_params create_attr; + LIBBPF_OPTS(bpf_map_create_opts, create_attr); struct bpf_map_def *def = &map->def; + const char *map_name = NULL; + __u32 max_entries; int err = 0; - memset(&create_attr, 0, sizeof(create_attr)); - if (kernel_supports(obj, FEAT_PROG_NAME)) - create_attr.name = map->name; + map_name = map->name; create_attr.map_ifindex = map->map_ifindex; - create_attr.map_type = def->type; create_attr.map_flags = def->map_flags; - create_attr.key_size = def->key_size; - create_attr.value_size = def->value_size; create_attr.numa_node = map->numa_node; create_attr.map_extra = map->map_extra; @@ -4846,18 +4877,14 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b return nr_cpus; } pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus); - create_attr.max_entries = nr_cpus; + 
max_entries = nr_cpus; } else { - create_attr.max_entries = def->max_entries; + max_entries = def->max_entries; } if (bpf_map__is_struct_ops(map)) - create_attr.btf_vmlinux_value_type_id = - map->btf_vmlinux_value_type_id; + create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id; - create_attr.btf_fd = 0; - create_attr.btf_key_type_id = 0; - create_attr.btf_value_type_id = 0; if (obj->btf && btf__fd(obj->btf) >= 0 && !bpf_map_find_btf_info(obj, map)) { create_attr.btf_fd = btf__fd(obj->btf); create_attr.btf_key_type_id = map->btf_key_type_id; @@ -4903,13 +4930,17 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b } if (obj->gen_loader) { - bpf_gen__map_create(obj->gen_loader, &create_attr, is_inner ? -1 : map - obj->maps); + bpf_gen__map_create(obj->gen_loader, def->type, map_name, + def->key_size, def->value_size, max_entries, + &create_attr, is_inner ? -1 : map - obj->maps); /* Pretend to have valid FD to pass various fd >= 0 checks. * This fd == 0 will not be used with any syscall and will be reset to -1 eventually. */ map->fd = 0; } else { - map->fd = libbpf__bpf_create_map_xattr(&create_attr); + map->fd = bpf_map_create(def->type, map_name, + def->key_size, def->value_size, + max_entries, &create_attr); } if (map->fd < 0 && (create_attr.btf_key_type_id || create_attr.btf_value_type_id)) { @@ -4924,7 +4955,9 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b create_attr.btf_value_type_id = 0; map->btf_key_type_id = 0; map->btf_value_type_id = 0; - map->fd = libbpf__bpf_create_map_xattr(&create_attr); + map->fd = bpf_map_create(def->type, map_name, + def->key_size, def->value_size, + max_entries, &create_attr); } err = map->fd < 0 ? -errno : 0; @@ -4939,7 +4972,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b return err; } -static int init_map_slots(struct bpf_object *obj, struct bpf_map *map) +static int init_map_in_map_slots(struct bpf_object *obj, struct bpf_map *map) { const struct bpf_map *targ_map; unsigned int i; @@ -4951,18 +4984,18 @@ static int init_map_slots(struct bpf_object *obj, struct bpf_map *map) targ_map = map->init_slots[i]; fd = bpf_map__fd(targ_map); + if (obj->gen_loader) { - pr_warn("// TODO map_update_elem: idx %td key %d value==map_idx %td\n", - map - obj->maps, i, targ_map - obj->maps); - return -ENOTSUP; + bpf_gen__populate_outer_map(obj->gen_loader, + map - obj->maps, i, + targ_map - obj->maps); } else { err = bpf_map_update_elem(map->fd, &i, &fd, 0); } if (err) { err = -errno; pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n", - map->name, i, targ_map->name, - fd, err); + map->name, i, targ_map->name, fd, err); return err; } pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n", @@ -4975,6 +5008,59 @@ static int init_map_slots(struct bpf_object *obj, struct bpf_map *map) return 0; } +static int init_prog_array_slots(struct bpf_object *obj, struct bpf_map *map) +{ + const struct bpf_program *targ_prog; + unsigned int i; + int fd, err; + + if (obj->gen_loader) + return -ENOTSUP; + + for (i = 0; i < map->init_slots_sz; i++) { + if (!map->init_slots[i]) + continue; + + targ_prog = map->init_slots[i]; + fd = bpf_program__fd(targ_prog); + + err = bpf_map_update_elem(map->fd, &i, &fd, 0); + if (err) { + err = -errno; + pr_warn("map '%s': failed to initialize slot [%d] to prog '%s' fd=%d: %d\n", + map->name, i, targ_prog->name, fd, err); + return err; + } + pr_debug("map '%s': slot [%d] set to prog '%s' fd=%d\n", + 
map->name, i, targ_prog->name, fd); + } + + zfree(&map->init_slots); + map->init_slots_sz = 0; + + return 0; +} + +static int bpf_object_init_prog_arrays(struct bpf_object *obj) +{ + struct bpf_map *map; + int i, err; + + for (i = 0; i < obj->nr_maps; i++) { + map = &obj->maps[i]; + + if (!map->init_slots_sz || map->def.type != BPF_MAP_TYPE_PROG_ARRAY) + continue; + + err = init_prog_array_slots(obj, map); + if (err < 0) { + zclose(map->fd); + return err; + } + } + return 0; +} + static int bpf_object__create_maps(struct bpf_object *obj) { @@ -4987,6 +5073,26 @@ bpf_object__create_maps(struct bpf_object *obj) for (i = 0; i < obj->nr_maps; i++) { map = &obj->maps[i]; + /* To support old kernels, we skip creating global data maps + * (.rodata, .data, .kconfig, etc); later on, during program + * loading, if we detect that at least one of the to-be-loaded + * programs is referencing any global data map, we'll error + * out with program name and relocation index logged. + * This approach allows to accommodate Clang emitting + * unnecessary .rodata.str1.1 sections for string literals, + * but also it allows to have CO-RE applications that use + * global variables in some of BPF programs, but not others. + * If those global variable-using programs are not loaded at + * runtime due to bpf_program__set_autoload(prog, false), + * bpf_object loading will succeed just fine even on old + * kernels. + */ + if (bpf_map__is_internal(map) && + !kernel_supports(obj, FEAT_GLOBAL_DATA)) { + map->skipped = true; + continue; + } + retried = false; retry: if (map->pin_path) { @@ -5023,8 +5129,8 @@ retry: } } - if (map->init_slots_sz) { - err = init_map_slots(obj, map); + if (map->init_slots_sz && map->def.type != BPF_MAP_TYPE_PROG_ARRAY) { + err = init_map_in_map_slots(obj, map); if (err < 0) { zclose(map->fd); goto err_out; @@ -5096,15 +5202,18 @@ static int bpf_core_add_cands(struct bpf_core_cand *local_cand, struct bpf_core_cand_list *cands) { struct bpf_core_cand *new_cands, *cand; - const struct btf_type *t; - const char *targ_name; + const struct btf_type *t, *local_t; + const char *targ_name, *local_name; size_t targ_essent_len; int n, i; + local_t = btf__type_by_id(local_cand->btf, local_cand->id); + local_name = btf__str_by_offset(local_cand->btf, local_t->name_off); + n = btf__type_cnt(targ_btf); for (i = targ_start_id; i < n; i++) { t = btf__type_by_id(targ_btf, i); - if (btf_kind(t) != btf_kind(local_cand->t)) + if (btf_kind(t) != btf_kind(local_t)) continue; targ_name = btf__name_by_offset(targ_btf, t->name_off); @@ -5115,12 +5224,12 @@ static int bpf_core_add_cands(struct bpf_core_cand *local_cand, if (targ_essent_len != local_essent_len) continue; - if (strncmp(local_cand->name, targ_name, local_essent_len) != 0) + if (strncmp(local_name, targ_name, local_essent_len) != 0) continue; pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s in [%s]\n", - local_cand->id, btf_kind_str(local_cand->t), - local_cand->name, i, btf_kind_str(t), targ_name, + local_cand->id, btf_kind_str(local_t), + local_name, i, btf_kind_str(t), targ_name, targ_btf_name); new_cands = libbpf_reallocarray(cands->cands, cands->len + 1, sizeof(*cands->cands)); @@ -5129,8 +5238,6 @@ static int bpf_core_add_cands(struct bpf_core_cand *local_cand, cand = &new_cands[cands->len]; cand->btf = targ_btf; - cand->t = t; - cand->name = targ_name; cand->id = i; cands->cands = new_cands; @@ -5237,18 +5344,21 @@ bpf_core_find_cands(struct bpf_object *obj, const struct btf *local_btf, __u32 l struct bpf_core_cand local_cand 
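init_prog_array_slots() gives BPF_MAP_TYPE_PROG_ARRAY the same declarative treatment: entry-point programs listed in the map's values section are written into the array right after the object loads. A BPF-side sketch modeled on the selftests; program and map names are illustrative:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("socket")
int tail_target(void *ctx)
{
	return 0;
}

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 2);
	__uint(key_size, sizeof(__u32));
	__array(values, int (void *));	/* value type must be a func-proto pointer */
} prog_array SEC(".maps") = {
	.values = { [0] = (void *)&tail_target },
};

char LICENSE[] SEC("license") = "GPL";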
= {}; struct bpf_core_cand_list *cands; const struct btf *main_btf; + const struct btf_type *local_t; + const char *local_name; size_t local_essent_len; int err, i; local_cand.btf = local_btf; - local_cand.t = btf__type_by_id(local_btf, local_type_id); - if (!local_cand.t) + local_cand.id = local_type_id; + local_t = btf__type_by_id(local_btf, local_type_id); + if (!local_t) return ERR_PTR(-EINVAL); - local_cand.name = btf__name_by_offset(local_btf, local_cand.t->name_off); - if (str_is_empty(local_cand.name)) + local_name = btf__name_by_offset(local_btf, local_t->name_off); + if (str_is_empty(local_name)) return ERR_PTR(-EINVAL); - local_essent_len = bpf_core_essential_name_len(local_cand.name); + local_essent_len = bpf_core_essential_name_len(local_name); cands = calloc(1, sizeof(*cands)); if (!cands) @@ -5398,12 +5508,31 @@ static void *u32_as_hash_key(__u32 x) return (void *)(uintptr_t)x; } +static int record_relo_core(struct bpf_program *prog, + const struct bpf_core_relo *core_relo, int insn_idx) +{ + struct reloc_desc *relos, *relo; + + relos = libbpf_reallocarray(prog->reloc_desc, + prog->nr_reloc + 1, sizeof(*relos)); + if (!relos) + return -ENOMEM; + relo = &relos[prog->nr_reloc]; + relo->type = RELO_CORE; + relo->insn_idx = insn_idx; + relo->core_relo = core_relo; + prog->reloc_desc = relos; + prog->nr_reloc++; + return 0; +} + static int bpf_core_apply_relo(struct bpf_program *prog, const struct bpf_core_relo *relo, int relo_idx, const struct btf *local_btf, struct hashmap *cand_cache) { + struct bpf_core_spec specs_scratch[3] = {}; const void *type_key = u32_as_hash_key(relo->type_id); struct bpf_core_cand_list *cands = NULL; const char *prog_name = prog->name; @@ -5434,13 +5563,15 @@ static int bpf_core_apply_relo(struct bpf_program *prog, return -EINVAL; if (prog->obj->gen_loader) { - pr_warn("// TODO core_relo: prog %td insn[%d] %s kind %d\n", + const char *spec_str = btf__name_by_offset(local_btf, relo->access_str_off); + + pr_debug("record_relo_core: prog %td insn[%d] %s %s %s final insn_idx %d\n", prog - prog->obj->programs, relo->insn_off / 8, - local_name, relo->kind); - return -ENOTSUP; + btf_kind_str(local_type), local_name, spec_str, insn_idx); + return record_relo_core(prog, relo, insn_idx); } - if (relo->kind != BPF_TYPE_ID_LOCAL && + if (relo->kind != BPF_CORE_TYPE_ID_LOCAL && !hashmap__find(cand_cache, type_key, (void **)&cands)) { cands = bpf_core_find_cands(prog->obj, local_btf, local_id); if (IS_ERR(cands)) { @@ -5456,7 +5587,8 @@ static int bpf_core_apply_relo(struct bpf_program *prog, } } - return bpf_core_apply_relo_insn(prog_name, insn, insn_idx, relo, relo_idx, local_btf, cands); + return bpf_core_apply_relo_insn(prog_name, insn, insn_idx, relo, + relo_idx, local_btf, cands, specs_scratch); } static int @@ -5586,6 +5718,13 @@ bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog) insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE; insn[0].imm = relo->map_idx; } else { + const struct bpf_map *map = &obj->maps[relo->map_idx]; + + if (map->skipped) { + pr_warn("prog '%s': relo #%d: kernel doesn't support global data\n", + prog->name, i); + return -ENOTSUP; + } insn[0].src_reg = BPF_PSEUDO_MAP_VALUE; insn[0].imm = obj->maps[relo->map_idx].fd; } @@ -5634,6 +5773,9 @@ bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog) case RELO_CALL: /* handled already */ break; + case RELO_CORE: + /* will be handled by bpf_program_record_relos() */ + break; default: pr_warn("prog '%s': relo #%d: bad relo type %d\n", prog->name, i, 
relo->type); @@ -5797,6 +5939,8 @@ static int cmp_relo_by_insn_idx(const void *key, const void *elem) static struct reloc_desc *find_prog_insn_relo(const struct bpf_program *prog, size_t insn_idx) { + if (!prog->nr_reloc) + return NULL; return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc, sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx); } @@ -5812,8 +5956,9 @@ static int append_subprog_relos(struct bpf_program *main_prog, struct bpf_progra relos = libbpf_reallocarray(main_prog->reloc_desc, new_cnt, sizeof(*relos)); if (!relos) return -ENOMEM; - memcpy(relos + main_prog->nr_reloc, subprog->reloc_desc, - sizeof(*relos) * subprog->nr_reloc); + if (subprog->nr_reloc) + memcpy(relos + main_prog->nr_reloc, subprog->reloc_desc, + sizeof(*relos) * subprog->nr_reloc); for (i = main_prog->nr_reloc; i < new_cnt; i++) relos[i].insn_idx += subprog->sub_insn_off; @@ -6071,6 +6216,35 @@ bpf_object__free_relocs(struct bpf_object *obj) } } +static int cmp_relocs(const void *_a, const void *_b) +{ + const struct reloc_desc *a = _a; + const struct reloc_desc *b = _b; + + if (a->insn_idx != b->insn_idx) + return a->insn_idx < b->insn_idx ? -1 : 1; + + /* no two relocations should have the same insn_idx, but ... */ + if (a->type != b->type) + return a->type < b->type ? -1 : 1; + + return 0; +} + +static void bpf_object__sort_relos(struct bpf_object *obj) +{ + int i; + + for (i = 0; i < obj->nr_programs; i++) { + struct bpf_program *p = &obj->programs[i]; + + if (!p->nr_reloc) + continue; + + qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs); + } +} + static int bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path) { @@ -6085,6 +6259,8 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path) err); return err; } + if (obj->gen_loader) + bpf_object__sort_relos(obj); } /* Before relocating calls pre-process relocations and mark @@ -6120,6 +6296,8 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path) */ if (prog_is_subprog(obj, prog)) continue; + if (!prog->load) + continue; err = bpf_object__relocate_calls(obj, prog); if (err) { @@ -6133,6 +6311,8 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path) prog = &obj->programs[i]; if (prog_is_subprog(obj, prog)) continue; + if (!prog->load) + continue; err = bpf_object__relocate_data(obj, prog); if (err) { pr_warn("prog '%s': failed to relocate data references: %d\n", @@ -6155,9 +6335,11 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj, int i, j, nrels, new_sz; const struct btf_var_secinfo *vi = NULL; const struct btf_type *sec, *var, *def; - struct bpf_map *map = NULL, *targ_map; + struct bpf_map *map = NULL, *targ_map = NULL; + struct bpf_program *targ_prog = NULL; + bool is_prog_array, is_map_in_map; const struct btf_member *member; - const char *name, *mname; + const char *name, *mname, *type; unsigned int moff; Elf64_Sym *sym; Elf64_Rel *rel; @@ -6184,11 +6366,6 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj, return -LIBBPF_ERRNO__FORMAT; } name = elf_sym_str(obj, sym->st_name) ?: "<?>"; - if (sym->st_shndx != obj->efile.btf_maps_shndx) { - pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n", - i, name); - return -LIBBPF_ERRNO__RELOC; - } pr_debug(".maps relo #%d: for %zd value %zd rel->r_offset %zu name %d ('%s')\n", i, (ssize_t)(rel->r_info >> 32), (size_t)sym->st_value, @@ -6210,19 +6387,45 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj, return -EINVAL; } - if 
(!bpf_map_type__is_map_in_map(map->def.type)) - return -EINVAL; - if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS && - map->def.key_size != sizeof(int)) { - pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n", - i, map->name, sizeof(int)); + is_map_in_map = bpf_map_type__is_map_in_map(map->def.type); + is_prog_array = map->def.type == BPF_MAP_TYPE_PROG_ARRAY; + type = is_map_in_map ? "map" : "prog"; + if (is_map_in_map) { + if (sym->st_shndx != obj->efile.btf_maps_shndx) { + pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n", + i, name); + return -LIBBPF_ERRNO__RELOC; + } + if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS && + map->def.key_size != sizeof(int)) { + pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n", + i, map->name, sizeof(int)); + return -EINVAL; + } + targ_map = bpf_object__find_map_by_name(obj, name); + if (!targ_map) { + pr_warn(".maps relo #%d: '%s' isn't a valid map reference\n", + i, name); + return -ESRCH; + } + } else if (is_prog_array) { + targ_prog = bpf_object__find_program_by_name(obj, name); + if (!targ_prog) { + pr_warn(".maps relo #%d: '%s' isn't a valid program reference\n", + i, name); + return -ESRCH; + } + if (targ_prog->sec_idx != sym->st_shndx || + targ_prog->sec_insn_off * 8 != sym->st_value || + prog_is_subprog(obj, targ_prog)) { + pr_warn(".maps relo #%d: '%s' isn't an entry-point program\n", + i, name); + return -LIBBPF_ERRNO__RELOC; + } + } else { return -EINVAL; } - targ_map = bpf_object__find_map_by_name(obj, name); - if (!targ_map) - return -ESRCH; - var = btf__type_by_id(obj->btf, vi->type); def = skip_mods_and_typedefs(obj->btf, var->type, NULL); if (btf_vlen(def) == 0) @@ -6253,30 +6456,15 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj, (new_sz - map->init_slots_sz) * host_ptr_sz); map->init_slots_sz = new_sz; } - map->init_slots[moff] = targ_map; + map->init_slots[moff] = is_map_in_map ? (void *)targ_map : (void *)targ_prog; - pr_debug(".maps relo #%d: map '%s' slot [%d] points to map '%s'\n", - i, map->name, moff, name); + pr_debug(".maps relo #%d: map '%s' slot [%d] points to %s '%s'\n", + i, map->name, moff, type, name); } return 0; } -static int cmp_relocs(const void *_a, const void *_b) -{ - const struct reloc_desc *a = _a; - const struct reloc_desc *b = _b; - - if (a->insn_idx != b->insn_idx) - return a->insn_idx < b->insn_idx ? -1 : 1; - - /* no two relocations should have the same insn_idx, but ... */ - if (a->type != b->type) - return a->type < b->type ? 
-1 : 1; - - return 0; -} - static int bpf_object__collect_relos(struct bpf_object *obj) { int i, err; @@ -6309,14 +6497,7 @@ static int bpf_object__collect_relos(struct bpf_object *obj) return err; } - for (i = 0; i < obj->nr_programs; i++) { - struct bpf_program *p = &obj->programs[i]; - - if (!p->nr_reloc) - continue; - - qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs); - } + bpf_object__sort_relos(obj); return 0; } @@ -6419,8 +6600,10 @@ static int bpf_object_load_prog_instance(struct bpf_object *obj, struct bpf_prog const char *prog_name = NULL; char *cp, errmsg[STRERR_BUFSIZE]; size_t log_buf_size = 0; - char *log_buf = NULL; + char *log_buf = NULL, *tmp; int btf_fd, ret, err; + bool own_log_buf = true; + __u32 log_level = prog->log_level; if (prog->type == BPF_PROG_TYPE_UNSPEC) { /* @@ -6438,7 +6621,6 @@ static int bpf_object_load_prog_instance(struct bpf_object *obj, struct bpf_prog load_attr.expected_attach_type = prog->expected_attach_type; if (kernel_supports(obj, FEAT_PROG_NAME)) prog_name = prog->name; - load_attr.attach_btf_id = prog->attach_btf_id; load_attr.attach_prog_fd = prog->attach_prog_fd; load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd; load_attr.attach_btf_id = prog->attach_btf_id; @@ -6456,7 +6638,7 @@ static int bpf_object_load_prog_instance(struct bpf_object *obj, struct bpf_prog load_attr.line_info_rec_size = prog->line_info_rec_size; load_attr.line_info_cnt = prog->line_info_cnt; } - load_attr.log_level = prog->log_level; + load_attr.log_level = log_level; load_attr.prog_flags = prog->prog_flags; load_attr.fd_array = obj->fd_array; @@ -6477,22 +6659,45 @@ static int bpf_object_load_prog_instance(struct bpf_object *obj, struct bpf_prog *prog_fd = -1; return 0; } -retry_load: - if (log_buf_size) { - log_buf = malloc(log_buf_size); - if (!log_buf) - return -ENOMEM; - *log_buf = 0; +retry_load: + /* if log_level is zero, we don't request logs initially even if + * custom log_buf is specified; if the program load fails, then we'll + * bump log_level to 1 and use either custom log_buf or we'll allocate + * our own and retry the load to get details on what failed + */ + if (log_level) { + if (prog->log_buf) { + log_buf = prog->log_buf; + log_buf_size = prog->log_size; + own_log_buf = false; + } else if (obj->log_buf) { + log_buf = obj->log_buf; + log_buf_size = obj->log_size; + own_log_buf = false; + } else { + log_buf_size = max((size_t)BPF_LOG_BUF_SIZE, log_buf_size * 2); + tmp = realloc(log_buf, log_buf_size); + if (!tmp) { + ret = -ENOMEM; + goto out; + } + log_buf = tmp; + log_buf[0] = '\0'; + own_log_buf = true; + } } load_attr.log_buf = log_buf; load_attr.log_size = log_buf_size; - ret = bpf_prog_load(prog->type, prog_name, license, insns, insns_cnt, &load_attr); + load_attr.log_level = log_level; + ret = bpf_prog_load(prog->type, prog_name, license, insns, insns_cnt, &load_attr); if (ret >= 0) { - if (log_buf && load_attr.log_level) - pr_debug("verifier log:\n%s", log_buf); + if (log_level && own_log_buf) { + pr_debug("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n", + prog->name, log_buf); + } if (obj->has_rodata && kernel_supports(obj, FEAT_PROG_BIND_MAP)) { struct bpf_map *map; @@ -6505,8 +6710,8 @@ retry_load: if (bpf_prog_bind_map(ret, bpf_map__fd(map), NULL)) { cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); - pr_warn("prog '%s': failed to bind .rodata map: %s\n", - prog->name, cp); + pr_warn("prog '%s': failed to bind map '%s': %s\n", + prog->name, map->real_name, cp); /* Don't fail hard if can't 
bind rodata. */ } } @@ -6517,49 +6722,41 @@ retry_load: goto out; } - if (!log_buf || errno == ENOSPC) { - log_buf_size = max((size_t)BPF_LOG_BUF_SIZE, - log_buf_size << 1); - - free(log_buf); + if (log_level == 0) { + log_level = 1; goto retry_load; } - ret = errno ? -errno : -LIBBPF_ERRNO__LOAD; + /* On ENOSPC, increase log buffer size and retry, unless custom + * log_buf is specified. + * Be careful to not overflow u32, though. Kernel's log buf size limit + * isn't part of UAPI so it can always be bumped to full 4GB. So don't + * multiply by 2 unless we are sure we'll fit within 32 bits. + * Currently, we'll get -EINVAL when we reach (UINT_MAX >> 2). + */ + if (own_log_buf && errno == ENOSPC && log_buf_size <= UINT_MAX / 2) + goto retry_load; + + ret = -errno; cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); - pr_warn("load bpf program failed: %s\n", cp); + pr_warn("prog '%s': BPF program load failed: %s\n", prog->name, cp); pr_perm_msg(ret); - if (log_buf && log_buf[0] != '\0') { - ret = -LIBBPF_ERRNO__VERIFY; - pr_warn("-- BEGIN DUMP LOG ---\n"); - pr_warn("\n%s\n", log_buf); - pr_warn("-- END LOG --\n"); - } else if (insns_cnt >= BPF_MAXINSNS) { - pr_warn("Program too large (%d insns), at most %d insns\n", - insns_cnt, BPF_MAXINSNS); - ret = -LIBBPF_ERRNO__PROG2BIG; - } else if (prog->type != BPF_PROG_TYPE_KPROBE) { - /* Wrong program type? */ - int fd; - - load_attr.expected_attach_type = 0; - load_attr.log_buf = NULL; - load_attr.log_size = 0; - fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, prog_name, license, - insns, insns_cnt, &load_attr); - if (fd >= 0) { - close(fd); - ret = -LIBBPF_ERRNO__PROGTYPE; - goto out; - } + if (own_log_buf && log_buf && log_buf[0] != '\0') { + pr_warn("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n", + prog->name, log_buf); + } + if (insns_cnt >= BPF_MAXINSNS) { + pr_warn("prog '%s': program too large (%d insns), at most %d insns\n", + prog->name, insns_cnt, BPF_MAXINSNS); } out: - free(log_buf); + if (own_log_buf) + free(log_buf); return ret; } -static int bpf_program__record_externs(struct bpf_program *prog) +static int bpf_program_record_relos(struct bpf_program *prog) { struct bpf_object *obj = prog->obj; int i; @@ -6581,6 +6778,17 @@ static int bpf_program__record_externs(struct bpf_program *prog) ext->is_weak, false, BTF_KIND_FUNC, relo->insn_idx); break; + case RELO_CORE: { + struct bpf_core_relo cr = { + .insn_off = relo->insn_idx * 8, + .type_id = relo->core_relo->type_id, + .access_str_off = relo->core_relo->access_str_off, + .kind = relo->core_relo->kind, + }; + + bpf_gen__record_relo_core(obj->gen_loader, &cr); + break; + } default: continue; } @@ -6620,7 +6828,7 @@ static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog prog->name, prog->instances.nr); } if (obj->gen_loader) - bpf_program__record_externs(prog); + bpf_program_record_relos(prog); err = bpf_object_load_prog_instance(obj, prog, prog->insns, prog->insns_cnt, license, kern_ver, &fd); @@ -6749,14 +6957,16 @@ static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object return 0; } -static struct bpf_object * -__bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz, - const struct bpf_object_open_opts *opts) +static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf, size_t obj_buf_sz, + const struct bpf_object_open_opts *opts) { const char *obj_name, *kconfig, *btf_tmp_path; struct bpf_object *obj; char tmp_name[64]; int err; + char *log_buf; + size_t log_size; + __u32 
log_level; if (elf_version(EV_CURRENT) == EV_NONE) { pr_warn("failed to init libelf for %s\n", @@ -6779,10 +6989,22 @@ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz, pr_debug("loading object '%s' from buffer\n", obj_name); } + log_buf = OPTS_GET(opts, kernel_log_buf, NULL); + log_size = OPTS_GET(opts, kernel_log_size, 0); + log_level = OPTS_GET(opts, kernel_log_level, 0); + if (log_size > UINT_MAX) + return ERR_PTR(-EINVAL); + if (log_size && !log_buf) + return ERR_PTR(-EINVAL); + obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name); if (IS_ERR(obj)) return obj; + obj->log_buf = log_buf; + obj->log_size = log_size; + obj->log_level = log_level; + btf_tmp_path = OPTS_GET(opts, btf_custom_path, NULL); if (btf_tmp_path) { if (strlen(btf_tmp_path) >= PATH_MAX) { @@ -6836,7 +7058,7 @@ __bpf_object__open_xattr(struct bpf_object_open_attr *attr, int flags) return NULL; pr_debug("loading %s\n", attr->file); - return __bpf_object__open(attr->file, NULL, 0, &opts); + return bpf_object_open(attr->file, NULL, 0, &opts); } struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr) @@ -6862,7 +7084,7 @@ bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts) pr_debug("loading %s\n", path); - return libbpf_ptr(__bpf_object__open(path, NULL, 0, opts)); + return libbpf_ptr(bpf_object_open(path, NULL, 0, opts)); } struct bpf_object * @@ -6872,7 +7094,7 @@ bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz, if (!obj_buf || obj_buf_sz == 0) return libbpf_err_ptr(-EINVAL); - return libbpf_ptr(__bpf_object__open(NULL, obj_buf, obj_buf_sz, opts)); + return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, opts)); } struct bpf_object * @@ -6889,7 +7111,7 @@ bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz, if (!obj_buf || obj_buf_sz == 0) return errno = EINVAL, NULL; - return libbpf_ptr(__bpf_object__open(NULL, obj_buf, obj_buf_sz, &opts)); + return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, &opts)); } static int bpf_object_unload(struct bpf_object *obj) @@ -6920,10 +7142,6 @@ static int bpf_object__sanitize_maps(struct bpf_object *obj) bpf_object__for_each_map(m, obj) { if (!bpf_map__is_internal(m)) continue; - if (!kernel_supports(obj, FEAT_GLOBAL_DATA)) { - pr_warn("kernel doesn't support global data\n"); - return -ENOTSUP; - } if (!kernel_supports(obj, FEAT_ARRAY_MMAP)) m->def.map_flags ^= BPF_F_MMAPABLE; } @@ -7246,14 +7464,10 @@ static int bpf_object__resolve_externs(struct bpf_object *obj, return 0; } -int bpf_object__load_xattr(struct bpf_object_load_attr *attr) +static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const char *target_btf_path) { - struct bpf_object *obj; int err, i; - if (!attr) - return libbpf_err(-EINVAL); - obj = attr->obj; if (!obj) return libbpf_err(-EINVAL); @@ -7263,7 +7477,7 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr) } if (obj->gen_loader) - bpf_gen__init(obj->gen_loader, attr->log_level, obj->nr_programs, obj->nr_maps); + bpf_gen__init(obj->gen_loader, extra_log_level, obj->nr_programs, obj->nr_maps); err = bpf_object__probe_loading(obj); err = err ? : bpf_object__load_vmlinux_btf(obj, false); @@ -7272,8 +7486,9 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr) err = err ? : bpf_object__sanitize_maps(obj); err = err ? : bpf_object__init_kern_struct_ops_maps(obj); err = err ? : bpf_object__create_maps(obj); - err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : attr->target_btf_path); - err = err ? 
: bpf_object__load_progs(obj, attr->log_level); + err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path); + err = err ? : bpf_object__load_progs(obj, extra_log_level); + err = err ? : bpf_object_init_prog_arrays(obj); if (obj->gen_loader) { /* reset FDs */ @@ -7317,13 +7532,14 @@ out: return libbpf_err(err); } -int bpf_object__load(struct bpf_object *obj) +int bpf_object__load_xattr(struct bpf_object_load_attr *attr) { - struct bpf_object_load_attr attr = { - .obj = obj, - }; + return bpf_object_load(attr->obj, attr->log_level, attr->target_btf_path); +} - return bpf_object__load_xattr(&attr); +int bpf_object__load(struct bpf_object *obj) +{ + return bpf_object_load(obj, 0, NULL); } static int make_parent_dir(const char *path) @@ -7712,6 +7928,9 @@ int bpf_object__pin_maps(struct bpf_object *obj, const char *path) char *pin_path = NULL; char buf[PATH_MAX]; + if (map->skipped) + continue; + if (path) { int len; @@ -8296,12 +8515,46 @@ __u32 bpf_program__flags(const struct bpf_program *prog) return prog->prog_flags; } -int bpf_program__set_extra_flags(struct bpf_program *prog, __u32 extra_flags) +int bpf_program__set_flags(struct bpf_program *prog, __u32 flags) +{ + if (prog->obj->loaded) + return libbpf_err(-EBUSY); + + prog->prog_flags = flags; + return 0; +} + +__u32 bpf_program__log_level(const struct bpf_program *prog) +{ + return prog->log_level; +} + +int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_level) { if (prog->obj->loaded) return libbpf_err(-EBUSY); - prog->prog_flags |= extra_flags; + prog->log_level = log_level; + return 0; +} + +const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size) +{ + *log_size = prog->log_size; + return prog->log_buf; +} + +int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size) +{ + if (log_size && !log_buf) + return -EINVAL; + if (log_size > UINT_MAX) + return -EINVAL; + if (prog->obj->loaded) + return -EBUSY; + + prog->log_buf = log_buf; + prog->log_size = log_size; return 0; } diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 4ec69f224342..a8b894dae633 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -24,6 +24,10 @@ extern "C" { #endif +LIBBPF_API __u32 libbpf_major_version(void); +LIBBPF_API __u32 libbpf_minor_version(void); +LIBBPF_API const char *libbpf_version_string(void); + enum libbpf_errno { __LIBBPF_ERRNO__START = 4000, @@ -104,12 +108,73 @@ struct bpf_object_open_opts { * struct_ops, etc) will need actual kernel BTF at /sys/kernel/btf/vmlinux. */ const char *btf_custom_path; + /* Pointer to a buffer for storing kernel logs for applicable BPF + * commands. Valid kernel_log_size has to be specified as well and is + * passed through to the bpf() syscall. Keep in mind that kernel might + * fail operation with -ENOSPC error if provided buffer is too small + * to contain entire log output. + * See the comment below for kernel_log_level for interaction between + * log_buf and log_level settings. + * + * If specified, this log buffer will be passed for: + * - each BPF program load (BPF_PROG_LOAD) attempt, unless overridden + * with bpf_program__set_log_buf() at the per-program level, to get + * BPF verifier log output. + * - during BPF object's BTF load into kernel (BPF_BTF_LOAD) to get + * BTF sanity checking log. 
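From the application side, the kernel_log_* options described in this comment are set at open time. A minimal usage sketch (the object path "prog.bpf.o" and the buffer size are illustrative, not part of this diff):

#include <bpf/libbpf.h>

static char kernel_log[64 * 1024];	/* arbitrary size for illustration */

struct bpf_object *open_with_kernel_log(void)
{
	/* request verbose verifier/BTF logs from the first load attempt */
	LIBBPF_OPTS(bpf_object_open_opts, opts,
		.kernel_log_buf = kernel_log,
		.kernel_log_size = sizeof(kernel_log),
		.kernel_log_level = 1,
	);

	/* "prog.bpf.o" is a placeholder path */
	return bpf_object__open_file("prog.bpf.o", &opts);
}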
+ * + * Each BPF command (BPF_BTF_LOAD or BPF_PROG_LOAD) will overwrite + * previous contents, so if you need more fine-grained control, set + * per-program buffer with bpf_program__set_log_buf() to preserve each + * individual program's verification log. Keep using kernel_log_buf + * for BTF verification log, if necessary. + */ + char *kernel_log_buf; + size_t kernel_log_size; + /* + * Log level can be set independently from log buffer. Log_level=0 + * means that libbpf will attempt loading BTF or program without any + * logging requested, but will retry with either its own or custom log + * buffer, if provided, and log_level=1 on any error. + * And vice versa, setting log_level>0 will request BTF or prog + * loading with verbose log from the first attempt (and as such also + * for successfully loaded BTF or program), and the actual log buffer + * could be either libbpf's own auto-allocated log buffer, if + * kernel_log_buf is NULL, or user-provided custom kernel_log_buf. + * If user didn't provide custom log buffer, libbpf will emit captured + * logs through its print callback. + */ + __u32 kernel_log_level; + + size_t :0; }; -#define bpf_object_open_opts__last_field btf_custom_path +#define bpf_object_open_opts__last_field kernel_log_level LIBBPF_API struct bpf_object *bpf_object__open(const char *path); + +/** + * @brief **bpf_object__open_file()** creates a bpf_object by opening + * the BPF ELF object file pointed to by the passed path and loading it + * into memory. + * @param path BPF object file path + * @param opts options for how to load the bpf object, this parameter is + * optional and can be set to NULL + * @return pointer to the new bpf_object; or NULL is returned on error, + * error code is stored in errno + */ LIBBPF_API struct bpf_object * bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts); + +/** + * @brief **bpf_object__open_mem()** creates a bpf_object by reading + * the BPF object's raw bytes from a memory buffer containing a valid + * BPF ELF object file. + * @param obj_buf pointer to the buffer containing ELF file bytes + * @param obj_buf_sz number of bytes in the buffer + * @param opts options for how to load the bpf object + * @return pointer to the new bpf_object; or NULL is returned on error, + * error code is stored in errno + */ LIBBPF_API struct bpf_object * bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz, const struct bpf_object_open_opts *opts); @@ -149,6 +214,7 @@ struct bpf_object_load_attr { /* Load/unload object into/from kernel */ LIBBPF_API int bpf_object__load(struct bpf_object *obj); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_object__load() instead") LIBBPF_API int bpf_object__load_xattr(struct bpf_object_load_attr *attr); LIBBPF_DEPRECATED_SINCE(0, 6, "bpf_object__unload() is deprecated, use bpf_object__close() instead") LIBBPF_API int bpf_object__unload(struct bpf_object *obj); @@ -344,10 +410,41 @@ struct bpf_uprobe_opts { }; #define bpf_uprobe_opts__last_field retprobe +/** + * @brief **bpf_program__attach_uprobe()** attaches a BPF program + * to the userspace function which is found by binary path and + * offset. You can optionally specify a particular process to attach + * to. You can also optionally attach the program to the function + * exit instead of entry. 
+ * + * @param prog BPF program to attach + * @param retprobe Attach to function exit + * @param pid Process ID to attach the uprobe to, 0 for self (own process), + * -1 for all processes + * @param binary_path Path to binary that contains the function symbol + * @param func_offset Offset within the binary of the function symbol + * @return Reference to the newly created BPF link; or NULL is returned on error, + * error code is stored in errno + */ LIBBPF_API struct bpf_link * bpf_program__attach_uprobe(const struct bpf_program *prog, bool retprobe, pid_t pid, const char *binary_path, size_t func_offset); + +/** + * @brief **bpf_program__attach_uprobe_opts()** is just like + * bpf_program__attach_uprobe() except with an options struct + * for various configurations. + * + * @param prog BPF program to attach + * @param pid Process ID to attach the uprobe to, 0 for self (own process), + * -1 for all processes + * @param binary_path Path to binary that contains the function symbol + * @param func_offset Offset within the binary of the function symbol + * @param opts Options for altering program attachment + * @return Reference to the newly created BPF link; or NULL is returned on error, + * error code is stored in errno + */ LIBBPF_API struct bpf_link * bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid, const char *binary_path, size_t func_offset, @@ -494,7 +591,16 @@ bpf_program__set_expected_attach_type(struct bpf_program *prog, enum bpf_attach_type type); LIBBPF_API __u32 bpf_program__flags(const struct bpf_program *prog); -LIBBPF_API int bpf_program__set_extra_flags(struct bpf_program *prog, __u32 extra_flags); +LIBBPF_API int bpf_program__set_flags(struct bpf_program *prog, __u32 flags); + +/* Per-program log level and log buffer getters/setters. + * See bpf_object_open_opts comments regarding log_level and log_buf + * interactions. 
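A sketch of how the per-program setters declared next can isolate one program's verifier log from the object-level buffer; the program name "handle_exec" and the buffer size are hypothetical:

#include <errno.h>
#include <bpf/libbpf.h>

static char prog_log[16 * 1024];

int isolate_prog_log(struct bpf_object *obj)
{
	struct bpf_program *prog;

	prog = bpf_object__find_program_by_name(obj, "handle_exec");
	if (!prog)
		return -ENOENT;

	/* both setters return -EBUSY once the object is loaded */
	bpf_program__set_log_buf(prog, prog_log, sizeof(prog_log));
	bpf_program__set_log_level(prog, 2);	/* instruction-level verifier log */
	return 0;
}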
+ */ +LIBBPF_API __u32 bpf_program__log_level(const struct bpf_program *prog); +LIBBPF_API int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_level); +LIBBPF_API const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size); +LIBBPF_API int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size); LIBBPF_API int bpf_program__set_attach_target(struct bpf_program *prog, int attach_prog_fd, @@ -676,6 +782,7 @@ struct bpf_prog_load_attr { int prog_flags; }; +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_object__open() and bpf_object__load() instead") LIBBPF_API int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr, struct bpf_object **pobj, int *prog_fd); LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__open() and bpf_object__load() instead") @@ -1031,11 +1138,11 @@ struct bpf_object_skeleton { struct bpf_object **obj; int map_cnt; - int map_skel_sz; /* sizeof(struct bpf_skeleton_map) */ + int map_skel_sz; /* sizeof(struct bpf_map_skeleton) */ struct bpf_map_skeleton *maps; int prog_cnt; - int prog_skel_sz; /* sizeof(struct bpf_skeleton_prog) */ + int prog_skel_sz; /* sizeof(struct bpf_prog_skeleton) */ struct bpf_prog_skeleton *progs; }; diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 6a59514a48cf..4d483af7dba6 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -391,6 +391,7 @@ LIBBPF_0.6.0 { global: bpf_map__map_extra; bpf_map__set_map_extra; + bpf_map_create; bpf_object__next_map; bpf_object__next_program; bpf_object__prev_map; @@ -400,7 +401,7 @@ LIBBPF_0.6.0 { bpf_program__flags; bpf_program__insn_cnt; bpf_program__insns; - bpf_program__set_extra_flags; + bpf_program__set_flags; btf__add_btf; btf__add_decl_tag; btf__add_type_tag; @@ -410,8 +411,20 @@ LIBBPF_0.6.0 { btf__type_cnt; btf_dump__new; btf_dump__new_deprecated; + libbpf_major_version; + libbpf_minor_version; + libbpf_version_string; perf_buffer__new; perf_buffer__new_deprecated; perf_buffer__new_raw; perf_buffer__new_raw_deprecated; } LIBBPF_0.5.0; + +LIBBPF_0.7.0 { + global: + bpf_btf_load; + bpf_program__log_buf; + bpf_program__log_level; + bpf_program__set_log_buf; + bpf_program__set_log_level; +}; diff --git a/tools/lib/bpf/libbpf_common.h b/tools/lib/bpf/libbpf_common.h index b21cefc9c3b6..000e37798ff2 100644 --- a/tools/lib/bpf/libbpf_common.h +++ b/tools/lib/bpf/libbpf_common.h @@ -40,6 +40,11 @@ #else #define __LIBBPF_MARK_DEPRECATED_0_7(X) #endif +#if __LIBBPF_CURRENT_VERSION_GEQ(0, 8) +#define __LIBBPF_MARK_DEPRECATED_0_8(X) X +#else +#define __LIBBPF_MARK_DEPRECATED_0_8(X) +#endif /* This set of internal macros allows to do "function overloading" based on * number of arguments provided by user in a backwards-compatible way during the diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h index f7ac349650a1..355c41019aed 100644 --- a/tools/lib/bpf/libbpf_internal.h +++ b/tools/lib/bpf/libbpf_internal.h @@ -172,7 +172,7 @@ static inline void *libbpf_reallocarray(void *ptr, size_t nmemb, size_t size) struct btf; struct btf_type; -struct btf_type *btf_type_by_id(struct btf *btf, __u32 type_id); +struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id); const char *btf_kind_str(const struct btf_type *t); const struct btf_type *skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id); @@ -277,27 +277,7 @@ int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz); int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz); int libbpf__load_raw_btf(const 
char *raw_types, size_t types_len, const char *str_sec, size_t str_len); - -struct bpf_create_map_params { - const char *name; - enum bpf_map_type map_type; - __u32 map_flags; - __u32 key_size; - __u32 value_size; - __u32 max_entries; - __u32 numa_node; - __u32 btf_fd; - __u32 btf_key_type_id; - __u32 btf_value_type_id; - __u32 map_ifindex; - union { - __u32 inner_map_fd; - __u32 btf_vmlinux_value_type_id; - }; - __u64 map_extra; -}; - -int libbpf__bpf_create_map_xattr(const struct bpf_create_map_params *create_attr); +int btf_load_into_kernel(struct btf *btf, char *log_buf, size_t log_sz, __u32 log_level); struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf); void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type, diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c index 02c401e314c7..4bdec69523a7 100644 --- a/tools/lib/bpf/libbpf_probes.c +++ b/tools/lib/bpf/libbpf_probes.c @@ -164,7 +164,7 @@ int libbpf__load_raw_btf(const char *raw_types, size_t types_len, memcpy(raw_btf + hdr.hdr_len, raw_types, hdr.type_len); memcpy(raw_btf + hdr.hdr_len + hdr.type_len, str_sec, hdr.str_len); - btf_fd = bpf_load_btf(raw_btf, btf_len, NULL, 0, false); + btf_fd = bpf_btf_load(raw_btf, btf_len, NULL); free(raw_btf); return btf_fd; @@ -201,7 +201,6 @@ bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex) { int key_size, value_size, max_entries, map_flags; __u32 btf_key_type_id = 0, btf_value_type_id = 0; - struct bpf_create_map_attr attr = {}; int fd = -1, btf_fd = -1, fd_inner; key_size = sizeof(__u32); @@ -271,34 +270,35 @@ bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex) if (map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS || map_type == BPF_MAP_TYPE_HASH_OF_MAPS) { + LIBBPF_OPTS(bpf_map_create_opts, opts); + /* TODO: probe for device, once libbpf has a function to create * map-in-map for offload */ if (ifindex) return false; - fd_inner = bpf_create_map(BPF_MAP_TYPE_HASH, - sizeof(__u32), sizeof(__u32), 1, 0); + fd_inner = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, + sizeof(__u32), sizeof(__u32), 1, NULL); if (fd_inner < 0) return false; - fd = bpf_create_map_in_map(map_type, NULL, sizeof(__u32), - fd_inner, 1, 0); + + opts.inner_map_fd = fd_inner; + fd = bpf_map_create(map_type, NULL, sizeof(__u32), sizeof(__u32), 1, &opts); close(fd_inner); } else { + LIBBPF_OPTS(bpf_map_create_opts, opts); + /* Note: No other restriction on map type probes for offload */ - attr.map_type = map_type; - attr.key_size = key_size; - attr.value_size = value_size; - attr.max_entries = max_entries; - attr.map_flags = map_flags; - attr.map_ifindex = ifindex; + opts.map_flags = map_flags; + opts.map_ifindex = ifindex; if (btf_fd >= 0) { - attr.btf_fd = btf_fd; - attr.btf_key_type_id = btf_key_type_id; - attr.btf_value_type_id = btf_value_type_id; + opts.btf_fd = btf_fd; + opts.btf_key_type_id = btf_key_type_id; + opts.btf_value_type_id = btf_value_type_id; } - fd = bpf_create_map_xattr(&attr); + fd = bpf_map_create(map_type, NULL, key_size, value_size, max_entries, &opts); } if (fd >= 0) close(fd); diff --git a/tools/lib/bpf/libbpf_version.h b/tools/lib/bpf/libbpf_version.h index dd56d76f291c..0fefefc3500b 100644 --- a/tools/lib/bpf/libbpf_version.h +++ b/tools/lib/bpf/libbpf_version.h @@ -4,6 +4,6 @@ #define __LIBBPF_VERSION_H #define LIBBPF_MAJOR_VERSION 0 -#define LIBBPF_MINOR_VERSION 6 +#define LIBBPF_MINOR_VERSION 7 #endif /* __LIBBPF_VERSION_H */ diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c index 594b206fa674..9aa016fb55aa 100644 --- 
a/tools/lib/bpf/linker.c +++ b/tools/lib/bpf/linker.c @@ -210,6 +210,7 @@ void bpf_linker__free(struct bpf_linker *linker) } free(linker->secs); + free(linker->glob_syms); free(linker); } @@ -1999,7 +2000,7 @@ add_sym: static int linker_append_elf_relos(struct bpf_linker *linker, struct src_obj *obj) { struct src_sec *src_symtab = &obj->secs[obj->symtab_sec_idx]; - struct dst_sec *dst_symtab = &linker->secs[linker->symtab_sec_idx]; + struct dst_sec *dst_symtab; int i, err; for (i = 1; i < obj->sec_cnt; i++) { @@ -2032,6 +2033,9 @@ static int linker_append_elf_relos(struct bpf_linker *linker, struct src_obj *ob return -1; } + /* add_dst_sec() above could have invalidated linker->secs */ + dst_symtab = &linker->secs[linker->symtab_sec_idx]; + /* shdr->sh_link points to SYMTAB */ dst_sec->shdr->sh_link = linker->symtab_sec_idx; diff --git a/tools/lib/bpf/relo_core.c b/tools/lib/bpf/relo_core.c index b5b8956a1be8..32464f0ab4b1 100644 --- a/tools/lib/bpf/relo_core.c +++ b/tools/lib/bpf/relo_core.c @@ -1,6 +1,60 @@ // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* Copyright (c) 2019 Facebook */ +#ifdef __KERNEL__ +#include <linux/bpf.h> +#include <linux/btf.h> +#include <linux/string.h> +#include <linux/bpf_verifier.h> +#include "relo_core.h" + +static const char *btf_kind_str(const struct btf_type *t) +{ + return btf_type_str(t); +} + +static bool is_ldimm64_insn(struct bpf_insn *insn) +{ + return insn->code == (BPF_LD | BPF_IMM | BPF_DW); +} + +static const struct btf_type * +skip_mods_and_typedefs(const struct btf *btf, u32 id, u32 *res_id) +{ + return btf_type_skip_modifiers(btf, id, res_id); +} + +static const char *btf__name_by_offset(const struct btf *btf, u32 offset) +{ + return btf_name_by_offset(btf, offset); +} + +static s64 btf__resolve_size(const struct btf *btf, u32 type_id) +{ + const struct btf_type *t; + int size; + + t = btf_type_by_id(btf, type_id); + t = btf_resolve_size(btf, t, &size); + if (IS_ERR(t)) + return PTR_ERR(t); + return size; +} + +enum libbpf_print_level { + LIBBPF_WARN, + LIBBPF_INFO, + LIBBPF_DEBUG, +}; + +#undef pr_warn +#undef pr_info +#undef pr_debug +#define pr_warn(fmt, log, ...) bpf_log((void *)log, fmt, "", ##__VA_ARGS__) +#define pr_info(fmt, log, ...) bpf_log((void *)log, fmt, "", ##__VA_ARGS__) +#define pr_debug(fmt, log, ...) bpf_log((void *)log, fmt, "", ##__VA_ARGS__) +#define libbpf_print(level, fmt, ...) 
bpf_log((void *)prog_name, fmt, ##__VA_ARGS__) +#else #include <stdio.h> #include <string.h> #include <errno.h> @@ -12,33 +66,7 @@ #include "btf.h" #include "str_error.h" #include "libbpf_internal.h" - -#define BPF_CORE_SPEC_MAX_LEN 64 - -/* represents BPF CO-RE field or array element accessor */ -struct bpf_core_accessor { - __u32 type_id; /* struct/union type or array element type */ - __u32 idx; /* field index or array index */ - const char *name; /* field name or NULL for array accessor */ -}; - -struct bpf_core_spec { - const struct btf *btf; - /* high-level spec: named fields and array indices only */ - struct bpf_core_accessor spec[BPF_CORE_SPEC_MAX_LEN]; - /* original unresolved (no skip_mods_or_typedefs) root type ID */ - __u32 root_type_id; - /* CO-RE relocation kind */ - enum bpf_core_relo_kind relo_kind; - /* high-level spec length */ - int len; - /* raw, low-level spec: 1-to-1 with accessor spec string */ - int raw_spec[BPF_CORE_SPEC_MAX_LEN]; - /* raw spec length */ - int raw_len; - /* field bit offset represented by spec */ - __u32 bit_offset; -}; +#endif static bool is_flex_arr(const struct btf *btf, const struct bpf_core_accessor *acc, @@ -51,25 +79,25 @@ static bool is_flex_arr(const struct btf *btf, return false; /* has to be the last member of enclosing struct */ - t = btf__type_by_id(btf, acc->type_id); + t = btf_type_by_id(btf, acc->type_id); return acc->idx == btf_vlen(t) - 1; } static const char *core_relo_kind_str(enum bpf_core_relo_kind kind) { switch (kind) { - case BPF_FIELD_BYTE_OFFSET: return "byte_off"; - case BPF_FIELD_BYTE_SIZE: return "byte_sz"; - case BPF_FIELD_EXISTS: return "field_exists"; - case BPF_FIELD_SIGNED: return "signed"; - case BPF_FIELD_LSHIFT_U64: return "lshift_u64"; - case BPF_FIELD_RSHIFT_U64: return "rshift_u64"; - case BPF_TYPE_ID_LOCAL: return "local_type_id"; - case BPF_TYPE_ID_TARGET: return "target_type_id"; - case BPF_TYPE_EXISTS: return "type_exists"; - case BPF_TYPE_SIZE: return "type_size"; - case BPF_ENUMVAL_EXISTS: return "enumval_exists"; - case BPF_ENUMVAL_VALUE: return "enumval_value"; + case BPF_CORE_FIELD_BYTE_OFFSET: return "byte_off"; + case BPF_CORE_FIELD_BYTE_SIZE: return "byte_sz"; + case BPF_CORE_FIELD_EXISTS: return "field_exists"; + case BPF_CORE_FIELD_SIGNED: return "signed"; + case BPF_CORE_FIELD_LSHIFT_U64: return "lshift_u64"; + case BPF_CORE_FIELD_RSHIFT_U64: return "rshift_u64"; + case BPF_CORE_TYPE_ID_LOCAL: return "local_type_id"; + case BPF_CORE_TYPE_ID_TARGET: return "target_type_id"; + case BPF_CORE_TYPE_EXISTS: return "type_exists"; + case BPF_CORE_TYPE_SIZE: return "type_size"; + case BPF_CORE_ENUMVAL_EXISTS: return "enumval_exists"; + case BPF_CORE_ENUMVAL_VALUE: return "enumval_value"; default: return "unknown"; } } @@ -77,12 +105,12 @@ static const char *core_relo_kind_str(enum bpf_core_relo_kind kind) static bool core_relo_is_field_based(enum bpf_core_relo_kind kind) { switch (kind) { - case BPF_FIELD_BYTE_OFFSET: - case BPF_FIELD_BYTE_SIZE: - case BPF_FIELD_EXISTS: - case BPF_FIELD_SIGNED: - case BPF_FIELD_LSHIFT_U64: - case BPF_FIELD_RSHIFT_U64: + case BPF_CORE_FIELD_BYTE_OFFSET: + case BPF_CORE_FIELD_BYTE_SIZE: + case BPF_CORE_FIELD_EXISTS: + case BPF_CORE_FIELD_SIGNED: + case BPF_CORE_FIELD_LSHIFT_U64: + case BPF_CORE_FIELD_RSHIFT_U64: return true; default: return false; @@ -92,10 +120,10 @@ static bool core_relo_is_field_based(enum bpf_core_relo_kind kind) static bool core_relo_is_type_based(enum bpf_core_relo_kind kind) { switch (kind) { - case BPF_TYPE_ID_LOCAL: - case BPF_TYPE_ID_TARGET: - 
case BPF_TYPE_EXISTS: - case BPF_TYPE_SIZE: + case BPF_CORE_TYPE_ID_LOCAL: + case BPF_CORE_TYPE_ID_TARGET: + case BPF_CORE_TYPE_EXISTS: + case BPF_CORE_TYPE_SIZE: return true; default: return false; @@ -105,8 +133,8 @@ static bool core_relo_is_type_based(enum bpf_core_relo_kind kind) static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind) { switch (kind) { - case BPF_ENUMVAL_EXISTS: - case BPF_ENUMVAL_VALUE: + case BPF_CORE_ENUMVAL_EXISTS: + case BPF_CORE_ENUMVAL_VALUE: return true; default: return false; @@ -150,7 +178,7 @@ static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind) * Enum value-based relocations (ENUMVAL_EXISTS/ENUMVAL_VALUE) use access * string to specify enumerator's value index that need to be relocated. */ -static int bpf_core_parse_spec(const struct btf *btf, +static int bpf_core_parse_spec(const char *prog_name, const struct btf *btf, __u32 type_id, const char *spec_str, enum bpf_core_relo_kind relo_kind, @@ -272,8 +300,8 @@ static int bpf_core_parse_spec(const struct btf *btf, return sz; spec->bit_offset += access_idx * sz * 8; } else { - pr_warn("relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %s\n", - type_id, spec_str, i, id, btf_kind_str(t)); + pr_warn("prog '%s': relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %s\n", + prog_name, type_id, spec_str, i, id, btf_kind_str(t)); return -EINVAL; } } @@ -346,8 +374,6 @@ recur: targ_id = btf_array(targ_type)->type; goto recur; default: - pr_warn("unexpected kind %d relocated, local [%d], target [%d]\n", - btf_kind(local_type), local_id, targ_id); return 0; } } @@ -388,7 +414,7 @@ static int bpf_core_match_member(const struct btf *local_btf, return 0; local_id = local_acc->type_id; - local_type = btf__type_by_id(local_btf, local_id); + local_type = btf_type_by_id(local_btf, local_id); local_member = btf_members(local_type) + local_acc->idx; local_name = btf__name_by_offset(local_btf, local_member->name_off); @@ -571,7 +597,7 @@ static int bpf_core_calc_field_relo(const char *prog_name, *field_sz = 0; - if (relo->kind == BPF_FIELD_EXISTS) { + if (relo->kind == BPF_CORE_FIELD_EXISTS) { *val = spec ? 
1 : 0; return 0; } @@ -580,11 +606,11 @@ static int bpf_core_calc_field_relo(const char *prog_name, return -EUCLEAN; /* request instruction poisoning */ acc = &spec->spec[spec->len - 1]; - t = btf__type_by_id(spec->btf, acc->type_id); + t = btf_type_by_id(spec->btf, acc->type_id); /* a[n] accessor needs special handling */ if (!acc->name) { - if (relo->kind == BPF_FIELD_BYTE_OFFSET) { + if (relo->kind == BPF_CORE_FIELD_BYTE_OFFSET) { *val = spec->bit_offset / 8; /* remember field size for load/store mem size */ sz = btf__resolve_size(spec->btf, acc->type_id); @@ -592,7 +618,7 @@ static int bpf_core_calc_field_relo(const char *prog_name, return -EINVAL; *field_sz = sz; *type_id = acc->type_id; - } else if (relo->kind == BPF_FIELD_BYTE_SIZE) { + } else if (relo->kind == BPF_CORE_FIELD_BYTE_SIZE) { sz = btf__resolve_size(spec->btf, acc->type_id); if (sz < 0) return -EINVAL; @@ -644,36 +670,36 @@ static int bpf_core_calc_field_relo(const char *prog_name, *validate = !bitfield; switch (relo->kind) { - case BPF_FIELD_BYTE_OFFSET: + case BPF_CORE_FIELD_BYTE_OFFSET: *val = byte_off; if (!bitfield) { *field_sz = byte_sz; *type_id = field_type_id; } break; - case BPF_FIELD_BYTE_SIZE: + case BPF_CORE_FIELD_BYTE_SIZE: *val = byte_sz; break; - case BPF_FIELD_SIGNED: + case BPF_CORE_FIELD_SIGNED: /* enums will be assumed unsigned */ *val = btf_is_enum(mt) || (btf_int_encoding(mt) & BTF_INT_SIGNED); if (validate) *validate = true; /* signedness is never ambiguous */ break; - case BPF_FIELD_LSHIFT_U64: + case BPF_CORE_FIELD_LSHIFT_U64: #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ *val = 64 - (bit_off + bit_sz - byte_off * 8); #else *val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8); #endif break; - case BPF_FIELD_RSHIFT_U64: + case BPF_CORE_FIELD_RSHIFT_U64: *val = 64 - bit_sz; if (validate) *validate = true; /* right shift is never ambiguous */ break; - case BPF_FIELD_EXISTS: + case BPF_CORE_FIELD_EXISTS: default: return -EOPNOTSUPP; } @@ -694,20 +720,20 @@ static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo, } switch (relo->kind) { - case BPF_TYPE_ID_TARGET: + case BPF_CORE_TYPE_ID_TARGET: *val = spec->root_type_id; break; - case BPF_TYPE_EXISTS: + case BPF_CORE_TYPE_EXISTS: *val = 1; break; - case BPF_TYPE_SIZE: + case BPF_CORE_TYPE_SIZE: sz = btf__resolve_size(spec->btf, spec->root_type_id); if (sz < 0) return -EINVAL; *val = sz; break; - case BPF_TYPE_ID_LOCAL: - /* BPF_TYPE_ID_LOCAL is handled specially and shouldn't get here */ + case BPF_CORE_TYPE_ID_LOCAL: + /* BPF_CORE_TYPE_ID_LOCAL is handled specially and shouldn't get here */ default: return -EOPNOTSUPP; } @@ -723,13 +749,13 @@ static int bpf_core_calc_enumval_relo(const struct bpf_core_relo *relo, const struct btf_enum *e; switch (relo->kind) { - case BPF_ENUMVAL_EXISTS: + case BPF_CORE_ENUMVAL_EXISTS: *val = spec ? 
1 : 0; break; - case BPF_ENUMVAL_VALUE: + case BPF_CORE_ENUMVAL_VALUE: if (!spec) return -EUCLEAN; /* request instruction poisoning */ - t = btf__type_by_id(spec->btf, spec->spec[0].type_id); + t = btf_type_by_id(spec->btf, spec->spec[0].type_id); e = btf_enum(t) + spec->spec[0].idx; *val = e->val; break; @@ -805,8 +831,8 @@ static int bpf_core_calc_relo(const char *prog_name, if (res->orig_sz != res->new_sz) { const struct btf_type *orig_t, *new_t; - orig_t = btf__type_by_id(local_spec->btf, res->orig_type_id); - new_t = btf__type_by_id(targ_spec->btf, res->new_type_id); + orig_t = btf_type_by_id(local_spec->btf, res->orig_type_id); + new_t = btf_type_by_id(targ_spec->btf, res->new_type_id); /* There are two use cases in which it's safe to * adjust load/store's mem size: @@ -1045,7 +1071,7 @@ poison: * [<type-id>] (<type-name>) + <raw-spec> => <offset>@<spec>, * where <spec> is a C-syntax view of recorded field access, e.g.: x.a[3].b */ -static void bpf_core_dump_spec(int level, const struct bpf_core_spec *spec) +static void bpf_core_dump_spec(const char *prog_name, int level, const struct bpf_core_spec *spec) { const struct btf_type *t; const struct btf_enum *e; @@ -1054,7 +1080,7 @@ static void bpf_core_dump_spec(int level, const struct bpf_core_spec *spec) int i; type_id = spec->root_type_id; - t = btf__type_by_id(spec->btf, type_id); + t = btf_type_by_id(spec->btf, type_id); s = btf__name_by_offset(spec->btf, t->name_off); libbpf_print(level, "[%u] %s %s", type_id, btf_kind_str(t), str_is_empty(s) ? "<anon>" : s); @@ -1147,9 +1173,12 @@ int bpf_core_apply_relo_insn(const char *prog_name, struct bpf_insn *insn, const struct bpf_core_relo *relo, int relo_idx, const struct btf *local_btf, - struct bpf_core_cand_list *cands) + struct bpf_core_cand_list *cands, + struct bpf_core_spec *specs_scratch) { - struct bpf_core_spec local_spec, cand_spec, targ_spec = {}; + struct bpf_core_spec *local_spec = &specs_scratch[0]; + struct bpf_core_spec *cand_spec = &specs_scratch[1]; + struct bpf_core_spec *targ_spec = &specs_scratch[2]; struct bpf_core_relo_res cand_res, targ_res; const struct btf_type *local_type; const char *local_name; @@ -1158,10 +1187,7 @@ int bpf_core_apply_relo_insn(const char *prog_name, struct bpf_insn *insn, int i, j, err; local_id = relo->type_id; - local_type = btf__type_by_id(local_btf, local_id); - if (!local_type) - return -EINVAL; - + local_type = btf_type_by_id(local_btf, local_id); local_name = btf__name_by_offset(local_btf, local_type->name_off); if (!local_name) return -EINVAL; @@ -1170,7 +1196,8 @@ int bpf_core_apply_relo_insn(const char *prog_name, struct bpf_insn *insn, if (str_is_empty(spec_str)) return -EINVAL; - err = bpf_core_parse_spec(local_btf, local_id, spec_str, relo->kind, &local_spec); + err = bpf_core_parse_spec(prog_name, local_btf, local_id, spec_str, + relo->kind, local_spec); if (err) { pr_warn("prog '%s': relo #%d: parsing [%d] %s %s + %s failed: %d\n", prog_name, relo_idx, local_id, btf_kind_str(local_type), @@ -1181,15 +1208,15 @@ int bpf_core_apply_relo_insn(const char *prog_name, struct bpf_insn *insn, pr_debug("prog '%s': relo #%d: kind <%s> (%d), spec is ", prog_name, relo_idx, core_relo_kind_str(relo->kind), relo->kind); - bpf_core_dump_spec(LIBBPF_DEBUG, &local_spec); + bpf_core_dump_spec(prog_name, LIBBPF_DEBUG, local_spec); libbpf_print(LIBBPF_DEBUG, "\n"); /* TYPE_ID_LOCAL relo is special and doesn't need candidate search */ - if (relo->kind == BPF_TYPE_ID_LOCAL) { + if (relo->kind == BPF_CORE_TYPE_ID_LOCAL) { targ_res.validate = 
true; targ_res.poison = false; - targ_res.orig_val = local_spec.root_type_id; - targ_res.new_val = local_spec.root_type_id; + targ_res.orig_val = local_spec->root_type_id; + targ_res.new_val = local_spec->root_type_id; goto patch_insn; } @@ -1202,38 +1229,38 @@ int bpf_core_apply_relo_insn(const char *prog_name, struct bpf_insn *insn, for (i = 0, j = 0; i < cands->len; i++) { - err = bpf_core_spec_match(&local_spec, cands->cands[i].btf, - cands->cands[i].id, &cand_spec); + err = bpf_core_spec_match(local_spec, cands->cands[i].btf, + cands->cands[i].id, cand_spec); if (err < 0) { pr_warn("prog '%s': relo #%d: error matching candidate #%d ", prog_name, relo_idx, i); - bpf_core_dump_spec(LIBBPF_WARN, &cand_spec); + bpf_core_dump_spec(prog_name, LIBBPF_WARN, cand_spec); libbpf_print(LIBBPF_WARN, ": %d\n", err); return err; } pr_debug("prog '%s': relo #%d: %s candidate #%d ", prog_name, relo_idx, err == 0 ? "non-matching" : "matching", i); - bpf_core_dump_spec(LIBBPF_DEBUG, &cand_spec); + bpf_core_dump_spec(prog_name, LIBBPF_DEBUG, cand_spec); libbpf_print(LIBBPF_DEBUG, "\n"); if (err == 0) continue; - err = bpf_core_calc_relo(prog_name, relo, relo_idx, &local_spec, &cand_spec, &cand_res); + err = bpf_core_calc_relo(prog_name, relo, relo_idx, local_spec, cand_spec, &cand_res); if (err) return err; if (j == 0) { targ_res = cand_res; - targ_spec = cand_spec; - } else if (cand_spec.bit_offset != targ_spec.bit_offset) { + *targ_spec = *cand_spec; + } else if (cand_spec->bit_offset != targ_spec->bit_offset) { /* if there are many field relo candidates, they * should all resolve to the same bit offset */ pr_warn("prog '%s': relo #%d: field offset ambiguity: %u != %u\n", - prog_name, relo_idx, cand_spec.bit_offset, - targ_spec.bit_offset); + prog_name, relo_idx, cand_spec->bit_offset, + targ_spec->bit_offset); return -EINVAL; } else if (cand_res.poison != targ_res.poison || cand_res.new_val != targ_res.new_val) { /* all candidates should result in the same relocation @@ -1251,7 +1278,7 @@ int bpf_core_apply_relo_insn(const char *prog_name, struct bpf_insn *insn, } /* - * For BPF_FIELD_EXISTS relo or when used BPF program has field + * For BPF_CORE_FIELD_EXISTS relo or when used BPF program has field * existence checks or kernel version/config checks, it's expected * that we might not find any candidates. In this case, if field * wasn't found in any candidate, the list of candidates shouldn't @@ -1277,7 +1304,7 @@ int bpf_core_apply_relo_insn(const char *prog_name, struct bpf_insn *insn, prog_name, relo_idx); /* calculate single target relo result explicitly */ - err = bpf_core_calc_relo(prog_name, relo, relo_idx, &local_spec, NULL, &targ_res); + err = bpf_core_calc_relo(prog_name, relo, relo_idx, local_spec, NULL, &targ_res); if (err) return err; } diff --git a/tools/lib/bpf/relo_core.h b/tools/lib/bpf/relo_core.h index 3b9f8f18346c..17799819ad7c 100644 --- a/tools/lib/bpf/relo_core.h +++ b/tools/lib/bpf/relo_core.h @@ -4,81 +4,10 @@ #ifndef __RELO_CORE_H #define __RELO_CORE_H -/* bpf_core_relo_kind encodes which aspect of captured field/type/enum value - * has to be adjusted by relocations. 
- */ -enum bpf_core_relo_kind { - BPF_FIELD_BYTE_OFFSET = 0, /* field byte offset */ - BPF_FIELD_BYTE_SIZE = 1, /* field size in bytes */ - BPF_FIELD_EXISTS = 2, /* field existence in target kernel */ - BPF_FIELD_SIGNED = 3, /* field signedness (0 - unsigned, 1 - signed) */ - BPF_FIELD_LSHIFT_U64 = 4, /* bitfield-specific left bitshift */ - BPF_FIELD_RSHIFT_U64 = 5, /* bitfield-specific right bitshift */ - BPF_TYPE_ID_LOCAL = 6, /* type ID in local BPF object */ - BPF_TYPE_ID_TARGET = 7, /* type ID in target kernel */ - BPF_TYPE_EXISTS = 8, /* type existence in target kernel */ - BPF_TYPE_SIZE = 9, /* type size in bytes */ - BPF_ENUMVAL_EXISTS = 10, /* enum value existence in target kernel */ - BPF_ENUMVAL_VALUE = 11, /* enum value integer value */ -}; - -/* The minimum bpf_core_relo checked by the loader - * - * CO-RE relocation captures the following data: - * - insn_off - instruction offset (in bytes) within a BPF program that needs - * its insn->imm field to be relocated with actual field info; - * - type_id - BTF type ID of the "root" (containing) entity of a relocatable - * type or field; - * - access_str_off - offset into corresponding .BTF string section. String - * interpretation depends on specific relocation kind: - * - for field-based relocations, string encodes an accessed field using - * a sequence of field and array indices, separated by colon (:). It's - * conceptually very close to LLVM's getelementptr ([0]) instruction's - * arguments for identifying offset to a field. - * - for type-based relocations, strings is expected to be just "0"; - * - for enum value-based relocations, string contains an index of enum - * value within its enum type; - * - * Example to provide a better feel. - * - * struct sample { - * int a; - * struct { - * int b[10]; - * }; - * }; - * - * struct sample *s = ...; - * int x = &s->a; // encoded as "0:0" (a is field #0) - * int y = &s->b[5]; // encoded as "0:1:0:5" (anon struct is field #1, - * // b is field #0 inside anon struct, accessing elem #5) - * int z = &s[10]->b; // encoded as "10:1" (ptr is used as an array) - * - * type_id for all relocs in this example will capture BTF type id of - * `struct sample`. - * - * Such relocation is emitted when using __builtin_preserve_access_index() - * Clang built-in, passing expression that captures field address, e.g.: - * - * bpf_probe_read(&dst, sizeof(dst), - * __builtin_preserve_access_index(&src->a.b.c)); - * - * In this case Clang will emit field relocation recording necessary data to - * be able to find offset of embedded `a.b.c` field within `src` struct. 
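For reference, a compact, self-contained sketch of BPF program source that produces the field relocation described in the comment above; the struct layout mirrors the comment's example and assumes the usual bpf_helpers.h environment:

struct sample {
	int a;
	struct {
		int b[10];
	};
};

static int read_b5(void *dst, struct sample *src)
{
	/* Clang records a CO-RE relocation with access string "0:1:0:5" */
	return bpf_probe_read(dst, sizeof(int),
			      __builtin_preserve_access_index(&src->b[5]));
}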
- * - * [0] https://llvm.org/docs/LangRef.html#getelementptr-instruction - */ -struct bpf_core_relo { - __u32 insn_off; - __u32 type_id; - __u32 access_str_off; - enum bpf_core_relo_kind kind; -}; +#include <linux/bpf.h> struct bpf_core_cand { const struct btf *btf; - const struct btf_type *t; - const char *name; __u32 id; }; @@ -88,11 +17,39 @@ struct bpf_core_cand_list { int len; }; +#define BPF_CORE_SPEC_MAX_LEN 64 + +/* represents BPF CO-RE field or array element accessor */ +struct bpf_core_accessor { + __u32 type_id; /* struct/union type or array element type */ + __u32 idx; /* field index or array index */ + const char *name; /* field name or NULL for array accessor */ +}; + +struct bpf_core_spec { + const struct btf *btf; + /* high-level spec: named fields and array indices only */ + struct bpf_core_accessor spec[BPF_CORE_SPEC_MAX_LEN]; + /* original unresolved (no skip_mods_or_typedefs) root type ID */ + __u32 root_type_id; + /* CO-RE relocation kind */ + enum bpf_core_relo_kind relo_kind; + /* high-level spec length */ + int len; + /* raw, low-level spec: 1-to-1 with accessor spec string */ + int raw_spec[BPF_CORE_SPEC_MAX_LEN]; + /* raw spec length */ + int raw_len; + /* field bit offset represented by spec */ + __u32 bit_offset; +}; + int bpf_core_apply_relo_insn(const char *prog_name, struct bpf_insn *insn, int insn_idx, const struct bpf_core_relo *relo, int relo_idx, const struct btf *local_btf, - struct bpf_core_cand_list *cands); + struct bpf_core_cand_list *cands, + struct bpf_core_spec *specs_scratch); int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id, const struct btf *targ_btf, __u32 targ_id); diff --git a/tools/lib/bpf/skel_internal.h b/tools/lib/bpf/skel_internal.h index 9cf66702fa8d..0b84d8e6b72a 100644 --- a/tools/lib/bpf/skel_internal.h +++ b/tools/lib/bpf/skel_internal.h @@ -7,6 +7,16 @@ #include <sys/syscall.h> #include <sys/mman.h> +#ifndef __NR_bpf +# if defined(__mips__) && defined(_ABIO32) +# define __NR_bpf 4355 +# elif defined(__mips__) && defined(_ABIN32) +# define __NR_bpf 6319 +# elif defined(__mips__) && defined(_ABI64) +# define __NR_bpf 5315 +# endif +#endif + /* This file is a base header for auto-generated *.lskel.h files. * Its contents will change and may become part of auto-generation in the future. 
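The MIPS __NR_bpf definitions added above exist because generated light skeletons invoke the bpf() syscall directly rather than going through libbpf. A hedged sketch of the kind of raw wrapper that depends on them (not the literal helper from this header):

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size)
{
	/* resolves on MIPS o32/n32/n64 thanks to the fallback defines above */
	return (int)syscall(__NR_bpf, cmd, attr, size);
}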
* @@ -65,8 +75,7 @@ static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts) int map_fd = -1, prog_fd = -1, key = 0, err; union bpf_attr attr; - map_fd = bpf_create_map_name(BPF_MAP_TYPE_ARRAY, "__loader.map", 4, - opts->data_sz, 1, 0); + map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "__loader.map", 4, opts->data_sz, 1, NULL); if (map_fd < 0) { opts->errstr = "failed to create loader map"; err = -errno; diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c index fdb22f5405c9..e8d94c6dd3bc 100644 --- a/tools/lib/bpf/xsk.c +++ b/tools/lib/bpf/xsk.c @@ -35,6 +35,11 @@ #include "libbpf_internal.h" #include "xsk.h" +/* entire xsk.h and xsk.c is going away in libbpf 1.0, so ignore all internal + * uses of deprecated APIs + */ +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" + #ifndef SOL_XDP #define SOL_XDP 283 #endif @@ -364,7 +369,6 @@ int xsk_umem__create_v0_0_2(struct xsk_umem **umem_ptr, void *umem_area, static enum xsk_prog get_xsk_prog(void) { enum xsk_prog detected = XSK_PROG_FALLBACK; - struct bpf_create_map_attr map_attr; __u32 size_out, retval, duration; char data_in = 0, data_out; struct bpf_insn insns[] = { @@ -376,13 +380,7 @@ static enum xsk_prog get_xsk_prog(void) }; int prog_fd, map_fd, ret, insn_cnt = ARRAY_SIZE(insns); - memset(&map_attr, 0, sizeof(map_attr)); - map_attr.map_type = BPF_MAP_TYPE_XSKMAP; - map_attr.key_size = sizeof(int); - map_attr.value_size = sizeof(int); - map_attr.max_entries = 1; - - map_fd = bpf_create_map_xattr(&map_attr); + map_fd = bpf_map_create(BPF_MAP_TYPE_XSKMAP, NULL, sizeof(int), sizeof(int), 1, NULL); if (map_fd < 0) return detected; @@ -586,8 +584,8 @@ static int xsk_create_bpf_maps(struct xsk_socket *xsk) if (max_queues < 0) return max_queues; - fd = bpf_create_map_name(BPF_MAP_TYPE_XSKMAP, "xsks_map", - sizeof(int), sizeof(int), max_queues, 0); + fd = bpf_map_create(BPF_MAP_TYPE_XSKMAP, "xsks_map", + sizeof(int), sizeof(int), max_queues, NULL); if (fd < 0) return fd; diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c index 81a4c543ff7e..4b384c907027 100644 --- a/tools/objtool/elf.c +++ b/tools/objtool/elf.c @@ -375,6 +375,7 @@ static int read_symbols(struct elf *elf) return -1; } memset(sym, 0, sizeof(*sym)); + INIT_LIST_HEAD(&sym->pv_target); sym->alias = sym; sym->idx = i; diff --git a/tools/objtool/objtool.c b/tools/objtool/objtool.c index c90c7084e45a..bdf699f6552b 100644 --- a/tools/objtool/objtool.c +++ b/tools/objtool/objtool.c @@ -153,6 +153,10 @@ void objtool_pv_add(struct objtool_file *f, int idx, struct symbol *func) !strcmp(func->name, "_paravirt_ident_64")) return; + /* already added this function */ + if (!list_empty(&func->pv_target)) + return; + list_add(&func->pv_target, &f->pv_ops[idx].targets); f->pv_ops[idx].clean = false; } diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config index 07e65a061fd3..3df74cf5651a 100644 --- a/tools/perf/Makefile.config +++ b/tools/perf/Makefile.config @@ -271,8 +271,6 @@ endif FEATURE_CHECK_CFLAGS-libpython := $(PYTHON_EMBED_CCOPTS) FEATURE_CHECK_LDFLAGS-libpython := $(PYTHON_EMBED_LDOPTS) -FEATURE_CHECK_CFLAGS-libpython-version := $(PYTHON_EMBED_CCOPTS) -FEATURE_CHECK_LDFLAGS-libpython-version := $(PYTHON_EMBED_LDOPTS) FEATURE_CHECK_LDFLAGS-libaio = -lrt @@ -1010,6 +1008,9 @@ ifndef NO_AUXTRACE ifndef NO_AUXTRACE $(call detected,CONFIG_AUXTRACE) CFLAGS += -DHAVE_AUXTRACE_SUPPORT + ifeq ($(feature-reallocarray), 0) + CFLAGS += -DCOMPAT_NEED_REALLOCARRAY + endif endif endif diff --git a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl 
b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl index 1ca7bc337932..e2c481fcede6 100644 --- a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl +++ b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl @@ -363,3 +363,4 @@ 446 n64 landlock_restrict_self sys_landlock_restrict_self # 447 reserved for memfd_secret 448 n64 process_mrelease sys_process_mrelease +449 n64 futex_waitv sys_futex_waitv diff --git a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl index 7bef917cc84e..15109af9d075 100644 --- a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl +++ b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl @@ -528,3 +528,4 @@ 446 common landlock_restrict_self sys_landlock_restrict_self # 447 reserved for memfd_secret 448 common process_mrelease sys_process_mrelease +449 common futex_waitv sys_futex_waitv diff --git a/tools/perf/arch/s390/entry/syscalls/syscall.tbl b/tools/perf/arch/s390/entry/syscalls/syscall.tbl index df5261e5cfe1..ed9c5c2eafad 100644 --- a/tools/perf/arch/s390/entry/syscalls/syscall.tbl +++ b/tools/perf/arch/s390/entry/syscalls/syscall.tbl @@ -451,3 +451,4 @@ 446 common landlock_restrict_self sys_landlock_restrict_self sys_landlock_restrict_self # 447 reserved for memfd_secret 448 common process_mrelease sys_process_mrelease sys_process_mrelease +449 common futex_waitv sys_futex_waitv sys_futex_waitv diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c index bc5259db5fd9..b9d6306cc14e 100644 --- a/tools/perf/builtin-inject.c +++ b/tools/perf/builtin-inject.c @@ -820,7 +820,7 @@ static int __cmd_inject(struct perf_inject *inject) inject->tool.ordered_events = true; inject->tool.ordering_requires_timestamps = true; /* Allow space in the header for new attributes */ - output_data_offset = 4096; + output_data_offset = roundup(8192 + session->header.data_offset, 4096); if (inject->strip) strip_init(inject); } diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 8167ebfe776a..8ae400429870 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -619,14 +619,17 @@ static int report__browse_hists(struct report *rep) int ret; struct perf_session *session = rep->session; struct evlist *evlist = session->evlist; - const char *help = perf_tip(system_path(TIPDIR)); + char *help = NULL, *path = NULL; - if (help == NULL) { + path = system_path(TIPDIR); + if (perf_tip(&help, path) || help == NULL) { /* fallback for people who don't install perf ;-) */ - help = perf_tip(DOCDIR); - if (help == NULL) - help = "Cannot load tips.txt file, please install perf!"; + free(path); + path = system_path(DOCDIR); + if (perf_tip(&help, path) || help == NULL) + help = strdup("Cannot load tips.txt file, please install perf!"); } + free(path); switch (use_browser) { case 1: @@ -651,7 +654,7 @@ static int report__browse_hists(struct report *rep) ret = evlist__tty_browse_hists(evlist, rep, help); break; } - + free(help); return ret; } diff --git a/tools/perf/tests/bpf.c b/tools/perf/tests/bpf.c index 329f77f592f4..573490530194 100644 --- a/tools/perf/tests/bpf.c +++ b/tools/perf/tests/bpf.c @@ -296,9 +296,13 @@ static int check_env(void) return err; } +/* temporarily disable libbpf deprecation warnings */ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" err = bpf_load_program(BPF_PROG_TYPE_KPROBE, insns, ARRAY_SIZE(insns), license, kver_int, NULL, 0); +#pragma GCC diagnostic pop if (err < 0) { pr_err("Missing basic BPF support, skip this test: 
%s\n", strerror(errno)); diff --git a/tools/perf/tests/event_update.c b/tools/perf/tests/event_update.c index fbb68deba59f..d01532d40acb 100644 --- a/tools/perf/tests/event_update.c +++ b/tools/perf/tests/event_update.c @@ -88,7 +88,6 @@ static int test__event_update(struct test_suite *test __maybe_unused, int subtes struct evsel *evsel; struct event_name tmp; struct evlist *evlist = evlist__new_default(); - char *unit = strdup("KRAVA"); TEST_ASSERT_VAL("failed to get evlist", evlist); @@ -99,7 +98,8 @@ static int test__event_update(struct test_suite *test __maybe_unused, int subtes perf_evlist__id_add(&evlist->core, &evsel->core, 0, 0, 123); - evsel->unit = unit; + free((char *)evsel->unit); + evsel->unit = strdup("KRAVA"); TEST_ASSERT_VAL("failed to synthesize attr update unit", !perf_event__synthesize_event_update_unit(NULL, evsel, process_event_unit)); @@ -119,7 +119,6 @@ static int test__event_update(struct test_suite *test __maybe_unused, int subtes TEST_ASSERT_VAL("failed to synthesize attr update cpus", !perf_event__synthesize_event_update_cpus(&tmp.tool, evsel, process_event_cpus)); - free(unit); evlist__delete(evlist); return 0; } diff --git a/tools/perf/tests/expr.c b/tools/perf/tests/expr.c index c895de481fe1..d54c5371c6a6 100644 --- a/tools/perf/tests/expr.c +++ b/tools/perf/tests/expr.c @@ -169,7 +169,9 @@ static int test__expr(struct test_suite *t __maybe_unused, int subtest __maybe_u TEST_ASSERT_VAL("#num_dies", expr__parse(&num_dies, ctx, "#num_dies") == 0); TEST_ASSERT_VAL("#num_cores >= #num_dies", num_cores >= num_dies); TEST_ASSERT_VAL("#num_packages", expr__parse(&num_packages, ctx, "#num_packages") == 0); - TEST_ASSERT_VAL("#num_dies >= #num_packages", num_dies >= num_packages); + + if (num_dies) // Some platforms do not have CPU die support, for example s390 + TEST_ASSERT_VAL("#num_dies >= #num_packages", num_dies >= num_packages); /* * Source count returns the number of events aggregating in a leader diff --git a/tools/perf/tests/parse-metric.c b/tools/perf/tests/parse-metric.c index 574b7e4efd3a..07b6f4ec024f 100644 --- a/tools/perf/tests/parse-metric.c +++ b/tools/perf/tests/parse-metric.c @@ -109,6 +109,7 @@ static void load_runtime_stat(struct runtime_stat *st, struct evlist *evlist, struct evsel *evsel; u64 count; + perf_stat__reset_shadow_stats(); evlist__for_each_entry(evlist, evsel) { count = find_value(evsel->name, vals); perf_stat__update_shadow_stats(evsel, count, 0, st); diff --git a/tools/perf/tests/sample-parsing.c b/tools/perf/tests/sample-parsing.c index b669d22f2b13..07f2411b0ad4 100644 --- a/tools/perf/tests/sample-parsing.c +++ b/tools/perf/tests/sample-parsing.c @@ -36,7 +36,7 @@ * These are based on the input value (213) specified * in branch_stack variable. 
*/ -#define BS_EXPECTED_BE 0xa00d000000000000 +#define BS_EXPECTED_BE 0xa000d00000000000 #define BS_EXPECTED_LE 0xd5000000 #define FLAG(s) s->branch_stack->entries[i].flags diff --git a/tools/perf/tests/wp.c b/tools/perf/tests/wp.c index 820d942b30c3..9d4c45184e71 100644 --- a/tools/perf/tests/wp.c +++ b/tools/perf/tests/wp.c @@ -21,6 +21,7 @@ do { \ volatile u64 data1; volatile u8 data2[3]; +#ifndef __s390x__ static int wp_read(int fd, long long *count, int size) { int ret = read(fd, count, size); @@ -48,7 +49,6 @@ static void get__perf_event_attr(struct perf_event_attr *attr, int wp_type, attr->exclude_hv = 1; } -#ifndef __s390x__ static int __event(int wp_type, void *wp_addr, unsigned long wp_len) { int fd; diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c index c1f24d004852..5075ecead5f3 100644 --- a/tools/perf/ui/hist.c +++ b/tools/perf/ui/hist.c @@ -535,6 +535,18 @@ struct perf_hpp_list perf_hpp_list = { #undef __HPP_SORT_ACC_FN #undef __HPP_SORT_RAW_FN +static void fmt_free(struct perf_hpp_fmt *fmt) +{ + /* + * At this point fmt should be completely + * unhooked, if not it's a bug. + */ + BUG_ON(!list_empty(&fmt->list)); + BUG_ON(!list_empty(&fmt->sort_list)); + + if (fmt->free) + fmt->free(fmt); +} void perf_hpp__init(void) { @@ -598,9 +610,10 @@ void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list, list_add(&format->sort_list, &list->sorts); } -void perf_hpp__column_unregister(struct perf_hpp_fmt *format) +static void perf_hpp__column_unregister(struct perf_hpp_fmt *format) { list_del_init(&format->list); + fmt_free(format); } void perf_hpp__cancel_cumulate(void) @@ -672,19 +685,6 @@ next: } -static void fmt_free(struct perf_hpp_fmt *fmt) -{ - /* - * At this point fmt should be completely - * unhooked, if not it's a bug. 
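
The ui/hist.c hunk below folds fmt_free() into perf_hpp__column_unregister(), which is safe because list_del_init() re-initializes the removed node: list_empty() on the node is true immediately afterwards, so the "completely unhooked" sanity checks pass. A self-contained sketch of that invariant; the list helpers mirror the tools/include API and the node type is illustrative:

#include <assert.h>
#include <stdlib.h>

struct list_head { struct list_head *next, *prev; };

static int list_empty(const struct list_head *head)
{
	return head->next == head;	/* a self-pointing node reads as empty */
}

static void list_del_init(struct list_head *entry)
{
	entry->next->prev = entry->prev;
	entry->prev->next = entry->next;
	entry->next = entry;	/* re-initialize: the node now points at itself */
	entry->prev = entry;
}

struct fmt_node { struct list_head list; };

static void unregister_and_free(struct fmt_node *fmt)
{
	list_del_init(&fmt->list);
	assert(list_empty(&fmt->list));	/* the fmt_free()-style check holds */
	free(fmt);
}
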
- */ - BUG_ON(!list_empty(&fmt->list)); - BUG_ON(!list_empty(&fmt->sort_list)); - - if (fmt->free) - fmt->free(fmt); -} - void perf_hpp__reset_output_field(struct perf_hpp_list *list) { struct perf_hpp_fmt *fmt, *tmp; diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c index 4748bcfe61de..fccac06b573a 100644 --- a/tools/perf/util/arm-spe.c +++ b/tools/perf/util/arm-spe.c @@ -51,6 +51,7 @@ struct arm_spe { u8 timeless_decoding; u8 data_queued; + u64 sample_type; u8 sample_flc; u8 sample_llc; u8 sample_tlb; @@ -287,6 +288,12 @@ static void arm_spe_prep_sample(struct arm_spe *spe, event->sample.header.size = sizeof(struct perf_event_header); } +static int arm_spe__inject_event(union perf_event *event, struct perf_sample *sample, u64 type) +{ + event->header.size = perf_event__sample_event_size(sample, type, 0); + return perf_event__synthesize_sample(event, type, 0, sample); +} + static inline int arm_spe_deliver_synth_event(struct arm_spe *spe, struct arm_spe_queue *speq __maybe_unused, @@ -295,6 +302,12 @@ arm_spe_deliver_synth_event(struct arm_spe *spe, { int ret; + if (spe->synth_opts.inject) { + ret = arm_spe__inject_event(event, sample, spe->sample_type); + if (ret) + return ret; + } + ret = perf_session__deliver_synth_event(spe->session, event, sample); if (ret) pr_err("ARM SPE: failed to deliver event, error %d\n", ret); @@ -986,6 +999,8 @@ arm_spe_synth_events(struct arm_spe *spe, struct perf_session *session) else attr.sample_type |= PERF_SAMPLE_TIME; + spe->sample_type = attr.sample_type; + attr.exclude_user = evsel->core.attr.exclude_user; attr.exclude_kernel = evsel->core.attr.exclude_kernel; attr.exclude_hv = evsel->core.attr.exclude_hv; diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c index fbb3c4057c30..528aeb0ab79d 100644 --- a/tools/perf/util/bpf-loader.c +++ b/tools/perf/util/bpf-loader.c @@ -29,6 +29,9 @@ #include <internal/xyarray.h> +/* temporarily disable libbpf deprecation warnings */ +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" + static int libbpf_perf_print(enum libbpf_print_level level __attribute__((unused)), const char *fmt, va_list args) { diff --git a/tools/perf/util/bpf_counter.c b/tools/perf/util/bpf_counter.c index c17d4a43ce06..5a97fd7d0a71 100644 --- a/tools/perf/util/bpf_counter.c +++ b/tools/perf/util/bpf_counter.c @@ -307,6 +307,20 @@ static bool bperf_attr_map_compatible(int attr_map_fd) (map_info.value_size == sizeof(struct perf_event_attr_map_entry)); } +int __weak +bpf_map_create(enum bpf_map_type map_type, + const char *map_name __maybe_unused, + __u32 key_size, + __u32 value_size, + __u32 max_entries, + const struct bpf_map_create_opts *opts __maybe_unused) +{ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" + return bpf_create_map(map_type, key_size, value_size, max_entries, 0); +#pragma GCC diagnostic pop +} + static int bperf_lock_attr_map(struct target *target) { char path[PATH_MAX]; @@ -320,10 +334,10 @@ static int bperf_lock_attr_map(struct target *target) } if (access(path, F_OK)) { - map_fd = bpf_create_map(BPF_MAP_TYPE_HASH, + map_fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(struct perf_event_attr), sizeof(struct perf_event_attr_map_entry), - ATTR_MAP_SIZE, 0); + ATTR_MAP_SIZE, NULL); if (map_fd < 0) return -1; diff --git a/tools/perf/util/bpf_skel/bperf.h b/tools/perf/util/bpf_skel/bperf.h deleted file mode 100644 index 186a5551ddb9..000000000000 --- a/tools/perf/util/bpf_skel/bperf.h +++ /dev/null @@ -1,14 +0,0 @@ -// 
SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) -// Copyright (c) 2021 Facebook - -#ifndef __BPERF_STAT_H -#define __BPERF_STAT_H - -typedef struct { - __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); - __uint(key_size, sizeof(__u32)); - __uint(value_size, sizeof(struct bpf_perf_event_value)); - __uint(max_entries, 1); -} reading_map; - -#endif /* __BPERF_STAT_H */ diff --git a/tools/perf/util/bpf_skel/bperf_follower.bpf.c b/tools/perf/util/bpf_skel/bperf_follower.bpf.c index b8fa3cb2da23..f193998530d4 100644 --- a/tools/perf/util/bpf_skel/bperf_follower.bpf.c +++ b/tools/perf/util/bpf_skel/bperf_follower.bpf.c @@ -1,14 +1,23 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) // Copyright (c) 2021 Facebook -#include <linux/bpf.h> -#include <linux/perf_event.h> +#include "vmlinux.h" #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> -#include "bperf.h" #include "bperf_u.h" -reading_map diff_readings SEC(".maps"); -reading_map accum_readings SEC(".maps"); +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(struct bpf_perf_event_value)); + __uint(max_entries, 1); +} diff_readings SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(struct bpf_perf_event_value)); + __uint(max_entries, 1); +} accum_readings SEC(".maps"); struct { __uint(type, BPF_MAP_TYPE_HASH); diff --git a/tools/perf/util/bpf_skel/bperf_leader.bpf.c b/tools/perf/util/bpf_skel/bperf_leader.bpf.c index 4f70d1459e86..e2a2d4cd7779 100644 --- a/tools/perf/util/bpf_skel/bperf_leader.bpf.c +++ b/tools/perf/util/bpf_skel/bperf_leader.bpf.c @@ -1,10 +1,8 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) // Copyright (c) 2021 Facebook -#include <linux/bpf.h> -#include <linux/perf_event.h> +#include "vmlinux.h" #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> -#include "bperf.h" struct { __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY); @@ -13,8 +11,19 @@ struct { __uint(map_flags, BPF_F_PRESERVE_ELEMS); } events SEC(".maps"); -reading_map prev_readings SEC(".maps"); -reading_map diff_readings SEC(".maps"); +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(struct bpf_perf_event_value)); + __uint(max_entries, 1); +} prev_readings SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(struct bpf_perf_event_value)); + __uint(max_entries, 1); +} diff_readings SEC(".maps"); SEC("raw_tp/sched_switch") int BPF_PROG(on_switch) diff --git a/tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c b/tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c index ab12b4c4ece2..97037d3b3d9f 100644 --- a/tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c +++ b/tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) // Copyright (c) 2020 Facebook -#include <linux/bpf.h> +#include "vmlinux.h" #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 95ffed66369c..c59331eea1d9 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -44,13 +44,16 @@ struct perf_event_attr; /* perf sample has 16 bits size limit */ #define PERF_SAMPLE_MAX_SIZE (1 << 16) +/* number of registers is bounded by the number of bits in regs_dump::mask (64) */ +#define PERF_SAMPLE_REGS_CACHE_SIZE (8 * sizeof(u64)) + struct regs_dump { u64 
abi; u64 mask; u64 *regs; /* Cached values/mask filled by first register access. */ - u64 cache_regs[PERF_REGS_MAX]; + u64 cache_regs[PERF_SAMPLE_REGS_CACHE_SIZE]; u64 cache_mask; }; diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index a59fb2ecb84e..ac0127be0459 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -241,7 +241,7 @@ void evsel__init(struct evsel *evsel, { perf_evsel__init(&evsel->core, attr, idx); evsel->tracking = !idx; - evsel->unit = ""; + evsel->unit = strdup(""); evsel->scale = 1.0; evsel->max_events = ULONG_MAX; evsel->evlist = NULL; @@ -276,13 +276,8 @@ struct evsel *evsel__new_idx(struct perf_event_attr *attr, int idx) } if (evsel__is_clock(evsel)) { - /* - * The evsel->unit points to static alias->unit - * so it's ok to use static string in here. - */ - static const char *unit = "msec"; - - evsel->unit = unit; + free((char *)evsel->unit); + evsel->unit = strdup("msec"); evsel->scale = 1e-6; } @@ -420,7 +415,11 @@ struct evsel *evsel__clone(struct evsel *orig) evsel->max_events = orig->max_events; evsel->tool_event = orig->tool_event; - evsel->unit = orig->unit; + free((char *)evsel->unit); + evsel->unit = strdup(orig->unit); + if (evsel->unit == NULL) + goto out_err; + evsel->scale = orig->scale; evsel->snapshot = orig->snapshot; evsel->per_pkg = orig->per_pkg; @@ -1441,6 +1440,7 @@ void evsel__exit(struct evsel *evsel) zfree(&evsel->group_name); zfree(&evsel->name); zfree(&evsel->pmu_name); + zfree(&evsel->unit); zfree(&evsel->metric_id); evsel__zero_per_pkg(evsel); hashmap__free(evsel->per_pkg_mask); diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index fda8d14c891f..e3c1a532d059 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -2321,6 +2321,7 @@ out: #define FEAT_PROCESS_STR_FUN(__feat, __feat_env) \ static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \ {\ + free(ff->ph->env.__feat_env); \ ff->ph->env.__feat_env = do_read_string(ff); \ return ff->ph->env.__feat_env ? 
0 : -ENOMEM; \ } @@ -4124,6 +4125,7 @@ int perf_event__process_feature(struct perf_session *session, struct perf_record_header_feature *fe = (struct perf_record_header_feature *)event; int type = fe->header.type; u64 feat = fe->feat_id; + int ret = 0; if (type < 0 || type >= PERF_RECORD_HEADER_MAX) { pr_warning("invalid record type %d in pipe-mode\n", type); @@ -4141,11 +4143,13 @@ int perf_event__process_feature(struct perf_session *session, ff.size = event->header.size - sizeof(*fe); ff.ph = &session->header; - if (feat_ops[feat].process(&ff, NULL)) - return -1; + if (feat_ops[feat].process(&ff, NULL)) { + ret = -1; + goto out; + } if (!feat_ops[feat].print || !tool->show_feat_hdr) - return 0; + goto out; if (!feat_ops[feat].full_only || tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) { @@ -4154,8 +4158,9 @@ int perf_event__process_feature(struct perf_session *session, fprintf(stdout, "# %s info available, use -I to display\n", feat_ops[feat].name); } - - return 0; +out: + free_event_desc(ff.events); + return ret; } size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp) @@ -4257,9 +4262,11 @@ int perf_event__process_event_update(struct perf_tool *tool __maybe_unused, switch (ev->type) { case PERF_EVENT_UPDATE__UNIT: + free((char *)evsel->unit); evsel->unit = strdup(ev->data); break; case PERF_EVENT_UPDATE__NAME: + free(evsel->name); evsel->name = strdup(ev->data); break; case PERF_EVENT_UPDATE__SCALE: @@ -4268,11 +4275,11 @@ int perf_event__process_event_update(struct perf_tool *tool __maybe_unused, break; case PERF_EVENT_UPDATE__CPUS: ev_cpus = (struct perf_record_event_update_cpus *)ev->data; map = cpu_map__new_data(&ev_cpus->cpus); - if (map) + if (map) { + perf_cpu_map__put(evsel->core.own_cpus); evsel->core.own_cpus = map; - else + } else pr_err("failed to get event_update cpus\n"); default: break; } diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 65fe65ba03c2..b776465e04ef 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -289,15 +289,10 @@ static long hist_time(unsigned long htime) return htime; } -static void he_stat__add_period(struct he_stat *he_stat, u64 period, - u64 weight, u64 ins_lat, u64 p_stage_cyc) +static void he_stat__add_period(struct he_stat *he_stat, u64 period) { he_stat->period += period; - he_stat->weight += weight; he_stat->nr_events += 1; - he_stat->ins_lat += ins_lat; - he_stat->p_stage_cyc += p_stage_cyc; } static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src) @@ -308,9 +303,6 @@ static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src) dest->period_guest_sys += src->period_guest_sys; dest->period_guest_us += src->period_guest_us; dest->nr_events += src->nr_events; - dest->weight += src->weight; - dest->ins_lat += src->ins_lat; - dest->p_stage_cyc += src->p_stage_cyc; } static void he_stat__decay(struct he_stat *he_stat) @@ -598,9 +590,6 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists, struct hist_entry *he; int64_t cmp; u64 period = entry->stat.period; - u64 weight = entry->stat.weight; - u64 ins_lat = entry->stat.ins_lat; - u64 p_stage_cyc = entry->stat.p_stage_cyc; bool leftmost = true; p = &hists->entries_in->rb_root.rb_node; @@ -619,11 +608,11 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists, if (!cmp) { if (sample_self) { - he_stat__add_period(&he->stat, period, weight, ins_lat, p_stage_cyc); + he_stat__add_period(&he->stat, period); hist_entry__add_callchain_period(he, period); } if (symbol_conf.cumulate_callchain) 
- he_stat__add_period(he->stat_acc, period, weight, ins_lat, p_stage_cyc); + he_stat__add_period(he->stat_acc, period); /* * This mem info was allocated from sample__resolve_mem @@ -733,9 +722,6 @@ __hists__add_entry(struct hists *hists, .stat = { .nr_events = 1, .period = sample->period, - .weight = sample->weight, - .ins_lat = sample->ins_lat, - .p_stage_cyc = sample->p_stage_cyc, }, .parent = sym_parent, .filtered = symbol__parent_filter(sym_parent) | al->filtered, @@ -748,6 +734,9 @@ __hists__add_entry(struct hists *hists, .raw_size = sample->raw_size, .ops = ops, .time = hist_time(sample->time), + .weight = sample->weight, + .ins_lat = sample->ins_lat, + .p_stage_cyc = sample->p_stage_cyc, }, *he = hists__findnew_entry(hists, &entry, al, sample_self); if (!hists->has_callchains && he && he->callchain_size != 0) diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index 5343b62476e6..621f35ae1efa 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -369,7 +369,6 @@ enum { }; void perf_hpp__init(void); -void perf_hpp__column_unregister(struct perf_hpp_fmt *format); void perf_hpp__cancel_cumulate(void); void perf_hpp__setup_output_field(struct perf_hpp_list *list); void perf_hpp__reset_output_field(struct perf_hpp_list *list); diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c index 5f83937bf8f3..0e013c2d9eb4 100644 --- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c +++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c @@ -1205,61 +1205,69 @@ out_no_progress: static bool intel_pt_fup_event(struct intel_pt_decoder *decoder) { + enum intel_pt_sample_type type = decoder->state.type; bool ret = false; + decoder->state.type &= ~INTEL_PT_BRANCH; + if (decoder->set_fup_tx_flags) { decoder->set_fup_tx_flags = false; decoder->tx_flags = decoder->fup_tx_flags; - decoder->state.type = INTEL_PT_TRANSACTION; + decoder->state.type |= INTEL_PT_TRANSACTION; if (decoder->fup_tx_flags & INTEL_PT_ABORT_TX) decoder->state.type |= INTEL_PT_BRANCH; - decoder->state.from_ip = decoder->ip; - decoder->state.to_ip = 0; decoder->state.flags = decoder->fup_tx_flags; - return true; + ret = true; } if (decoder->set_fup_ptw) { decoder->set_fup_ptw = false; - decoder->state.type = INTEL_PT_PTW; + decoder->state.type |= INTEL_PT_PTW; decoder->state.flags |= INTEL_PT_FUP_IP; - decoder->state.from_ip = decoder->ip; - decoder->state.to_ip = 0; decoder->state.ptw_payload = decoder->fup_ptw_payload; - return true; + ret = true; } if (decoder->set_fup_mwait) { decoder->set_fup_mwait = false; - decoder->state.type = INTEL_PT_MWAIT_OP; - decoder->state.from_ip = decoder->ip; - decoder->state.to_ip = 0; + decoder->state.type |= INTEL_PT_MWAIT_OP; decoder->state.mwait_payload = decoder->fup_mwait_payload; ret = true; } if (decoder->set_fup_pwre) { decoder->set_fup_pwre = false; decoder->state.type |= INTEL_PT_PWR_ENTRY; - decoder->state.type &= ~INTEL_PT_BRANCH; - decoder->state.from_ip = decoder->ip; - decoder->state.to_ip = 0; decoder->state.pwre_payload = decoder->fup_pwre_payload; ret = true; } if (decoder->set_fup_exstop) { decoder->set_fup_exstop = false; decoder->state.type |= INTEL_PT_EX_STOP; - decoder->state.type &= ~INTEL_PT_BRANCH; decoder->state.flags |= INTEL_PT_FUP_IP; - decoder->state.from_ip = decoder->ip; - decoder->state.to_ip = 0; ret = true; } if (decoder->set_fup_bep) { decoder->set_fup_bep = false; decoder->state.type |= INTEL_PT_BLK_ITEMS; - decoder->state.type &= ~INTEL_PT_BRANCH; + ret = true; + } + if 
(decoder->overflow) { + decoder->overflow = false; + if (!ret && !decoder->pge) { + if (decoder->hop) { + decoder->state.type = 0; + decoder->pkt_state = INTEL_PT_STATE_RESAMPLE; + } + decoder->pge = true; + decoder->state.type |= INTEL_PT_BRANCH | INTEL_PT_TRACE_BEGIN; + decoder->state.from_ip = 0; + decoder->state.to_ip = decoder->ip; + return true; + } + } + if (ret) { decoder->state.from_ip = decoder->ip; decoder->state.to_ip = 0; - ret = true; + } else { + decoder->state.type = type; } return ret; } @@ -1608,7 +1616,16 @@ static int intel_pt_overflow(struct intel_pt_decoder *decoder) intel_pt_clear_tx_flags(decoder); intel_pt_set_nr(decoder); decoder->timestamp_insn_cnt = 0; - decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC; + decoder->pkt_state = INTEL_PT_STATE_IN_SYNC; + decoder->state.from_ip = decoder->ip; + decoder->ip = 0; + decoder->pge = false; + decoder->set_fup_tx_flags = false; + decoder->set_fup_ptw = false; + decoder->set_fup_mwait = false; + decoder->set_fup_pwre = false; + decoder->set_fup_exstop = false; + decoder->set_fup_bep = false; decoder->overflow = true; return -EOVERFLOW; } @@ -2666,6 +2683,8 @@ static int intel_pt_scan_for_psb(struct intel_pt_decoder *decoder); /* Hop mode: Ignore TNT, do not walk code, but get ip from FUPs and TIPs */ static int intel_pt_hop_trace(struct intel_pt_decoder *decoder, bool *no_tip, int *err) { + *err = 0; + /* Leap from PSB to PSB, getting ip from FUP within PSB+ */ if (decoder->leap && !decoder->in_psb && decoder->packet.type != INTEL_PT_PSB) { *err = intel_pt_scan_for_psb(decoder); @@ -2678,6 +2697,7 @@ static int intel_pt_hop_trace(struct intel_pt_decoder *decoder, bool *no_tip, in return HOP_IGNORE; case INTEL_PT_TIP_PGD: + decoder->pge = false; if (!decoder->packet.count) { intel_pt_set_nr(decoder); return HOP_IGNORE; @@ -2705,18 +2725,21 @@ static int intel_pt_hop_trace(struct intel_pt_decoder *decoder, bool *no_tip, in if (!decoder->packet.count) return HOP_IGNORE; intel_pt_set_ip(decoder); - if (intel_pt_fup_event(decoder)) - return HOP_RETURN; - if (!decoder->branch_enable) + if (decoder->set_fup_mwait || decoder->set_fup_pwre) + *no_tip = true; + if (!decoder->branch_enable || !decoder->pge) *no_tip = true; if (*no_tip) { decoder->state.type = INTEL_PT_INSTRUCTION; decoder->state.from_ip = decoder->ip; decoder->state.to_ip = 0; + intel_pt_fup_event(decoder); return HOP_RETURN; } + intel_pt_fup_event(decoder); + decoder->state.type |= INTEL_PT_INSTRUCTION | INTEL_PT_BRANCH; *err = intel_pt_walk_fup_tip(decoder); - if (!*err) + if (!*err && decoder->state.to_ip) decoder->pkt_state = INTEL_PT_STATE_RESAMPLE; return HOP_RETURN; @@ -2897,7 +2920,7 @@ static bool intel_pt_psb_with_fup(struct intel_pt_decoder *decoder, int *err) { struct intel_pt_psb_info data = { .fup = false }; - if (!decoder->branch_enable || !decoder->pge) + if (!decoder->branch_enable) return false; intel_pt_pkt_lookahead(decoder, intel_pt_psb_lookahead_cb, &data); @@ -2924,6 +2947,7 @@ static int intel_pt_walk_trace(struct intel_pt_decoder *decoder) if (err) return err; next: + err = 0; if (decoder->cyc_threshold) { if (decoder->sample_cyc && last_packet_type != INTEL_PT_CYC) decoder->sample_cyc = false; @@ -2962,6 +2986,7 @@ next: case INTEL_PT_TIP_PGE: { decoder->pge = true; + decoder->overflow = false; intel_pt_mtc_cyc_cnt_pge(decoder); intel_pt_set_nr(decoder); if (decoder->packet.count == 0) { @@ -2999,7 +3024,7 @@ next: break; } intel_pt_set_last_ip(decoder); - if (!decoder->branch_enable) { + if (!decoder->branch_enable || !decoder->pge) { 
decoder->ip = decoder->last_ip; if (intel_pt_fup_event(decoder)) return 0; @@ -3467,10 +3492,10 @@ static int intel_pt_sync_ip(struct intel_pt_decoder *decoder) decoder->set_fup_pwre = false; decoder->set_fup_exstop = false; decoder->set_fup_bep = false; + decoder->overflow = false; if (!decoder->branch_enable) { decoder->pkt_state = INTEL_PT_STATE_IN_SYNC; - decoder->overflow = false; decoder->state.type = 0; /* Do not have a sample */ return 0; } @@ -3485,7 +3510,6 @@ static int intel_pt_sync_ip(struct intel_pt_decoder *decoder) decoder->pkt_state = INTEL_PT_STATE_RESAMPLE; else decoder->pkt_state = INTEL_PT_STATE_IN_SYNC; - decoder->overflow = false; decoder->state.from_ip = 0; decoder->state.to_ip = decoder->ip; @@ -3607,7 +3631,7 @@ static int intel_pt_sync(struct intel_pt_decoder *decoder) } decoder->have_last_ip = true; - decoder->pkt_state = INTEL_PT_STATE_NO_IP; + decoder->pkt_state = INTEL_PT_STATE_IN_SYNC; err = intel_pt_walk_psb(decoder); if (err) @@ -3704,7 +3728,8 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder) if (err) { decoder->state.err = intel_pt_ext_err(err); - decoder->state.from_ip = decoder->ip; + if (err != -EOVERFLOW) + decoder->state.from_ip = decoder->ip; intel_pt_update_sample_time(decoder); decoder->sample_tot_cyc_cnt = decoder->tot_cyc_cnt; intel_pt_set_nr(decoder); diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c index 556a893508da..10c3187e4c5a 100644 --- a/tools/perf/util/intel-pt.c +++ b/tools/perf/util/intel-pt.c @@ -2565,6 +2565,7 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp) ptq->sync_switch = false; intel_pt_next_tid(pt, ptq); } + ptq->timestamp = state->est_timestamp; if (pt->synth_opts.errors) { err = intel_ptq_synth_error(ptq, state); if (err) diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 5bfb6f892489..ba74fdf74af9 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -402,8 +402,10 @@ static int add_event_tool(struct list_head *list, int *idx, if (!evsel) return -ENOMEM; evsel->tool_event = tool_event; - if (tool_event == PERF_TOOL_DURATION_TIME) - evsel->unit = "ns"; + if (tool_event == PERF_TOOL_DURATION_TIME) { + free((char *)evsel->unit); + evsel->unit = strdup("ns"); + } return 0; } @@ -1630,7 +1632,8 @@ int parse_events_add_pmu(struct parse_events_state *parse_state, if (parse_state->fake_pmu) return 0; - evsel->unit = info.unit; + free((char *)evsel->unit); + evsel->unit = strdup(info.unit); evsel->scale = info.scale; evsel->per_pkg = info.per_pkg; evsel->snapshot = info.snapshot; diff --git a/tools/perf/util/perf_regs.c b/tools/perf/util/perf_regs.c index 5ee47ae1509c..06a7461ba864 100644 --- a/tools/perf/util/perf_regs.c +++ b/tools/perf/util/perf_regs.c @@ -25,6 +25,9 @@ int perf_reg_value(u64 *valp, struct regs_dump *regs, int id) int i, idx = 0; u64 mask = regs->mask; + if ((u64)id >= PERF_SAMPLE_REGS_CACHE_SIZE) + return -EINVAL; + if (regs->cache_mask & (1ULL << id)) goto out; diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c index 563a9ba8954f..7f782a31bda3 100644 --- a/tools/perf/util/python.c +++ b/tools/perf/util/python.c @@ -461,7 +461,7 @@ get_tracepoint_field(struct pyrf_event *pevent, PyObject *attr_name) struct tep_event *tp_format; tp_format = trace_event__tp_format_id(evsel->core.attr.config); - if (!tp_format) + if (IS_ERR_OR_NULL(tp_format)) return NULL; evsel->tp_format = tp_format; diff --git a/tools/perf/util/smt.c b/tools/perf/util/smt.c index 
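
The perf_regs.c hunk above adds a range check before the register id is used as a bit index: shifting a 64-bit value by 64 or more is undefined behaviour in C, and an oversized id would also index past cache_regs[]. A generic sketch of that guard; the macro and function names are illustrative, not part of the patch:

#include <errno.h>
#include <stdint.h>

#define REGS_CACHE_SIZE (8 * sizeof(uint64_t))	/* 64 usable bits in the mask */

/* Return 1 if bit "id" is set, 0 if clear, or -EINVAL when the id does
 * not fit in the mask (1ULL << id would be undefined for id >= 64). */
static int cached_bit_test(uint64_t cache_mask, unsigned int id)
{
	if (id >= REGS_CACHE_SIZE)
		return -EINVAL;
	return (cache_mask >> id) & 1;
}
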
20bacd5972ad..34f1b1b1176c 100644 --- a/tools/perf/util/smt.c +++ b/tools/perf/util/smt.c @@ -15,7 +15,7 @@ int smt_on(void) if (cached) return cached_result; - if (sysfs__read_int("devices/system/cpu/smt/active", &cached_result) > 0) + if (sysfs__read_int("devices/system/cpu/smt/active", &cached_result) >= 0) goto done; ncpu = sysconf(_SC_NPROCESSORS_CONF); diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 568a88c001c6..a111065b484e 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -1325,88 +1325,68 @@ struct sort_entry sort_mispredict = { .se_width_idx = HISTC_MISPREDICT, }; -static u64 he_weight(struct hist_entry *he) -{ - return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0; -} - static int64_t -sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right) +sort__weight_cmp(struct hist_entry *left, struct hist_entry *right) { - return he_weight(left) - he_weight(right); + return left->weight - right->weight; } static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width) { - return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he)); + return repsep_snprintf(bf, size, "%-*llu", width, he->weight); } struct sort_entry sort_local_weight = { .se_header = "Local Weight", - .se_cmp = sort__local_weight_cmp, + .se_cmp = sort__weight_cmp, .se_snprintf = hist_entry__local_weight_snprintf, .se_width_idx = HISTC_LOCAL_WEIGHT, }; -static int64_t -sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right) -{ - return left->stat.weight - right->stat.weight; -} - static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width) { - return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight); + return repsep_snprintf(bf, size, "%-*llu", width, + he->weight * he->stat.nr_events); } struct sort_entry sort_global_weight = { .se_header = "Weight", - .se_cmp = sort__global_weight_cmp, + .se_cmp = sort__weight_cmp, .se_snprintf = hist_entry__global_weight_snprintf, .se_width_idx = HISTC_GLOBAL_WEIGHT, }; -static u64 he_ins_lat(struct hist_entry *he) -{ - return he->stat.nr_events ? 
he->stat.ins_lat / he->stat.nr_events : 0; -} - static int64_t -sort__local_ins_lat_cmp(struct hist_entry *left, struct hist_entry *right) +sort__ins_lat_cmp(struct hist_entry *left, struct hist_entry *right) { - return he_ins_lat(left) - he_ins_lat(right); + return left->ins_lat - right->ins_lat; } static int hist_entry__local_ins_lat_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width) { - return repsep_snprintf(bf, size, "%-*u", width, he_ins_lat(he)); + return repsep_snprintf(bf, size, "%-*u", width, he->ins_lat); } struct sort_entry sort_local_ins_lat = { .se_header = "Local INSTR Latency", - .se_cmp = sort__local_ins_lat_cmp, + .se_cmp = sort__ins_lat_cmp, .se_snprintf = hist_entry__local_ins_lat_snprintf, .se_width_idx = HISTC_LOCAL_INS_LAT, }; -static int64_t -sort__global_ins_lat_cmp(struct hist_entry *left, struct hist_entry *right) -{ - return left->stat.ins_lat - right->stat.ins_lat; -} - static int hist_entry__global_ins_lat_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width) { - return repsep_snprintf(bf, size, "%-*u", width, he->stat.ins_lat); + return repsep_snprintf(bf, size, "%-*u", width, + he->ins_lat * he->stat.nr_events); } struct sort_entry sort_global_ins_lat = { .se_header = "INSTR Latency", - .se_cmp = sort__global_ins_lat_cmp, + .se_cmp = sort__ins_lat_cmp, .se_snprintf = hist_entry__global_ins_lat_snprintf, .se_width_idx = HISTC_GLOBAL_INS_LAT, }; @@ -1414,13 +1394,13 @@ struct sort_entry sort_global_ins_lat = { static int64_t sort__global_p_stage_cyc_cmp(struct hist_entry *left, struct hist_entry *right) { - return left->stat.p_stage_cyc - right->stat.p_stage_cyc; + return left->p_stage_cyc - right->p_stage_cyc; } static int hist_entry__p_stage_cyc_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width) { - return repsep_snprintf(bf, size, "%-*u", width, he->stat.p_stage_cyc); + return repsep_snprintf(bf, size, "%-*u", width, he->p_stage_cyc); } struct sort_entry sort_p_stage_cyc = { diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h index b67c469aba79..7b7145501933 100644 --- a/tools/perf/util/sort.h +++ b/tools/perf/util/sort.h @@ -49,9 +49,6 @@ struct he_stat { u64 period_us; u64 period_guest_sys; u64 period_guest_us; - u64 weight; - u64 ins_lat; - u64 p_stage_cyc; u32 nr_events; }; @@ -109,6 +106,9 @@ struct hist_entry { s32 socket; s32 cpu; u64 code_page_size; + u64 weight; + u64 ins_lat; + u64 p_stage_cyc; u8 cpumode; u8 depth; diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c index 37a9492edb3e..df3c4671be72 100644 --- a/tools/perf/util/util.c +++ b/tools/perf/util/util.c @@ -379,32 +379,32 @@ fetch_kernel_version(unsigned int *puint, char *str, return 0; } -const char *perf_tip(const char *dirpath) +int perf_tip(char **strp, const char *dirpath) { struct strlist *tips; struct str_node *node; - char *tip = NULL; struct strlist_config conf = { .dirname = dirpath, .file_only = true, }; + int ret = 0; + *strp = NULL; tips = strlist__new("tips.txt", &conf); if (tips == NULL) - return errno == ENOENT ? NULL : - "Tip: check path of tips.txt or get more memory! ;-p"; + return -errno; if (strlist__nr_entries(tips) == 0) goto out; node = strlist__entry(tips, random() % strlist__nr_entries(tips)); - if (asprintf(&tip, "Tip: %s", node->s) < 0) - tip = (char *)"Tip: get more memory! 
;-)"; + if (asprintf(strp, "Tip: %s", node->s) < 0) + ret = -ENOMEM; out: strlist__delete(tips); - return tip; + return ret; } char *perf_exe(char *buf, int len) diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h index ad737052e597..9f0d36ba77f2 100644 --- a/tools/perf/util/util.h +++ b/tools/perf/util/util.h @@ -39,7 +39,7 @@ int fetch_kernel_version(unsigned int *puint, #define KVER_FMT "%d.%d.%d" #define KVER_PARAM(x) KVER_VERSION(x), KVER_PATCHLEVEL(x), KVER_SUBLEVEL(x) -const char *perf_tip(const char *dirpath); +int perf_tip(char **strp, const char *dirpath); #ifndef HAVE_SCHED_GETCPU_SUPPORT int sched_getcpu(void); diff --git a/tools/power/acpi/Makefile.config b/tools/power/acpi/Makefile.config index 331f6d30f472..cd7106876a5f 100644 --- a/tools/power/acpi/Makefile.config +++ b/tools/power/acpi/Makefile.config @@ -69,6 +69,7 @@ KERNEL_INCLUDE := $(OUTPUT)include ACPICA_INCLUDE := $(srctree)/../../../drivers/acpi/acpica CFLAGS += -D_LINUX -I$(KERNEL_INCLUDE) -I$(ACPICA_INCLUDE) CFLAGS += $(WARNINGS) +MKDIR = mkdir ifeq ($(strip $(V)),false) QUIET=@ diff --git a/tools/power/acpi/Makefile.rules b/tools/power/acpi/Makefile.rules index 2a6c170b57cd..1d7616f5d0ae 100644 --- a/tools/power/acpi/Makefile.rules +++ b/tools/power/acpi/Makefile.rules @@ -21,6 +21,7 @@ $(KERNEL_INCLUDE): $(objdir)%.o: %.c $(KERNEL_INCLUDE) $(ECHO) " CC " $(subst $(OUTPUT),,$@) + $(QUIET) $(MKDIR) -p $(objdir) 2>/dev/null $(QUIET) $(CC) -c $(CFLAGS) -o $@ $< all: $(OUTPUT)$(TOOL) diff --git a/tools/testing/radix-tree/linux/lockdep.h b/tools/testing/radix-tree/linux/lockdep.h index 565fccdfe6e9..016cff473cfc 100644 --- a/tools/testing/radix-tree/linux/lockdep.h +++ b/tools/testing/radix-tree/linux/lockdep.h @@ -1,5 +1,8 @@ #ifndef _LINUX_LOCKDEP_H #define _LINUX_LOCKDEP_H + +#include <linux/spinlock.h> + struct lock_class_key { unsigned int a; }; diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 5d42db2e129a..a795bca4c8ec 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -192,22 +192,26 @@ TEST_GEN_PROGS_EXTENDED += $(DEFAULT_BPFTOOL) $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): $(BPFOBJ) -$(OUTPUT)/test_dev_cgroup: cgroup_helpers.c testing_helpers.o -$(OUTPUT)/test_skb_cgroup_id_user: cgroup_helpers.c testing_helpers.o -$(OUTPUT)/test_sock: cgroup_helpers.c testing_helpers.o -$(OUTPUT)/test_sock_addr: cgroup_helpers.c testing_helpers.o -$(OUTPUT)/test_sockmap: cgroup_helpers.c testing_helpers.o -$(OUTPUT)/test_tcpnotify_user: cgroup_helpers.c trace_helpers.c testing_helpers.o -$(OUTPUT)/get_cgroup_id_user: cgroup_helpers.c testing_helpers.o -$(OUTPUT)/test_cgroup_storage: cgroup_helpers.c testing_helpers.o -$(OUTPUT)/test_sock_fields: cgroup_helpers.c testing_helpers.o -$(OUTPUT)/test_sysctl: cgroup_helpers.c testing_helpers.o -$(OUTPUT)/test_tag: testing_helpers.o -$(OUTPUT)/test_lirc_mode2_user: testing_helpers.o -$(OUTPUT)/xdping: testing_helpers.o -$(OUTPUT)/flow_dissector_load: testing_helpers.o -$(OUTPUT)/test_maps: testing_helpers.o -$(OUTPUT)/test_verifier: testing_helpers.o +CGROUP_HELPERS := $(OUTPUT)/cgroup_helpers.o +TESTING_HELPERS := $(OUTPUT)/testing_helpers.o +TRACE_HELPERS := $(OUTPUT)/trace_helpers.o + +$(OUTPUT)/test_dev_cgroup: $(CGROUP_HELPERS) $(TESTING_HELPERS) +$(OUTPUT)/test_skb_cgroup_id_user: $(CGROUP_HELPERS) $(TESTING_HELPERS) +$(OUTPUT)/test_sock: $(CGROUP_HELPERS) $(TESTING_HELPERS) +$(OUTPUT)/test_sock_addr: $(CGROUP_HELPERS) $(TESTING_HELPERS) +$(OUTPUT)/test_sockmap: 
$(CGROUP_HELPERS) $(TESTING_HELPERS) +$(OUTPUT)/test_tcpnotify_user: $(CGROUP_HELPERS) $(TESTING_HELPERS) $(TRACE_HELPERS) +$(OUTPUT)/get_cgroup_id_user: $(CGROUP_HELPERS) $(TESTING_HELPERS) +$(OUTPUT)/test_cgroup_storage: $(CGROUP_HELPERS) $(TESTING_HELPERS) +$(OUTPUT)/test_sock_fields: $(CGROUP_HELPERS) $(TESTING_HELPERS) +$(OUTPUT)/test_sysctl: $(CGROUP_HELPERS) $(TESTING_HELPERS) +$(OUTPUT)/test_tag: $(TESTING_HELPERS) +$(OUTPUT)/test_lirc_mode2_user: $(TESTING_HELPERS) +$(OUTPUT)/xdping: $(TESTING_HELPERS) +$(OUTPUT)/flow_dissector_load: $(TESTING_HELPERS) +$(OUTPUT)/test_maps: $(TESTING_HELPERS) +$(OUTPUT)/test_verifier: $(TESTING_HELPERS) BPFTOOL ?= $(DEFAULT_BPFTOOL) $(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \ @@ -325,9 +329,10 @@ LINKED_SKELS := test_static_linked.skel.h linked_funcs.skel.h \ linked_vars.skel.h linked_maps.skel.h LSKELS := kfunc_call_test.c fentry_test.c fexit_test.c fexit_sleep.c \ - test_ringbuf.c atomics.c trace_printk.c trace_vprintk.c + test_ringbuf.c atomics.c trace_printk.c trace_vprintk.c \ + map_ptr_kern.c core_kern.c # Generate both light skeleton and libbpf skeleton for these -LSKELS_EXTRA := test_ksyms_module.c test_ksyms_weak.c +LSKELS_EXTRA := test_ksyms_module.c test_ksyms_weak.c kfunc_call_test_subprog.c SKEL_BLACKLIST += $$(LSKELS) test_static_linked.skel.h-deps := test_static_linked1.o test_static_linked2.o @@ -531,14 +536,18 @@ $(OUTPUT)/bench_trigger.o: $(OUTPUT)/trigger_bench.skel.h $(OUTPUT)/bench_ringbufs.o: $(OUTPUT)/ringbuf_bench.skel.h \ $(OUTPUT)/perfbuf_bench.skel.h $(OUTPUT)/bench_bloom_filter_map.o: $(OUTPUT)/bloom_filter_bench.skel.h +$(OUTPUT)/bench_bpf_loop.o: $(OUTPUT)/bpf_loop_bench.skel.h $(OUTPUT)/bench.o: bench.h testing_helpers.h $(BPFOBJ) $(OUTPUT)/bench: LDLIBS += -lm -$(OUTPUT)/bench: $(OUTPUT)/bench.o $(OUTPUT)/testing_helpers.o \ +$(OUTPUT)/bench: $(OUTPUT)/bench.o \ + $(TESTING_HELPERS) \ + $(TRACE_HELPERS) \ $(OUTPUT)/bench_count.o \ $(OUTPUT)/bench_rename.o \ $(OUTPUT)/bench_trigger.o \ $(OUTPUT)/bench_ringbufs.o \ - $(OUTPUT)/bench_bloom_filter_map.o + $(OUTPUT)/bench_bloom_filter_map.o \ + $(OUTPUT)/bench_bpf_loop.o $(call msg,BINARY,,$@) $(Q)$(CC) $(LDFLAGS) $(filter %.a %.o,$^) $(LDLIBS) -o $@ diff --git a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c index cc4722f693e9..3d6082b97a56 100644 --- a/tools/testing/selftests/bpf/bench.c +++ b/tools/testing/selftests/bpf/bench.c @@ -134,6 +134,39 @@ void hits_drops_report_final(struct bench_res res[], int res_cnt) total_ops_mean, total_ops_stddev); } +void ops_report_progress(int iter, struct bench_res *res, long delta_ns) +{ + double hits_per_sec, hits_per_prod; + + hits_per_sec = res->hits / 1000000.0 / (delta_ns / 1000000000.0); + hits_per_prod = hits_per_sec / env.producer_cnt; + + printf("Iter %3d (%7.3lfus): ", iter, (delta_ns - 1000000000) / 1000.0); + + printf("hits %8.3lfM/s (%7.3lfM/prod)\n", hits_per_sec, hits_per_prod); +} + +void ops_report_final(struct bench_res res[], int res_cnt) +{ + double hits_mean = 0.0, hits_stddev = 0.0; + int i; + + for (i = 0; i < res_cnt; i++) + hits_mean += res[i].hits / 1000000.0 / (0.0 + res_cnt); + + if (res_cnt > 1) { + for (i = 0; i < res_cnt; i++) + hits_stddev += (hits_mean - res[i].hits / 1000000.0) * + (hits_mean - res[i].hits / 1000000.0) / + (res_cnt - 1.0); + + hits_stddev = sqrt(hits_stddev); + } + printf("Summary: throughput %8.3lf \u00B1 %5.3lf M ops/s (%7.3lfM ops/prod), ", + hits_mean, hits_stddev, hits_mean / env.producer_cnt); + printf("latency 
%8.3lf ns/op\n", 1000.0 / hits_mean * env.producer_cnt); +} + const char *argp_program_version = "benchmark"; const char *argp_program_bug_address = "<bpf@vger.kernel.org>"; const char argp_program_doc[] = @@ -171,10 +204,12 @@ static const struct argp_option opts[] = { extern struct argp bench_ringbufs_argp; extern struct argp bench_bloom_map_argp; +extern struct argp bench_bpf_loop_argp; static const struct argp_child bench_parsers[] = { { &bench_ringbufs_argp, 0, "Ring buffers benchmark", 0 }, { &bench_bloom_map_argp, 0, "Bloom filter map benchmark", 0 }, + { &bench_bpf_loop_argp, 0, "bpf_loop helper benchmark", 0 }, {}, }; @@ -359,6 +394,11 @@ extern const struct bench bench_trig_kprobe; extern const struct bench bench_trig_fentry; extern const struct bench bench_trig_fentry_sleep; extern const struct bench bench_trig_fmodret; +extern const struct bench bench_trig_uprobe_base; +extern const struct bench bench_trig_uprobe_with_nop; +extern const struct bench bench_trig_uretprobe_with_nop; +extern const struct bench bench_trig_uprobe_without_nop; +extern const struct bench bench_trig_uretprobe_without_nop; extern const struct bench bench_rb_libbpf; extern const struct bench bench_rb_custom; extern const struct bench bench_pb_libbpf; @@ -368,6 +408,7 @@ extern const struct bench bench_bloom_update; extern const struct bench bench_bloom_false_positive; extern const struct bench bench_hashmap_without_bloom; extern const struct bench bench_hashmap_with_bloom; +extern const struct bench bench_bpf_loop; static const struct bench *benchs[] = { &bench_count_global, @@ -385,6 +426,11 @@ static const struct bench *benchs[] = { &bench_trig_fentry, &bench_trig_fentry_sleep, &bench_trig_fmodret, + &bench_trig_uprobe_base, + &bench_trig_uprobe_with_nop, + &bench_trig_uretprobe_with_nop, + &bench_trig_uprobe_without_nop, + &bench_trig_uretprobe_without_nop, &bench_rb_libbpf, &bench_rb_custom, &bench_pb_libbpf, @@ -394,6 +440,7 @@ static const struct bench *benchs[] = { &bench_bloom_false_positive, &bench_hashmap_without_bloom, &bench_hashmap_with_bloom, + &bench_bpf_loop, }; static void setup_benchmark() diff --git a/tools/testing/selftests/bpf/bench.h b/tools/testing/selftests/bpf/bench.h index 624c6b11501f..50785503756b 100644 --- a/tools/testing/selftests/bpf/bench.h +++ b/tools/testing/selftests/bpf/bench.h @@ -59,6 +59,8 @@ void hits_drops_report_progress(int iter, struct bench_res *res, long delta_ns); void hits_drops_report_final(struct bench_res res[], int res_cnt); void false_hits_report_progress(int iter, struct bench_res *res, long delta_ns); void false_hits_report_final(struct bench_res res[], int res_cnt); +void ops_report_progress(int iter, struct bench_res *res, long delta_ns); +void ops_report_final(struct bench_res res[], int res_cnt); static inline __u64 get_time_ns() { struct timespec t; diff --git a/tools/testing/selftests/bpf/benchs/bench_bpf_loop.c b/tools/testing/selftests/bpf/benchs/bench_bpf_loop.c new file mode 100644 index 000000000000..d0a6572bfab6 --- /dev/null +++ b/tools/testing/selftests/bpf/benchs/bench_bpf_loop.c @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#include <argp.h> +#include "bench.h" +#include "bpf_loop_bench.skel.h" + +/* BPF triggering benchmarks */ +static struct ctx { + struct bpf_loop_bench *skel; +} ctx; + +static struct { + __u32 nr_loops; +} args = { + .nr_loops = 10, +}; + +enum { + ARG_NR_LOOPS = 4000, +}; + +static const struct argp_option opts[] = { + { "nr_loops", ARG_NR_LOOPS, "nr_loops", 0, + 
"Set number of loops for the bpf_loop helper"}, + {}, +}; + +static error_t parse_arg(int key, char *arg, struct argp_state *state) +{ + switch (key) { + case ARG_NR_LOOPS: + args.nr_loops = strtol(arg, NULL, 10); + break; + default: + return ARGP_ERR_UNKNOWN; + } + + return 0; +} + +/* exported into benchmark runner */ +const struct argp bench_bpf_loop_argp = { + .options = opts, + .parser = parse_arg, +}; + +static void validate(void) +{ + if (env.consumer_cnt != 1) { + fprintf(stderr, "benchmark doesn't support multi-consumer!\n"); + exit(1); + } +} + +static void *producer(void *input) +{ + while (true) + /* trigger the bpf program */ + syscall(__NR_getpgid); + + return NULL; +} + +static void *consumer(void *input) +{ + return NULL; +} + +static void measure(struct bench_res *res) +{ + res->hits = atomic_swap(&ctx.skel->bss->hits, 0); +} + +static void setup(void) +{ + struct bpf_link *link; + + setup_libbpf(); + + ctx.skel = bpf_loop_bench__open_and_load(); + if (!ctx.skel) { + fprintf(stderr, "failed to open skeleton\n"); + exit(1); + } + + link = bpf_program__attach(ctx.skel->progs.benchmark); + if (!link) { + fprintf(stderr, "failed to attach program!\n"); + exit(1); + } + + ctx.skel->bss->nr_loops = args.nr_loops; +} + +const struct bench bench_bpf_loop = { + .name = "bpf-loop", + .validate = validate, + .setup = setup, + .producer_thread = producer, + .consumer_thread = consumer, + .measure = measure, + .report_progress = ops_report_progress, + .report_final = ops_report_final, +}; diff --git a/tools/testing/selftests/bpf/benchs/bench_trigger.c b/tools/testing/selftests/bpf/benchs/bench_trigger.c index f41a491a8cc0..049a5ad56f65 100644 --- a/tools/testing/selftests/bpf/benchs/bench_trigger.c +++ b/tools/testing/selftests/bpf/benchs/bench_trigger.c @@ -2,6 +2,7 @@ /* Copyright (c) 2020 Facebook */ #include "bench.h" #include "trigger_bench.skel.h" +#include "trace_helpers.h" /* BPF triggering benchmarks */ static struct trigger_ctx { @@ -107,6 +108,101 @@ static void *trigger_consumer(void *input) return NULL; } +/* make sure call is not inlined and not avoided by compiler, so __weak and + * inline asm volatile in the body of the function + * + * There is a performance difference between uprobing at nop location vs other + * instructions. So use two different targets, one of which starts with nop + * and another doesn't. + * + * GCC doesn't generate stack setup preample for these functions due to them + * having no input arguments and doing nothing in the body. 
+ */ +__weak void uprobe_target_with_nop(void) +{ + asm volatile ("nop"); +} + +__weak void uprobe_target_without_nop(void) +{ + asm volatile (""); +} + +static void *uprobe_base_producer(void *input) +{ + while (true) { + uprobe_target_with_nop(); + atomic_inc(&base_hits.value); + } + return NULL; +} + +static void *uprobe_producer_with_nop(void *input) +{ + while (true) + uprobe_target_with_nop(); + return NULL; +} + +static void *uprobe_producer_without_nop(void *input) +{ + while (true) + uprobe_target_without_nop(); + return NULL; +} + +static void usetup(bool use_retprobe, bool use_nop) +{ + size_t uprobe_offset; + ssize_t base_addr; + struct bpf_link *link; + + setup_libbpf(); + + ctx.skel = trigger_bench__open_and_load(); + if (!ctx.skel) { + fprintf(stderr, "failed to open skeleton\n"); + exit(1); + } + + base_addr = get_base_addr(); + if (use_nop) + uprobe_offset = get_uprobe_offset(&uprobe_target_with_nop, base_addr); + else + uprobe_offset = get_uprobe_offset(&uprobe_target_without_nop, base_addr); + + link = bpf_program__attach_uprobe(ctx.skel->progs.bench_trigger_uprobe, + use_retprobe, + -1 /* all PIDs */, + "/proc/self/exe", + uprobe_offset); + if (!link) { + fprintf(stderr, "failed to attach uprobe!\n"); + exit(1); + } + ctx.skel->links.bench_trigger_uprobe = link; +} + +static void uprobe_setup_with_nop() +{ + usetup(false, true); +} + +static void uretprobe_setup_with_nop() +{ + usetup(true, true); +} + +static void uprobe_setup_without_nop() +{ + usetup(false, false); +} + +static void uretprobe_setup_without_nop() +{ + usetup(true, false); +} + const struct bench bench_trig_base = { .name = "trig-base", .validate = trigger_validate, @@ -182,3 +278,53 @@ const struct bench bench_trig_fmodret = { .report_progress = hits_drops_report_progress, .report_final = hits_drops_report_final, }; + +const struct bench bench_trig_uprobe_base = { + .name = "trig-uprobe-base", + .setup = NULL, /* no uprobe/uretprobe is attached */ + .producer_thread = uprobe_base_producer, + .consumer_thread = trigger_consumer, + .measure = trigger_base_measure, + .report_progress = hits_drops_report_progress, + .report_final = hits_drops_report_final, +}; + +const struct bench bench_trig_uprobe_with_nop = { + .name = "trig-uprobe-with-nop", + .setup = uprobe_setup_with_nop, + .producer_thread = uprobe_producer_with_nop, + .consumer_thread = trigger_consumer, + .measure = trigger_measure, + .report_progress = hits_drops_report_progress, + .report_final = hits_drops_report_final, +}; + +const struct bench bench_trig_uretprobe_with_nop = { + .name = "trig-uretprobe-with-nop", + .setup = uretprobe_setup_with_nop, + .producer_thread = uprobe_producer_with_nop, + .consumer_thread = trigger_consumer, + .measure = trigger_measure, + .report_progress = hits_drops_report_progress, + .report_final = hits_drops_report_final, +}; + +const struct bench bench_trig_uprobe_without_nop = { + .name = "trig-uprobe-without-nop", + .setup = uprobe_setup_without_nop, + .producer_thread = uprobe_producer_without_nop, + .consumer_thread = trigger_consumer, + .measure = trigger_measure, + .report_progress = hits_drops_report_progress, + .report_final = hits_drops_report_final, +}; + +const struct bench bench_trig_uretprobe_without_nop = { + .name = "trig-uretprobe-without-nop", + .setup = uretprobe_setup_without_nop, + .producer_thread = uprobe_producer_without_nop, + .consumer_thread = trigger_consumer, + .measure = trigger_measure, + .report_progress = hits_drops_report_progress, + .report_final = hits_drops_report_final, 
+}; diff --git a/tools/testing/selftests/bpf/benchs/run_bench_bpf_loop.sh b/tools/testing/selftests/bpf/benchs/run_bench_bpf_loop.sh new file mode 100755 index 000000000000..d4f5f73b356b --- /dev/null +++ b/tools/testing/selftests/bpf/benchs/run_bench_bpf_loop.sh @@ -0,0 +1,15 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +source ./benchs/run_common.sh + +set -eufo pipefail + +for t in 1 4 8 12 16; do +for i in 10 100 500 1000 5000 10000 50000 100000 500000 1000000; do +subtitle "nr_loops: $i, nr_threads: $t" + summarize_ops "bpf_loop: " \ + "$($RUN_BENCH -p $t --nr_loops $i bpf-loop)" + printf "\n" +done +done diff --git a/tools/testing/selftests/bpf/benchs/run_common.sh b/tools/testing/selftests/bpf/benchs/run_common.sh index 9a16be78b180..6c5e6023a69f 100644 --- a/tools/testing/selftests/bpf/benchs/run_common.sh +++ b/tools/testing/selftests/bpf/benchs/run_common.sh @@ -33,6 +33,14 @@ function percentage() echo "$*" | sed -E "s/.*Percentage\s=\s+([0-9]+\.[0-9]+).*/\1/" } +function ops() +{ + echo -n "throughput: " + echo -n "$*" | sed -E "s/.*throughput\s+([0-9]+\.[0-9]+ ± [0-9]+\.[0-9]+\sM\sops\/s).*/\1/" + echo -n -e ", latency: " + echo "$*" | sed -E "s/.*latency\s+([0-9]+\.[0-9]+\sns\/op).*/\1/" +} + function total() { echo "$*" | sed -E "s/.*total operations\s+([0-9]+\.[0-9]+ ± [0-9]+\.[0-9]+M\/s).*/\1/" @@ -52,6 +60,13 @@ function summarize_percentage() printf "%-20s %s%%\n" "$bench" "$(percentage $summary)" } +function summarize_ops() +{ + bench="$1" + summary=$(echo $2 | tail -n1) + printf "%-20s %s\n" "$bench" "$(ops $summary)" +} + function summarize_total() { bench="$1" diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c index 5d52ea2768df..df3b292a8ffe 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c @@ -33,6 +33,22 @@ noinline int bpf_testmod_loop_test(int n) return sum; } +__weak noinline struct file *bpf_testmod_return_ptr(int arg) +{ + static struct file f = {}; + + switch (arg) { + case 1: return (void *)EINVAL; /* user addr */ + case 2: return (void *)0xcafe4a11; /* user addr */ + case 3: return (void *)-EINVAL; /* canonical, but invalid */ + case 4: return (void *)(1ull << 60); /* non-canonical and invalid */ + case 5: return (void *)~(1ull << 30); /* trigger extable */ + case 6: return &f; /* valid addr */ + case 7: return (void *)((long)&f | 1); /* kernel tricks */ + default: return NULL; + } +} + noinline ssize_t bpf_testmod_test_read(struct file *file, struct kobject *kobj, struct bin_attribute *bin_attr, @@ -43,6 +59,10 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj, .off = off, .len = len, }; + int i = 1; + + while (bpf_testmod_return_ptr(i)) + i++; /* This is always true. Use the check to make sure the compiler * doesn't remove bpf_testmod_loop_test. diff --git a/tools/testing/selftests/bpf/map_tests/array_map_batch_ops.c b/tools/testing/selftests/bpf/map_tests/array_map_batch_ops.c index f4d870da7684..78c76496b14a 100644 --- a/tools/testing/selftests/bpf/map_tests/array_map_batch_ops.c +++ b/tools/testing/selftests/bpf/map_tests/array_map_batch_ops.c @@ -68,13 +68,6 @@ static void map_batch_verify(int *visited, __u32 max_entries, int *keys, static void __test_map_lookup_and_update_batch(bool is_pcpu) { - struct bpf_create_map_attr xattr = { - .name = "array_map", - .map_type = is_pcpu ? 
BPF_MAP_TYPE_PERCPU_ARRAY : - BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(int), - .value_size = sizeof(__s64), - }; int map_fd, *keys, *visited; __u32 count, total, total_success; const __u32 max_entries = 10; @@ -86,10 +79,10 @@ static void __test_map_lookup_and_update_batch(bool is_pcpu) .flags = 0, ); - xattr.max_entries = max_entries; - map_fd = bpf_create_map_xattr(&xattr); + map_fd = bpf_map_create(is_pcpu ? BPF_MAP_TYPE_PERCPU_ARRAY : BPF_MAP_TYPE_ARRAY, + "array_map", sizeof(int), sizeof(__s64), max_entries, NULL); CHECK(map_fd == -1, - "bpf_create_map_xattr()", "error:%s\n", strerror(errno)); + "bpf_map_create()", "error:%s\n", strerror(errno)); value_size = sizeof(__s64); if (is_pcpu) diff --git a/tools/testing/selftests/bpf/map_tests/htab_map_batch_ops.c b/tools/testing/selftests/bpf/map_tests/htab_map_batch_ops.c index 976bf415fbdd..f807d53fd8dd 100644 --- a/tools/testing/selftests/bpf/map_tests/htab_map_batch_ops.c +++ b/tools/testing/selftests/bpf/map_tests/htab_map_batch_ops.c @@ -83,22 +83,15 @@ void __test_map_lookup_and_delete_batch(bool is_pcpu) int err, step, value_size; bool nospace_err; void *values; - struct bpf_create_map_attr xattr = { - .name = "hash_map", - .map_type = is_pcpu ? BPF_MAP_TYPE_PERCPU_HASH : - BPF_MAP_TYPE_HASH, - .key_size = sizeof(int), - .value_size = sizeof(int), - }; DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts, .elem_flags = 0, .flags = 0, ); - xattr.max_entries = max_entries; - map_fd = bpf_create_map_xattr(&xattr); + map_fd = bpf_map_create(is_pcpu ? BPF_MAP_TYPE_PERCPU_HASH : BPF_MAP_TYPE_HASH, + "hash_map", sizeof(int), sizeof(int), max_entries, NULL); CHECK(map_fd == -1, - "bpf_create_map_xattr()", "error:%s\n", strerror(errno)); + "bpf_map_create()", "error:%s\n", strerror(errno)); value_size = is_pcpu ? 
sizeof(value) : sizeof(int); keys = malloc(max_entries * sizeof(int)); diff --git a/tools/testing/selftests/bpf/map_tests/lpm_trie_map_batch_ops.c b/tools/testing/selftests/bpf/map_tests/lpm_trie_map_batch_ops.c index 2e986e5e4cac..87d07b596e17 100644 --- a/tools/testing/selftests/bpf/map_tests/lpm_trie_map_batch_ops.c +++ b/tools/testing/selftests/bpf/map_tests/lpm_trie_map_batch_ops.c @@ -64,13 +64,7 @@ static void map_batch_verify(int *visited, __u32 max_entries, void test_lpm_trie_map_batch_ops(void) { - struct bpf_create_map_attr xattr = { - .name = "lpm_trie_map", - .map_type = BPF_MAP_TYPE_LPM_TRIE, - .key_size = sizeof(struct test_lpm_key), - .value_size = sizeof(int), - .map_flags = BPF_F_NO_PREALLOC, - }; + LIBBPF_OPTS(bpf_map_create_opts, create_opts, .map_flags = BPF_F_NO_PREALLOC); struct test_lpm_key *keys, key; int map_fd, *values, *visited; __u32 step, count, total, total_success; @@ -82,9 +76,10 @@ void test_lpm_trie_map_batch_ops(void) .flags = 0, ); - xattr.max_entries = max_entries; - map_fd = bpf_create_map_xattr(&xattr); - CHECK(map_fd == -1, "bpf_create_map_xattr()", "error:%s\n", + map_fd = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, "lpm_trie_map", + sizeof(struct test_lpm_key), sizeof(int), + max_entries, &create_opts); + CHECK(map_fd == -1, "bpf_map_create()", "error:%s\n", strerror(errno)); keys = malloc(max_entries * sizeof(struct test_lpm_key)); diff --git a/tools/testing/selftests/bpf/map_tests/sk_storage_map.c b/tools/testing/selftests/bpf/map_tests/sk_storage_map.c index e569edc679d8..099eb4dfd4f7 100644 --- a/tools/testing/selftests/bpf/map_tests/sk_storage_map.c +++ b/tools/testing/selftests/bpf/map_tests/sk_storage_map.c @@ -19,16 +19,12 @@ #include <test_btf.h> #include <test_maps.h> -static struct bpf_create_map_attr xattr = { - .name = "sk_storage_map", - .map_type = BPF_MAP_TYPE_SK_STORAGE, - .map_flags = BPF_F_NO_PREALLOC, - .max_entries = 0, - .key_size = 4, - .value_size = 8, +static struct bpf_map_create_opts map_opts = { + .sz = sizeof(map_opts), .btf_key_type_id = 1, .btf_value_type_id = 3, .btf_fd = -1, + .map_flags = BPF_F_NO_PREALLOC, }; static unsigned int nr_sk_threads_done; @@ -140,7 +136,7 @@ static int load_btf(void) memcpy(raw_btf + sizeof(btf_hdr) + sizeof(btf_raw_types), btf_str_sec, sizeof(btf_str_sec)); - return bpf_load_btf(raw_btf, sizeof(raw_btf), 0, 0, 0); + return bpf_btf_load(raw_btf, sizeof(raw_btf), NULL); } static int create_sk_storage_map(void) @@ -150,13 +146,13 @@ static int create_sk_storage_map(void) btf_fd = load_btf(); CHECK(btf_fd == -1, "bpf_load_btf", "btf_fd:%d errno:%d\n", btf_fd, errno); - xattr.btf_fd = btf_fd; + map_opts.btf_fd = btf_fd; - map_fd = bpf_create_map_xattr(&xattr); - xattr.btf_fd = -1; + map_fd = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "sk_storage_map", 4, 8, 0, &map_opts); + map_opts.btf_fd = -1; close(btf_fd); CHECK(map_fd == -1, - "bpf_create_map_xattr()", "errno:%d\n", errno); + "bpf_map_create()", "errno:%d\n", errno); return map_fd; } @@ -463,20 +459,20 @@ static void test_sk_storage_map_basic(void) int cnt; int lock; } value = { .cnt = 0xeB9f, .lock = 0, }, lookup_value; - struct bpf_create_map_attr bad_xattr; + struct bpf_map_create_opts bad_xattr; int btf_fd, map_fd, sk_fd, err; btf_fd = load_btf(); CHECK(btf_fd == -1, "bpf_load_btf", "btf_fd:%d errno:%d\n", btf_fd, errno); - xattr.btf_fd = btf_fd; + map_opts.btf_fd = btf_fd; sk_fd = socket(AF_INET6, SOCK_STREAM, 0); CHECK(sk_fd == -1, "socket()", "sk_fd:%d errno:%d\n", sk_fd, errno); - map_fd = bpf_create_map_xattr(&xattr); - CHECK(map_fd == 
-1, "bpf_create_map_xattr(good_xattr)", + map_fd = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "sk_storage_map", 4, 8, 0, &map_opts); + CHECK(map_fd == -1, "bpf_map_create(good_xattr)", "map_fd:%d errno:%d\n", map_fd, errno); /* Add new elem */ @@ -560,31 +556,29 @@ static void test_sk_storage_map_basic(void) CHECK(!err || errno != ENOENT, "bpf_map_delete_elem()", "err:%d errno:%d\n", err, errno); - memcpy(&bad_xattr, &xattr, sizeof(xattr)); + memcpy(&bad_xattr, &map_opts, sizeof(map_opts)); bad_xattr.btf_key_type_id = 0; - err = bpf_create_map_xattr(&bad_xattr); - CHECK(!err || errno != EINVAL, "bap_create_map_xattr(bad_xattr)", + err = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "sk_storage_map", 4, 8, 0, &bad_xattr); + CHECK(!err || errno != EINVAL, "bpf_map_create(bad_xattr)", "err:%d errno:%d\n", err, errno); - memcpy(&bad_xattr, &xattr, sizeof(xattr)); + memcpy(&bad_xattr, &map_opts, sizeof(map_opts)); bad_xattr.btf_key_type_id = 3; - err = bpf_create_map_xattr(&bad_xattr); - CHECK(!err || errno != EINVAL, "bap_create_map_xattr(bad_xattr)", + err = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "sk_storage_map", 4, 8, 0, &bad_xattr); + CHECK(!err || errno != EINVAL, "bpf_map_create(bad_xattr)", "err:%d errno:%d\n", err, errno); - memcpy(&bad_xattr, &xattr, sizeof(xattr)); - bad_xattr.max_entries = 1; - err = bpf_create_map_xattr(&bad_xattr); - CHECK(!err || errno != EINVAL, "bap_create_map_xattr(bad_xattr)", + err = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "sk_storage_map", 4, 8, 1, &map_opts); + CHECK(!err || errno != EINVAL, "bpf_map_create(bad_xattr)", "err:%d errno:%d\n", err, errno); - memcpy(&bad_xattr, &xattr, sizeof(xattr)); + memcpy(&bad_xattr, &map_opts, sizeof(map_opts)); bad_xattr.map_flags = 0; - err = bpf_create_map_xattr(&bad_xattr); + err = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "sk_storage_map", 4, 8, 0, &bad_xattr); CHECK(!err || errno != EINVAL, "bap_create_map_xattr(bad_xattr)", "err:%d errno:%d\n", err, errno); - xattr.btf_fd = -1; + map_opts.btf_fd = -1; close(btf_fd); close(map_fd); close(sk_fd); diff --git a/tools/testing/selftests/bpf/prog_tests/atomics.c b/tools/testing/selftests/bpf/prog_tests/atomics.c index 0f9525293881..86b7d5d84eec 100644 --- a/tools/testing/selftests/bpf/prog_tests/atomics.c +++ b/tools/testing/selftests/bpf/prog_tests/atomics.c @@ -167,7 +167,7 @@ static void test_cmpxchg(struct atomics_lskel *skel) prog_fd = skel->progs.cmpxchg.prog_fd; err = bpf_prog_test_run(prog_fd, 1, NULL, 0, NULL, NULL, &retval, &duration); - if (CHECK(err || retval, "test_run add", + if (CHECK(err || retval, "test_run cmpxchg", "err %d errno %d retval %d duration %d\n", err, errno, retval, duration)) goto cleanup; @@ -196,7 +196,7 @@ static void test_xchg(struct atomics_lskel *skel) prog_fd = skel->progs.xchg.prog_fd; err = bpf_prog_test_run(prog_fd, 1, NULL, 0, NULL, NULL, &retval, &duration); - if (CHECK(err || retval, "test_run add", + if (CHECK(err || retval, "test_run xchg", "err %d errno %d retval %d duration %d\n", err, errno, retval, duration)) goto cleanup; diff --git a/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c b/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c index be73e3de6668..d2d9e965eba5 100644 --- a/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c +++ b/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c @@ -7,32 +7,33 @@ static void test_fail_cases(void) { + LIBBPF_OPTS(bpf_map_create_opts, opts); __u32 value; int fd, err; /* Invalid key size */ - fd = bpf_create_map(BPF_MAP_TYPE_BLOOM_FILTER, 4, sizeof(value), 100, 
0); - if (!ASSERT_LT(fd, 0, "bpf_create_map bloom filter invalid key size")) + fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 4, sizeof(value), 100, NULL); + if (!ASSERT_LT(fd, 0, "bpf_map_create bloom filter invalid key size")) close(fd); /* Invalid value size */ - fd = bpf_create_map(BPF_MAP_TYPE_BLOOM_FILTER, 0, 0, 100, 0); - if (!ASSERT_LT(fd, 0, "bpf_create_map bloom filter invalid value size 0")) + fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, 0, 100, NULL); + if (!ASSERT_LT(fd, 0, "bpf_map_create bloom filter invalid value size 0")) close(fd); /* Invalid max entries size */ - fd = bpf_create_map(BPF_MAP_TYPE_BLOOM_FILTER, 0, sizeof(value), 0, 0); - if (!ASSERT_LT(fd, 0, "bpf_create_map bloom filter invalid max entries size")) + fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, sizeof(value), 0, NULL); + if (!ASSERT_LT(fd, 0, "bpf_map_create bloom filter invalid max entries size")) close(fd); /* Bloom filter maps do not support BPF_F_NO_PREALLOC */ - fd = bpf_create_map(BPF_MAP_TYPE_BLOOM_FILTER, 0, sizeof(value), 100, - BPF_F_NO_PREALLOC); - if (!ASSERT_LT(fd, 0, "bpf_create_map bloom filter invalid flags")) + opts.map_flags = BPF_F_NO_PREALLOC; + fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, sizeof(value), 100, &opts); + if (!ASSERT_LT(fd, 0, "bpf_map_create bloom filter invalid flags")) close(fd); - fd = bpf_create_map(BPF_MAP_TYPE_BLOOM_FILTER, 0, sizeof(value), 100, 0); - if (!ASSERT_GE(fd, 0, "bpf_create_map bloom filter")) + fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, sizeof(value), 100, NULL); + if (!ASSERT_GE(fd, 0, "bpf_map_create bloom filter")) return; /* Test invalid flags */ @@ -56,13 +57,14 @@ static void test_fail_cases(void) static void test_success_cases(void) { + LIBBPF_OPTS(bpf_map_create_opts, opts); char value[11]; int fd, err; /* Create a map */ - fd = bpf_create_map(BPF_MAP_TYPE_BLOOM_FILTER, 0, sizeof(value), 100, - BPF_F_ZERO_SEED | BPF_F_NUMA_NODE); - if (!ASSERT_GE(fd, 0, "bpf_create_map bloom filter success case")) + opts.map_flags = BPF_F_ZERO_SEED | BPF_F_NUMA_NODE; + fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, sizeof(value), 100, &opts); + if (!ASSERT_GE(fd, 0, "bpf_map_create bloom filter success case")) return; /* Add a value to the bloom filter */ @@ -100,9 +102,9 @@ static void test_inner_map(struct bloom_filter_map *skel, const __u32 *rand_vals struct bpf_link *link; /* Create a bloom filter map that will be used as the inner map */ - inner_map_fd = bpf_create_map(BPF_MAP_TYPE_BLOOM_FILTER, 0, sizeof(*rand_vals), - nr_rand_vals, 0); - if (!ASSERT_GE(inner_map_fd, 0, "bpf_create_map bloom filter inner map")) + inner_map_fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, sizeof(*rand_vals), + nr_rand_vals, NULL); + if (!ASSERT_GE(inner_map_fd, 0, "bpf_map_create bloom filter inner map")) return; for (i = 0; i < nr_rand_vals; i++) { diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c index 3e10abce3e5a..b84f859b1267 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c @@ -469,12 +469,12 @@ static void test_overflow(bool test_e2big_overflow, bool ret1) * fills seq_file buffer and then the other will trigger * overflow and needs restart. 
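For context on the bloom filter cases above: these maps have no real keys, so key_size must be 0; bpf_map_update_elem() with a NULL key adds a value, and bpf_map_lookup_elem() with a NULL key tests membership, with possible false positives but no false negatives. A rough usage sketch (value type and sizes are illustrative):

    #include <unistd.h>
    #include <bpf/bpf.h>

    int bloom_demo(void)
    {
        __u64 val = 42;
        int err;
        int fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL,
                                0 /* no keys */, sizeof(val), 100, NULL);

        if (fd < 0)
            return fd;
        err = bpf_map_update_elem(fd, NULL, &val, 0);  /* add value */
        if (!err)
            err = bpf_map_lookup_elem(fd, NULL, &val); /* 0: probably present */
        close(fd);
        return err;
    }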
*/ - map1_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0); - if (CHECK(map1_fd < 0, "bpf_create_map", + map1_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL); + if (CHECK(map1_fd < 0, "bpf_map_create", "map_creation failed: %s\n", strerror(errno))) goto out; - map2_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0); - if (CHECK(map2_fd < 0, "bpf_create_map", + map2_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL); + if (CHECK(map2_fd < 0, "bpf_map_create", "map_creation failed: %s\n", strerror(errno))) goto free_map1; @@ -1206,13 +1206,14 @@ static void test_task_vma(void) goto out; /* Read CMP_BUFFER_SIZE (1kB) from bpf_iter. Read in small chunks - * to trigger seq_file corner cases. The expected output is much - * longer than 1kB, so the while loop will terminate. + * to trigger seq_file corner cases. */ len = 0; while (len < CMP_BUFFER_SIZE) { err = read_fd_into_buffer(iter_fd, task_vma_output + len, min(read_size, CMP_BUFFER_SIZE - len)); + if (!err) + break; if (CHECK(err < 0, "read_iter_fd", "read_iter_fd failed\n")) goto out; len += err; diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_loop.c b/tools/testing/selftests/bpf/prog_tests/bpf_loop.c new file mode 100644 index 000000000000..380d7a2072e3 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/bpf_loop.c @@ -0,0 +1,145 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#include <test_progs.h> +#include <network_helpers.h> +#include "bpf_loop.skel.h" + +static void check_nr_loops(struct bpf_loop *skel) +{ + struct bpf_link *link; + + link = bpf_program__attach(skel->progs.test_prog); + if (!ASSERT_OK_PTR(link, "link")) + return; + + /* test 0 loops */ + skel->bss->nr_loops = 0; + + usleep(1); + + ASSERT_EQ(skel->bss->nr_loops_returned, skel->bss->nr_loops, + "0 loops"); + + /* test 500 loops */ + skel->bss->nr_loops = 500; + + usleep(1); + + ASSERT_EQ(skel->bss->nr_loops_returned, skel->bss->nr_loops, + "500 loops"); + ASSERT_EQ(skel->bss->g_output, (500 * 499) / 2, "g_output"); + + /* test exceeding the max limit */ + skel->bss->nr_loops = -1; + + usleep(1); + + ASSERT_EQ(skel->bss->err, -E2BIG, "over max limit"); + + bpf_link__destroy(link); +} + +static void check_callback_fn_stop(struct bpf_loop *skel) +{ + struct bpf_link *link; + + link = bpf_program__attach(skel->progs.test_prog); + if (!ASSERT_OK_PTR(link, "link")) + return; + + /* testing that loop is stopped when callback_fn returns 1 */ + skel->bss->nr_loops = 400; + skel->data->stop_index = 50; + + usleep(1); + + ASSERT_EQ(skel->bss->nr_loops_returned, skel->data->stop_index + 1, + "nr_loops_returned"); + ASSERT_EQ(skel->bss->g_output, (50 * 49) / 2, + "g_output"); + + bpf_link__destroy(link); +} + +static void check_null_callback_ctx(struct bpf_loop *skel) +{ + struct bpf_link *link; + + /* check that user is able to pass in a null callback_ctx */ + link = bpf_program__attach(skel->progs.prog_null_ctx); + if (!ASSERT_OK_PTR(link, "link")) + return; + + skel->bss->nr_loops = 10; + + usleep(1); + + ASSERT_EQ(skel->bss->nr_loops_returned, skel->bss->nr_loops, + "nr_loops_returned"); + + bpf_link__destroy(link); +} + +static void check_invalid_flags(struct bpf_loop *skel) +{ + struct bpf_link *link; + + /* check that passing in non-zero flags returns -EINVAL */ + link = bpf_program__attach(skel->progs.prog_invalid_flags); + if (!ASSERT_OK_PTR(link, "link")) + return; + + usleep(1); + + ASSERT_EQ(skel->bss->err, -EINVAL, "err"); + + bpf_link__destroy(link); +} + +static void check_nested_calls(struct 
bpf_loop *skel) +{ + __u32 nr_loops = 100, nested_callback_nr_loops = 4; + struct bpf_link *link; + + /* check that nested calls are supported */ + link = bpf_program__attach(skel->progs.prog_nested_calls); + if (!ASSERT_OK_PTR(link, "link")) + return; + + skel->bss->nr_loops = nr_loops; + skel->bss->nested_callback_nr_loops = nested_callback_nr_loops; + + usleep(1); + + ASSERT_EQ(skel->bss->nr_loops_returned, nr_loops * nested_callback_nr_loops + * nested_callback_nr_loops, "nr_loops_returned"); + ASSERT_EQ(skel->bss->g_output, (4 * 3) / 2 * nested_callback_nr_loops + * nr_loops, "g_output"); + + bpf_link__destroy(link); +} + +void test_bpf_loop(void) +{ + struct bpf_loop *skel; + + skel = bpf_loop__open_and_load(); + if (!ASSERT_OK_PTR(skel, "bpf_loop__open_and_load")) + return; + + skel->bss->pid = getpid(); + + if (test__start_subtest("check_nr_loops")) + check_nr_loops(skel); + if (test__start_subtest("check_callback_fn_stop")) + check_callback_fn_stop(skel); + if (test__start_subtest("check_null_callback_ctx")) + check_null_callback_ctx(skel); + if (test__start_subtest("check_invalid_flags")) + check_invalid_flags(skel); + if (test__start_subtest("check_nested_calls")) + check_nested_calls(skel); + + bpf_loop__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c index 94e03df69d71..8daca0ac909f 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c @@ -217,14 +217,16 @@ static bool found; static int libbpf_debug_print(enum libbpf_print_level level, const char *format, va_list args) { - char *log_buf; + const char *log_buf; if (level != LIBBPF_WARN || - strcmp(format, "libbpf: \n%s\n")) { + !strstr(format, "-- BEGIN PROG LOAD LOG --")) { vprintf(format, args); return 0; } + /* skip prog_name */ + va_arg(args, char *); log_buf = va_arg(args, char *); if (!log_buf) goto out; diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c index 27f5d8ea7964..ff6cce9fef06 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c @@ -19,16 +19,28 @@ extern int extra_prog_load_log_flags; static int check_load(const char *file, enum bpf_prog_type type) { - struct bpf_prog_load_attr attr; struct bpf_object *obj = NULL; - int err, prog_fd; - - memset(&attr, 0, sizeof(struct bpf_prog_load_attr)); - attr.file = file; - attr.prog_type = type; - attr.log_level = 4 | extra_prog_load_log_flags; - attr.prog_flags = BPF_F_TEST_RND_HI32; - err = bpf_prog_load_xattr(&attr, &obj, &prog_fd); + struct bpf_program *prog; + int err; + + obj = bpf_object__open_file(file, NULL); + err = libbpf_get_error(obj); + if (err) + return err; + + prog = bpf_object__next_program(obj, NULL); + if (!prog) { + err = -ENOENT; + goto err_out; + } + + bpf_program__set_type(prog, type); + bpf_program__set_flags(prog, BPF_F_TEST_RND_HI32); + bpf_program__set_log_level(prog, 4 | extra_prog_load_log_flags); + + err = bpf_object__load(obj); + +err_out: bpf_object__close(obj); return err; } @@ -115,6 +127,12 @@ void test_verif_scale_pyperf600() scale_test("pyperf600.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); } +void test_verif_scale_pyperf600_bpf_loop(void) +{ + /* use the bpf_loop helper*/ + scale_test("pyperf600_bpf_loop.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + void test_verif_scale_pyperf600_nounroll() { /* no unroll at all. 
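The *_bpf_loop.o variants added to the scale tests replace large unrolled loops with the bpf_loop() helper introduced by this series, which invokes a BPF-to-BPF callback nr_loops times so the verifier only has to check the callback body once. A BPF-side sketch, assuming a recent bpf_helpers.h; the section name and context struct are illustrative:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct loop_ctx { __u64 sum; };

    static long acc(__u64 index, void *data)
    {
        ((struct loop_ctx *)data)->sum += index;
        return 0;               /* return 1 to stop iterating early */
    }

    SEC("tp/syscalls/sys_enter_getpid")
    int sum_demo(void *ctx)
    {
        struct loop_ctx lctx = {};

        /* flags must be 0; returns loops performed, or -EINVAL/-E2BIG */
        bpf_loop(500, acc, &lctx, 0);
        return 0;
    }

    char LICENSE[] SEC("license") = "GPL";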
@@ -165,6 +183,12 @@ void test_verif_scale_strobemeta() scale_test("strobemeta.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); } +void test_verif_scale_strobemeta_bpf_loop(void) +{ + /* use the bpf_loop helper*/ + scale_test("strobemeta_bpf_loop.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + void test_verif_scale_strobemeta_nounroll1() { /* no unroll, tiny loops */ diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c index 4aa6343dc4c8..01b776a7beeb 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf.c +++ b/tools/testing/selftests/bpf/prog_tests/btf.c @@ -4071,10 +4071,32 @@ done: return raw_btf; } +static int load_raw_btf(const void *raw_data, size_t raw_size) +{ + LIBBPF_OPTS(bpf_btf_load_opts, opts); + int btf_fd; + + if (always_log) { + opts.log_buf = btf_log_buf, + opts.log_size = BTF_LOG_BUF_SIZE, + opts.log_level = 1; + } + + btf_fd = bpf_btf_load(raw_data, raw_size, &opts); + if (btf_fd < 0 && !always_log) { + opts.log_buf = btf_log_buf, + opts.log_size = BTF_LOG_BUF_SIZE, + opts.log_level = 1; + btf_fd = bpf_btf_load(raw_data, raw_size, &opts); + } + + return btf_fd; +} + static void do_test_raw(unsigned int test_num) { struct btf_raw_test *test = &raw_tests[test_num - 1]; - struct bpf_create_map_attr create_attr = {}; + LIBBPF_OPTS(bpf_map_create_opts, opts); int map_fd = -1, btf_fd = -1; unsigned int raw_btf_size; struct btf_header *hdr; @@ -4100,16 +4122,14 @@ static void do_test_raw(unsigned int test_num) hdr->str_len = (int)hdr->str_len + test->str_len_delta; *btf_log_buf = '\0'; - btf_fd = bpf_load_btf(raw_btf, raw_btf_size, - btf_log_buf, BTF_LOG_BUF_SIZE, - always_log); + btf_fd = load_raw_btf(raw_btf, raw_btf_size); free(raw_btf); err = ((btf_fd < 0) != test->btf_load_err); if (CHECK(err, "btf_fd:%d test->btf_load_err:%u", btf_fd, test->btf_load_err) || CHECK(test->err_str && !strstr(btf_log_buf, test->err_str), - "expected err_str:%s", test->err_str)) { + "expected err_str:%s\n", test->err_str)) { err = -1; goto done; } @@ -4117,16 +4137,11 @@ static void do_test_raw(unsigned int test_num) if (err || btf_fd < 0) goto done; - create_attr.name = test->map_name; - create_attr.map_type = test->map_type; - create_attr.key_size = test->key_size; - create_attr.value_size = test->value_size; - create_attr.max_entries = test->max_entries; - create_attr.btf_fd = btf_fd; - create_attr.btf_key_type_id = test->key_type_id; - create_attr.btf_value_type_id = test->value_type_id; - - map_fd = bpf_create_map_xattr(&create_attr); + opts.btf_fd = btf_fd; + opts.btf_key_type_id = test->key_type_id; + opts.btf_value_type_id = test->value_type_id; + map_fd = bpf_map_create(test->map_type, test->map_name, + test->key_size, test->value_size, test->max_entries, &opts); err = ((map_fd < 0) != test->map_create_err); CHECK(err, "map_fd:%d test->map_create_err:%u", @@ -4232,9 +4247,7 @@ static int test_big_btf_info(unsigned int test_num) goto done; } - btf_fd = bpf_load_btf(raw_btf, raw_btf_size, - btf_log_buf, BTF_LOG_BUF_SIZE, - always_log); + btf_fd = load_raw_btf(raw_btf, raw_btf_size); if (CHECK(btf_fd < 0, "errno:%d", errno)) { err = -1; goto done; @@ -4290,7 +4303,7 @@ done: static int test_btf_id(unsigned int test_num) { const struct btf_get_info_test *test = &get_info_tests[test_num - 1]; - struct bpf_create_map_attr create_attr = {}; + LIBBPF_OPTS(bpf_map_create_opts, opts); uint8_t *raw_btf = NULL, *user_btf[2] = {}; int btf_fd[2] = {-1, -1}, map_fd = -1; struct bpf_map_info map_info = {}; @@ -4320,9 +4333,7 @@ static int 
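The load_raw_btf() helper above is the migration pattern for bpf_load_btf() to bpf_btf_load(): the log parameters move into bpf_btf_load_opts, and the first attempt is made without a log so the common success path stays quiet. Condensed (buffer size is illustrative):

    #include <bpf/bpf.h>

    static char btf_log[64 * 1024];

    int load_btf_logged(const void *raw, size_t size)
    {
        LIBBPF_OPTS(bpf_btf_load_opts, opts);
        int fd = bpf_btf_load(raw, size, NULL); /* quiet first attempt */

        if (fd < 0) {                           /* retry with verifier log */
            opts.log_buf = btf_log;
            opts.log_size = sizeof(btf_log);
            opts.log_level = 1;
            fd = bpf_btf_load(raw, size, &opts);
        }
        return fd;
    }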
test_btf_id(unsigned int test_num) info[i].btf_size = raw_btf_size; } - btf_fd[0] = bpf_load_btf(raw_btf, raw_btf_size, - btf_log_buf, BTF_LOG_BUF_SIZE, - always_log); + btf_fd[0] = load_raw_btf(raw_btf, raw_btf_size); if (CHECK(btf_fd[0] < 0, "errno:%d", errno)) { err = -1; goto done; @@ -4355,16 +4366,11 @@ static int test_btf_id(unsigned int test_num) } /* Test btf members in struct bpf_map_info */ - create_attr.name = "test_btf_id"; - create_attr.map_type = BPF_MAP_TYPE_ARRAY; - create_attr.key_size = sizeof(int); - create_attr.value_size = sizeof(unsigned int); - create_attr.max_entries = 4; - create_attr.btf_fd = btf_fd[0]; - create_attr.btf_key_type_id = 1; - create_attr.btf_value_type_id = 2; - - map_fd = bpf_create_map_xattr(&create_attr); + opts.btf_fd = btf_fd[0]; + opts.btf_key_type_id = 1; + opts.btf_value_type_id = 2; + map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "test_btf_id", + sizeof(int), sizeof(int), 4, &opts); if (CHECK(map_fd < 0, "errno:%d", errno)) { err = -1; goto done; @@ -4457,9 +4463,7 @@ static void do_test_get_info(unsigned int test_num) goto done; } - btf_fd = bpf_load_btf(raw_btf, raw_btf_size, - btf_log_buf, BTF_LOG_BUF_SIZE, - always_log); + btf_fd = load_raw_btf(raw_btf, raw_btf_size); if (CHECK(btf_fd <= 0, "errno:%d", errno)) { err = -1; goto done; @@ -5153,7 +5157,7 @@ static void do_test_pprint(int test_num) { const struct btf_raw_test *test = &pprint_test_template[test_num]; enum pprint_mapv_kind_t mapv_kind = test->mapv_kind; - struct bpf_create_map_attr create_attr = {}; + LIBBPF_OPTS(bpf_map_create_opts, opts); bool ordered_map, lossless_map, percpu_map; int err, ret, num_cpus, rounded_value_size; unsigned int key, nr_read_elems; @@ -5179,26 +5183,19 @@ static void do_test_pprint(int test_num) return; *btf_log_buf = '\0'; - btf_fd = bpf_load_btf(raw_btf, raw_btf_size, - btf_log_buf, BTF_LOG_BUF_SIZE, - always_log); + btf_fd = load_raw_btf(raw_btf, raw_btf_size); free(raw_btf); - if (CHECK(btf_fd < 0, "errno:%d", errno)) { + if (CHECK(btf_fd < 0, "errno:%d\n", errno)) { err = -1; goto done; } - create_attr.name = test->map_name; - create_attr.map_type = test->map_type; - create_attr.key_size = test->key_size; - create_attr.value_size = test->value_size; - create_attr.max_entries = test->max_entries; - create_attr.btf_fd = btf_fd; - create_attr.btf_key_type_id = test->key_type_id; - create_attr.btf_value_type_id = test->value_type_id; - - map_fd = bpf_create_map_xattr(&create_attr); + opts.btf_fd = btf_fd; + opts.btf_key_type_id = test->key_type_id; + opts.btf_value_type_id = test->value_type_id; + map_fd = bpf_map_create(test->map_type, test->map_name, + test->key_size, test->value_size, test->max_entries, &opts); if (CHECK(map_fd < 0, "errno:%d", errno)) { err = -1; goto done; @@ -6553,9 +6550,7 @@ static void do_test_info_raw(unsigned int test_num) return; *btf_log_buf = '\0'; - btf_fd = bpf_load_btf(raw_btf, raw_btf_size, - btf_log_buf, BTF_LOG_BUF_SIZE, - always_log); + btf_fd = load_raw_btf(raw_btf, raw_btf_size); free(raw_btf); if (CHECK(btf_fd < 0, "invalid btf_fd errno:%d", errno)) { @@ -7352,6 +7347,32 @@ static struct btf_dedup_test dedup_tests[] = { BTF_STR_SEC("\0tag1"), }, }, +{ + .descr = "dedup: btf_type_tag #5, struct", + .input = { + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */ + BTF_TYPE_ENC(NAME_NTH(2), BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 1), 4), /* [3] */ + BTF_MEMBER_ENC(NAME_NTH(3), 2, BTF_MEMBER_OFFSET(0, 0)), + BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [4] */ 
+ BTF_TYPE_ENC(NAME_NTH(2), BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 1), 4), /* [5] */ + BTF_MEMBER_ENC(NAME_NTH(3), 4, BTF_MEMBER_OFFSET(0, 0)), + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag1\0t\0m"), + }, + .expect = { + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */ + BTF_TYPE_ENC(NAME_NTH(2), BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 1), 4), /* [3] */ + BTF_MEMBER_ENC(NAME_NTH(3), 2, BTF_MEMBER_OFFSET(0, 0)), + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag1\0t\0m"), + }, +}, }; diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c b/tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c index 9d3b8d7a1537..90aac437576d 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c @@ -314,6 +314,117 @@ cleanup: btf__free(btf1); } +static void btf_add_dup_struct_in_cu(struct btf *btf, int start_id) +{ +#define ID(n) (start_id + n) + btf__set_pointer_size(btf, 8); /* enforce 64-bit arch */ + + btf__add_int(btf, "int", 4, BTF_INT_SIGNED); /* [1] int */ + + btf__add_struct(btf, "s", 8); /* [2] struct s { */ + btf__add_field(btf, "a", ID(3), 0, 0); /* struct anon a; */ + btf__add_field(btf, "b", ID(4), 0, 0); /* struct anon b; */ + /* } */ + + btf__add_struct(btf, "(anon)", 8); /* [3] struct anon { */ + btf__add_field(btf, "f1", ID(1), 0, 0); /* int f1; */ + btf__add_field(btf, "f2", ID(1), 32, 0); /* int f2; */ + /* } */ + + btf__add_struct(btf, "(anon)", 8); /* [4] struct anon { */ + btf__add_field(btf, "f1", ID(1), 0, 0); /* int f1; */ + btf__add_field(btf, "f2", ID(1), 32, 0); /* int f2; */ + /* } */ +#undef ID +} + +static void test_split_dup_struct_in_cu() +{ + struct btf *btf1, *btf2 = NULL; + int err; + + /* generate the base data.. */ + btf1 = btf__new_empty(); + if (!ASSERT_OK_PTR(btf1, "empty_main_btf")) + return; + + btf_add_dup_struct_in_cu(btf1, 0); + + VALIDATE_RAW_BTF( + btf1, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] STRUCT 's' size=8 vlen=2\n" + "\t'a' type_id=3 bits_offset=0\n" + "\t'b' type_id=4 bits_offset=0", + "[3] STRUCT '(anon)' size=8 vlen=2\n" + "\t'f1' type_id=1 bits_offset=0\n" + "\t'f2' type_id=1 bits_offset=32", + "[4] STRUCT '(anon)' size=8 vlen=2\n" + "\t'f1' type_id=1 bits_offset=0\n" + "\t'f2' type_id=1 bits_offset=32"); + + /* ..dedup them... 
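btf_add_dup_struct_in_cu() above drives libbpf's BTF-writing API; the test then relies on btf__dedup(), whose options argument is now the second parameter (NULL for defaults). The same flow in miniature; the type names and sizes here are illustrative:

    #include <bpf/btf.h>

    int btf_build_and_dedup(void)
    {
        struct btf *btf = btf__new_empty();
        int id_int, id_s, err = -1;

        if (!btf)
            return -1;
        id_int = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);  /* [1] */
        id_s = btf__add_struct(btf, "s", 4);                   /* [2] */
        if (id_int < 0 || id_s < 0 ||
            btf__add_field(btf, "f1", id_int, 0 /* bit off */, 0) < 0)
            goto out;
        err = btf__dedup(btf, NULL);    /* collapse duplicate types */
    out:
        btf__free(btf);
        return err;
    }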
*/ + err = btf__dedup(btf1, NULL); + if (!ASSERT_OK(err, "btf_dedup")) + goto cleanup; + + VALIDATE_RAW_BTF( + btf1, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] STRUCT 's' size=8 vlen=2\n" + "\t'a' type_id=3 bits_offset=0\n" + "\t'b' type_id=3 bits_offset=0", + "[3] STRUCT '(anon)' size=8 vlen=2\n" + "\t'f1' type_id=1 bits_offset=0\n" + "\t'f2' type_id=1 bits_offset=32"); + + /* and add the same data on top of it */ + btf2 = btf__new_empty_split(btf1); + if (!ASSERT_OK_PTR(btf2, "empty_split_btf")) + goto cleanup; + + btf_add_dup_struct_in_cu(btf2, 3); + + VALIDATE_RAW_BTF( + btf2, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] STRUCT 's' size=8 vlen=2\n" + "\t'a' type_id=3 bits_offset=0\n" + "\t'b' type_id=3 bits_offset=0", + "[3] STRUCT '(anon)' size=8 vlen=2\n" + "\t'f1' type_id=1 bits_offset=0\n" + "\t'f2' type_id=1 bits_offset=32", + "[4] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[5] STRUCT 's' size=8 vlen=2\n" + "\t'a' type_id=6 bits_offset=0\n" + "\t'b' type_id=7 bits_offset=0", + "[6] STRUCT '(anon)' size=8 vlen=2\n" + "\t'f1' type_id=4 bits_offset=0\n" + "\t'f2' type_id=4 bits_offset=32", + "[7] STRUCT '(anon)' size=8 vlen=2\n" + "\t'f1' type_id=4 bits_offset=0\n" + "\t'f2' type_id=4 bits_offset=32"); + + err = btf__dedup(btf2, NULL); + if (!ASSERT_OK(err, "btf_dedup")) + goto cleanup; + + /* after dedup it should match the original data */ + VALIDATE_RAW_BTF( + btf2, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] STRUCT 's' size=8 vlen=2\n" + "\t'a' type_id=3 bits_offset=0\n" + "\t'b' type_id=3 bits_offset=0", + "[3] STRUCT '(anon)' size=8 vlen=2\n" + "\t'f1' type_id=1 bits_offset=0\n" + "\t'f2' type_id=1 bits_offset=32"); + +cleanup: + btf__free(btf2); + btf__free(btf1); +} + void test_btf_dedup_split() { if (test__start_subtest("split_simple")) @@ -322,4 +433,6 @@ void test_btf_dedup_split() test_split_struct_duped(); if (test__start_subtest("split_fwd_resolve")) test_split_fwd_resolve(); + if (test__start_subtest("split_dup_struct_in_cu")) + test_split_dup_struct_in_cu(); } diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c index d6272013a5a3..9e26903f9170 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_dump.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c @@ -323,7 +323,7 @@ static void test_btf_dump_int_data(struct btf *btf, struct btf_dump *d, char *str) { #ifdef __SIZEOF_INT128__ - __int128 i = 0xffffffffffffffff; + unsigned __int128 i = 0xffffffffffffffff; /* this dance is required because we cannot directly initialize * a 128-bit value to anything larger than a 64-bit value. @@ -756,7 +756,7 @@ static void test_btf_dump_struct_data(struct btf *btf, struct btf_dump *d, /* overflow bpf_sock_ops struct with final element nonzero/zero. * Regardless of the value of the final field, we don't have all the * data we need to display it, so we should trigger an overflow. - * In other words oveflow checking should trump "is field zero?" + * In other words overflow checking should trump "is field zero?" * checks because if we've overflowed, it shouldn't matter what the * field is - we can't trust its value so shouldn't display it. 
*/ diff --git a/tools/testing/selftests/bpf/prog_tests/btf_skc_cls_ingress.c b/tools/testing/selftests/bpf/prog_tests/btf_skc_cls_ingress.c index 762f6a9da8b5..664ffc0364f4 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_skc_cls_ingress.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_skc_cls_ingress.c @@ -90,7 +90,7 @@ static void print_err_line(void) static void test_conn(void) { - int listen_fd = -1, cli_fd = -1, err; + int listen_fd = -1, cli_fd = -1, srv_fd = -1, err; socklen_t addrlen = sizeof(srv_sa6); int srv_port; @@ -112,6 +112,10 @@ static void test_conn(void) if (CHECK_FAIL(cli_fd == -1)) goto done; + srv_fd = accept(listen_fd, NULL, NULL); + if (CHECK_FAIL(srv_fd == -1)) + goto done; + if (CHECK(skel->bss->listen_tp_sport != srv_port || skel->bss->req_sk_sport != srv_port, "Unexpected sk src port", @@ -134,11 +138,13 @@ done: close(listen_fd); if (cli_fd != -1) close(cli_fd); + if (srv_fd != -1) + close(srv_fd); } static void test_syncookie(void) { - int listen_fd = -1, cli_fd = -1, err; + int listen_fd = -1, cli_fd = -1, srv_fd = -1, err; socklen_t addrlen = sizeof(srv_sa6); int srv_port; @@ -161,6 +167,10 @@ static void test_syncookie(void) if (CHECK_FAIL(cli_fd == -1)) goto done; + srv_fd = accept(listen_fd, NULL, NULL); + if (CHECK_FAIL(srv_fd == -1)) + goto done; + if (CHECK(skel->bss->listen_tp_sport != srv_port, "Unexpected tp src port", "listen_tp_sport:%u expected:%u\n", @@ -188,6 +198,8 @@ done: close(listen_fd); if (cli_fd != -1) close(cli_fd); + if (srv_fd != -1) + close(srv_fd); } struct test { diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c index de9c3e12b0ea..d3e8f729c623 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c @@ -15,22 +15,22 @@ static int prog_load_cnt(int verdict, int val) int cgroup_storage_fd, percpu_cgroup_storage_fd; if (map_fd < 0) - map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0); + map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL); if (map_fd < 0) { printf("failed to create map '%s'\n", strerror(errno)); return -1; } - cgroup_storage_fd = bpf_create_map(BPF_MAP_TYPE_CGROUP_STORAGE, - sizeof(struct bpf_cgroup_storage_key), 8, 0, 0); + cgroup_storage_fd = bpf_map_create(BPF_MAP_TYPE_CGROUP_STORAGE, NULL, + sizeof(struct bpf_cgroup_storage_key), 8, 0, NULL); if (cgroup_storage_fd < 0) { printf("failed to create map '%s'\n", strerror(errno)); return -1; } - percpu_cgroup_storage_fd = bpf_create_map( - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, - sizeof(struct bpf_cgroup_storage_key), 8, 0, 0); + percpu_cgroup_storage_fd = bpf_map_create( + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, NULL, + sizeof(struct bpf_cgroup_storage_key), 8, 0, NULL); if (percpu_cgroup_storage_fd < 0) { printf("failed to create map '%s'\n", strerror(errno)); return -1; diff --git a/tools/testing/selftests/bpf/prog_tests/connect_force_port.c b/tools/testing/selftests/bpf/prog_tests/connect_force_port.c index 9229db2f5ca5..ca574e1e30e6 100644 --- a/tools/testing/selftests/bpf/prog_tests/connect_force_port.c +++ b/tools/testing/selftests/bpf/prog_tests/connect_force_port.c @@ -51,19 +51,20 @@ static int run_test(int cgroup_fd, int server_fd, int family, int type) bool v4 = family == AF_INET; __u16 expected_local_port = v4 ? 22222 : 22223; __u16 expected_peer_port = 60000; - struct bpf_prog_load_attr attr = { - .file = v4 ? 
"./connect_force_port4.o" : - "./connect_force_port6.o", - }; struct bpf_program *prog; struct bpf_object *obj; - int xlate_fd, fd, err; + const char *obj_file = v4 ? "connect_force_port4.o" : "connect_force_port6.o"; + int fd, err; __u32 duration = 0; - err = bpf_prog_load_xattr(&attr, &obj, &xlate_fd); - if (err) { - log_err("Failed to load BPF object"); + obj = bpf_object__open_file(obj_file, NULL); + if (!ASSERT_OK_PTR(obj, "bpf_obj_open")) return -1; + + err = bpf_object__load(obj); + if (!ASSERT_OK(err, "bpf_obj_load")) { + err = -EIO; + goto close_bpf_object; } prog = bpf_object__find_program_by_title(obj, v4 ? diff --git a/tools/testing/selftests/bpf/prog_tests/core_kern.c b/tools/testing/selftests/bpf/prog_tests/core_kern.c new file mode 100644 index 000000000000..561c5185d886 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/core_kern.c @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#include "test_progs.h" +#include "core_kern.lskel.h" + +void test_core_kern_lskel(void) +{ + struct core_kern_lskel *skel; + + skel = core_kern_lskel__open_and_load(); + ASSERT_OK_PTR(skel, "open_and_load"); + core_kern_lskel__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c index 1041d0c593f6..44a9868c70ea 100644 --- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c +++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c @@ -881,7 +881,8 @@ void test_core_reloc(void) data = mmap_data; memset(mmap_data, 0, sizeof(*data)); - memcpy(data->in, test_case->input, test_case->input_len); + if (test_case->input_len) + memcpy(data->in, test_case->input, test_case->input_len); data->my_pid_tgid = my_pid_tgid; link = bpf_program__attach_raw_tracepoint(prog, tp_name); diff --git a/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c index 4184c399d4c6..977ab433a946 100644 --- a/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c +++ b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c @@ -24,13 +24,19 @@ static void get_stack_print_output(void *ctx, int cpu, void *data, __u32 size) { bool good_kern_stack = false, good_user_stack = false; const char *nonjit_func = "___bpf_prog_run"; - struct get_stack_trace_t *e = data; + /* perfbuf-submitted data is 4-byte aligned, but we need 8-byte + * alignment, so copy data into a local variable, for simplicity + */ + struct get_stack_trace_t e; int i, num_stack; static __u64 cnt; struct ksym *ks; cnt++; + memset(&e, 0, sizeof(e)); + memcpy(&e, data, size <= sizeof(e) ? 
size : sizeof(e)); + if (size < sizeof(struct get_stack_trace_t)) { __u64 *raw_data = data; bool found = false; @@ -57,19 +63,19 @@ static void get_stack_print_output(void *ctx, int cpu, void *data, __u32 size) good_user_stack = true; } } else { - num_stack = e->kern_stack_size / sizeof(__u64); + num_stack = e.kern_stack_size / sizeof(__u64); if (env.jit_enabled) { good_kern_stack = num_stack > 0; } else { for (i = 0; i < num_stack; i++) { - ks = ksym_search(e->kern_stack[i]); + ks = ksym_search(e.kern_stack[i]); if (ks && (strcmp(ks->name, nonjit_func) == 0)) { good_kern_stack = true; break; } } } - if (e->user_stack_size > 0 && e->user_stack_buildid_size > 0) + if (e.user_stack_size > 0 && e.user_stack_buildid_size > 0) good_user_stack = true; } diff --git a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c index 2a49f8fcde06..ce10d2fc3a6c 100644 --- a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c +++ b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <test_progs.h> #include <network_helpers.h> +#include "kfree_skb.skel.h" struct meta { int ifindex; @@ -58,16 +59,11 @@ void serial_test_kfree_skb(void) .ctx_in = &skb, .ctx_size_in = sizeof(skb), }; - struct bpf_prog_load_attr attr = { - .file = "./kfree_skb.o", - }; - - struct bpf_link *link = NULL, *link_fentry = NULL, *link_fexit = NULL; - struct bpf_map *perf_buf_map, *global_data; - struct bpf_program *prog, *fentry, *fexit; - struct bpf_object *obj, *obj2 = NULL; + struct kfree_skb *skel = NULL; + struct bpf_link *link; + struct bpf_object *obj; struct perf_buffer *pb = NULL; - int err, kfree_skb_fd; + int err; bool passed = false; __u32 duration = 0; const int zero = 0; @@ -78,40 +74,27 @@ void serial_test_kfree_skb(void) if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno)) return; - err = bpf_prog_load_xattr(&attr, &obj2, &kfree_skb_fd); - if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno)) - goto close_prog; - - prog = bpf_object__find_program_by_title(obj2, "tp_btf/kfree_skb"); - if (CHECK(!prog, "find_prog", "prog kfree_skb not found\n")) - goto close_prog; - fentry = bpf_object__find_program_by_title(obj2, "fentry/eth_type_trans"); - if (CHECK(!fentry, "find_prog", "prog eth_type_trans not found\n")) - goto close_prog; - fexit = bpf_object__find_program_by_title(obj2, "fexit/eth_type_trans"); - if (CHECK(!fexit, "find_prog", "prog eth_type_trans not found\n")) - goto close_prog; - - global_data = bpf_object__find_map_by_name(obj2, ".bss"); - if (CHECK(!global_data, "find global data", "not found\n")) + skel = kfree_skb__open_and_load(); + if (!ASSERT_OK_PTR(skel, "kfree_skb_skel")) goto close_prog; - link = bpf_program__attach_raw_tracepoint(prog, NULL); + link = bpf_program__attach_raw_tracepoint(skel->progs.trace_kfree_skb, NULL); if (!ASSERT_OK_PTR(link, "attach_raw_tp")) goto close_prog; - link_fentry = bpf_program__attach_trace(fentry); - if (!ASSERT_OK_PTR(link_fentry, "attach fentry")) - goto close_prog; - link_fexit = bpf_program__attach_trace(fexit); - if (!ASSERT_OK_PTR(link_fexit, "attach fexit")) + skel->links.trace_kfree_skb = link; + + link = bpf_program__attach_trace(skel->progs.fentry_eth_type_trans); + if (!ASSERT_OK_PTR(link, "attach fentry")) goto close_prog; + skel->links.fentry_eth_type_trans = link; - perf_buf_map = bpf_object__find_map_by_name(obj2, "perf_buf_map"); - if (CHECK(!perf_buf_map, "find_perf_buf_map", "not found\n")) + link = 
bpf_program__attach_trace(skel->progs.fexit_eth_type_trans); + if (!ASSERT_OK_PTR(link, "attach fexit")) goto close_prog; + skel->links.fexit_eth_type_trans = link; /* set up perf buffer */ - pb = perf_buffer__new(bpf_map__fd(perf_buf_map), 1, + pb = perf_buffer__new(bpf_map__fd(skel->maps.perf_buf_map), 1, on_sample, NULL, &passed, NULL); if (!ASSERT_OK_PTR(pb, "perf_buf__new")) goto close_prog; @@ -133,7 +116,7 @@ void serial_test_kfree_skb(void) */ ASSERT_TRUE(passed, "passed"); - err = bpf_map_lookup_elem(bpf_map__fd(global_data), &zero, test_ok); + err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.bss), &zero, test_ok); if (CHECK(err, "get_result", "failed to get output data: %d\n", err)) goto close_prog; @@ -141,9 +124,6 @@ void serial_test_kfree_skb(void) CHECK_FAIL(!test_ok[0] || !test_ok[1]); close_prog: perf_buffer__free(pb); - bpf_link__destroy(link); - bpf_link__destroy(link_fentry); - bpf_link__destroy(link_fexit); bpf_object__close(obj); - bpf_object__close(obj2); + kfree_skb__destroy(skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c index 5c9c0176991b..7d7445ccc141 100644 --- a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c +++ b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c @@ -4,6 +4,7 @@ #include <network_helpers.h> #include "kfunc_call_test.lskel.h" #include "kfunc_call_test_subprog.skel.h" +#include "kfunc_call_test_subprog.lskel.h" static void test_main(void) { @@ -49,6 +50,26 @@ static void test_subprog(void) kfunc_call_test_subprog__destroy(skel); } +static void test_subprog_lskel(void) +{ + struct kfunc_call_test_subprog_lskel *skel; + int prog_fd, retval, err; + + skel = kfunc_call_test_subprog_lskel__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel")) + return; + + prog_fd = skel->progs.kfunc_call_test1.prog_fd; + err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), + NULL, NULL, (__u32 *)&retval, NULL); + ASSERT_OK(err, "bpf_prog_test_run(test1)"); + ASSERT_EQ(retval, 10, "test1-retval"); + ASSERT_NEQ(skel->data->active_res, -1, "active_res"); + ASSERT_EQ(skel->data->sk_state_res, BPF_TCP_CLOSE, "sk_state_res"); + + kfunc_call_test_subprog_lskel__destroy(skel); +} + void test_kfunc_call(void) { if (test__start_subtest("main")) @@ -56,4 +77,7 @@ void test_kfunc_call(void) if (test__start_subtest("subprog")) test_subprog(); + + if (test__start_subtest("subprog_lskel")) + test_subprog_lskel(); } diff --git a/tools/testing/selftests/bpf/prog_tests/legacy_printk.c b/tools/testing/selftests/bpf/prog_tests/legacy_printk.c new file mode 100644 index 000000000000..ec6e45f2a644 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/legacy_printk.c @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#include <test_progs.h> +#include "test_legacy_printk.skel.h" + +static int execute_one_variant(bool legacy) +{ + struct test_legacy_printk *skel; + int err, zero = 0, my_pid = getpid(), res, map_fd; + + skel = test_legacy_printk__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return -errno; + + bpf_program__set_autoload(skel->progs.handle_legacy, legacy); + bpf_program__set_autoload(skel->progs.handle_modern, !legacy); + + err = test_legacy_printk__load(skel); + /* no ASSERT_OK, we expect one of two variants can fail here */ + if (err) + goto err_out; + + if (legacy) { + map_fd = bpf_map__fd(skel->maps.my_pid_map); + err = bpf_map_update_elem(map_fd, &zero, &my_pid, BPF_ANY); + if (!ASSERT_OK(err, "my_pid_map_update")) + goto err_out; + 
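Parking each new link in skel->links, as the kfree_skb conversion above does, hands ownership to the skeleton so a single __destroy() detaches everything; the hand-written per-link bpf_link__destroy() calls disappear. The shape of the pattern, with a hypothetical skeleton demo.skel.h and program name handler:

    #include <bpf/libbpf.h>
    #include "demo.skel.h"      /* hypothetical bpftool-generated skeleton */

    int demo_run(void)
    {
        struct demo *skel = demo__open_and_load();

        if (!skel)
            return -1;
        skel->links.handler = bpf_program__attach(skel->progs.handler);
        if (libbpf_get_error(skel->links.handler)) {
            demo__destroy(skel);
            return -1;
        }
        /* ... exercise the program ... */
        demo__destroy(skel);    /* also destroys every link in skel->links */
        return 0;
    }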
err = bpf_map_lookup_elem(map_fd, &zero, &res); + } else { + skel->bss->my_pid_var = my_pid; + } + + err = test_legacy_printk__attach(skel); + if (!ASSERT_OK(err, "skel_attach")) + goto err_out; + + usleep(1); /* trigger */ + + if (legacy) { + map_fd = bpf_map__fd(skel->maps.res_map); + err = bpf_map_lookup_elem(map_fd, &zero, &res); + if (!ASSERT_OK(err, "res_map_lookup")) + goto err_out; + } else { + res = skel->bss->res_var; + } + + if (!ASSERT_GT(res, 0, "res")) { + err = -EINVAL; + goto err_out; + } + +err_out: + test_legacy_printk__destroy(skel); + return err; +} + +void test_legacy_printk(void) +{ + /* legacy variant should work everywhere */ + ASSERT_OK(execute_one_variant(true /* legacy */), "legacy_case"); + + /* execute modern variant, can fail the load on old kernels */ + execute_one_variant(false); +} diff --git a/tools/testing/selftests/bpf/prog_tests/log_buf.c b/tools/testing/selftests/bpf/prog_tests/log_buf.c new file mode 100644 index 000000000000..e469b023962b --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/log_buf.c @@ -0,0 +1,276 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#include <test_progs.h> +#include <bpf/btf.h> + +#include "test_log_buf.skel.h" + +static size_t libbpf_log_pos; +static char libbpf_log_buf[1024 * 1024]; +static bool libbpf_log_error; + +static int libbpf_print_cb(enum libbpf_print_level level, const char *fmt, va_list args) +{ + int emitted_cnt; + size_t left_cnt; + + left_cnt = sizeof(libbpf_log_buf) - libbpf_log_pos; + emitted_cnt = vsnprintf(libbpf_log_buf + libbpf_log_pos, left_cnt, fmt, args); + + if (emitted_cnt < 0 || emitted_cnt + 1 > left_cnt) { + libbpf_log_error = true; + return 0; + } + + libbpf_log_pos += emitted_cnt; + return 0; +} + +static void obj_load_log_buf(void) +{ + libbpf_print_fn_t old_print_cb = libbpf_set_print(libbpf_print_cb); + LIBBPF_OPTS(bpf_object_open_opts, opts); + const size_t log_buf_sz = 1024 * 1024; + struct test_log_buf* skel; + char *obj_log_buf, *good_log_buf, *bad_log_buf; + int err; + + obj_log_buf = malloc(3 * log_buf_sz); + if (!ASSERT_OK_PTR(obj_log_buf, "obj_log_buf")) + return; + + good_log_buf = obj_log_buf + log_buf_sz; + bad_log_buf = obj_log_buf + 2 * log_buf_sz; + obj_log_buf[0] = good_log_buf[0] = bad_log_buf[0] = '\0'; + + opts.kernel_log_buf = obj_log_buf; + opts.kernel_log_size = log_buf_sz; + opts.kernel_log_level = 4; /* for BTF this will turn into 1 */ + + /* In the first round every prog has its own log_buf, so libbpf logs + * don't have program failure logs + */ + skel = test_log_buf__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "skel_open")) + goto cleanup; + + /* set very verbose level for good_prog so we always get detailed logs */ + bpf_program__set_log_buf(skel->progs.good_prog, good_log_buf, log_buf_sz); + bpf_program__set_log_level(skel->progs.good_prog, 2); + + bpf_program__set_log_buf(skel->progs.bad_prog, bad_log_buf, log_buf_sz); + /* log_level 0 with custom log_buf means that verbose logs are not + * requested if program load is successful, but libbpf should retry + * with log_level 1 on error and put program's verbose load log into + * custom log_buf + */ + bpf_program__set_log_level(skel->progs.bad_prog, 0); + + err = test_log_buf__load(skel); + if (!ASSERT_ERR(err, "unexpected_load_success")) + goto cleanup; + + ASSERT_FALSE(libbpf_log_error, "libbpf_log_error"); + + /* there should be no prog loading log because we specified per-prog log buf */ + ASSERT_NULL(strstr(libbpf_log_buf, "-- BEGIN PROG LOAD LOG --"), 
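bpf_program__set_autoload(), used above to choose between the two printk variants, drops the deselected program from bpf_object__load() entirely, so one object can carry mutually exclusive programs for old and new kernels. Condensed from the test above (a sketch built on that test's own skeleton):

    #include <stdbool.h>
    #include "test_legacy_printk.skel.h"

    static int load_variant(struct test_legacy_printk *skel, bool legacy)
    {
        bpf_program__set_autoload(skel->progs.handle_legacy, legacy);
        bpf_program__set_autoload(skel->progs.handle_modern, !legacy);
        /* only the still-autoloaded program is verified and loaded */
        return test_legacy_printk__load(skel);
    }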
"unexp_libbpf_log"); + ASSERT_OK_PTR(strstr(libbpf_log_buf, "prog 'bad_prog': BPF program load failed"), + "libbpf_log_not_empty"); + ASSERT_OK_PTR(strstr(obj_log_buf, "DATASEC license"), "obj_log_not_empty"); + ASSERT_OK_PTR(strstr(good_log_buf, "0: R1=ctx(id=0,off=0,imm=0) R10=fp0"), + "good_log_verbose"); + ASSERT_OK_PTR(strstr(bad_log_buf, "invalid access to map value, value_size=16 off=16000 size=4"), + "bad_log_not_empty"); + + if (env.verbosity > VERBOSE_NONE) { + printf("LIBBPF LOG: \n=================\n%s=================\n", libbpf_log_buf); + printf("OBJ LOG: \n=================\n%s=================\n", obj_log_buf); + printf("GOOD_PROG LOG:\n=================\n%s=================\n", good_log_buf); + printf("BAD_PROG LOG:\n=================\n%s=================\n", bad_log_buf); + } + + /* reset everything */ + test_log_buf__destroy(skel); + obj_log_buf[0] = good_log_buf[0] = bad_log_buf[0] = '\0'; + libbpf_log_buf[0] = '\0'; + libbpf_log_pos = 0; + libbpf_log_error = false; + + /* In the second round we let bad_prog's failure be logged through print callback */ + opts.kernel_log_buf = NULL; /* let everything through into print callback */ + opts.kernel_log_size = 0; + opts.kernel_log_level = 1; + + skel = test_log_buf__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "skel_open")) + goto cleanup; + + /* set normal verbose level for good_prog to check log_level is taken into account */ + bpf_program__set_log_buf(skel->progs.good_prog, good_log_buf, log_buf_sz); + bpf_program__set_log_level(skel->progs.good_prog, 1); + + err = test_log_buf__load(skel); + if (!ASSERT_ERR(err, "unexpected_load_success")) + goto cleanup; + + ASSERT_FALSE(libbpf_log_error, "libbpf_log_error"); + + /* this time prog loading error should be logged through print callback */ + ASSERT_OK_PTR(strstr(libbpf_log_buf, "libbpf: prog 'bad_prog': -- BEGIN PROG LOAD LOG --"), + "libbpf_log_correct"); + ASSERT_STREQ(obj_log_buf, "", "obj_log__empty"); + ASSERT_STREQ(good_log_buf, "processed 4 insns (limit 1000000) max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0\n", + "good_log_ok"); + ASSERT_STREQ(bad_log_buf, "", "bad_log_empty"); + + if (env.verbosity > VERBOSE_NONE) { + printf("LIBBPF LOG: \n=================\n%s=================\n", libbpf_log_buf); + printf("OBJ LOG: \n=================\n%s=================\n", obj_log_buf); + printf("GOOD_PROG LOG:\n=================\n%s=================\n", good_log_buf); + printf("BAD_PROG LOG:\n=================\n%s=================\n", bad_log_buf); + } + +cleanup: + free(obj_log_buf); + test_log_buf__destroy(skel); + libbpf_set_print(old_print_cb); +} + +static void bpf_prog_load_log_buf(void) +{ + const struct bpf_insn good_prog_insns[] = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + const size_t good_prog_insn_cnt = sizeof(good_prog_insns) / sizeof(struct bpf_insn); + const struct bpf_insn bad_prog_insns[] = { + BPF_EXIT_INSN(), + }; + size_t bad_prog_insn_cnt = sizeof(bad_prog_insns) / sizeof(struct bpf_insn); + LIBBPF_OPTS(bpf_prog_load_opts, opts); + const size_t log_buf_sz = 1024 * 1024; + char *log_buf; + int fd = -1; + + log_buf = malloc(log_buf_sz); + if (!ASSERT_OK_PTR(log_buf, "log_buf_alloc")) + return; + opts.log_buf = log_buf; + opts.log_size = log_buf_sz; + + /* with log_level == 0 log_buf shoud stay empty for good prog */ + log_buf[0] = '\0'; + opts.log_level = 0; + fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "good_prog", "GPL", + good_prog_insns, good_prog_insn_cnt, &opts); + ASSERT_STREQ(log_buf, "", "good_log_0"); + 
ASSERT_GE(fd, 0, "good_fd1"); + if (fd >= 0) + close(fd); + fd = -1; + + /* log_level == 2 should always fill log_buf, even for good prog */ + log_buf[0] = '\0'; + opts.log_level = 2; + fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "good_prog", "GPL", + good_prog_insns, good_prog_insn_cnt, &opts); + ASSERT_OK_PTR(strstr(log_buf, "0: R1=ctx(id=0,off=0,imm=0) R10=fp0"), "good_log_2"); + ASSERT_GE(fd, 0, "good_fd2"); + if (fd >= 0) + close(fd); + fd = -1; + + /* log_level == 0 should fill log_buf for bad prog */ + log_buf[0] = '\0'; + opts.log_level = 0; + fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "bad_prog", "GPL", + bad_prog_insns, bad_prog_insn_cnt, &opts); + ASSERT_OK_PTR(strstr(log_buf, "R0 !read_ok"), "bad_log_0"); + ASSERT_LT(fd, 0, "bad_fd"); + if (fd >= 0) + close(fd); + fd = -1; + + free(log_buf); +} + +static void bpf_btf_load_log_buf(void) +{ + LIBBPF_OPTS(bpf_btf_load_opts, opts); + const size_t log_buf_sz = 1024 * 1024; + const void *raw_btf_data; + __u32 raw_btf_size; + struct btf *btf; + char *log_buf; + int fd = -1; + + btf = btf__new_empty(); + if (!ASSERT_OK_PTR(btf, "empty_btf")) + return; + + ASSERT_GT(btf__add_int(btf, "int", 4, 0), 0, "int_type"); + + raw_btf_data = btf__raw_data(btf, &raw_btf_size); + if (!ASSERT_OK_PTR(raw_btf_data, "raw_btf_data_good")) + goto cleanup; + + log_buf = malloc(log_buf_sz); + if (!ASSERT_OK_PTR(log_buf, "log_buf_alloc")) + goto cleanup; + opts.log_buf = log_buf; + opts.log_size = log_buf_sz; + + /* with log_level == 0 log_buf shoud stay empty for good BTF */ + log_buf[0] = '\0'; + opts.log_level = 0; + fd = bpf_btf_load(raw_btf_data, raw_btf_size, &opts); + ASSERT_STREQ(log_buf, "", "good_log_0"); + ASSERT_GE(fd, 0, "good_fd1"); + if (fd >= 0) + close(fd); + fd = -1; + + /* log_level == 2 should always fill log_buf, even for good BTF */ + log_buf[0] = '\0'; + opts.log_level = 2; + fd = bpf_btf_load(raw_btf_data, raw_btf_size, &opts); + printf("LOG_BUF: %s\n", log_buf); + ASSERT_OK_PTR(strstr(log_buf, "magic: 0xeb9f"), "good_log_2"); + ASSERT_GE(fd, 0, "good_fd2"); + if (fd >= 0) + close(fd); + fd = -1; + + /* make BTF bad, add pointer pointing to non-existing type */ + ASSERT_GT(btf__add_ptr(btf, 100), 0, "bad_ptr_type"); + + raw_btf_data = btf__raw_data(btf, &raw_btf_size); + if (!ASSERT_OK_PTR(raw_btf_data, "raw_btf_data_bad")) + goto cleanup; + + /* log_level == 0 should fill log_buf for bad BTF */ + log_buf[0] = '\0'; + opts.log_level = 0; + fd = bpf_btf_load(raw_btf_data, raw_btf_size, &opts); + printf("LOG_BUF: %s\n", log_buf); + ASSERT_OK_PTR(strstr(log_buf, "[2] PTR (anon) type_id=100 Invalid type_id"), "bad_log_0"); + ASSERT_LT(fd, 0, "bad_fd"); + if (fd >= 0) + close(fd); + fd = -1; + +cleanup: + free(log_buf); + btf__free(btf); +} + +void test_log_buf(void) +{ + if (test__start_subtest("obj_load_log_buf")) + obj_load_log_buf(); + if (test__start_subtest("bpf_prog_load_log_buf")) + bpf_prog_load_log_buf(); + if (test__start_subtest("bpf_btf_load_log_buf")) + bpf_btf_load_log_buf(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/map_ptr.c b/tools/testing/selftests/bpf/prog_tests/map_ptr.c index 4972f92205c7..273725504f11 100644 --- a/tools/testing/selftests/bpf/prog_tests/map_ptr.c +++ b/tools/testing/selftests/bpf/prog_tests/map_ptr.c @@ -4,31 +4,29 @@ #include <test_progs.h> #include <network_helpers.h> -#include "map_ptr_kern.skel.h" +#include "map_ptr_kern.lskel.h" void test_map_ptr(void) { - struct map_ptr_kern *skel; + struct map_ptr_kern_lskel *skel; __u32 duration = 0, retval; char buf[128]; int err; int 
page_size = getpagesize(); - skel = map_ptr_kern__open(); + skel = map_ptr_kern_lskel__open(); if (!ASSERT_OK_PTR(skel, "skel_open")) return; - err = bpf_map__set_max_entries(skel->maps.m_ringbuf, page_size); - if (!ASSERT_OK(err, "bpf_map__set_max_entries")) - goto cleanup; + skel->maps.m_ringbuf.max_entries = page_size; - err = map_ptr_kern__load(skel); + err = map_ptr_kern_lskel__load(skel); if (!ASSERT_OK(err, "skel_load")) goto cleanup; skel->bss->page_size = page_size; - err = bpf_prog_test_run(bpf_program__fd(skel->progs.cg_skb), 1, &pkt_v4, + err = bpf_prog_test_run(skel->progs.cg_skb.prog_fd, 1, &pkt_v4, sizeof(pkt_v4), buf, NULL, &retval, NULL); if (CHECK(err, "test_run", "err=%d errno=%d\n", err, errno)) @@ -39,5 +37,5 @@ void test_map_ptr(void) goto cleanup; cleanup: - map_ptr_kern__destroy(skel); + map_ptr_kern_lskel__destroy(skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/pinning.c b/tools/testing/selftests/bpf/prog_tests/pinning.c index d4b953ae3407..31c09ba577eb 100644 --- a/tools/testing/selftests/bpf/prog_tests/pinning.c +++ b/tools/testing/selftests/bpf/prog_tests/pinning.c @@ -241,8 +241,8 @@ void test_pinning(void) goto out; } - map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(__u32), - sizeof(__u64), 1, 0); + map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(__u32), + sizeof(__u64), 1, NULL); if (CHECK(map_fd < 0, "create pinmap manually", "fd %d\n", map_fd)) goto out; diff --git a/tools/testing/selftests/bpf/prog_tests/prog_array_init.c b/tools/testing/selftests/bpf/prog_tests/prog_array_init.c new file mode 100644 index 000000000000..fc4657619739 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/prog_array_init.c @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021 Hengqi Chen */ + +#include <test_progs.h> +#include "test_prog_array_init.skel.h" + +void test_prog_array_init(void) +{ + struct test_prog_array_init *skel; + int err; + + skel = test_prog_array_init__open(); + if (!ASSERT_OK_PTR(skel, "could not open BPF object")) + return; + + skel->rodata->my_pid = getpid(); + + err = test_prog_array_init__load(skel); + if (!ASSERT_OK(err, "could not load BPF object")) + goto cleanup; + + skel->links.entry = bpf_program__attach_raw_tracepoint(skel->progs.entry, "sys_enter"); + if (!ASSERT_OK_PTR(skel->links.entry, "could not attach BPF program")) + goto cleanup; + + usleep(1); + + ASSERT_EQ(skel->bss->value, 42, "unexpected value"); + +cleanup: + test_prog_array_init__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c b/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c index 8ccba3ab70ee..b9822f914eeb 100644 --- a/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c +++ b/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c @@ -14,7 +14,7 @@ static void test_queue_stack_map_by_type(int type) int i, err, prog_fd, map_in_fd, map_out_fd; char file[32], buf[128]; struct bpf_object *obj; - struct iphdr *iph = (void *)buf + sizeof(struct ethhdr); + struct iphdr iph; /* Fill test values to be used */ for (i = 0; i < MAP_SIZE; i++) @@ -60,15 +60,17 @@ static void test_queue_stack_map_by_type(int type) err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), buf, &size, &retval, &duration); - if (err || retval || size != sizeof(pkt_v4) || - iph->daddr != val) + if (err || retval || size != sizeof(pkt_v4)) + break; + memcpy(&iph, buf + sizeof(struct ethhdr), sizeof(iph)); + if (iph.daddr != val) break; } - CHECK(err || retval || size != sizeof(pkt_v4) || iph->daddr 
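The map_ptr conversion above shows what switching to a light skeleton (.lskel.h, generated with bpftool gen skeleton -L) changes at the call sites: maps and programs are plain structs, so fields are assigned and read directly instead of going through accessors. In outline, reusing the names from that hunk:

    #include "map_ptr_kern.lskel.h"

    int map_ptr_run(int page_size)
    {
        struct map_ptr_kern_lskel *skel = map_ptr_kern_lskel__open();
        int prog_fd, err;

        if (!skel)
            return -1;
        skel->maps.m_ringbuf.max_entries = page_size; /* plain field, no setter */
        err = map_ptr_kern_lskel__load(skel);
        if (!err)
            prog_fd = skel->progs.cg_skb.prog_fd;     /* fd exposed directly */
        map_ptr_kern_lskel__destroy(skel);
        return err;
    }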
!= val, + CHECK(err || retval || size != sizeof(pkt_v4) || iph.daddr != val, "bpf_map_pop_elem", "err %d errno %d retval %d size %d iph->daddr %u\n", - err, errno, retval, size, iph->daddr); + err, errno, retval, size, iph.daddr); /* Queue is empty, program should return TC_ACT_SHOT */ err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), diff --git a/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c b/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c index 167cd8a2edfd..e945195b24c9 100644 --- a/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c +++ b/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c @@ -62,8 +62,8 @@ void test_ringbuf_multi(void) if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n")) goto cleanup; - proto_fd = bpf_create_map(BPF_MAP_TYPE_RINGBUF, 0, 0, page_size, 0); - if (CHECK(proto_fd < 0, "bpf_create_map", "bpf_create_map failed\n")) + proto_fd = bpf_map_create(BPF_MAP_TYPE_RINGBUF, NULL, 0, 0, page_size, NULL); + if (CHECK(proto_fd < 0, "bpf_map_create", "bpf_map_create failed\n")) goto cleanup; err = bpf_map__set_inner_map_fd(skel->maps.ringbuf_hash, proto_fd); diff --git a/tools/testing/selftests/bpf/prog_tests/select_reuseport.c b/tools/testing/selftests/bpf/prog_tests/select_reuseport.c index 3cfc910ab3c1..980ac0f2c0bb 100644 --- a/tools/testing/selftests/bpf/prog_tests/select_reuseport.c +++ b/tools/testing/selftests/bpf/prog_tests/select_reuseport.c @@ -66,29 +66,20 @@ static union sa46 { static int create_maps(enum bpf_map_type inner_type) { - struct bpf_create_map_attr attr = {}; + LIBBPF_OPTS(bpf_map_create_opts, opts); inner_map_type = inner_type; /* Creating reuseport_array */ - attr.name = "reuseport_array"; - attr.map_type = inner_type; - attr.key_size = sizeof(__u32); - attr.value_size = sizeof(__u32); - attr.max_entries = REUSEPORT_ARRAY_SIZE; - - reuseport_array = bpf_create_map_xattr(&attr); + reuseport_array = bpf_map_create(inner_type, "reuseport_array", + sizeof(__u32), sizeof(__u32), REUSEPORT_ARRAY_SIZE, NULL); RET_ERR(reuseport_array < 0, "creating reuseport_array", "reuseport_array:%d errno:%d\n", reuseport_array, errno); /* Creating outer_map */ - attr.name = "outer_map"; - attr.map_type = BPF_MAP_TYPE_ARRAY_OF_MAPS; - attr.key_size = sizeof(__u32); - attr.value_size = sizeof(__u32); - attr.max_entries = 1; - attr.inner_map_fd = reuseport_array; - outer_map = bpf_create_map_xattr(&attr); + opts.inner_map_fd = reuseport_array; + outer_map = bpf_map_create(BPF_MAP_TYPE_ARRAY_OF_MAPS, "outer_map", + sizeof(__u32), sizeof(__u32), 1, &opts); RET_ERR(outer_map < 0, "creating outer_map", "outer_map:%d errno:%d\n", outer_map, errno); diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c index 1352ec104149..85db0f4cdd95 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c @@ -91,9 +91,9 @@ static void test_sockmap_create_update_free(enum bpf_map_type map_type) if (CHECK_FAIL(s < 0)) return; - map = bpf_create_map(map_type, sizeof(int), sizeof(int), 1, 0); + map = bpf_map_create(map_type, NULL, sizeof(int), sizeof(int), 1, NULL); if (CHECK_FAIL(map < 0)) { - perror("bpf_create_map"); + perror("bpf_map_create"); goto out; } diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c b/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c index 7a0d64fdc192..af293ea1542c 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c +++ 
b/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c @@ -97,7 +97,7 @@ static void run_tests(int family, enum bpf_map_type map_type) char test_name[MAX_TEST_NAME]; int map; - map = bpf_create_map(map_type, sizeof(int), sizeof(int), 1, 0); + map = bpf_map_create(map_type, NULL, sizeof(int), sizeof(int), 1, NULL); if (CHECK_FAIL(map < 0)) { perror("bpf_map_create"); return; diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c index 2a9cb951bfd6..7e21bfab6358 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c @@ -502,8 +502,8 @@ static void test_lookup_32_bit_value(int family, int sotype, int mapfd) if (s < 0) return; - mapfd = bpf_create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(key), - sizeof(value32), 1, 0); + mapfd = bpf_map_create(BPF_MAP_TYPE_SOCKMAP, NULL, sizeof(key), + sizeof(value32), 1, NULL); if (mapfd < 0) { FAIL_ERRNO("map_create"); goto close; diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c b/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c index 86f97681ad89..6a953f4adfdc 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c +++ b/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c @@ -167,20 +167,20 @@ static int prog_attach(struct bpf_object *obj, int cgroup_fd, const char *title) static void run_test(int cgroup_fd) { - struct bpf_prog_load_attr attr = { - .file = "./sockopt_inherit.o", - }; int server_fd = -1, client_fd; struct bpf_object *obj; void *server_err; pthread_t tid; - int ignored; int err; - err = bpf_prog_load_xattr(&attr, &obj, &ignored); - if (CHECK_FAIL(err)) + obj = bpf_object__open_file("sockopt_inherit.o", NULL); + if (!ASSERT_OK_PTR(obj, "obj_open")) return; + err = bpf_object__load(obj); + if (!ASSERT_OK(err, "obj_load")) + goto close_bpf_object; + err = prog_attach(obj, cgroup_fd, "cgroup/getsockopt"); if (CHECK_FAIL(err)) goto close_bpf_object; diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c b/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c index bc34f7773444..abce12ddcc37 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c +++ b/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c @@ -297,14 +297,10 @@ detach: void test_sockopt_multi(void) { - struct bpf_prog_load_attr attr = { - .file = "./sockopt_multi.o", - }; int cg_parent = -1, cg_child = -1; struct bpf_object *obj = NULL; int sock_fd = -1; int err = -1; - int ignored; cg_parent = test__join_cgroup("/parent"); if (CHECK_FAIL(cg_parent < 0)) @@ -314,8 +310,12 @@ void test_sockopt_multi(void) if (CHECK_FAIL(cg_child < 0)) goto out; - err = bpf_prog_load_xattr(&attr, &obj, &ignored); - if (CHECK_FAIL(err)) + obj = bpf_object__open_file("sockopt_multi.o", NULL); + if (!ASSERT_OK_PTR(obj, "obj_load")) + goto out; + + err = bpf_object__load(obj); + if (!ASSERT_OK(err, "obj_load")) goto out; sock_fd = socket(AF_INET, SOCK_STREAM, 0); diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c b/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c index 265b4fe33ec3..96ff2c20af81 100644 --- a/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c +++ b/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c @@ -2,6 +2,7 @@ #include <test_progs.h> #include "cgroup_helpers.h" #include "network_helpers.h" +#include "tcp_rtt.skel.h" struct tcp_rtt_storage { __u32 invoked; @@ -91,26 +92,18 @@ static int verify_sk(int map_fd, int client_fd, const char *msg, __u32 invoked, static int 
run_test(int cgroup_fd, int server_fd) { - struct bpf_prog_load_attr attr = { - .prog_type = BPF_PROG_TYPE_SOCK_OPS, - .file = "./tcp_rtt.o", - .expected_attach_type = BPF_CGROUP_SOCK_OPS, - }; - struct bpf_object *obj; - struct bpf_map *map; + struct tcp_rtt *skel; int client_fd; int prog_fd; int map_fd; int err; - err = bpf_prog_load_xattr(&attr, &obj, &prog_fd); - if (err) { - log_err("Failed to load BPF object"); + skel = tcp_rtt__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open_load")) return -1; - } - map = bpf_object__next_map(obj, NULL); - map_fd = bpf_map__fd(map); + map_fd = bpf_map__fd(skel->maps.socket_storage_map); + prog_fd = bpf_program__fd(skel->progs._sockops); err = bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_SOCK_OPS, 0); if (err) { @@ -149,7 +142,7 @@ close_client_fd: close(client_fd); close_bpf_object: - bpf_object__close(obj); + tcp_rtt__destroy(skel); return err; } diff --git a/tools/testing/selftests/bpf/prog_tests/test_bpffs.c b/tools/testing/selftests/bpf/prog_tests/test_bpffs.c index d29ebfeef9c5..214d9f4a94a5 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_bpffs.c +++ b/tools/testing/selftests/bpf/prog_tests/test_bpffs.c @@ -19,11 +19,13 @@ static int read_iter(char *file) fd = open(file, 0); if (fd < 0) return -1; - while ((len = read(fd, buf, sizeof(buf))) > 0) + while ((len = read(fd, buf, sizeof(buf))) > 0) { + buf[sizeof(buf) - 1] = '\0'; if (strstr(buf, "iter")) { close(fd); return 0; } + } close(fd); return -1; } @@ -80,7 +82,7 @@ static int fn(void) if (!ASSERT_OK(err, "creating " TDIR "/fs1/b")) goto out; - map = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 4, 1, 0); + map = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 4, 1, NULL); if (!ASSERT_GT(map, 0, "create_map(ARRAY)")) goto out; err = bpf_obj_pin(map, TDIR "/fs1/c"); diff --git a/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c b/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c index 7e13129f593a..509e21d5cb9d 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c +++ b/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c @@ -30,17 +30,29 @@ extern int extra_prog_load_log_flags; static int check_load(const char *file) { - struct bpf_prog_load_attr attr; struct bpf_object *obj = NULL; - int err, prog_fd; + struct bpf_program *prog; + int err; - memset(&attr, 0, sizeof(struct bpf_prog_load_attr)); - attr.file = file; - attr.prog_type = BPF_PROG_TYPE_UNSPEC; - attr.log_level = extra_prog_load_log_flags; - attr.prog_flags = BPF_F_TEST_RND_HI32; found = false; - err = bpf_prog_load_xattr(&attr, &obj, &prog_fd); + + obj = bpf_object__open_file(file, NULL); + err = libbpf_get_error(obj); + if (err) + return err; + + prog = bpf_object__next_program(obj, NULL); + if (!prog) { + err = -ENOENT; + goto err_out; + } + + bpf_program__set_flags(prog, BPF_F_TEST_RND_HI32); + bpf_program__set_log_level(prog, extra_prog_load_log_flags); + + err = bpf_object__load(obj); + +err_out: bpf_object__close(obj); return err; } diff --git a/tools/testing/selftests/bpf/prog_tests/xdp.c b/tools/testing/selftests/bpf/prog_tests/xdp.c index 7a7ef9d4e151..ac65456b7ab8 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp.c @@ -11,8 +11,8 @@ void test_xdp(void) const char *file = "./test_xdp.o"; struct bpf_object *obj; char buf[128]; - struct ipv6hdr *iph6 = (void *)buf + sizeof(struct ethhdr); - struct iphdr *iph = (void *)buf + sizeof(struct ethhdr); + struct ipv6hdr iph6; + struct iphdr iph; __u32 duration, retval, size; 
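/* (A minimal sketch, not from the patch, of the pattern this test_xdp.c hunk
 * and the xdp_bpf2bpf.c hunk below adopt: buf + sizeof(struct ethhdr) is not
 * guaranteed to be suitably aligned for struct iphdr, and casting it also
 * breaks strict aliasing, so the headers are copied out of the output buffer
 * after the test run instead of being read through interior pointers:
 *
 *   struct iphdr iph;
 *   memcpy(&iph, buf + sizeof(struct ethhdr), sizeof(iph));
 *   // now iph.protocol, iph.daddr, ... are ordinary aligned reads
 *
 * at the cost of one small copy per check.)
 */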
int err, prog_fd, map_fd; @@ -28,16 +28,17 @@ void test_xdp(void) err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), buf, &size, &retval, &duration); - + memcpy(&iph, buf + sizeof(struct ethhdr), sizeof(iph)); CHECK(err || retval != XDP_TX || size != 74 || - iph->protocol != IPPROTO_IPIP, "ipv4", + iph.protocol != IPPROTO_IPIP, "ipv4", "err %d errno %d retval %d size %d\n", err, errno, retval, size); err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6), buf, &size, &retval, &duration); + memcpy(&iph6, buf + sizeof(struct ethhdr), sizeof(iph6)); CHECK(err || retval != XDP_TX || size != 114 || - iph6->nexthdr != IPPROTO_IPV6, "ipv6", + iph6.nexthdr != IPPROTO_IPV6, "ipv6", "err %d errno %d retval %d size %d\n", err, errno, retval, size); out: diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c b/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c index faa22b84f2ee..5e3a26b15ec6 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c @@ -218,9 +218,9 @@ static int send_udp_packets(int vary_dst_ip) .h_dest = BOND2_MAC, .h_proto = htons(ETH_P_IP), }; - uint8_t buf[128] = {}; - struct iphdr *iph = (struct iphdr *)(buf + sizeof(eh)); - struct udphdr *uh = (struct udphdr *)(buf + sizeof(eh) + sizeof(*iph)); + struct iphdr iph = {}; + struct udphdr uh = {}; + uint8_t buf[128]; int i, s = -1; int ifindex; @@ -232,17 +232,16 @@ static int send_udp_packets(int vary_dst_ip) if (!ASSERT_GT(ifindex, 0, "get bond1 ifindex")) goto err; - memcpy(buf, &eh, sizeof(eh)); - iph->ihl = 5; - iph->version = 4; - iph->tos = 16; - iph->id = 1; - iph->ttl = 64; - iph->protocol = IPPROTO_UDP; - iph->saddr = 1; - iph->daddr = 2; - iph->tot_len = htons(sizeof(buf) - ETH_HLEN); - iph->check = 0; + iph.ihl = 5; + iph.version = 4; + iph.tos = 16; + iph.id = 1; + iph.ttl = 64; + iph.protocol = IPPROTO_UDP; + iph.saddr = 1; + iph.daddr = 2; + iph.tot_len = htons(sizeof(buf) - ETH_HLEN); + iph.check = 0; for (i = 1; i <= NPACKETS; i++) { int n; @@ -253,10 +252,15 @@ static int send_udp_packets(int vary_dst_ip) }; /* vary the UDP destination port for even distribution with roundrobin/xor modes */ - uh->dest++; + uh.dest++; if (vary_dst_ip) - iph->daddr++; + iph.daddr++; + + /* construct a packet */ + memcpy(buf, &eh, sizeof(eh)); + memcpy(buf + sizeof(eh), &iph, sizeof(iph)); + memcpy(buf + sizeof(eh) + sizeof(iph), &uh, sizeof(uh)); n = sendto(s, buf, sizeof(buf), 0, (struct sockaddr *)&saddr_ll, sizeof(saddr_ll)); if (!ASSERT_EQ(n, sizeof(buf), "sendto")) diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c index f99386d1dc4c..c98a897ad692 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c @@ -42,7 +42,7 @@ void test_xdp_bpf2bpf(void) char buf[128]; int err, pkt_fd, map_fd; bool passed = false; - struct iphdr *iph = (void *)buf + sizeof(struct ethhdr); + struct iphdr iph; struct iptnl_info value4 = {.family = AF_INET}; struct test_xdp *pkt_skel = NULL; struct test_xdp_bpf2bpf *ftrace_skel = NULL; @@ -93,9 +93,9 @@ void test_xdp_bpf2bpf(void) /* Run test program */ err = bpf_prog_test_run(pkt_fd, 1, &pkt_v4, sizeof(pkt_v4), buf, &size, &retval, &duration); - + memcpy(&iph, buf + sizeof(struct ethhdr), sizeof(iph)); if (CHECK(err || retval != XDP_TX || size != 74 || - iph->protocol != IPPROTO_IPIP, "ipv4", + iph.protocol != IPPROTO_IPIP, "ipv4", "err %d errno %d retval %d size %d\n", 
err, errno, retval, size)) goto out; diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_unix.c b/tools/testing/selftests/bpf/progs/bpf_iter_unix.c index 94423902685d..c21e3f545371 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_unix.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_unix.c @@ -49,7 +49,7 @@ int dump_unix(struct bpf_iter__unix *ctx) sock_i_ino(sk)); if (unix_sk->addr) { - if (!UNIX_ABSTRACT(unix_sk)) { + if (unix_sk->addr->name->sun_path[0]) { BPF_SEQ_PRINTF(seq, " %s", unix_sk->addr->name->sun_path); } else { /* The name of the abstract UNIX domain socket starts diff --git a/tools/testing/selftests/bpf/progs/bpf_loop.c b/tools/testing/selftests/bpf/progs/bpf_loop.c new file mode 100644 index 000000000000..12349e4601e8 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_loop.c @@ -0,0 +1,112 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> + +char _license[] SEC("license") = "GPL"; + +struct callback_ctx { + int output; +}; + +/* These should be set by the user program */ +u32 nested_callback_nr_loops; +u32 stop_index = -1; +u32 nr_loops; +int pid; + +/* Making these global variables so that the userspace program + * can verify the output through the skeleton + */ +int nr_loops_returned; +int g_output; +int err; + +static int callback(__u32 index, void *data) +{ + struct callback_ctx *ctx = data; + + if (index >= stop_index) + return 1; + + ctx->output += index; + + return 0; +} + +static int empty_callback(__u32 index, void *data) +{ + return 0; +} + +static int nested_callback2(__u32 index, void *data) +{ + nr_loops_returned += bpf_loop(nested_callback_nr_loops, callback, data, 0); + + return 0; +} + +static int nested_callback1(__u32 index, void *data) +{ + bpf_loop(nested_callback_nr_loops, nested_callback2, data, 0); + return 0; +} + +SEC("fentry/__x64_sys_nanosleep") +int test_prog(void *ctx) +{ + struct callback_ctx data = {}; + + if (bpf_get_current_pid_tgid() >> 32 != pid) + return 0; + + nr_loops_returned = bpf_loop(nr_loops, callback, &data, 0); + + if (nr_loops_returned < 0) + err = nr_loops_returned; + else + g_output = data.output; + + return 0; +} + +SEC("fentry/__x64_sys_nanosleep") +int prog_null_ctx(void *ctx) +{ + if (bpf_get_current_pid_tgid() >> 32 != pid) + return 0; + + nr_loops_returned = bpf_loop(nr_loops, empty_callback, NULL, 0); + + return 0; +} + +SEC("fentry/__x64_sys_nanosleep") +int prog_invalid_flags(void *ctx) +{ + struct callback_ctx data = {}; + + if (bpf_get_current_pid_tgid() >> 32 != pid) + return 0; + + err = bpf_loop(nr_loops, callback, &data, 1); + + return 0; +} + +SEC("fentry/__x64_sys_nanosleep") +int prog_nested_calls(void *ctx) +{ + struct callback_ctx data = {}; + + if (bpf_get_current_pid_tgid() >> 32 != pid) + return 0; + + nr_loops_returned = 0; + bpf_loop(nr_loops, nested_callback1, &data, 0); + + g_output = data.output; + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/bpf_loop_bench.c b/tools/testing/selftests/bpf/progs/bpf_loop_bench.c new file mode 100644 index 000000000000..9dafdc244462 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_loop_bench.c @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> + +char _license[] SEC("license") = "GPL"; + +u32 nr_loops; +long hits; + +static int empty_callback(__u32 index, void *data) +{ + return 0; +} + +SEC("fentry/__x64_sys_getpgid") +int benchmark(void *ctx) +{ + for 
(int i = 0; i < 1000; i++) { + bpf_loop(nr_loops, empty_callback, NULL, 0); + + __sync_add_and_fetch(&hits, nr_loops); + } + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/bpf_tracing_net.h b/tools/testing/selftests/bpf/progs/bpf_tracing_net.h index eef5646ddb19..e0f42601be9b 100644 --- a/tools/testing/selftests/bpf/progs/bpf_tracing_net.h +++ b/tools/testing/selftests/bpf/progs/bpf_tracing_net.h @@ -6,8 +6,6 @@ #define AF_INET6 10 #define __SO_ACCEPTCON (1 << 16) -#define UNIX_HASH_SIZE 256 -#define UNIX_ABSTRACT(unix_sk) (unix_sk->addr->hash < UNIX_HASH_SIZE) #define SOL_TCP 6 #define TCP_CONGESTION 13 diff --git a/tools/testing/selftests/bpf/progs/core_kern.c b/tools/testing/selftests/bpf/progs/core_kern.c new file mode 100644 index 000000000000..13499cc15c7d --- /dev/null +++ b/tools/testing/selftests/bpf/progs/core_kern.c @@ -0,0 +1,104 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#include "vmlinux.h" + +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_core_read.h> + +#define ATTR __always_inline +#include "test_jhash.h" + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, u32); + __type(value, u32); + __uint(max_entries, 256); +} array1 SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, u32); + __type(value, u32); + __uint(max_entries, 256); +} array2 SEC(".maps"); + +static __noinline int randmap(int v, const struct net_device *dev) +{ + struct bpf_map *map = (struct bpf_map *)&array1; + int key = bpf_get_prandom_u32() & 0xff; + int *val; + + if (bpf_get_prandom_u32() & 1) + map = (struct bpf_map *)&array2; + + val = bpf_map_lookup_elem(map, &key); + if (val) + *val = bpf_get_prandom_u32() + v + dev->mtu; + + return 0; +} + +SEC("tp_btf/xdp_devmap_xmit") +int BPF_PROG(tp_xdp_devmap_xmit_multi, const struct net_device + *from_dev, const struct net_device *to_dev, int sent, int drops, + int err) +{ + return randmap(from_dev->ifindex, from_dev); +} + +SEC("fentry/eth_type_trans") +int BPF_PROG(fentry_eth_type_trans, struct sk_buff *skb, + struct net_device *dev, unsigned short protocol) +{ + return randmap(dev->ifindex + skb->len, dev); +} + +SEC("fexit/eth_type_trans") +int BPF_PROG(fexit_eth_type_trans, struct sk_buff *skb, + struct net_device *dev, unsigned short protocol) +{ + return randmap(dev->ifindex + skb->len, dev); +} + +volatile const int never; + +struct __sk_bUfF /* it will not exist in vmlinux */ { + int len; +} __attribute__((preserve_access_index)); + +struct bpf_testmod_test_read_ctx /* it exists in bpf_testmod */ { + size_t len; +} __attribute__((preserve_access_index)); + +SEC("tc") +int balancer_ingress(struct __sk_buff *ctx) +{ + void *data_end = (void *)(long)ctx->data_end; + void *data = (void *)(long)ctx->data; + void *ptr; + int ret = 0, nh_off, i = 0; + + nh_off = 14; + + /* pragma unroll doesn't work on large loops */ +#define C do { \ + ptr = data + i; \ + if (ptr + nh_off > data_end) \ + break; \ + ctx->tc_index = jhash(ptr, nh_off, ctx->cb[0] + i++); \ + if (never) { \ + /* below is a dead code with unresolvable CO-RE relo */ \ + i += ((struct __sk_bUfF *)ctx)->len; \ + /* this CO-RE relo may or may not resolve + * depending on whether bpf_testmod is loaded. 
+ */ \ + i += ((struct bpf_testmod_test_read_ctx *)ctx)->len; \ + } \ + } while (0); +#define C30 C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C; + C30;C30;C30; /* 90 calls */ + return 0; +} + +char LICENSE[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/map_ptr_kern.c b/tools/testing/selftests/bpf/progs/map_ptr_kern.c index b1b711d9b214..b64df94ec476 100644 --- a/tools/testing/selftests/bpf/progs/map_ptr_kern.c +++ b/tools/testing/selftests/bpf/progs/map_ptr_kern.c @@ -334,9 +334,11 @@ static inline int check_lpm_trie(void) return 1; } +#define INNER_MAX_ENTRIES 1234 + struct inner_map { __uint(type, BPF_MAP_TYPE_ARRAY); - __uint(max_entries, 1); + __uint(max_entries, INNER_MAX_ENTRIES); __type(key, __u32); __type(value, __u32); } inner_map SEC(".maps"); @@ -348,7 +350,7 @@ struct { __type(value, __u32); __array(values, struct { __uint(type, BPF_MAP_TYPE_ARRAY); - __uint(max_entries, 1); + __uint(max_entries, INNER_MAX_ENTRIES); __type(key, __u32); __type(value, __u32); }); @@ -360,8 +362,13 @@ static inline int check_array_of_maps(void) { struct bpf_array *array_of_maps = (struct bpf_array *)&m_array_of_maps; struct bpf_map *map = (struct bpf_map *)&m_array_of_maps; + struct bpf_array *inner_map; + int key = 0; VERIFY(check_default(&array_of_maps->map, map)); + inner_map = bpf_map_lookup_elem(array_of_maps, &key); + VERIFY(inner_map != 0); + VERIFY(inner_map->map.max_entries == INNER_MAX_ENTRIES); return 1; } @@ -382,8 +389,13 @@ static inline int check_hash_of_maps(void) { struct bpf_htab *hash_of_maps = (struct bpf_htab *)&m_hash_of_maps; struct bpf_map *map = (struct bpf_map *)&m_hash_of_maps; + struct bpf_htab *inner_map; + int key = 2; VERIFY(check_default(&hash_of_maps->map, map)); + inner_map = bpf_map_lookup_elem(hash_of_maps, &key); + VERIFY(inner_map != 0); + VERIFY(inner_map->map.max_entries == INNER_MAX_ENTRIES); return 1; } diff --git a/tools/testing/selftests/bpf/progs/pyperf.h b/tools/testing/selftests/bpf/progs/pyperf.h index 2fb7adafb6b6..1ed28882daf3 100644 --- a/tools/testing/selftests/bpf/progs/pyperf.h +++ b/tools/testing/selftests/bpf/progs/pyperf.h @@ -159,6 +159,59 @@ struct { __uint(value_size, sizeof(long long) * 127); } stackmap SEC(".maps"); +#ifdef USE_BPF_LOOP +struct process_frame_ctx { + int cur_cpu; + int32_t *symbol_counter; + void *frame_ptr; + FrameData *frame; + PidData *pidData; + Symbol *sym; + Event *event; + bool done; +}; + +#define barrier_var(var) asm volatile("" : "=r"(var) : "0"(var)) + +static int process_frame_callback(__u32 i, struct process_frame_ctx *ctx) +{ + int zero = 0; + void *frame_ptr = ctx->frame_ptr; + PidData *pidData = ctx->pidData; + FrameData *frame = ctx->frame; + int32_t *symbol_counter = ctx->symbol_counter; + int cur_cpu = ctx->cur_cpu; + Event *event = ctx->event; + Symbol *sym = ctx->sym; + + if (frame_ptr && get_frame_data(frame_ptr, pidData, frame, sym)) { + int32_t new_symbol_id = *symbol_counter * 64 + cur_cpu; + int32_t *symbol_id = bpf_map_lookup_elem(&symbolmap, sym); + + if (!symbol_id) { + bpf_map_update_elem(&symbolmap, sym, &zero, 0); + symbol_id = bpf_map_lookup_elem(&symbolmap, sym); + if (!symbol_id) { + ctx->done = true; + return 1; + } + } + if (*symbol_id == new_symbol_id) + (*symbol_counter)++; + + barrier_var(i); + if (i >= STACK_MAX_LEN) + return 1; + + event->stack[i] = *symbol_id; + + event->stack_len = i + 1; + frame_ptr = frame->f_back; + } + return 0; +} +#endif /* USE_BPF_LOOP */ + #ifdef GLOBAL_FUNC __noinline #elif defined(SUBPROGS) @@ -228,11 +281,26 @@ 
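/* (A minimal sketch of the bpf_loop() contract that the pyperf.h hunk below
 * and the strobemeta.h conversion rely on, assuming the helper as introduced
 * in Linux 5.17:
 *
 *   long bpf_loop(__u32 nr_loops, void *callback_fn, void *callback_ctx,
 *                 __u64 flags);   // flags must currently be 0
 *
 *   static int cb(__u32 index, void *ctx)
 *   {
 *           return 0;   // 0 = keep iterating, 1 = stop early
 *   }
 *
 * bpf_loop() returns the number of iterations performed or a negative error,
 * which is why read_strobe_meta() below can compare the return value against
 * STROBE_MAX_INTS and friends.)
 */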
int __on_event(struct bpf_raw_tracepoint_args *ctx) int32_t* symbol_counter = bpf_map_lookup_elem(&symbolmap, &sym); if (symbol_counter == NULL) return 0; +#ifdef USE_BPF_LOOP + struct process_frame_ctx ctx = { + .cur_cpu = cur_cpu, + .symbol_counter = symbol_counter, + .frame_ptr = frame_ptr, + .frame = &frame, + .pidData = pidData, + .sym = &sym, + .event = event, + }; + + bpf_loop(STACK_MAX_LEN, process_frame_callback, &ctx, 0); + if (ctx.done) + return 0; +#else #ifdef NO_UNROLL #pragma clang loop unroll(disable) #else #pragma clang loop unroll(full) -#endif +#endif /* NO_UNROLL */ /* Unwind python stack */ for (int i = 0; i < STACK_MAX_LEN; ++i) { if (frame_ptr && get_frame_data(frame_ptr, pidData, &frame, &sym)) { @@ -251,6 +319,7 @@ int __on_event(struct bpf_raw_tracepoint_args *ctx) frame_ptr = frame.f_back; } } +#endif /* USE_BPF_LOOP */ event->stack_complete = frame_ptr == NULL; } else { event->stack_complete = 1; diff --git a/tools/testing/selftests/bpf/progs/pyperf600_bpf_loop.c b/tools/testing/selftests/bpf/progs/pyperf600_bpf_loop.c new file mode 100644 index 000000000000..5c2059dc01af --- /dev/null +++ b/tools/testing/selftests/bpf/progs/pyperf600_bpf_loop.c @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#define STACK_MAX_LEN 600 +#define USE_BPF_LOOP +#include "pyperf.h" diff --git a/tools/testing/selftests/bpf/progs/strobemeta.h b/tools/testing/selftests/bpf/progs/strobemeta.h index 60c93aee2f4a..753718595c26 100644 --- a/tools/testing/selftests/bpf/progs/strobemeta.h +++ b/tools/testing/selftests/bpf/progs/strobemeta.h @@ -445,6 +445,48 @@ static __always_inline void *read_map_var(struct strobemeta_cfg *cfg, return payload; } +#ifdef USE_BPF_LOOP +enum read_type { + READ_INT_VAR, + READ_MAP_VAR, + READ_STR_VAR, +}; + +struct read_var_ctx { + struct strobemeta_payload *data; + void *tls_base; + struct strobemeta_cfg *cfg; + void *payload; + /* value gets mutated */ + struct strobe_value_generic *value; + enum read_type type; +}; + +static int read_var_callback(__u32 index, struct read_var_ctx *ctx) +{ + switch (ctx->type) { + case READ_INT_VAR: + if (index >= STROBE_MAX_INTS) + return 1; + read_int_var(ctx->cfg, index, ctx->tls_base, ctx->value, ctx->data); + break; + case READ_MAP_VAR: + if (index >= STROBE_MAX_MAPS) + return 1; + ctx->payload = read_map_var(ctx->cfg, index, ctx->tls_base, + ctx->value, ctx->data, ctx->payload); + break; + case READ_STR_VAR: + if (index >= STROBE_MAX_STRS) + return 1; + ctx->payload += read_str_var(ctx->cfg, index, ctx->tls_base, + ctx->value, ctx->data, ctx->payload); + break; + } + return 0; +} +#endif /* USE_BPF_LOOP */ + /* * read_strobe_meta returns NULL, if no metadata was read; otherwise returns * pointer to *right after* payload ends @@ -475,11 +517,36 @@ static void *read_strobe_meta(struct task_struct *task, */ tls_base = (void *)task; +#ifdef USE_BPF_LOOP + struct read_var_ctx ctx = { + .cfg = cfg, + .tls_base = tls_base, + .value = &value, + .data = data, + .payload = payload, + }; + int err; + + ctx.type = READ_INT_VAR; + err = bpf_loop(STROBE_MAX_INTS, read_var_callback, &ctx, 0); + if (err != STROBE_MAX_INTS) + return NULL; + + ctx.type = READ_STR_VAR; + err = bpf_loop(STROBE_MAX_STRS, read_var_callback, &ctx, 0); + if (err != STROBE_MAX_STRS) + return NULL; + + ctx.type = READ_MAP_VAR; + err = bpf_loop(STROBE_MAX_MAPS, read_var_callback, &ctx, 0); + if (err != STROBE_MAX_MAPS) + return NULL; +#else #ifdef NO_UNROLL #pragma clang loop unroll(disable) #else #pragma unroll -#endif 
+#endif /* NO_UNROLL */ for (int i = 0; i < STROBE_MAX_INTS; ++i) { read_int_var(cfg, i, tls_base, &value, data); } @@ -487,7 +554,7 @@ static void *read_strobe_meta(struct task_struct *task, #pragma clang loop unroll(disable) #else #pragma unroll -#endif +#endif /* NO_UNROLL */ for (int i = 0; i < STROBE_MAX_STRS; ++i) { payload += read_str_var(cfg, i, tls_base, &value, data, payload); } @@ -495,10 +562,12 @@ static void *read_strobe_meta(struct task_struct *task, #pragma clang loop unroll(disable) #else #pragma unroll -#endif +#endif /* NO_UNROLL */ for (int i = 0; i < STROBE_MAX_MAPS; ++i) { payload = read_map_var(cfg, i, tls_base, &value, data, payload); } +#endif /* USE_BPF_LOOP */ + /* * return pointer right after end of payload, so it's possible to * calculate exact amount of useful data that needs to be sent diff --git a/tools/testing/selftests/bpf/progs/strobemeta_bpf_loop.c b/tools/testing/selftests/bpf/progs/strobemeta_bpf_loop.c new file mode 100644 index 000000000000..d18b992f0165 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/strobemeta_bpf_loop.c @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) +/* Copyright (c) 2021 Facebook */ + +#define STROBE_MAX_INTS 2 +#define STROBE_MAX_STRS 25 +#define STROBE_MAX_MAPS 100 +#define STROBE_MAX_MAP_ENTRIES 20 +#define USE_BPF_LOOP +#include "strobemeta.h" diff --git a/tools/testing/selftests/bpf/progs/test_ksyms_weak.c b/tools/testing/selftests/bpf/progs/test_ksyms_weak.c index 8eadbd4caf7a..5f8379aadb29 100644 --- a/tools/testing/selftests/bpf/progs/test_ksyms_weak.c +++ b/tools/testing/selftests/bpf/progs/test_ksyms_weak.c @@ -38,7 +38,7 @@ int pass_handler(const void *ctx) /* tests existing symbols. */ rq = (struct rq *)bpf_per_cpu_ptr(&runqueues, 0); if (rq) - out__existing_typed = 0; + out__existing_typed = rq->cpu; out__existing_typeless = (__u64)&bpf_prog_active; /* tests non-existent symbols. 
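 * (A __weak ksym that the running kernel does not provide resolves to
 * NULL/zero at load time instead of failing the load, which is what makes
 * these negative checks possible.)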
*/ diff --git a/tools/testing/selftests/bpf/progs/test_legacy_printk.c b/tools/testing/selftests/bpf/progs/test_legacy_printk.c new file mode 100644 index 000000000000..64c2d9ced529 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_legacy_printk.c @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#include <linux/bpf.h> +#define BPF_NO_GLOBAL_DATA +#include <bpf/bpf_helpers.h> + +char LICENSE[] SEC("license") = "GPL"; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, int); + __type(value, int); + __uint(max_entries, 1); +} my_pid_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, int); + __type(value, int); + __uint(max_entries, 1); +} res_map SEC(".maps"); + +volatile int my_pid_var = 0; +volatile int res_var = 0; + +SEC("tp/raw_syscalls/sys_enter") +int handle_legacy(void *ctx) +{ + int zero = 0, *my_pid, cur_pid, *my_res; + + my_pid = bpf_map_lookup_elem(&my_pid_map, &zero); + if (!my_pid) + return 1; + + cur_pid = bpf_get_current_pid_tgid() >> 32; + if (cur_pid != *my_pid) + return 1; + + my_res = bpf_map_lookup_elem(&res_map, &zero); + if (!my_res) + return 1; + + if (*my_res == 0) + /* use bpf_printk() in combination with BPF_NO_GLOBAL_DATA to + * force a .rodata.str1.1 section that previously caused + * problems on old kernels because libbpf always tried to + * create a global data map for it + */ + bpf_printk("Legacy-case bpf_printk test, pid %d\n", cur_pid); + *my_res = 1; + + return *my_res; +} + +SEC("tp/raw_syscalls/sys_enter") +int handle_modern(void *ctx) +{ + int zero = 0, cur_pid; + + cur_pid = bpf_get_current_pid_tgid() >> 32; + if (cur_pid != my_pid_var) + return 1; + + if (res_var == 0) + /* we need bpf_printk() to validate libbpf logic around unused + * global maps and legacy kernels; see comment in handle_legacy() + */ + bpf_printk("Modern-case bpf_printk test, pid %d\n", cur_pid); + res_var = 1; + + return res_var; +} diff --git a/tools/testing/selftests/bpf/progs/test_log_buf.c b/tools/testing/selftests/bpf/progs/test_log_buf.c new file mode 100644 index 000000000000..199f459bd5ae --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_log_buf.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> + +int a[4]; +const volatile int off = 4000; + +SEC("raw_tp/sys_enter") +int good_prog(const void *ctx) +{ + a[0] = (int)(long)ctx; + return a[1]; +} + +SEC("raw_tp/sys_enter") +int bad_prog(const void *ctx) +{ + /* out of bounds access */ + return a[off]; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_module_attach.c b/tools/testing/selftests/bpf/progs/test_module_attach.c index b36857093f71..50ce16d02da7 100644 --- a/tools/testing/selftests/bpf/progs/test_module_attach.c +++ b/tools/testing/selftests/bpf/progs/test_module_attach.c @@ -87,6 +87,18 @@ int BPF_PROG(handle_fexit, return 0; } +SEC("fexit/bpf_testmod_return_ptr") +int BPF_PROG(handle_fexit_ret, int arg, struct file *ret) +{ + long buf = 0; + + bpf_probe_read_kernel(&buf, 8, ret); + bpf_probe_read_kernel(&buf, 8, (char *)ret + 256); + *(volatile long long *)ret; + *(volatile int *)&ret->f_mode; + return 0; +} + __u32 fmod_ret_read_sz = 0; SEC("fmod_ret/bpf_testmod_test_read") diff --git a/tools/testing/selftests/bpf/progs/test_prog_array_init.c b/tools/testing/selftests/bpf/progs/test_prog_array_init.c new file mode 100644 index 000000000000..2cd138356126 --- /dev/null +++ 
b/tools/testing/selftests/bpf/progs/test_prog_array_init.c @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021 Hengqi Chen */ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +const volatile pid_t my_pid = 0; +int value = 0; + +SEC("raw_tp/sys_enter") +int tailcall_1(void *ctx) +{ + value = 42; + return 0; +} + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 2); + __uint(key_size, sizeof(__u32)); + __array(values, int (void *)); +} prog_array_init SEC(".maps") = { + .values = { + [1] = (void *)&tailcall_1, + }, +}; + +SEC("raw_tp/sys_enter") +int entry(void *ctx) +{ + pid_t pid = bpf_get_current_pid_tgid() >> 32; + + if (pid != my_pid) + return 0; + + bpf_tail_call(ctx, &prog_array_init, 1); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c b/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c index 8e94e5c080aa..6dc1f28fc4b6 100644 --- a/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c +++ b/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c @@ -68,7 +68,7 @@ static void set_task_info(struct sock *sk) } SEC("fentry/inet_csk_listen_start") -int BPF_PROG(trace_inet_csk_listen_start, struct sock *sk, int backlog) +int BPF_PROG(trace_inet_csk_listen_start, struct sock *sk) { set_task_info(sk); diff --git a/tools/testing/selftests/bpf/progs/test_skc_to_unix_sock.c b/tools/testing/selftests/bpf/progs/test_skc_to_unix_sock.c index a408ec95cba4..eacda9fe07eb 100644 --- a/tools/testing/selftests/bpf/progs/test_skc_to_unix_sock.c +++ b/tools/testing/selftests/bpf/progs/test_skc_to_unix_sock.c @@ -23,7 +23,7 @@ int BPF_PROG(unix_listen, struct socket *sock, int backlog) if (!unix_sk) return 0; - if (!UNIX_ABSTRACT(unix_sk)) + if (unix_sk->addr->name->sun_path[0]) return 0; len = unix_sk->addr->len - sizeof(short); diff --git a/tools/testing/selftests/bpf/progs/test_verif_scale2.c b/tools/testing/selftests/bpf/progs/test_verif_scale2.c index f024154c7be7..f90ffcafd1e8 100644 --- a/tools/testing/selftests/bpf/progs/test_verif_scale2.c +++ b/tools/testing/selftests/bpf/progs/test_verif_scale2.c @@ -1,11 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2019 Facebook -#include <linux/bpf.h> +#include "vmlinux.h" #include <bpf/bpf_helpers.h> #define ATTR __always_inline #include "test_jhash.h" -SEC("scale90_inline") +SEC("tc") int balancer_ingress(struct __sk_buff *ctx) { void *data_end = (void *)(long)ctx->data_end; diff --git a/tools/testing/selftests/bpf/progs/trigger_bench.c b/tools/testing/selftests/bpf/progs/trigger_bench.c index 9a4d09590b3d..2098f3f27f18 100644 --- a/tools/testing/selftests/bpf/progs/trigger_bench.c +++ b/tools/testing/selftests/bpf/progs/trigger_bench.c @@ -52,3 +52,10 @@ int bench_trigger_fmodret(void *ctx) __sync_add_and_fetch(&hits, 1); return -22; } + +SEC("uprobe/self/uprobe_target") +int bench_trigger_uprobe(void *ctx) +{ + __sync_add_and_fetch(&hits, 1); + return 0; +} diff --git a/tools/testing/selftests/bpf/test_bpftool_synctypes.py b/tools/testing/selftests/bpf/test_bpftool_synctypes.py index be54b7335a76..6bf21e47882a 100755 --- a/tools/testing/selftests/bpf/test_bpftool_synctypes.py +++ b/tools/testing/selftests/bpf/test_bpftool_synctypes.py @@ -9,7 +9,15 @@ import os, sys LINUX_ROOT = os.path.abspath(os.path.join(__file__, os.pardir, os.pardir, os.pardir, os.pardir, os.pardir)) -BPFTOOL_DIR = os.path.join(LINUX_ROOT, 'tools/bpf/bpftool') +BPFTOOL_DIR = os.getenv('BPFTOOL_DIR', + 
os.path.join(LINUX_ROOT, 'tools/bpf/bpftool')) +BPFTOOL_BASHCOMP_DIR = os.getenv('BPFTOOL_BASHCOMP_DIR', + os.path.join(BPFTOOL_DIR, 'bash-completion')) +BPFTOOL_DOC_DIR = os.getenv('BPFTOOL_DOC_DIR', + os.path.join(BPFTOOL_DIR, 'Documentation')) +INCLUDE_DIR = os.getenv('INCLUDE_DIR', + os.path.join(LINUX_ROOT, 'tools/include')) + retval = 0 class BlockParser(object): @@ -242,12 +250,6 @@ class FileExtractor(object): end_marker = re.compile('}\\\\n') return self.__get_description_list(start_marker, pattern, end_marker) - def default_options(self): - """ - Return the default options contained in HELP_SPEC_OPTIONS - """ - return { '-j', '--json', '-p', '--pretty', '-d', '--debug' } - def get_bashcomp_list(self, block_name): """ Search for and parse a list of type names from a variable in bash @@ -274,7 +276,56 @@ class SourceFileExtractor(FileExtractor): defined in children classes. """ def get_options(self): - return self.default_options().union(self.get_help_list_macro('HELP_SPEC_OPTIONS')) + return self.get_help_list_macro('HELP_SPEC_OPTIONS') + +class MainHeaderFileExtractor(SourceFileExtractor): + """ + An extractor for bpftool's main.h + """ + filename = os.path.join(BPFTOOL_DIR, 'main.h') + + def get_common_options(self): + """ + Parse the list of common options in main.h (options that apply to all + commands), which looks to the lists of options in other source files + but has different start and end markers: + + "OPTIONS := { {-j|--json} [{-p|--pretty}] | {-d|--debug} | {-l|--legacy}" + + Return a set containing all options, such as: + + {'-p', '-d', '--legacy', '--pretty', '--debug', '--json', '-l', '-j'} + """ + start_marker = re.compile(f'"OPTIONS :=') + pattern = re.compile('([\w-]+) ?(?:\||}[ }\]"])') + end_marker = re.compile('#define') + + parser = InlineListParser(self.reader) + parser.search_block(start_marker) + return parser.parse(pattern, end_marker) + +class ManSubstitutionsExtractor(SourceFileExtractor): + """ + An extractor for substitutions.rst + """ + filename = os.path.join(BPFTOOL_DOC_DIR, 'substitutions.rst') + + def get_common_options(self): + """ + Parse the list of common options in substitutions.rst (options that + apply to all commands). + + Return a set containing all options, such as: + + {'-p', '-d', '--legacy', '--pretty', '--debug', '--json', '-l', '-j'} + """ + start_marker = re.compile('\|COMMON_OPTIONS\| replace:: {') + pattern = re.compile('\*\*([\w/-]+)\*\*') + end_marker = re.compile('}$') + + parser = InlineListParser(self.reader) + parser.search_block(start_marker) + return parser.parse(pattern, end_marker) class ProgFileExtractor(SourceFileExtractor): """ @@ -350,7 +401,7 @@ class BpfHeaderExtractor(FileExtractor): """ An extractor for the UAPI BPF header. """ - filename = os.path.join(LINUX_ROOT, 'tools/include/uapi/linux/bpf.h') + filename = os.path.join(INCLUDE_DIR, 'uapi/linux/bpf.h') def get_prog_types(self): return self.get_enum('bpf_prog_type') @@ -374,7 +425,7 @@ class ManProgExtractor(ManPageExtractor): """ An extractor for bpftool-prog.rst. """ - filename = os.path.join(BPFTOOL_DIR, 'Documentation/bpftool-prog.rst') + filename = os.path.join(BPFTOOL_DOC_DIR, 'bpftool-prog.rst') def get_attach_types(self): return self.get_rst_list('ATTACH_TYPE') @@ -383,7 +434,7 @@ class ManMapExtractor(ManPageExtractor): """ An extractor for bpftool-map.rst. 
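    Map type names are parsed from the page's TYPE list via get_rst_list()
    (see get_map_types() below).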
""" - filename = os.path.join(BPFTOOL_DIR, 'Documentation/bpftool-map.rst') + filename = os.path.join(BPFTOOL_DOC_DIR, 'bpftool-map.rst') def get_map_types(self): return self.get_rst_list('TYPE') @@ -392,7 +443,7 @@ class ManCgroupExtractor(ManPageExtractor): """ An extractor for bpftool-cgroup.rst. """ - filename = os.path.join(BPFTOOL_DIR, 'Documentation/bpftool-cgroup.rst') + filename = os.path.join(BPFTOOL_DOC_DIR, 'bpftool-cgroup.rst') def get_attach_types(self): return self.get_rst_list('ATTACH_TYPE') @@ -411,7 +462,7 @@ class BashcompExtractor(FileExtractor): """ An extractor for bpftool's bash completion file. """ - filename = os.path.join(BPFTOOL_DIR, 'bash-completion/bpftool') + filename = os.path.join(BPFTOOL_BASHCOMP_DIR, 'bpftool') def get_prog_attach_types(self): return self.get_bashcomp_list('BPFTOOL_PROG_ATTACH_TYPES') @@ -562,7 +613,7 @@ def main(): help_cmd_options = source_info.get_options() source_info.close() - man_cmd_info = ManGenericExtractor(os.path.join('Documentation', 'bpftool-' + cmd + '.rst')) + man_cmd_info = ManGenericExtractor(os.path.join(BPFTOOL_DOC_DIR, 'bpftool-' + cmd + '.rst')) man_cmd_options = man_cmd_info.get_options() man_cmd_info.close() @@ -573,13 +624,26 @@ def main(): help_main_options = source_main_info.get_options() source_main_info.close() - man_main_info = ManGenericExtractor(os.path.join('Documentation', 'bpftool.rst')) + man_main_info = ManGenericExtractor(os.path.join(BPFTOOL_DOC_DIR, 'bpftool.rst')) man_main_options = man_main_info.get_options() man_main_info.close() verify(help_main_options, man_main_options, f'Comparing {source_main_info.filename} (do_help() OPTIONS) and {man_main_info.filename} (OPTIONS):') + # Compare common options (options that apply to all commands) + + main_hdr_info = MainHeaderFileExtractor() + source_common_options = main_hdr_info.get_common_options() + main_hdr_info.close() + + man_substitutions = ManSubstitutionsExtractor() + man_common_options = man_substitutions.get_common_options() + man_substitutions.close() + + verify(source_common_options, man_common_options, + f'Comparing common options from {main_hdr_info.filename} (HELP_SPEC_OPTIONS) and {man_substitutions.filename}:') + sys.exit(retval) if __name__ == "__main__": diff --git a/tools/testing/selftests/bpf/test_cgroup_storage.c b/tools/testing/selftests/bpf/test_cgroup_storage.c index a63787e7bb1a..5b8314cd77fd 100644 --- a/tools/testing/selftests/bpf/test_cgroup_storage.c +++ b/tools/testing/selftests/bpf/test_cgroup_storage.c @@ -51,15 +51,15 @@ int main(int argc, char **argv) goto err; } - map_fd = bpf_create_map(BPF_MAP_TYPE_CGROUP_STORAGE, sizeof(key), - sizeof(value), 0, 0); + map_fd = bpf_map_create(BPF_MAP_TYPE_CGROUP_STORAGE, NULL, sizeof(key), + sizeof(value), 0, NULL); if (map_fd < 0) { printf("Failed to create map: %s\n", strerror(errno)); goto out; } - percpu_map_fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, - sizeof(key), sizeof(value), 0, 0); + percpu_map_fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, NULL, + sizeof(key), sizeof(value), 0, NULL); if (percpu_map_fd < 0) { printf("Failed to create map: %s\n", strerror(errno)); goto out; diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/test_lpm_map.c index 006be3963977..baa3e3ecae82 100644 --- a/tools/testing/selftests/bpf/test_lpm_map.c +++ b/tools/testing/selftests/bpf/test_lpm_map.c @@ -208,6 +208,7 @@ static void test_lpm_order(void) static void test_lpm_map(int keysize) { + LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = 
BPF_F_NO_PREALLOC); size_t i, j, n_matches, n_matches_after_delete, n_nodes, n_lookups; struct tlpm_node *t, *list = NULL; struct bpf_lpm_trie_key *key; @@ -233,11 +234,11 @@ static void test_lpm_map(int keysize) key = alloca(sizeof(*key) + keysize); memset(key, 0, sizeof(*key) + keysize); - map = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE, + map = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL, sizeof(*key) + keysize, keysize + 1, 4096, - BPF_F_NO_PREALLOC); + &opts); assert(map >= 0); for (i = 0; i < n_nodes; ++i) { @@ -329,6 +330,7 @@ static void test_lpm_map(int keysize) static void test_lpm_ipaddr(void) { + LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC); struct bpf_lpm_trie_key *key_ipv4; struct bpf_lpm_trie_key *key_ipv6; size_t key_size_ipv4; @@ -342,14 +344,14 @@ static void test_lpm_ipaddr(void) key_ipv4 = alloca(key_size_ipv4); key_ipv6 = alloca(key_size_ipv6); - map_fd_ipv4 = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE, + map_fd_ipv4 = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL, key_size_ipv4, sizeof(value), - 100, BPF_F_NO_PREALLOC); + 100, &opts); assert(map_fd_ipv4 >= 0); - map_fd_ipv6 = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE, + map_fd_ipv6 = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL, key_size_ipv6, sizeof(value), - 100, BPF_F_NO_PREALLOC); + 100, &opts); assert(map_fd_ipv6 >= 0); /* Fill data some IPv4 and IPv6 address ranges */ @@ -423,6 +425,7 @@ static void test_lpm_ipaddr(void) static void test_lpm_delete(void) { + LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC); struct bpf_lpm_trie_key *key; size_t key_size; int map_fd; @@ -431,9 +434,9 @@ static void test_lpm_delete(void) key_size = sizeof(*key) + sizeof(__u32); key = alloca(key_size); - map_fd = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE, + map_fd = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL, key_size, sizeof(value), - 100, BPF_F_NO_PREALLOC); + 100, &opts); assert(map_fd >= 0); /* Add nodes: @@ -535,6 +538,7 @@ static void test_lpm_delete(void) static void test_lpm_get_next_key(void) { + LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC); struct bpf_lpm_trie_key *key_p, *next_key_p; size_t key_size; __u32 value = 0; @@ -544,8 +548,7 @@ static void test_lpm_get_next_key(void) key_p = alloca(key_size); next_key_p = alloca(key_size); - map_fd = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE, key_size, sizeof(value), - 100, BPF_F_NO_PREALLOC); + map_fd = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL, key_size, sizeof(value), 100, &opts); assert(map_fd >= 0); /* empty tree. 
get_next_key should return ENOENT */ @@ -753,6 +756,7 @@ static void setup_lpm_mt_test_info(struct lpm_mt_test_info *info, int map_fd) static void test_lpm_multi_thread(void) { + LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC); struct lpm_mt_test_info info[4]; size_t key_size, value_size; pthread_t thread_id[4]; @@ -762,8 +766,7 @@ static void test_lpm_multi_thread(void) /* create a trie */ value_size = sizeof(__u32); key_size = sizeof(struct bpf_lpm_trie_key) + value_size; - map_fd = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE, key_size, value_size, - 100, BPF_F_NO_PREALLOC); + map_fd = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL, key_size, value_size, 100, &opts); /* create 4 threads to test update, delete, lookup and get_next_key */ setup_lpm_mt_test_info(&info[0], map_fd); diff --git a/tools/testing/selftests/bpf/test_lru_map.c b/tools/testing/selftests/bpf/test_lru_map.c index 7f3d1d8460b4..b9f1bbbc8aba 100644 --- a/tools/testing/selftests/bpf/test_lru_map.c +++ b/tools/testing/selftests/bpf/test_lru_map.c @@ -28,13 +28,14 @@ static int nr_cpus; static int create_map(int map_type, int map_flags, unsigned int size) { + LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = map_flags); int map_fd; - map_fd = bpf_create_map(map_type, sizeof(unsigned long long), - sizeof(unsigned long long), size, map_flags); + map_fd = bpf_map_create(map_type, NULL, sizeof(unsigned long long), + sizeof(unsigned long long), size, &opts); if (map_fd == -1) - perror("bpf_create_map"); + perror("bpf_map_create"); return map_fd; } @@ -42,7 +43,6 @@ static int create_map(int map_type, int map_flags, unsigned int size) static int bpf_map_lookup_elem_with_ref_bit(int fd, unsigned long long key, void *value) { - struct bpf_create_map_attr map; struct bpf_insn insns[] = { BPF_LD_MAP_VALUE(BPF_REG_9, 0, 0), BPF_LD_MAP_FD(BPF_REG_1, fd), @@ -63,13 +63,7 @@ static int bpf_map_lookup_elem_with_ref_bit(int fd, unsigned long long key, int mfd, pfd, ret, zero = 0; __u32 retval = 0; - memset(&map, 0, sizeof(map)); - map.map_type = BPF_MAP_TYPE_ARRAY; - map.key_size = sizeof(int); - map.value_size = sizeof(unsigned long long); - map.max_entries = 1; - - mfd = bpf_create_map_xattr(&map); + mfd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int), sizeof(__u64), 1, NULL); if (mfd < 0) return -1; diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index 8b31bc1a801d..f4cd658bbe00 100644 --- a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c @@ -33,15 +33,14 @@ static int skips; -static int map_flags; +static struct bpf_map_create_opts map_opts = { .sz = sizeof(map_opts) }; static void test_hashmap(unsigned int task, void *data) { long long key, next_key, first_key, value; int fd; - fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), - 2, map_flags); + fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(key), sizeof(value), 2, &map_opts); if (fd < 0) { printf("Failed to create hashmap '%s'!\n", strerror(errno)); exit(1); @@ -138,8 +137,7 @@ static void test_hashmap_sizes(unsigned int task, void *data) for (i = 1; i <= 512; i <<= 1) for (j = 1; j <= 1 << 18; j <<= 1) { - fd = bpf_create_map(BPF_MAP_TYPE_HASH, i, j, - 2, map_flags); + fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, i, j, 2, &map_opts); if (fd < 0) { if (errno == ENOMEM) return; @@ -160,8 +158,8 @@ static void test_hashmap_percpu(unsigned int task, void *data) int expected_key_mask = 0; int fd, i; - fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_HASH, sizeof(key), - 
sizeof(bpf_percpu(value, 0)), 2, map_flags); + fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_HASH, NULL, sizeof(key), + sizeof(bpf_percpu(value, 0)), 2, &map_opts); if (fd < 0) { printf("Failed to create hashmap '%s'!\n", strerror(errno)); exit(1); @@ -272,11 +270,11 @@ static int helper_fill_hashmap(int max_entries) int i, fd, ret; long long key, value; - fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), - max_entries, map_flags); + fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(key), sizeof(value), + max_entries, &map_opts); CHECK(fd < 0, "failed to create hashmap", - "err: %s, flags: 0x%x\n", strerror(errno), map_flags); + "err: %s, flags: 0x%x\n", strerror(errno), map_opts.map_flags); for (i = 0; i < max_entries; i++) { key = i; value = key; @@ -332,8 +330,8 @@ static void test_hashmap_zero_seed(void) int i, first, second, old_flags; long long key, next_first, next_second; - old_flags = map_flags; - map_flags |= BPF_F_ZERO_SEED; + old_flags = map_opts.map_flags; + map_opts.map_flags |= BPF_F_ZERO_SEED; first = helper_fill_hashmap(3); second = helper_fill_hashmap(3); @@ -355,7 +353,7 @@ static void test_hashmap_zero_seed(void) key = next_first; } - map_flags = old_flags; + map_opts.map_flags = old_flags; close(first); close(second); } @@ -365,8 +363,7 @@ static void test_arraymap(unsigned int task, void *data) int key, next_key, fd; long long value; - fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(key), sizeof(value), - 2, 0); + fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(key), sizeof(value), 2, NULL); if (fd < 0) { printf("Failed to create arraymap '%s'!\n", strerror(errno)); exit(1); @@ -421,8 +418,8 @@ static void test_arraymap_percpu(unsigned int task, void *data) BPF_DECLARE_PERCPU(long, values); int key, next_key, fd, i; - fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key), - sizeof(bpf_percpu(values, 0)), 2, 0); + fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_ARRAY, NULL, sizeof(key), + sizeof(bpf_percpu(values, 0)), 2, NULL); if (fd < 0) { printf("Failed to create arraymap '%s'!\n", strerror(errno)); exit(1); @@ -484,8 +481,8 @@ static void test_arraymap_percpu_many_keys(void) unsigned int nr_keys = 2000; int key, fd, i; - fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key), - sizeof(bpf_percpu(values, 0)), nr_keys, 0); + fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_ARRAY, NULL, sizeof(key), + sizeof(bpf_percpu(values, 0)), nr_keys, NULL); if (fd < 0) { printf("Failed to create per-cpu arraymap '%s'!\n", strerror(errno)); @@ -516,8 +513,7 @@ static void test_devmap(unsigned int task, void *data) int fd; __u32 key, value; - fd = bpf_create_map(BPF_MAP_TYPE_DEVMAP, sizeof(key), sizeof(value), - 2, 0); + fd = bpf_map_create(BPF_MAP_TYPE_DEVMAP, NULL, sizeof(key), sizeof(value), 2, NULL); if (fd < 0) { printf("Failed to create devmap '%s'!\n", strerror(errno)); exit(1); @@ -531,8 +527,7 @@ static void test_devmap_hash(unsigned int task, void *data) int fd; __u32 key, value; - fd = bpf_create_map(BPF_MAP_TYPE_DEVMAP_HASH, sizeof(key), sizeof(value), - 2, 0); + fd = bpf_map_create(BPF_MAP_TYPE_DEVMAP_HASH, NULL, sizeof(key), sizeof(value), 2, NULL); if (fd < 0) { printf("Failed to create devmap_hash '%s'!\n", strerror(errno)); exit(1); @@ -552,14 +547,12 @@ static void test_queuemap(unsigned int task, void *data) vals[i] = rand(); /* Invalid key size */ - fd = bpf_create_map(BPF_MAP_TYPE_QUEUE, 4, sizeof(val), MAP_SIZE, - map_flags); + fd = bpf_map_create(BPF_MAP_TYPE_QUEUE, NULL, 4, sizeof(val), MAP_SIZE, &map_opts); assert(fd < 0 && errno == EINVAL); - 
fd = bpf_create_map(BPF_MAP_TYPE_QUEUE, 0, sizeof(val), MAP_SIZE, - map_flags); + fd = bpf_map_create(BPF_MAP_TYPE_QUEUE, NULL, 0, sizeof(val), MAP_SIZE, &map_opts); /* Queue map does not support BPF_F_NO_PREALLOC */ - if (map_flags & BPF_F_NO_PREALLOC) { + if (map_opts.map_flags & BPF_F_NO_PREALLOC) { assert(fd < 0 && errno == EINVAL); return; } @@ -610,14 +603,12 @@ static void test_stackmap(unsigned int task, void *data) vals[i] = rand(); /* Invalid key size */ - fd = bpf_create_map(BPF_MAP_TYPE_STACK, 4, sizeof(val), MAP_SIZE, - map_flags); + fd = bpf_map_create(BPF_MAP_TYPE_STACK, NULL, 4, sizeof(val), MAP_SIZE, &map_opts); assert(fd < 0 && errno == EINVAL); - fd = bpf_create_map(BPF_MAP_TYPE_STACK, 0, sizeof(val), MAP_SIZE, - map_flags); + fd = bpf_map_create(BPF_MAP_TYPE_STACK, NULL, 0, sizeof(val), MAP_SIZE, &map_opts); /* Stack map does not support BPF_F_NO_PREALLOC */ - if (map_flags & BPF_F_NO_PREALLOC) { + if (map_opts.map_flags & BPF_F_NO_PREALLOC) { assert(fd < 0 && errno == EINVAL); return; } @@ -744,9 +735,9 @@ static void test_sockmap(unsigned int tasks, void *data) } /* Test sockmap with connected sockets */ - fd = bpf_create_map(BPF_MAP_TYPE_SOCKMAP, + fd = bpf_map_create(BPF_MAP_TYPE_SOCKMAP, NULL, sizeof(key), sizeof(value), - 6, 0); + 6, NULL); if (fd < 0) { if (!bpf_probe_map_type(BPF_MAP_TYPE_SOCKMAP, 0)) { printf("%s SKIP (unsupported map type BPF_MAP_TYPE_SOCKMAP)\n", @@ -1168,8 +1159,7 @@ static void test_map_in_map(void) obj = bpf_object__open(MAPINMAP_PROG); - fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(int), sizeof(int), - 2, 0); + fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(int), sizeof(int), 2, NULL); if (fd < 0) { printf("Failed to create hashmap '%s'!\n", strerror(errno)); exit(1); @@ -1315,8 +1305,8 @@ static void test_map_large(void) } key; int fd, i, value; - fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), - MAP_SIZE, map_flags); + fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(key), sizeof(value), + MAP_SIZE, &map_opts); if (fd < 0) { printf("Failed to create large map '%s'!\n", strerror(errno)); exit(1); @@ -1469,8 +1459,8 @@ static void test_map_parallel(void) int i, fd, key = 0, value = 0; int data[2]; - fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), - MAP_SIZE, map_flags); + fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(key), sizeof(value), + MAP_SIZE, &map_opts); if (fd < 0) { printf("Failed to create map for parallel test '%s'!\n", strerror(errno)); @@ -1518,9 +1508,13 @@ static void test_map_parallel(void) static void test_map_rdonly(void) { int fd, key = 0, value = 0; + __u32 old_flags; - fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), - MAP_SIZE, map_flags | BPF_F_RDONLY); + old_flags = map_opts.map_flags; + map_opts.map_flags |= BPF_F_RDONLY; + fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(key), sizeof(value), + MAP_SIZE, &map_opts); + map_opts.map_flags = old_flags; if (fd < 0) { printf("Failed to create map for read only test '%s'!\n", strerror(errno)); @@ -1543,9 +1537,13 @@ static void test_map_rdonly(void) static void test_map_wronly_hash(void) { int fd, key = 0, value = 0; + __u32 old_flags; - fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), - MAP_SIZE, map_flags | BPF_F_WRONLY); + old_flags = map_opts.map_flags; + map_opts.map_flags |= BPF_F_WRONLY; + fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(key), sizeof(value), + MAP_SIZE, &map_opts); + map_opts.map_flags = old_flags; if (fd < 0) { printf("Failed to create map for write 
only test '%s'!\n", strerror(errno)); @@ -1567,13 +1565,17 @@ static void test_map_wronly_hash(void) static void test_map_wronly_stack_or_queue(enum bpf_map_type map_type) { int fd, value = 0; + __u32 old_flags; + assert(map_type == BPF_MAP_TYPE_QUEUE || map_type == BPF_MAP_TYPE_STACK); - fd = bpf_create_map(map_type, 0, sizeof(value), MAP_SIZE, - map_flags | BPF_F_WRONLY); + old_flags = map_opts.map_flags; + map_opts.map_flags |= BPF_F_WRONLY; + fd = bpf_map_create(map_type, NULL, 0, sizeof(value), MAP_SIZE, &map_opts); + map_opts.map_flags = old_flags; /* Stack/Queue maps do not support BPF_F_NO_PREALLOC */ - if (map_flags & BPF_F_NO_PREALLOC) { + if (map_opts.map_flags & BPF_F_NO_PREALLOC) { assert(fd < 0 && errno == EINVAL); return; } @@ -1700,8 +1702,8 @@ static void test_reuseport_array(void) __u32 fds_idx = 0; int fd; - map_fd = bpf_create_map(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, - sizeof(__u32), sizeof(__u64), array_size, 0); + map_fd = bpf_map_create(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, NULL, + sizeof(__u32), sizeof(__u64), array_size, NULL); CHECK(map_fd < 0, "reuseport array create", "map_fd:%d, errno:%d\n", map_fd, errno); @@ -1837,8 +1839,8 @@ static void test_reuseport_array(void) close(map_fd); /* Test 32 bit fd */ - map_fd = bpf_create_map(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, - sizeof(__u32), sizeof(__u32), array_size, 0); + map_fd = bpf_map_create(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, NULL, + sizeof(__u32), sizeof(__u32), array_size, NULL); CHECK(map_fd < 0, "reuseport array create", "map_fd:%d, errno:%d\n", map_fd, errno); prepare_reuseport_grp(SOCK_STREAM, map_fd, sizeof(__u32), &fd64, @@ -1896,10 +1898,10 @@ int main(void) libbpf_set_strict_mode(LIBBPF_STRICT_ALL); - map_flags = 0; + map_opts.map_flags = 0; run_all_tests(); - map_flags = BPF_F_NO_PREALLOC; + map_opts.map_flags = BPF_F_NO_PREALLOC; run_all_tests(); #define DEFINE_TEST(name) test_##name(); diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index c65986bd9d07..296928948bb9 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -473,11 +473,11 @@ static struct prog_test_def prog_test_defs[] = { #include <prog_tests/tests.h> #undef DEFINE_TEST }; -const int prog_test_cnt = ARRAY_SIZE(prog_test_defs); +static const int prog_test_cnt = ARRAY_SIZE(prog_test_defs); const char *argp_program_version = "test_progs 0.1"; const char *argp_program_bug_address = "<bpf@vger.kernel.org>"; -const char argp_program_doc[] = "BPF selftests test runner"; +static const char argp_program_doc[] = "BPF selftests test runner"; enum ARG_KEYS { ARG_TEST_NUM = 'n', @@ -939,7 +939,7 @@ static void *dispatch_thread(void *ctx) { struct dispatch_data *data = ctx; int sock_fd; - FILE *log_fd = NULL; + FILE *log_fp = NULL; sock_fd = data->sock_fd; @@ -1002,8 +1002,8 @@ static void *dispatch_thread(void *ctx) /* collect all logs */ if (msg_test_done.test_done.have_log) { - log_fd = open_memstream(&result->log_buf, &result->log_cnt); - if (!log_fd) + log_fp = open_memstream(&result->log_buf, &result->log_cnt); + if (!log_fp) goto error; while (true) { @@ -1014,12 +1014,12 @@ static void *dispatch_thread(void *ctx) if (msg_log.type != MSG_TEST_LOG) goto error; - fprintf(log_fd, "%s", msg_log.test_log.log_buf); + fprintf(log_fp, "%s", msg_log.test_log.log_buf); if (msg_log.test_log.is_last) break; } - fclose(log_fd); - log_fd = NULL; + fclose(log_fp); + log_fp = NULL; } /* output log */ { @@ -1045,8 +1045,8 @@ error: if (env.debug) fprintf(stderr, "[%d]: 
Protocol/IO error: %s.\n", data->worker_id, strerror(errno)); - if (log_fd) - fclose(log_fd); + if (log_fp) + fclose(log_fp); done: { struct msg msg_exit; @@ -1198,11 +1198,11 @@ static int server_main(void) env.sub_succ_cnt += result->sub_succ_cnt; } + print_all_error_logs(); + fprintf(stdout, "Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n", env.succ_cnt, env.sub_succ_cnt, env.skip_cnt, env.fail_cnt); - print_all_error_logs(); - /* reap all workers */ for (i = 0; i < env.workers; i++) { int wstatus, pid; @@ -1484,11 +1484,11 @@ int main(int argc, char **argv) if (env.list_test_names) goto out; + print_all_error_logs(); + fprintf(stdout, "Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n", env.succ_cnt, env.sub_succ_cnt, env.skip_cnt, env.fail_cnt); - print_all_error_logs(); - close(env.saved_netns_fd); out: if (!env.list_test_names && env.has_testmod) diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c index 05c9e4944c01..f0c8d05ba6d1 100644 --- a/tools/testing/selftests/bpf/test_sock_addr.c +++ b/tools/testing/selftests/bpf/test_sock_addr.c @@ -663,23 +663,36 @@ static int load_insns(const struct sock_addr_test *test, static int load_path(const struct sock_addr_test *test, const char *path) { - struct bpf_prog_load_attr attr; struct bpf_object *obj; - int prog_fd; + struct bpf_program *prog; + int err; - memset(&attr, 0, sizeof(struct bpf_prog_load_attr)); - attr.file = path; - attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR; - attr.expected_attach_type = test->expected_attach_type; - attr.prog_flags = BPF_F_TEST_RND_HI32; + obj = bpf_object__open_file(path, NULL); + err = libbpf_get_error(obj); + if (err) { + log_err(">>> Opening BPF object (%s) error.\n", path); + return -1; + } + + prog = bpf_object__next_program(obj, NULL); + if (!prog) + goto err_out; - if (bpf_prog_load_xattr(&attr, &obj, &prog_fd)) { + bpf_program__set_type(prog, BPF_PROG_TYPE_CGROUP_SOCK_ADDR); + bpf_program__set_expected_attach_type(prog, test->expected_attach_type); + bpf_program__set_flags(prog, BPF_F_TEST_RND_HI32); + + err = bpf_object__load(obj); + if (err) { if (test->expected_result != LOAD_REJECT) log_err(">>> Loading program (%s) error.\n", path); - return -1; + goto err_out; } - return prog_fd; + return bpf_program__fd(prog); +err_out: + bpf_object__close(obj); + return -1; } static int bind4_prog_load(const struct sock_addr_test *test) diff --git a/tools/testing/selftests/bpf/test_tag.c b/tools/testing/selftests/bpf/test_tag.c index 5c7bea525626..0851c42ee31c 100644 --- a/tools/testing/selftests/bpf/test_tag.c +++ b/tools/testing/selftests/bpf/test_tag.c @@ -185,11 +185,12 @@ static void do_test(uint32_t *tests, int start_insns, int fd_map, int main(void) { + LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC); uint32_t tests = 0; int i, fd_map; - fd_map = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(int), - sizeof(int), 1, BPF_F_NO_PREALLOC); + fd_map = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(int), + sizeof(int), 1, &opts); assert(fd_map > 0); for (i = 0; i < 5; i++) { diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 9fbef396a716..4f1fe8af10a4 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -54,7 +54,7 @@ #define MAX_INSNS BPF_MAXINSNS #define MAX_TEST_INSNS 1000000 #define MAX_FIXUPS 8 -#define MAX_NR_MAPS 21 +#define MAX_NR_MAPS 22 #define MAX_TEST_RUNS 8 #define POINTER_VALUE 0xcafe4all #define 
TEST_DATA_LEN 64 @@ -462,11 +462,11 @@ static int __create_map(uint32_t type, uint32_t size_key, uint32_t size_value, uint32_t max_elem, uint32_t extra_flags) { + LIBBPF_OPTS(bpf_map_create_opts, opts); int fd; - fd = bpf_create_map(type, size_key, size_value, max_elem, - (type == BPF_MAP_TYPE_HASH ? - BPF_F_NO_PREALLOC : 0) | extra_flags); + opts.map_flags = (type == BPF_MAP_TYPE_HASH ? BPF_F_NO_PREALLOC : 0) | extra_flags; + fd = bpf_map_create(type, NULL, size_key, size_value, max_elem, &opts); if (fd < 0) { if (skip_unsupported_map(type)) return -1; @@ -522,8 +522,8 @@ static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem, { int mfd, p1fd, p2fd, p3fd; - mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int), - sizeof(int), max_elem, 0); + mfd = bpf_map_create(BPF_MAP_TYPE_PROG_ARRAY, NULL, sizeof(int), + sizeof(int), max_elem, NULL); if (mfd < 0) { if (skip_unsupported_map(BPF_MAP_TYPE_PROG_ARRAY)) return -1; @@ -553,10 +553,11 @@ err: static int create_map_in_map(void) { + LIBBPF_OPTS(bpf_map_create_opts, opts); int inner_map_fd, outer_map_fd; - inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int), - sizeof(int), 1, 0); + inner_map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int), + sizeof(int), 1, NULL); if (inner_map_fd < 0) { if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY)) return -1; @@ -564,8 +565,9 @@ static int create_map_in_map(void) return inner_map_fd; } - outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL, - sizeof(int), inner_map_fd, 1, 0); + opts.inner_map_fd = inner_map_fd; + outer_map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL, + sizeof(int), sizeof(int), 1, &opts); if (outer_map_fd < 0) { if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY_OF_MAPS)) return -1; @@ -584,8 +586,8 @@ static int create_cgroup_storage(bool percpu) BPF_MAP_TYPE_CGROUP_STORAGE; int fd; - fd = bpf_create_map(type, sizeof(struct bpf_cgroup_storage_key), - TEST_DATA_LEN, 0, 0); + fd = bpf_map_create(type, NULL, sizeof(struct bpf_cgroup_storage_key), + TEST_DATA_LEN, 0, NULL); if (fd < 0) { if (skip_unsupported_map(type)) return -1; @@ -652,7 +654,7 @@ static int load_btf(void) memcpy(ptr, btf_str_sec, hdr.str_len); ptr += hdr.str_len; - btf_fd = bpf_load_btf(raw_btf, ptr - raw_btf, 0, 0, 0); + btf_fd = bpf_btf_load(raw_btf, ptr - raw_btf, NULL); free(raw_btf); if (btf_fd < 0) return -1; @@ -661,22 +663,17 @@ static int load_btf(void) static int create_map_spin_lock(void) { - struct bpf_create_map_attr attr = { - .name = "test_map", - .map_type = BPF_MAP_TYPE_ARRAY, - .key_size = 4, - .value_size = 8, - .max_entries = 1, + LIBBPF_OPTS(bpf_map_create_opts, opts, .btf_key_type_id = 1, .btf_value_type_id = 3, - }; + ); int fd, btf_fd; btf_fd = load_btf(); if (btf_fd < 0) return -1; - attr.btf_fd = btf_fd; - fd = bpf_create_map_xattr(&attr); + opts.btf_fd = btf_fd; + fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "test_map", 4, 8, 1, &opts); if (fd < 0) printf("Failed to create map with spin_lock\n"); return fd; @@ -684,24 +681,19 @@ static int create_map_spin_lock(void) static int create_sk_storage_map(void) { - struct bpf_create_map_attr attr = { - .name = "test_map", - .map_type = BPF_MAP_TYPE_SK_STORAGE, - .key_size = 4, - .value_size = 8, - .max_entries = 0, + LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC, .btf_key_type_id = 1, .btf_value_type_id = 3, - }; + ); int fd, btf_fd; btf_fd = load_btf(); if (btf_fd < 0) return -1; - attr.btf_fd = btf_fd; - fd = bpf_create_map_xattr(&attr); - close(attr.btf_fd); + opts.btf_fd 
= btf_fd; + fd = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "test_map", 4, 8, 0, &opts); + close(opts.btf_fd); if (fd < 0) printf("Failed to create sk_storage_map\n"); return fd; diff --git a/tools/testing/selftests/bpf/testing_helpers.c b/tools/testing/selftests/bpf/testing_helpers.c index 52c2f24e0898..795b6798ccee 100644 --- a/tools/testing/selftests/bpf/testing_helpers.c +++ b/tools/testing/selftests/bpf/testing_helpers.c @@ -88,12 +88,15 @@ int extra_prog_load_log_flags = 0; int bpf_prog_test_load(const char *file, enum bpf_prog_type type, struct bpf_object **pobj, int *prog_fd) { - struct bpf_object_load_attr attr = {}; + LIBBPF_OPTS(bpf_object_open_opts, opts, + .kernel_log_level = extra_prog_load_log_flags, + ); struct bpf_object *obj; struct bpf_program *prog; + __u32 flags; int err; - obj = bpf_object__open(file); + obj = bpf_object__open_file(file, &opts); if (!obj) return -errno; @@ -106,11 +109,10 @@ int bpf_prog_test_load(const char *file, enum bpf_prog_type type, if (type != BPF_PROG_TYPE_UNSPEC) bpf_program__set_type(prog, type); - bpf_program__set_extra_flags(prog, BPF_F_TEST_RND_HI32); + flags = bpf_program__flags(prog) | BPF_F_TEST_RND_HI32; + bpf_program__set_flags(prog, flags); - attr.obj = obj; - attr.log_level = extra_prog_load_log_flags; - err = bpf_object__load_xattr(&attr); + err = bpf_object__load(obj); if (err) goto err_out; diff --git a/tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c b/tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c index c22dc83a41fd..b39665f33524 100644 --- a/tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c +++ b/tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c @@ -138,6 +138,8 @@ BPF_EXIT_INSN(), }, .result = ACCEPT, + .result_unpriv = REJECT, + .errstr_unpriv = "R0 leaks addr into mem", }, { "Dest pointer in r0 - succeed", @@ -156,4 +158,88 @@ BPF_EXIT_INSN(), }, .result = ACCEPT, + .result_unpriv = REJECT, + .errstr_unpriv = "R0 leaks addr into mem", +}, +{ + "Dest pointer in r0 - succeed, check 2", + .insns = { + /* r0 = &val */ + BPF_MOV64_REG(BPF_REG_0, BPF_REG_10), + /* val = r0; */ + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), + /* r5 = &val */ + BPF_MOV64_REG(BPF_REG_5, BPF_REG_10), + /* r0 = atomic_cmpxchg(&val, r0, r5); */ + BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8), + /* r1 = *r0 */ + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8), + /* exit(0); */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .result_unpriv = REJECT, + .errstr_unpriv = "R0 leaks addr into mem", +}, +{ + "Dest pointer in r0 - succeed, check 3", + .insns = { + /* r0 = &val */ + BPF_MOV64_REG(BPF_REG_0, BPF_REG_10), + /* val = r0; */ + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), + /* r5 = &val */ + BPF_MOV64_REG(BPF_REG_5, BPF_REG_10), + /* r0 = atomic_cmpxchg(&val, r0, r5); */ + BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8), + /* exit(0); */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid size of register fill", + .errstr_unpriv = "R0 leaks addr into mem", +}, +{ + "Dest pointer in r0 - succeed, check 4", + .insns = { + /* r0 = &val */ + BPF_MOV32_REG(BPF_REG_0, BPF_REG_10), + /* val = r0; */ + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8), + /* r5 = &val */ + BPF_MOV32_REG(BPF_REG_5, BPF_REG_10), + /* r0 = atomic_cmpxchg(&val, r0, r5); */ + BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8), + /* r1 = *r10 */ + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -8), + /* exit(0); */ + BPF_MOV64_IMM(BPF_REG_0, 0), + 
BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .result_unpriv = REJECT, + .errstr_unpriv = "R10 partial copy of pointer", +}, +{ + "Dest pointer in r0 - succeed, check 5", + .insns = { + /* r0 = &val */ + BPF_MOV32_REG(BPF_REG_0, BPF_REG_10), + /* val = r0; */ + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8), + /* r5 = &val */ + BPF_MOV32_REG(BPF_REG_5, BPF_REG_10), + /* r0 = atomic_cmpxchg(&val, r0, r5); */ + BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8), + /* r1 = *r0 */ + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -8), + /* exit(0); */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "R0 invalid mem access", + .errstr_unpriv = "R10 partial copy of pointer", }, diff --git a/tools/testing/selftests/bpf/verifier/atomic_fetch.c b/tools/testing/selftests/bpf/verifier/atomic_fetch.c index 3bc9ff7a860b..5bf03fb4fa2b 100644 --- a/tools/testing/selftests/bpf/verifier/atomic_fetch.c +++ b/tools/testing/selftests/bpf/verifier/atomic_fetch.c @@ -1,3 +1,97 @@ +{ + "atomic dw/fetch and address leakage of (map ptr & -1) via stack slot", + .insns = { + BPF_LD_IMM64(BPF_REG_1, -1), + BPF_LD_MAP_FD(BPF_REG_8, 0), + BPF_LD_MAP_FD(BPF_REG_9, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_9, 0), + BPF_ATOMIC_OP(BPF_DW, BPF_AND | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_2, 0), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 2, 4 }, + .result = ACCEPT, + .result_unpriv = REJECT, + .errstr_unpriv = "leaking pointer from stack off -8", +}, +{ + "atomic dw/fetch and address leakage of (map ptr & -1) via returned value", + .insns = { + BPF_LD_IMM64(BPF_REG_1, -1), + BPF_LD_MAP_FD(BPF_REG_8, 0), + BPF_LD_MAP_FD(BPF_REG_9, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_9, 0), + BPF_ATOMIC_OP(BPF_DW, BPF_AND | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0), + BPF_MOV64_REG(BPF_REG_9, BPF_REG_1), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 2, 4 }, + .result = ACCEPT, + .result_unpriv = REJECT, + .errstr_unpriv = "leaking pointer from stack off -8", +}, +{ + "atomic w/fetch and address leakage of (map ptr & -1) via stack slot", + .insns = { + BPF_LD_IMM64(BPF_REG_1, -1), + BPF_LD_MAP_FD(BPF_REG_8, 0), + BPF_LD_MAP_FD(BPF_REG_9, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_9, 0), + BPF_ATOMIC_OP(BPF_W, BPF_AND | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_2, 0), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 2, 4 }, + .result = REJECT, + .errstr = "invalid size of register fill", +}, +{ + "atomic w/fetch and address leakage of (map ptr & -1) via returned 
value", + .insns = { + BPF_LD_IMM64(BPF_REG_1, -1), + BPF_LD_MAP_FD(BPF_REG_8, 0), + BPF_LD_MAP_FD(BPF_REG_9, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_9, 0), + BPF_ATOMIC_OP(BPF_W, BPF_AND | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0), + BPF_MOV64_REG(BPF_REG_9, BPF_REG_1), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 2, 4 }, + .result = REJECT, + .errstr = "invalid size of register fill", +}, #define __ATOMIC_FETCH_OP_TEST(src_reg, dst_reg, operand1, op, operand2, expect) \ { \ "atomic fetch " #op ", src=" #dst_reg " dst=" #dst_reg, \ diff --git a/tools/testing/selftests/bpf/verifier/search_pruning.c b/tools/testing/selftests/bpf/verifier/search_pruning.c index 7e50cb80873a..682519769fe3 100644 --- a/tools/testing/selftests/bpf/verifier/search_pruning.c +++ b/tools/testing/selftests/bpf/verifier/search_pruning.c @@ -133,6 +133,77 @@ .prog_type = BPF_PROG_TYPE_TRACEPOINT, }, { + "precision tracking for u32 spill/fill", + .insns = { + BPF_MOV64_REG(BPF_REG_7, BPF_REG_1), + BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), + BPF_MOV32_IMM(BPF_REG_6, 32), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_MOV32_IMM(BPF_REG_6, 4), + /* Additional insns to introduce a pruning point. */ + BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_MOV64_IMM(BPF_REG_3, 0), + /* u32 spill/fill */ + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -8), + BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_10, -8), + /* out-of-bound map value access for r6=32 */ + BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 15 }, + .result = REJECT, + .errstr = "R0 min value is outside of the allowed memory range", + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "precision tracking for u32 spills, u64 fill", + .insns = { + BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + BPF_MOV32_IMM(BPF_REG_7, 0xffffffff), + /* Additional insns to introduce a pruning point. */ + BPF_MOV64_IMM(BPF_REG_3, 1), + BPF_MOV64_IMM(BPF_REG_3, 1), + BPF_MOV64_IMM(BPF_REG_3, 1), + BPF_MOV64_IMM(BPF_REG_3, 1), + BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_MOV64_IMM(BPF_REG_3, 1), + BPF_ALU32_IMM(BPF_DIV, BPF_REG_3, 0), + /* u32 spills, u64 fill */ + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -4), + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_10, -8), + /* if r8 != X goto pc+1 r8 known in fallthrough branch */ + BPF_JMP_IMM(BPF_JNE, BPF_REG_8, 0xffffffff, 1), + BPF_MOV64_IMM(BPF_REG_3, 1), + /* if r8 == X goto pc+1 condition always true on first + * traversal, so starts backtracking to mark r8 as requiring + * precision. r7 marked as needing precision. r6 not marked + * since it's not tracked. 
+ */ + BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 0xffffffff, 1), + /* fails if r8 correctly marked unknown after fill. */ + BPF_ALU32_IMM(BPF_DIV, BPF_REG_3, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "div by zero", + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ "allocated_stack", .insns = { BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1), diff --git a/tools/testing/selftests/bpf/verifier/spill_fill.c b/tools/testing/selftests/bpf/verifier/spill_fill.c index 7ab3de108761..6c907144311f 100644 --- a/tools/testing/selftests/bpf/verifier/spill_fill.c +++ b/tools/testing/selftests/bpf/verifier/spill_fill.c @@ -176,6 +176,38 @@ .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, { + "Spill u32 const scalars. Refill as u64. Offset to skb->data", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + /* r6 = 0 */ + BPF_MOV32_IMM(BPF_REG_6, 0), + /* r7 = 20 */ + BPF_MOV32_IMM(BPF_REG_7, 20), + /* *(u32 *)(r10 -4) = r6 */ + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -4), + /* *(u32 *)(r10 -8) = r7 */ + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, -8), + /* r4 = *(u64 *)(r10 -8) */ + BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -8), + /* r0 = r2 */ + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv,umax=65535 */ + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4), + /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv,umax=65535 */ + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), + /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv20 */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid access to packet", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ "Spill a u32 const scalar. Refill as u16 from fp-6. 
Offset to skb->data", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, diff --git a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c index 2debba4e8a3a..4d347bc53aa2 100644 --- a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c +++ b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c @@ -1078,6 +1078,29 @@ .errstr_unpriv = "R0 pointer -= pointer prohibited", }, { + "map access: trying to leak tained dst reg", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), + BPF_MOV32_IMM(BPF_REG_1, 0xFFFFFFFF), + BPF_MOV32_REG(BPF_REG_1, BPF_REG_1), + BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), + BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 4 }, + .result = REJECT, + .errstr = "math between map_value pointer and 4294967295 is not allowed", +}, +{ "32bit pkt_ptr -= scalar", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1, diff --git a/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c b/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c index bfb97383e6b5..b4ec228eb95d 100644 --- a/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c +++ b/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c @@ -35,7 +35,7 @@ .prog_type = BPF_PROG_TYPE_XDP, }, { - "XDP pkt read, pkt_data' > pkt_end, good access", + "XDP pkt read, pkt_data' > pkt_end, corner case, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, @@ -88,6 +88,41 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { + "XDP pkt read, pkt_data' > pkt_end, corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data' > pkt_end, corner case -1, bad access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ "XDP pkt read, pkt_end > pkt_data', good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), @@ -106,16 +141,16 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_end > pkt_data', bad access 1", + "XDP pkt read, pkt_end > pkt_data', corner case -1, bad access", .insns = { BPF_LDX_MEM(BPF_W, 
BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data_end)), BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, @@ -143,6 +178,42 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { + "XDP pkt read, pkt_end > pkt_data', corner case, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_end > pkt_data', corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ "XDP pkt read, pkt_data' < pkt_end, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), @@ -161,16 +232,16 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_data' < pkt_end, bad access 1", + "XDP pkt read, pkt_data' < pkt_end, corner case -1, bad access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data_end)), BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, @@ -198,7 +269,43 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_end < pkt_data', good access", + "XDP pkt read, pkt_data' < pkt_end, corner case, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data' < pkt_end, corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + 
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_end < pkt_data', corner case, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, @@ -251,6 +358,41 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { + "XDP pkt read, pkt_end < pkt_data', corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), + BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_end < pkt_data', corner case -1, bad access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ "XDP pkt read, pkt_data' >= pkt_end, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), @@ -268,15 +410,15 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_data' >= pkt_end, bad access 1", + "XDP pkt read, pkt_data' >= pkt_end, corner case -1, bad access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data_end)), BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, @@ -304,7 +446,41 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_end >= pkt_data', good access", + "XDP pkt read, pkt_data' >= pkt_end, corner case, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data' >= pkt_end, corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct 
xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_end >= pkt_data', corner case, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, @@ -359,7 +535,44 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_data' <= pkt_end, good access", + "XDP pkt read, pkt_end >= pkt_data', corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_end >= pkt_data', corner case -1, bad access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data' <= pkt_end, corner case, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, @@ -414,6 +627,43 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { + "XDP pkt read, pkt_data' <= pkt_end, corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), + BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data' <= pkt_end, corner case -1, bad access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ "XDP pkt read, pkt_end <= pkt_data', good access", .insns = { 
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), @@ -431,15 +681,15 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_end <= pkt_data', bad access 1", + "XDP pkt read, pkt_end <= pkt_data', corner case -1, bad access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data_end)), BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, @@ -467,7 +717,41 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_meta' > pkt_data, good access", + "XDP pkt read, pkt_end <= pkt_data', corner case, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_end <= pkt_data', corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_meta' > pkt_data, corner case, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data_meta)), @@ -520,6 +804,41 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { + "XDP pkt read, pkt_meta' > pkt_data, corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_meta' > pkt_data, corner case -1, bad access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ "XDP pkt read, pkt_data > pkt_meta', good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, @@ 
-538,16 +857,16 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_data > pkt_meta', bad access 1", + "XDP pkt read, pkt_data > pkt_meta', corner case -1, bad access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data_meta)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, @@ -575,6 +894,42 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { + "XDP pkt read, pkt_data > pkt_meta', corner case, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data > pkt_meta', corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ "XDP pkt read, pkt_meta' < pkt_data, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, @@ -593,16 +948,16 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_meta' < pkt_data, bad access 1", + "XDP pkt read, pkt_meta' < pkt_data, corner case -1, bad access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data_meta)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, @@ -630,7 +985,43 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_data < pkt_meta', good access", + "XDP pkt read, pkt_meta' < pkt_data, corner case, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_meta' < pkt_data, 
corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data < pkt_meta', corner case, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data_meta)), @@ -683,6 +1074,41 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { + "XDP pkt read, pkt_data < pkt_meta', corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), + BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data < pkt_meta', corner case -1, bad access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ "XDP pkt read, pkt_meta' >= pkt_data, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, @@ -700,15 +1126,15 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_meta' >= pkt_data, bad access 1", + "XDP pkt read, pkt_meta' >= pkt_data, corner case -1, bad access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data_meta)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, @@ -736,7 +1162,41 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_data >= pkt_meta', good access", + "XDP pkt read, pkt_meta' >= pkt_data, corner case, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_meta' >= pkt_data, corner case +1, good access", + 
.insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data >= pkt_meta', corner case, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data_meta)), @@ -791,7 +1251,44 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_meta' <= pkt_data, good access", + "XDP pkt read, pkt_data >= pkt_meta', corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data >= pkt_meta', corner case -1, bad access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_meta' <= pkt_data, corner case, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data_meta)), @@ -846,6 +1343,43 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { + "XDP pkt read, pkt_meta' <= pkt_data, corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), + BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_meta' <= pkt_data, corner case -1, bad access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = 
F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ "XDP pkt read, pkt_data <= pkt_meta', good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, @@ -863,15 +1397,15 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_data <= pkt_meta', bad access 1", + "XDP pkt read, pkt_data <= pkt_meta', corner case -1, bad access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data_meta)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, @@ -898,3 +1432,37 @@ .prog_type = BPF_PROG_TYPE_XDP, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, +{ + "XDP pkt read, pkt_data <= pkt_meta', corner case, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data <= pkt_meta', corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, diff --git a/tools/testing/selftests/bpf/vmtest.sh b/tools/testing/selftests/bpf/vmtest.sh index 027198768fad..5e43c79ddc6e 100755 --- a/tools/testing/selftests/bpf/vmtest.sh +++ b/tools/testing/selftests/bpf/vmtest.sh @@ -4,17 +4,34 @@ set -u set -e -# This script currently only works for x86_64, as -# it is based on the VM image used by the BPF CI which is -# x86_64. -QEMU_BINARY="${QEMU_BINARY:="qemu-system-x86_64"}" -X86_BZIMAGE="arch/x86/boot/bzImage" +# This script currently only works for x86_64 and s390x, as +# it is based on the VM image used by the BPF CI, which is +# available only for these architectures. 
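As an aside on the shell pattern the vmtest.sh hunk below introduces: the per-architecture QEMU flags are kept in a Bash array (QEMU_FLAGS=(-cpu host -smp 8)) rather than a flat string, so the later qemu invocation can expand them quoted, one argument per array element. A minimal standalone sketch of the difference; the flag values here are illustrative only, not taken from the tree:

#!/bin/bash
# Sketch: why flag lists belong in a Bash array, not a string.
set -eu

FLAGS_STRING="-cpu host -smp 8"
FLAGS_ARRAY=(-cpu host -smp 8)

# Quoted string expansion yields ONE word, '-cpu host -smp 8',
# which qemu would reject as a single malformed option.
printf 'string form: %q\n' "$FLAGS_STRING"

# Quoted array expansion yields one word per element, so flags
# survive quoting intact all the way to execve().
printf 'array form :'
printf ' %q' "${FLAGS_ARRAY[@]}"
printf '\n'

This is the same reason the qemu command line later in this patch splices the flags in with an [@] array expansion instead of an unquoted string.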
+ARCH="$(uname -m)" +case "${ARCH}" in +s390x) + QEMU_BINARY=qemu-system-s390x + QEMU_CONSOLE="ttyS1" + QEMU_FLAGS=(-smp 2) + BZIMAGE="arch/s390/boot/compressed/vmlinux" + ;; +x86_64) + QEMU_BINARY=qemu-system-x86_64 + QEMU_CONSOLE="ttyS0,115200" + QEMU_FLAGS=(-cpu host -smp 8) + BZIMAGE="arch/x86/boot/bzImage" + ;; +*) + echo "Unsupported architecture" + exit 1 + ;; +esac DEFAULT_COMMAND="./test_progs" MOUNT_DIR="mnt" ROOTFS_IMAGE="root.img" OUTPUT_DIR="$HOME/.bpf_selftests" -KCONFIG_URL="https://raw.githubusercontent.com/libbpf/libbpf/master/travis-ci/vmtest/configs/latest.config" -KCONFIG_API_URL="https://api.github.com/repos/libbpf/libbpf/contents/travis-ci/vmtest/configs/latest.config" +KCONFIG_URL="https://raw.githubusercontent.com/libbpf/libbpf/master/travis-ci/vmtest/configs/config-latest.${ARCH}" +KCONFIG_API_URL="https://api.github.com/repos/libbpf/libbpf/contents/travis-ci/vmtest/configs/config-latest.${ARCH}" INDEX_URL="https://raw.githubusercontent.com/libbpf/libbpf/master/travis-ci/vmtest/configs/INDEX" NUM_COMPILE_JOBS="$(nproc)" LOG_FILE_BASE="$(date +"bpf_selftests.%Y-%m-%d_%H-%M-%S")" @@ -85,7 +102,7 @@ newest_rootfs_version() { { for file in "${!URLS[@]}"; do - if [[ $file =~ ^libbpf-vmtest-rootfs-(.*)\.tar\.zst$ ]]; then + if [[ $file =~ ^"${ARCH}"/libbpf-vmtest-rootfs-(.*)\.tar\.zst$ ]]; then echo "${BASH_REMATCH[1]}" fi done @@ -102,7 +119,7 @@ download_rootfs() exit 1 fi - download "libbpf-vmtest-rootfs-$rootfsversion.tar.zst" | + download "${ARCH}/libbpf-vmtest-rootfs-$rootfsversion.tar.zst" | zstd -d | sudo tar -C "$dir" -x } @@ -224,13 +241,12 @@ EOF -nodefaults \ -display none \ -serial mon:stdio \ - -cpu host \ + "${qemu_flags[@]}" \ -enable-kvm \ - -smp 8 \ -m 4G \ -drive file="${rootfs_img}",format=raw,index=1,media=disk,if=virtio,cache=none \ -kernel "${kernel_bzimage}" \ - -append "root=/dev/vda rw console=ttyS0,115200" + -append "root=/dev/vda rw console=${QEMU_CONSOLE}" } copy_logs() @@ -282,7 +298,7 @@ main() local kernel_checkout=$(realpath "${script_dir}"/../../../../) # By default the script searches for the kernel in the checkout directory but # it also obeys environment variables O= and KBUILD_OUTPUT= - local kernel_bzimage="${kernel_checkout}/${X86_BZIMAGE}" + local kernel_bzimage="${kernel_checkout}/${BZIMAGE}" local command="${DEFAULT_COMMAND}" local update_image="no" local exit_command="poweroff -f" @@ -337,13 +353,13 @@ main() if is_rel_path "${O}"; then O="$(realpath "${PWD}/${O}")" fi - kernel_bzimage="${O}/${X86_BZIMAGE}" + kernel_bzimage="${O}/${BZIMAGE}" make_command="${make_command} O=${O}" elif [[ "${KBUILD_OUTPUT:=""}" != "" ]]; then if is_rel_path "${KBUILD_OUTPUT}"; then KBUILD_OUTPUT="$(realpath "${PWD}/${KBUILD_OUTPUT}")" fi - kernel_bzimage="${KBUILD_OUTPUT}/${X86_BZIMAGE}" + kernel_bzimage="${KBUILD_OUTPUT}/${BZIMAGE}" make_command="${make_command} KBUILD_OUTPUT=${KBUILD_OUTPUT}" fi diff --git a/tools/testing/selftests/bpf/xdp_redirect_multi.c b/tools/testing/selftests/bpf/xdp_redirect_multi.c index f5ffba341c17..51c8224b4ccc 100644 --- a/tools/testing/selftests/bpf/xdp_redirect_multi.c +++ b/tools/testing/selftests/bpf/xdp_redirect_multi.c @@ -85,10 +85,7 @@ int main(int argc, char **argv) { int prog_fd, group_all, mac_map; struct bpf_program *ingress_prog, *egress_prog; - struct bpf_prog_load_attr prog_load_attr = { - .prog_type = BPF_PROG_TYPE_UNSPEC, - }; - int i, ret, opt, egress_prog_fd = 0; + int i, err, ret, opt, egress_prog_fd = 0; struct bpf_devmap_val devmap_val; bool attach_egress_prog = false; unsigned char 
mac_addr[6]; @@ -147,10 +144,14 @@ int main(int argc, char **argv) printf("\n"); snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); - prog_load_attr.file = filename; - - if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd)) + obj = bpf_object__open_file(filename, NULL); + err = libbpf_get_error(obj); + if (err) + goto err_out; + err = bpf_object__load(obj); + if (err) goto err_out; + prog_fd = bpf_program__fd(bpf_object__next_program(obj, NULL)); if (attach_egress_prog) group_all = bpf_object__find_map_fd_by_name(obj, "map_egress"); diff --git a/tools/testing/selftests/bpf/xdpxceiver.c b/tools/testing/selftests/bpf/xdpxceiver.c index fe7f423b8c3f..0a5d23da486d 100644 --- a/tools/testing/selftests/bpf/xdpxceiver.c +++ b/tools/testing/selftests/bpf/xdpxceiver.c @@ -100,6 +100,12 @@ #include "xdpxceiver.h" #include "../kselftest.h" +/* AF_XDP APIs were moved into libxdp and marked as deprecated in libbpf. + * Until xdpxceiver is either moved or rewritten into libxdp, suppress + * deprecation warnings in this file + */ +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" + static const char *MAC1 = "\x00\x0A\x56\x9E\xEE\x62"; static const char *MAC2 = "\x00\x0A\x56\x9E\xEE\x61"; static const char *IP1 = "192.168.100.162"; @@ -1217,7 +1223,7 @@ static bool hugepages_present(struct ifobject *ifobject) void *bufs; bufs = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE, - MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_HUGETLB, -1, 0); + MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0); if (bufs == MAP_FAILED) return false; @@ -1364,6 +1370,10 @@ static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_ testapp_invalid_desc(test); break; case TEST_TYPE_UNALIGNED_INV_DESC: + if (!hugepages_present(test->ifobj_tx)) { + ksft_test_result_skip("No 2M huge pages present.\n"); + return; + } test_spec_set_name(test, "UNALIGNED_INV_DESC"); test->ifobj_tx->umem->unaligned_mode = true; test->ifobj_rx->umem->unaligned_mode = true; diff --git a/tools/testing/selftests/damon/.gitignore b/tools/testing/selftests/damon/.gitignore new file mode 100644 index 000000000000..c6c2965a6607 --- /dev/null +++ b/tools/testing/selftests/damon/.gitignore @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +huge_count_read_write diff --git a/tools/testing/selftests/damon/Makefile b/tools/testing/selftests/damon/Makefile index 8a3f2cd9fec0..937d36ae9a69 100644 --- a/tools/testing/selftests/damon/Makefile +++ b/tools/testing/selftests/damon/Makefile @@ -1,7 +1,10 @@ # SPDX-License-Identifier: GPL-2.0 # Makefile for damon selftests -TEST_FILES = _chk_dependency.sh -TEST_PROGS = debugfs_attrs.sh +TEST_GEN_FILES += huge_count_read_write + +TEST_FILES = _chk_dependency.sh _debugfs_common.sh +TEST_PROGS = debugfs_attrs.sh debugfs_schemes.sh debugfs_target_ids.sh +TEST_PROGS += debugfs_empty_targets.sh debugfs_huge_count_read_write.sh include ../lib.mk diff --git a/tools/testing/selftests/damon/_debugfs_common.sh b/tools/testing/selftests/damon/_debugfs_common.sh new file mode 100644 index 000000000000..48989d4813ae --- /dev/null +++ b/tools/testing/selftests/damon/_debugfs_common.sh @@ -0,0 +1,52 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +test_write_result() { + file=$1 + content=$2 + orig_content=$3 + expect_reason=$4 + expected=$5 + + echo "$content" > "$file" + if [ $? 
-ne "$expected" ] + then + echo "writing $content to $file doesn't return $expected" + echo "expected because: $expect_reason" + echo "$orig_content" > "$file" + exit 1 + fi +} + +test_write_succ() { + test_write_result "$1" "$2" "$3" "$4" 0 +} + +test_write_fail() { + test_write_result "$1" "$2" "$3" "$4" 1 +} + +test_content() { + file=$1 + orig_content=$2 + expected=$3 + expect_reason=$4 + + content=$(cat "$file") + if [ "$content" != "$expected" ] + then + echo "reading $file expected $expected but $content" + echo "expected because: $expect_reason" + echo "$orig_content" > "$file" + exit 1 + fi +} + +source ./_chk_dependency.sh + +damon_onoff="$DBGFS/monitor_on" +if [ $(cat "$damon_onoff") = "on" ] +then + echo "monitoring is on" + exit $ksft_skip +fi diff --git a/tools/testing/selftests/damon/debugfs_attrs.sh b/tools/testing/selftests/damon/debugfs_attrs.sh index 196b6640bf37..902e312bca89 100644 --- a/tools/testing/selftests/damon/debugfs_attrs.sh +++ b/tools/testing/selftests/damon/debugfs_attrs.sh @@ -1,48 +1,7 @@ #!/bin/bash # SPDX-License-Identifier: GPL-2.0 -test_write_result() { - file=$1 - content=$2 - orig_content=$3 - expect_reason=$4 - expected=$5 - - echo "$content" > "$file" - if [ $? -ne "$expected" ] - then - echo "writing $content to $file doesn't return $expected" - echo "expected because: $expect_reason" - echo "$orig_content" > "$file" - exit 1 - fi -} - -test_write_succ() { - test_write_result "$1" "$2" "$3" "$4" 0 -} - -test_write_fail() { - test_write_result "$1" "$2" "$3" "$4" 1 -} - -test_content() { - file=$1 - orig_content=$2 - expected=$3 - expect_reason=$4 - - content=$(cat "$file") - if [ "$content" != "$expected" ] - then - echo "reading $file expected $expected but $content" - echo "expected because: $expect_reason" - echo "$orig_content" > "$file" - exit 1 - fi -} - -source ./_chk_dependency.sh +source _debugfs_common.sh # Test attrs file # =============== @@ -56,33 +15,3 @@ test_write_fail "$file" "1 2 3 5 4" "$orig_content" \ "min_nr_regions > max_nr_regions" test_content "$file" "$orig_content" "1 2 3 4 5" "successfully written" echo "$orig_content" > "$file" - -# Test schemes file -# ================= - -file="$DBGFS/schemes" -orig_content=$(cat "$file") - -test_write_succ "$file" "1 2 3 4 5 6 4 0 0 0 1 2 3 1 100 3 2 1" \ - "$orig_content" "valid input" -test_write_fail "$file" "1 2 -3 4 5 6 3 0 0 0 1 2 3 1 100 3 2 1" "$orig_content" "multi lines" -test_write_succ "$file" "" "$orig_content" "disabling" -echo "$orig_content" > "$file" - -# Test target_ids file -# ==================== - -file="$DBGFS/target_ids" -orig_content=$(cat "$file") - -test_write_succ "$file" "1 2 3 4" "$orig_content" "valid input" -test_write_succ "$file" "1 2 abc 4" "$orig_content" "still valid input" -test_content "$file" "$orig_content" "1 2" "non-integer was there" -test_write_succ "$file" "abc 2 3" "$orig_content" "the file allows wrong input" -test_content "$file" "$orig_content" "" "wrong input written" -test_write_succ "$file" "" "$orig_content" "empty input" -test_content "$file" "$orig_content" "" "empty input written" -echo "$orig_content" > "$file" - -echo "PASS" diff --git a/tools/testing/selftests/damon/debugfs_empty_targets.sh b/tools/testing/selftests/damon/debugfs_empty_targets.sh new file mode 100644 index 000000000000..87aff8083822 --- /dev/null +++ b/tools/testing/selftests/damon/debugfs_empty_targets.sh @@ -0,0 +1,13 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +source _debugfs_common.sh + +# Test empty targets case +# ======================= + 
+orig_target_ids=$(cat "$DBGFS/target_ids") +echo "" > "$DBGFS/target_ids" +orig_monitor_on=$(cat "$DBGFS/monitor_on") +test_write_fail "$DBGFS/monitor_on" "on" "$orig_monitor_on" "empty target ids" +echo "$orig_target_ids" > "$DBGFS/target_ids" diff --git a/tools/testing/selftests/damon/debugfs_huge_count_read_write.sh b/tools/testing/selftests/damon/debugfs_huge_count_read_write.sh new file mode 100644 index 000000000000..922cadac2950 --- /dev/null +++ b/tools/testing/selftests/damon/debugfs_huge_count_read_write.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +source _debugfs_common.sh + +# Test huge count read write +# ========================== + +dmesg -C + +for file in "$DBGFS/"* +do + ./huge_count_read_write "$file" +done + +if dmesg | grep -q WARNING +then + dmesg + exit 1 +else + exit 0 +fi diff --git a/tools/testing/selftests/damon/debugfs_schemes.sh b/tools/testing/selftests/damon/debugfs_schemes.sh new file mode 100644 index 000000000000..5b39ab44731c --- /dev/null +++ b/tools/testing/selftests/damon/debugfs_schemes.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +source _debugfs_common.sh + +# Test schemes file +# ================= + +file="$DBGFS/schemes" +orig_content=$(cat "$file") + +test_write_succ "$file" "1 2 3 4 5 6 4 0 0 0 1 2 3 1 100 3 2 1" \ + "$orig_content" "valid input" +test_write_fail "$file" "1 2 +3 4 5 6 3 0 0 0 1 2 3 1 100 3 2 1" "$orig_content" "multi lines" +test_write_succ "$file" "" "$orig_content" "disabling" +test_write_fail "$file" "2 1 2 1 10 1 3 10 1 1 1 1 1 1 1 1 2 3" \ + "$orig_content" "wrong condition ranges" +echo "$orig_content" > "$file" diff --git a/tools/testing/selftests/damon/debugfs_target_ids.sh b/tools/testing/selftests/damon/debugfs_target_ids.sh new file mode 100644 index 000000000000..49aeabdb0aae --- /dev/null +++ b/tools/testing/selftests/damon/debugfs_target_ids.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +source _debugfs_common.sh + +# Test target_ids file +# ==================== + +file="$DBGFS/target_ids" +orig_content=$(cat "$file") + +test_write_succ "$file" "1 2 3 4" "$orig_content" "valid input" +test_write_succ "$file" "1 2 abc 4" "$orig_content" "still valid input" +test_content "$file" "$orig_content" "1 2" "non-integer was there" +test_write_succ "$file" "abc 2 3" "$orig_content" "the file allows wrong input" +test_content "$file" "$orig_content" "" "wrong input written" +test_write_succ "$file" "" "$orig_content" "empty input" +test_content "$file" "$orig_content" "" "empty input written" +echo "$orig_content" > "$file" diff --git a/tools/testing/selftests/damon/huge_count_read_write.c b/tools/testing/selftests/damon/huge_count_read_write.c new file mode 100644 index 000000000000..ad7a6b4cf338 --- /dev/null +++ b/tools/testing/selftests/damon/huge_count_read_write.c @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Author: SeongJae Park <sj@kernel.org> + */ + +#include <fcntl.h> +#include <stdlib.h> +#include <unistd.h> +#include <stdio.h> + +void write_read_with_huge_count(char *file) +{ + int filedesc = open(file, O_RDWR); + char buf[25]; + int ret; + + printf("%s %s\n", __func__, file); + if (filedesc < 0) { + fprintf(stderr, "failed opening %s\n", file); + exit(1); + } + + write(filedesc, "", 0xfffffffful); + perror("after write: "); + ret = read(filedesc, buf, 0xfffffffful); + perror("after read: "); + close(filedesc); +} + +int main(int argc, char *argv[]) +{ + if (argc != 2) { + fprintf(stderr, "Usage: %s <file>\n", argv[0]); + 
exit(1); + } + write_read_with_huge_count(argv[1]); + + return 0; +} diff --git a/tools/testing/selftests/drivers/net/mlxsw/rif_mac_profiles_occ.sh b/tools/testing/selftests/drivers/net/mlxsw/rif_mac_profiles_occ.sh index b513f64d9092..026a126f584d 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/rif_mac_profiles_occ.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/rif_mac_profiles_occ.sh @@ -72,6 +72,35 @@ rif_mac_profile_replacement_test() ip link set $h1.10 address $h1_10_mac } +rif_mac_profile_consolidation_test() +{ + local count=$1; shift + local h1_20_mac + + RET=0 + + if [[ $count -eq 1 ]]; then + return + fi + + h1_20_mac=$(mac_get $h1.20) + + # Set the MAC of $h1.20 to that of $h1.10 and confirm that they are + # using the same MAC profile. + ip link set $h1.20 address 00:11:11:11:11:11 + check_err $? + + occ=$(devlink -j resource show $DEVLINK_DEV \ + | jq '.[][][] | select(.name=="rif_mac_profiles") |.["occ"]') + + [[ $occ -eq $((count - 1)) ]] + check_err $? "MAC profile occupancy did not decrease" + + log_test "RIF MAC profile consolidation" + + ip link set $h1.20 address $h1_20_mac +} + rif_mac_profile_shared_replacement_test() { local count=$1; shift @@ -104,6 +133,7 @@ rif_mac_profile_edit_test() create_max_rif_mac_profiles $count rif_mac_profile_replacement_test + rif_mac_profile_consolidation_test $count rif_mac_profile_shared_replacement_test $count } diff --git a/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh b/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh index 729a86cc4ede..3639b89c81ba 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh @@ -145,23 +145,6 @@ sanitization_single_dev_no_local_ip_test() log_test "vxlan device with no local ip" } -sanitization_single_dev_local_ipv6_test() -{ - RET=0 - - ip link add dev br0 type bridge mcast_snooping 0 - - ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \ - ttl 20 tos inherit local 2001:db8::1 dstport 4789 - - sanitization_single_dev_test_fail - - ip link del dev vxlan0 - ip link del dev br0 - - log_test "vxlan device with local ipv6 address" -} - sanitization_single_dev_learning_enabled_test() { RET=0 @@ -276,7 +259,6 @@ sanitization_single_dev_test() sanitization_single_dev_mcast_enabled_test sanitization_single_dev_mcast_group_test sanitization_single_dev_no_local_ip_test - sanitization_single_dev_local_ipv6_test sanitization_single_dev_learning_enabled_test sanitization_single_dev_local_interface_test sanitization_single_dev_port_range_test diff --git a/tools/testing/selftests/gpio/Makefile b/tools/testing/selftests/gpio/Makefile index 39f2bbe8dd3d..d7b312b44a62 100644 --- a/tools/testing/selftests/gpio/Makefile +++ b/tools/testing/selftests/gpio/Makefile @@ -3,5 +3,6 @@ TEST_PROGS := gpio-mockup.sh TEST_FILES := gpio-mockup-sysfs.sh TEST_GEN_PROGS_EXTENDED := gpio-mockup-cdev +CFLAGS += -O2 -g -Wall -I../../../../usr/include/ include ../lib.mk diff --git a/tools/testing/selftests/gpio/gpio-mockup-cdev.c b/tools/testing/selftests/gpio/gpio-mockup-cdev.c index e83eac71621a..d1640f44f8ac 100644 --- a/tools/testing/selftests/gpio/gpio-mockup-cdev.c +++ b/tools/testing/selftests/gpio/gpio-mockup-cdev.c @@ -117,7 +117,7 @@ int main(int argc, char *argv[]) { char *chip; int opt, ret, cfd, lfd; - unsigned int offset, val, abiv; + unsigned int offset, val = 0, abiv; uint32_t flags_v1; uint64_t flags_v2; diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore index 
3763105029fb..00814c0f87a6 100644 --- a/tools/testing/selftests/kvm/.gitignore +++ b/tools/testing/selftests/kvm/.gitignore @@ -30,6 +30,7 @@ /x86_64/svm_int_ctl_test /x86_64/sync_regs_test /x86_64/tsc_msrs_test +/x86_64/userspace_io_test /x86_64/userspace_msr_exit_test /x86_64/vmx_apic_access_test /x86_64/vmx_close_while_nested_test diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index c4e34717826a..f307c9f61981 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -59,6 +59,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/vmx_preemption_timer_test TEST_GEN_PROGS_x86_64 += x86_64/svm_vmcall_test TEST_GEN_PROGS_x86_64 += x86_64/svm_int_ctl_test TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test +TEST_GEN_PROGS_x86_64 += x86_64/userspace_io_test TEST_GEN_PROGS_x86_64 += x86_64/userspace_msr_exit_test TEST_GEN_PROGS_x86_64 += x86_64/vmx_apic_access_test TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h index 6a1a37f30494..da2b702da71a 100644 --- a/tools/testing/selftests/kvm/include/kvm_util.h +++ b/tools/testing/selftests/kvm/include/kvm_util.h @@ -71,6 +71,15 @@ enum vm_guest_mode { #endif +#if defined(__x86_64__) +unsigned long vm_compute_max_gfn(struct kvm_vm *vm); +#else +static inline unsigned long vm_compute_max_gfn(struct kvm_vm *vm) +{ + return ((1ULL << vm->pa_bits) >> vm->page_shift) - 1; +} +#endif + #define MIN_PAGE_SIZE (1U << MIN_PAGE_SHIFT) #define PTES_PER_MIN_PAGE ptes_per_page(MIN_PAGE_SIZE) diff --git a/tools/testing/selftests/kvm/kvm_create_max_vcpus.c b/tools/testing/selftests/kvm/kvm_create_max_vcpus.c index f968dfd4ee88..aed9dc3ca1e9 100644 --- a/tools/testing/selftests/kvm/kvm_create_max_vcpus.c +++ b/tools/testing/selftests/kvm/kvm_create_max_vcpus.c @@ -12,6 +12,7 @@ #include <stdio.h> #include <stdlib.h> #include <string.h> +#include <sys/resource.h> #include "test_util.h" @@ -40,11 +41,40 @@ int main(int argc, char *argv[]) { int kvm_max_vcpu_id = kvm_check_cap(KVM_CAP_MAX_VCPU_ID); int kvm_max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS); + /* + * Number of file descriptors required, KVM_CAP_MAX_VCPUS for vCPU fds + + * an arbitrary number for everything else. + */ + int nr_fds_wanted = kvm_max_vcpus + 100; + struct rlimit rl; pr_info("KVM_CAP_MAX_VCPU_ID: %d\n", kvm_max_vcpu_id); pr_info("KVM_CAP_MAX_VCPUS: %d\n", kvm_max_vcpus); /* + * Check that we're allowed to open nr_fds_wanted file descriptors and + * try raising the limits if needed. + */ + TEST_ASSERT(!getrlimit(RLIMIT_NOFILE, &rl), "getrlimit() failed!"); + + if (rl.rlim_cur < nr_fds_wanted) { + rl.rlim_cur = nr_fds_wanted; + if (rl.rlim_max < nr_fds_wanted) { + int old_rlim_max = rl.rlim_max; + rl.rlim_max = nr_fds_wanted; + + int r = setrlimit(RLIMIT_NOFILE, &rl); + if (r < 0) { + printf("RLIMIT_NOFILE hard limit is too low (%d, wanted %d)\n", + old_rlim_max, nr_fds_wanted); + exit(KSFT_SKIP); + } + } else { + TEST_ASSERT(!setrlimit(RLIMIT_NOFILE, &rl), "setrlimit() failed!"); + } + } + + /* * Upstream KVM prior to 4.8 does not support KVM_CAP_MAX_VCPU_ID. * Userspace is supposed to use KVM_CAP_MAX_VCPUS as the maximum ID * in this case. 
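The kvm_create_max_vcpus.c hunk above opens one file descriptor per vCPU, so it checks RLIMIT_NOFILE up front and raises the soft limit (and, when possible, the hard limit) before creating anything. As a rough standalone sketch of that same check-and-raise pattern, not part of the patch itself (the helper name ensure_nofile() and its error handling are invented for illustration):

#include <stdio.h>
#include <stdlib.h>
#include <sys/resource.h>

/* Hypothetical helper: make sure at least "wanted" fds can be opened. */
static void ensure_nofile(rlim_t wanted)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_NOFILE, &rl)) {
		perror("getrlimit(RLIMIT_NOFILE)");
		exit(1);
	}
	if (rl.rlim_cur >= wanted)
		return;			/* soft limit is already sufficient */
	rl.rlim_cur = wanted;
	if (rl.rlim_max < wanted)
		rl.rlim_max = wanted;	/* raising the hard limit needs privilege */
	if (setrlimit(RLIMIT_NOFILE, &rl)) {
		perror("setrlimit(RLIMIT_NOFILE)");
		exit(4);		/* 4 == KSFT_SKIP in the kselftest harness */
	}
}

Note the design choice visible in the hunk: when the hard limit cannot be raised, the selftest exits with KSFT_SKIP rather than failing, since a low rlimit is an environment problem rather than a KVM bug.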
diff --git a/tools/testing/selftests/kvm/kvm_page_table_test.c b/tools/testing/selftests/kvm/kvm_page_table_test.c index 3836322add00..ba1fdc3dcf4a 100644 --- a/tools/testing/selftests/kvm/kvm_page_table_test.c +++ b/tools/testing/selftests/kvm/kvm_page_table_test.c @@ -280,7 +280,7 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg) #ifdef __s390x__ alignment = max(0x100000, alignment); #endif - guest_test_phys_mem = align_down(guest_test_virt_mem, alignment); + guest_test_phys_mem = align_down(guest_test_phys_mem, alignment); /* Set up the shared data structure test_args */ test_args.vm = vm; diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index 8f2e0bb1ef96..daf6fdb217a7 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -302,7 +302,7 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm) (1ULL << (vm->va_bits - 1)) >> vm->page_shift); /* Limit physical addresses to PA-bits. */ - vm->max_gfn = ((1ULL << vm->pa_bits) >> vm->page_shift) - 1; + vm->max_gfn = vm_compute_max_gfn(vm); /* Allocate and setup memory for guest. */ vm->vpages_mapped = sparsebit_alloc(); diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c index 82c39db91369..eef7b34756d5 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/processor.c +++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c @@ -1431,3 +1431,71 @@ struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vm *vm, uint32_t vcpui return cpuid; } + +#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx 0x68747541 +#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx 0x444d4163 +#define X86EMUL_CPUID_VENDOR_AuthenticAMD_edx 0x69746e65 + +static inline unsigned x86_family(unsigned int eax) +{ + unsigned int x86; + + x86 = (eax >> 8) & 0xf; + + if (x86 == 0xf) + x86 += (eax >> 20) & 0xff; + + return x86; +} + +unsigned long vm_compute_max_gfn(struct kvm_vm *vm) +{ + const unsigned long num_ht_pages = 12 << (30 - vm->page_shift); /* 12 GiB */ + unsigned long ht_gfn, max_gfn, max_pfn; + uint32_t eax, ebx, ecx, edx, max_ext_leaf; + + max_gfn = (1ULL << (vm->pa_bits - vm->page_shift)) - 1; + + /* Avoid reserved HyperTransport region on AMD processors. */ + eax = ecx = 0; + cpuid(&eax, &ebx, &ecx, &edx); + if (ebx != X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx || + ecx != X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx || + edx != X86EMUL_CPUID_VENDOR_AuthenticAMD_edx) + return max_gfn; + + /* On parts with <40 physical address bits, the area is fully hidden */ + if (vm->pa_bits < 40) + return max_gfn; + + /* Before family 17h, the HyperTransport area is just below 1T. */ + ht_gfn = (1 << 28) - num_ht_pages; + eax = 1; + cpuid(&eax, &ebx, &ecx, &edx); + if (x86_family(eax) < 0x17) + goto done; + + /* + * Otherwise it's at the top of the physical address space, possibly + * reduced due to SME by bits 11:6 of CPUID[0x8000001f].EBX. Use + * the old conservative value if MAXPHYADDR is not enumerated. 
+ */ + eax = 0x80000000; + cpuid(&eax, &ebx, &ecx, &edx); + max_ext_leaf = eax; + if (max_ext_leaf < 0x80000008) + goto done; + + eax = 0x80000008; + cpuid(&eax, &ebx, &ecx, &edx); + max_pfn = (1ULL << ((eax & 0xff) - vm->page_shift)) - 1; + if (max_ext_leaf >= 0x8000001f) { + eax = 0x8000001f; + cpuid(&eax, &ebx, &ecx, &edx); + max_pfn >>= (ebx >> 6) & 0x3f; + } + + ht_gfn = max_pfn - num_ht_pages; +done: + return min(max_gfn, ht_gfn - 1); +} diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_features.c b/tools/testing/selftests/kvm/x86_64/hyperv_features.c index 91d88aaa9899..672915ce73d8 100644 --- a/tools/testing/selftests/kvm/x86_64/hyperv_features.c +++ b/tools/testing/selftests/kvm/x86_64/hyperv_features.c @@ -165,10 +165,10 @@ static void hv_set_cpuid(struct kvm_vm *vm, struct kvm_cpuid2 *cpuid, vcpu_set_cpuid(vm, VCPU_ID, cpuid); } -static void guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr, - struct kvm_cpuid2 *best) +static void guest_test_msrs_access(void) { struct kvm_run *run; + struct kvm_vm *vm; struct ucall uc; int stage = 0, r; struct kvm_cpuid_entry2 feat = { @@ -180,11 +180,34 @@ static void guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr, struct kvm_cpuid_entry2 dbg = { .function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES }; - struct kvm_enable_cap cap = {0}; - - run = vcpu_state(vm, VCPU_ID); + struct kvm_cpuid2 *best; + vm_vaddr_t msr_gva; + struct kvm_enable_cap cap = { + .cap = KVM_CAP_HYPERV_ENFORCE_CPUID, + .args = {1} + }; + struct msr_data *msr; while (true) { + vm = vm_create_default(VCPU_ID, 0, guest_msr); + + msr_gva = vm_vaddr_alloc_page(vm); + memset(addr_gva2hva(vm, msr_gva), 0x0, getpagesize()); + msr = addr_gva2hva(vm, msr_gva); + + vcpu_args_set(vm, VCPU_ID, 1, msr_gva); + vcpu_enable_cap(vm, VCPU_ID, &cap); + + vcpu_set_hv_cpuid(vm, VCPU_ID); + + best = kvm_get_supported_hv_cpuid(); + + vm_init_descriptor_tables(vm); + vcpu_init_descriptor_tables(vm, VCPU_ID); + vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler); + + run = vcpu_state(vm, VCPU_ID); + switch (stage) { case 0: /* @@ -315,6 +338,7 @@ static void guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr, * capability enabled and guest visible CPUID bit unset. 
*/ cap.cap = KVM_CAP_HYPERV_SYNIC2; + cap.args[0] = 0; vcpu_enable_cap(vm, VCPU_ID, &cap); break; case 22: @@ -461,9 +485,9 @@ static void guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr, switch (get_ucall(vm, VCPU_ID, &uc)) { case UCALL_SYNC: - TEST_ASSERT(uc.args[1] == stage, - "Unexpected stage: %ld (%d expected)\n", - uc.args[1], stage); + TEST_ASSERT(uc.args[1] == 0, + "Unexpected stage: %ld (0 expected)\n", + uc.args[1]); break; case UCALL_ABORT: TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0], @@ -474,13 +498,14 @@ static void guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr, } stage++; + kvm_vm_free(vm); } } -static void guest_test_hcalls_access(struct kvm_vm *vm, struct hcall_data *hcall, - void *input, void *output, struct kvm_cpuid2 *best) +static void guest_test_hcalls_access(void) { struct kvm_run *run; + struct kvm_vm *vm; struct ucall uc; int stage = 0, r; struct kvm_cpuid_entry2 feat = { @@ -493,10 +518,38 @@ static void guest_test_hcalls_access(struct kvm_vm *vm, struct hcall_data *hcall struct kvm_cpuid_entry2 dbg = { .function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES }; - - run = vcpu_state(vm, VCPU_ID); + struct kvm_enable_cap cap = { + .cap = KVM_CAP_HYPERV_ENFORCE_CPUID, + .args = {1} + }; + vm_vaddr_t hcall_page, hcall_params; + struct hcall_data *hcall; + struct kvm_cpuid2 *best; while (true) { + vm = vm_create_default(VCPU_ID, 0, guest_hcall); + + vm_init_descriptor_tables(vm); + vcpu_init_descriptor_tables(vm, VCPU_ID); + vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler); + + /* Hypercall input/output */ + hcall_page = vm_vaddr_alloc_pages(vm, 2); + hcall = addr_gva2hva(vm, hcall_page); + memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize()); + + hcall_params = vm_vaddr_alloc_page(vm); + memset(addr_gva2hva(vm, hcall_params), 0x0, getpagesize()); + + vcpu_args_set(vm, VCPU_ID, 2, addr_gva2gpa(vm, hcall_page), hcall_params); + vcpu_enable_cap(vm, VCPU_ID, &cap); + + vcpu_set_hv_cpuid(vm, VCPU_ID); + + best = kvm_get_supported_hv_cpuid(); + + run = vcpu_state(vm, VCPU_ID); + switch (stage) { case 0: hcall->control = 0xdeadbeef; @@ -606,9 +659,9 @@ static void guest_test_hcalls_access(struct kvm_vm *vm, struct hcall_data *hcall switch (get_ucall(vm, VCPU_ID, &uc)) { case UCALL_SYNC: - TEST_ASSERT(uc.args[1] == stage, - "Unexpected stage: %ld (%d expected)\n", - uc.args[1], stage); + TEST_ASSERT(uc.args[1] == 0, + "Unexpected stage: %ld (0 expected)\n", + uc.args[1]); break; case UCALL_ABORT: TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0], @@ -619,66 +672,15 @@ static void guest_test_hcalls_access(struct kvm_vm *vm, struct hcall_data *hcall } stage++; + kvm_vm_free(vm); } } int main(void) { - struct kvm_cpuid2 *best; - struct kvm_vm *vm; - vm_vaddr_t msr_gva, hcall_page, hcall_params; - struct kvm_enable_cap cap = { - .cap = KVM_CAP_HYPERV_ENFORCE_CPUID, - .args = {1} - }; - - /* Test MSRs */ - vm = vm_create_default(VCPU_ID, 0, guest_msr); - - msr_gva = vm_vaddr_alloc_page(vm); - memset(addr_gva2hva(vm, msr_gva), 0x0, getpagesize()); - vcpu_args_set(vm, VCPU_ID, 1, msr_gva); - vcpu_enable_cap(vm, VCPU_ID, &cap); - - vcpu_set_hv_cpuid(vm, VCPU_ID); - - best = kvm_get_supported_hv_cpuid(); - - vm_init_descriptor_tables(vm); - vcpu_init_descriptor_tables(vm, VCPU_ID); - vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler); - pr_info("Testing access to Hyper-V specific MSRs\n"); - guest_test_msrs_access(vm, addr_gva2hva(vm, msr_gva), - best); - kvm_vm_free(vm); - - /* Test hypercalls */ - vm = 
vm_create_default(VCPU_ID, 0, guest_hcall); - - vm_init_descriptor_tables(vm); - vcpu_init_descriptor_tables(vm, VCPU_ID); - vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler); - - /* Hypercall input/output */ - hcall_page = vm_vaddr_alloc_pages(vm, 2); - memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize()); - - hcall_params = vm_vaddr_alloc_page(vm); - memset(addr_gva2hva(vm, hcall_params), 0x0, getpagesize()); - - vcpu_args_set(vm, VCPU_ID, 2, addr_gva2gpa(vm, hcall_page), hcall_params); - vcpu_enable_cap(vm, VCPU_ID, &cap); - - vcpu_set_hv_cpuid(vm, VCPU_ID); - - best = kvm_get_supported_hv_cpuid(); + guest_test_msrs_access(); pr_info("Testing access to Hyper-V hypercalls\n"); - guest_test_hcalls_access(vm, addr_gva2hva(vm, hcall_params), - addr_gva2hva(vm, hcall_page), - addr_gva2hva(vm, hcall_page) + getpagesize(), - best); - - kvm_vm_free(vm); + guest_test_hcalls_access(); } diff --git a/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c b/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c index 5ba325cd64bf..29b18d565cf4 100644 --- a/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c +++ b/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c @@ -54,12 +54,15 @@ static struct kvm_vm *sev_vm_create(bool es) return vm; } -static struct kvm_vm *__vm_create(void) +static struct kvm_vm *aux_vm_create(bool with_vcpus) { struct kvm_vm *vm; int i; vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR); + if (!with_vcpus) + return vm; + for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i) vm_vcpu_add(vm, i); @@ -89,11 +92,11 @@ static void test_sev_migrate_from(bool es) { struct kvm_vm *src_vm; struct kvm_vm *dst_vms[NR_MIGRATE_TEST_VMS]; - int i; + int i, ret; src_vm = sev_vm_create(es); for (i = 0; i < NR_MIGRATE_TEST_VMS; ++i) - dst_vms[i] = __vm_create(); + dst_vms[i] = aux_vm_create(true); /* Initial migration from the src to the first dst. */ sev_migrate_from(dst_vms[0]->fd, src_vm->fd); @@ -102,7 +105,10 @@ static void test_sev_migrate_from(bool es) sev_migrate_from(dst_vms[i]->fd, dst_vms[i - 1]->fd); /* Migrate the guest back to the original VM. */ - sev_migrate_from(src_vm->fd, dst_vms[NR_MIGRATE_TEST_VMS - 1]->fd); + ret = __sev_migrate_from(src_vm->fd, dst_vms[NR_MIGRATE_TEST_VMS - 1]->fd); + TEST_ASSERT(ret == -1 && errno == EIO, + "VM that was migrated from should be dead. ret %d, errno: %d\n", ret, + errno); kvm_vm_free(src_vm); for (i = 0; i < NR_MIGRATE_TEST_VMS; ++i) @@ -146,6 +152,8 @@ static void test_sev_migrate_locking(void) for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i) pthread_join(pt[i], NULL); + for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i) + kvm_vm_free(input[i].vm); } static void test_sev_migrate_parameters(void) @@ -157,12 +165,11 @@ static void test_sev_migrate_parameters(void) sev_vm = sev_vm_create(/* es= */ false); sev_es_vm = sev_vm_create(/* es= */ true); vm_no_vcpu = vm_create(VM_MODE_DEFAULT, 0, O_RDWR); - vm_no_sev = __vm_create(); + vm_no_sev = aux_vm_create(true); sev_es_vm_no_vmsa = vm_create(VM_MODE_DEFAULT, 0, O_RDWR); sev_ioctl(sev_es_vm_no_vmsa->fd, KVM_SEV_ES_INIT, NULL); vm_vcpu_add(sev_es_vm_no_vmsa, 1); - ret = __sev_migrate_from(sev_vm->fd, sev_es_vm->fd); TEST_ASSERT( ret == -1 && errno == EINVAL, @@ -191,13 +198,151 @@ static void test_sev_migrate_parameters(void) TEST_ASSERT(ret == -1 && errno == EINVAL, "Migrations require SEV enabled. 
ret %d, errno: %d\n", ret, errno); + + kvm_vm_free(sev_vm); + kvm_vm_free(sev_es_vm); + kvm_vm_free(sev_es_vm_no_vmsa); + kvm_vm_free(vm_no_vcpu); + kvm_vm_free(vm_no_sev); +} + +static int __sev_mirror_create(int dst_fd, int src_fd) +{ + struct kvm_enable_cap cap = { + .cap = KVM_CAP_VM_COPY_ENC_CONTEXT_FROM, + .args = { src_fd } + }; + + return ioctl(dst_fd, KVM_ENABLE_CAP, &cap); +} + + +static void sev_mirror_create(int dst_fd, int src_fd) +{ + int ret; + + ret = __sev_mirror_create(dst_fd, src_fd); + TEST_ASSERT(!ret, "Copying context failed, ret: %d, errno: %d\n", ret, errno); +} + +static void test_sev_mirror(bool es) +{ + struct kvm_vm *src_vm, *dst_vm; + struct kvm_sev_launch_start start = { + .policy = es ? SEV_POLICY_ES : 0 + }; + int i; + + src_vm = sev_vm_create(es); + dst_vm = aux_vm_create(false); + + sev_mirror_create(dst_vm->fd, src_vm->fd); + + /* Check that we can complete creation of the mirror VM. */ + for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i) + vm_vcpu_add(dst_vm, i); + sev_ioctl(dst_vm->fd, KVM_SEV_LAUNCH_START, &start); + if (es) + sev_ioctl(dst_vm->fd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL); + + kvm_vm_free(src_vm); + kvm_vm_free(dst_vm); +} + +static void test_sev_mirror_parameters(void) +{ + struct kvm_vm *sev_vm, *sev_es_vm, *vm_no_vcpu, *vm_with_vcpu; + int ret; + + sev_vm = sev_vm_create(/* es= */ false); + sev_es_vm = sev_vm_create(/* es= */ true); + vm_with_vcpu = aux_vm_create(true); + vm_no_vcpu = aux_vm_create(false); + + ret = __sev_mirror_create(sev_vm->fd, sev_vm->fd); + TEST_ASSERT( + ret == -1 && errno == EINVAL, + "Should not be able copy context to self. ret: %d, errno: %d\n", + ret, errno); + + ret = __sev_mirror_create(sev_vm->fd, sev_es_vm->fd); + TEST_ASSERT( + ret == -1 && errno == EINVAL, + "Should not be able copy context to SEV enabled VM. ret: %d, errno: %d\n", + ret, errno); + + ret = __sev_mirror_create(sev_es_vm->fd, sev_vm->fd); + TEST_ASSERT( + ret == -1 && errno == EINVAL, + "Should not be able copy context to SEV-ES enabled VM. ret: %d, errno: %d\n", + ret, errno); + + ret = __sev_mirror_create(vm_no_vcpu->fd, vm_with_vcpu->fd); + TEST_ASSERT(ret == -1 && errno == EINVAL, + "Copy context requires SEV enabled. ret %d, errno: %d\n", ret, + errno); + + ret = __sev_mirror_create(vm_with_vcpu->fd, sev_vm->fd); + TEST_ASSERT( + ret == -1 && errno == EINVAL, + "SEV copy context requires no vCPUS on the destination. ret: %d, errno: %d\n", + ret, errno); + + kvm_vm_free(sev_vm); + kvm_vm_free(sev_es_vm); + kvm_vm_free(vm_with_vcpu); + kvm_vm_free(vm_no_vcpu); +} + +static void test_sev_move_copy(void) +{ + struct kvm_vm *dst_vm, *sev_vm, *mirror_vm, *dst_mirror_vm; + int ret; + + sev_vm = sev_vm_create(/* es= */ false); + dst_vm = aux_vm_create(true); + mirror_vm = aux_vm_create(false); + dst_mirror_vm = aux_vm_create(false); + + sev_mirror_create(mirror_vm->fd, sev_vm->fd); + ret = __sev_migrate_from(dst_vm->fd, sev_vm->fd); + TEST_ASSERT(ret == -1 && errno == EBUSY, + "Cannot migrate VM that has mirrors. ret %d, errno: %d\n", ret, + errno); + + /* The mirror itself can be migrated. */ + sev_migrate_from(dst_mirror_vm->fd, mirror_vm->fd); + ret = __sev_migrate_from(dst_vm->fd, sev_vm->fd); + TEST_ASSERT(ret == -1 && errno == EBUSY, + "Cannot migrate VM that has mirrors. ret %d, errno: %d\n", ret, + errno); + + /* + * mirror_vm is not a mirror anymore, dst_mirror_vm is. Thus, + * the owner can be copied as soon as dst_mirror_vm is gone. 
+ */ + kvm_vm_free(dst_mirror_vm); + sev_migrate_from(dst_vm->fd, sev_vm->fd); + + kvm_vm_free(mirror_vm); + kvm_vm_free(dst_vm); + kvm_vm_free(sev_vm); } int main(int argc, char *argv[]) { - test_sev_migrate_from(/* es= */ false); - test_sev_migrate_from(/* es= */ true); - test_sev_migrate_locking(); - test_sev_migrate_parameters(); + if (kvm_check_cap(KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM)) { + test_sev_migrate_from(/* es= */ false); + test_sev_migrate_from(/* es= */ true); + test_sev_migrate_locking(); + test_sev_migrate_parameters(); + if (kvm_check_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM)) + test_sev_move_copy(); + } + if (kvm_check_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM)) { + test_sev_mirror(/* es= */ false); + test_sev_mirror(/* es= */ true); + test_sev_mirror_parameters(); + } return 0; } diff --git a/tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c b/tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c index df04f56ce859..30a81038df46 100644 --- a/tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c +++ b/tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c @@ -75,7 +75,7 @@ static void l1_guest_code(struct svm_test_data *svm) vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK; /* No intercepts for real and virtual interrupts */ - vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR | INTERCEPT_VINTR); + vmcb->control.intercept &= ~(BIT(INTERCEPT_INTR) | BIT(INTERCEPT_VINTR)); /* Make a virtual interrupt VINTR_IRQ_NUMBER pending */ vmcb->control.int_ctl |= V_IRQ_MASK | (0x1 << V_INTR_PRIO_SHIFT); diff --git a/tools/testing/selftests/kvm/x86_64/userspace_io_test.c b/tools/testing/selftests/kvm/x86_64/userspace_io_test.c new file mode 100644 index 000000000000..e4bef2e05686 --- /dev/null +++ b/tools/testing/selftests/kvm/x86_64/userspace_io_test.c @@ -0,0 +1,114 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <fcntl.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/ioctl.h> + +#include "test_util.h" + +#include "kvm_util.h" +#include "processor.h" + +#define VCPU_ID 1 + +static void guest_ins_port80(uint8_t *buffer, unsigned int count) +{ + unsigned long end; + + if (count == 2) + end = (unsigned long)buffer + 1; + else + end = (unsigned long)buffer + 8192; + + asm volatile("cld; rep; insb" : "+D"(buffer), "+c"(count) : "d"(0x80) : "memory"); + GUEST_ASSERT_1(count == 0, count); + GUEST_ASSERT_2((unsigned long)buffer == end, buffer, end); +} + +static void guest_code(void) +{ + uint8_t buffer[8192]; + int i; + + /* + * Special case tests. main() will adjust RCX 2 => 1 and 3 => 8192 to + * test that KVM doesn't explode when userspace modifies the "count" on + * a userspace I/O exit. KVM isn't required to play nice with the I/O + * itself as KVM doesn't support manipulating the count, it just needs + * to not explode or overflow a buffer. + */ + guest_ins_port80(buffer, 2); + guest_ins_port80(buffer, 3); + + /* Verify KVM fills the buffer correctly when not stuffing RCX. 
*/ + memset(buffer, 0, sizeof(buffer)); + guest_ins_port80(buffer, 8192); + for (i = 0; i < 8192; i++) + GUEST_ASSERT_2(buffer[i] == 0xaa, i, buffer[i]); + + GUEST_DONE(); +} + +int main(int argc, char *argv[]) +{ + struct kvm_regs regs; + struct kvm_run *run; + struct kvm_vm *vm; + struct ucall uc; + int rc; + + /* Tell stdout not to buffer its content */ + setbuf(stdout, NULL); + + /* Create VM */ + vm = vm_create_default(VCPU_ID, 0, guest_code); + run = vcpu_state(vm, VCPU_ID); + + memset(®s, 0, sizeof(regs)); + + while (1) { + rc = _vcpu_run(vm, VCPU_ID); + + TEST_ASSERT(rc == 0, "vcpu_run failed: %d\n", rc); + TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, + "Unexpected exit reason: %u (%s),\n", + run->exit_reason, + exit_reason_str(run->exit_reason)); + + if (get_ucall(vm, VCPU_ID, &uc)) + break; + + TEST_ASSERT(run->io.port == 0x80, + "Expected I/O at port 0x80, got port 0x%x\n", run->io.port); + + /* + * Modify the rep string count in RCX: 2 => 1 and 3 => 8192. + * Note, this abuses KVM's batching of rep string I/O to avoid + * getting stuck in an infinite loop. That behavior isn't in + * scope from a testing perspective as it's not ABI in any way, + * i.e. it really is abusing internal KVM knowledge. + */ + vcpu_regs_get(vm, VCPU_ID, ®s); + if (regs.rcx == 2) + regs.rcx = 1; + if (regs.rcx == 3) + regs.rcx = 8192; + memset((void *)run + run->io.data_offset, 0xaa, 4096); + vcpu_regs_set(vm, VCPU_ID, ®s); + } + + switch (uc.cmd) { + case UCALL_DONE: + break; + case UCALL_ABORT: + TEST_FAIL("%s at %s:%ld : argN+1 = 0x%lx, argN+2 = 0x%lx", + (const char *)uc.args[0], __FILE__, uc.args[1], + uc.args[2], uc.args[3]); + default: + TEST_FAIL("Unknown ucall %lu", uc.cmd); + } + + kvm_vm_free(vm); + return 0; +} diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index 7615f29831eb..9897fa9ab953 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile @@ -34,6 +34,7 @@ TEST_PROGS += srv6_end_dt46_l3vpn_test.sh TEST_PROGS += srv6_end_dt4_l3vpn_test.sh TEST_PROGS += srv6_end_dt6_l3vpn_test.sh TEST_PROGS += vrf_strict_mode_test.sh +TEST_PROGS += arp_ndisc_evict_nocarrier.sh TEST_PROGS_EXTENDED := in_netns.sh setup_loopback.sh setup_veth.sh TEST_PROGS_EXTENDED += toeplitz_client.sh toeplitz.sh TEST_GEN_FILES = socket nettest diff --git a/tools/testing/selftests/net/fcnal-test.sh b/tools/testing/selftests/net/fcnal-test.sh index 7caa4f0e067d..412d85205546 100755 --- a/tools/testing/selftests/net/fcnal-test.sh +++ b/tools/testing/selftests/net/fcnal-test.sh @@ -462,6 +462,22 @@ cleanup() ip netns del ${NSC} >/dev/null 2>&1 } +cleanup_vrf_dup() +{ + ip link del ${NSA_DEV2} >/dev/null 2>&1 + ip netns pids ${NSC} | xargs kill 2>/dev/null + ip netns del ${NSC} >/dev/null 2>&1 +} + +setup_vrf_dup() +{ + # some VRF tests use ns-C which has the same config as + # ns-B but for a device NOT in the VRF + create_ns ${NSC} "-" "-" + connect_ns ${NSA} ${NSA_DEV2} ${NSA_IP}/24 ${NSA_IP6}/64 \ + ${NSC} ${NSC_DEV} ${NSB_IP}/24 ${NSB_IP6}/64 +} + setup() { local with_vrf=${1} @@ -491,12 +507,6 @@ setup() ip -netns ${NSB} ro add ${VRF_IP}/32 via ${NSA_IP} dev ${NSB_DEV} ip -netns ${NSB} -6 ro add ${VRF_IP6}/128 via ${NSA_IP6} dev ${NSB_DEV} - - # some VRF tests use ns-C which has the same config as - # ns-B but for a device NOT in the VRF - create_ns ${NSC} "-" "-" - connect_ns ${NSA} ${NSA_DEV2} ${NSA_IP}/24 ${NSA_IP6}/64 \ - ${NSC} ${NSC_DEV} ${NSB_IP}/24 ${NSB_IP6}/64 else ip -netns ${NSA} ro add ${NSB_LO_IP}/32 via ${NSB_IP} dev ${NSA_DEV} 
ip -netns ${NSA} ro add ${NSB_LO_IP6}/128 via ${NSB_IP6} dev ${NSA_DEV} @@ -1247,7 +1257,9 @@ ipv4_tcp_vrf() log_test_addr ${a} $? 1 "Global server, local connection" # run MD5 tests + setup_vrf_dup ipv4_tcp_md5 + cleanup_vrf_dup # # enable VRF global server @@ -1813,8 +1825,9 @@ ipv4_addr_bind_vrf() for a in ${NSA_IP} ${VRF_IP} do log_start + show_hint "Socket not bound to VRF, but address is in VRF" run_cmd nettest -s -R -P icmp -l ${a} -b - log_test_addr ${a} $? 0 "Raw socket bind to local address" + log_test_addr ${a} $? 1 "Raw socket bind to local address" log_start run_cmd nettest -s -R -P icmp -l ${a} -I ${NSA_DEV} -b @@ -2215,7 +2228,7 @@ ipv6_ping_vrf() log_start show_hint "Fails since VRF device does not support linklocal or multicast" run_cmd ${ping6} -c1 -w1 ${a} - log_test_addr ${a} $? 2 "ping out, VRF bind" + log_test_addr ${a} $? 1 "ping out, VRF bind" done for a in ${NSB_IP6} ${NSB_LO_IP6} ${NSB_LINKIP6}%${NSA_DEV} ${MCAST}%${NSA_DEV} @@ -2743,7 +2756,9 @@ ipv6_tcp_vrf() log_test_addr ${a} $? 1 "Global server, local connection" # run MD5 tests + setup_vrf_dup ipv6_tcp_md5 + cleanup_vrf_dup # # enable VRF global server @@ -3446,11 +3461,14 @@ ipv6_addr_bind_novrf() run_cmd nettest -6 -s -l ${a} -I ${NSA_DEV} -t1 -b log_test_addr ${a} $? 0 "TCP socket bind to local address after device bind" + # Sadly, the kernel allows binding a socket to a device and then + # binding to an address not on the device. So this test passes + # when it really should not a=${NSA_LO_IP6} log_start - show_hint "Should fail with 'Cannot assign requested address'" + show_hint "Technically should fail since address is not on device but kernel allows" run_cmd nettest -6 -s -l ${a} -I ${NSA_DEV} -t1 -b - log_test_addr ${a} $? 1 "TCP socket bind to out of scope local address" + log_test_addr ${a} $? 0 "TCP socket bind to out of scope local address" } ipv6_addr_bind_vrf() @@ -3499,10 +3517,15 @@ ipv6_addr_bind_vrf() run_cmd nettest -6 -s -l ${a} -I ${NSA_DEV} -t1 -b log_test_addr ${a} $? 0 "TCP socket bind to local address with device bind" + # Sadly, the kernel allows binding a socket to a device and then + # binding to an address not on the device. The only restriction + # is that the address is valid in the L3 domain. So this test + # passes when it really should not a=${VRF_IP6} log_start + show_hint "Technically should fail since address is not on device but kernel allows" run_cmd nettest -6 -s -l ${a} -I ${NSA_DEV} -t1 -b - log_test_addr ${a} $? 1 "TCP socket bind to VRF address with device bind" + log_test_addr ${a} $? 
0 "TCP socket bind to VRF address with device bind" a=${NSA_LO_IP6} log_start @@ -4042,8 +4065,8 @@ EOF ################################################################################ # main -TESTS_IPV4="ipv4_ping ipv4_tcp ipv4_udp ipv4_addr_bind ipv4_runtime ipv4_netfilter" -TESTS_IPV6="ipv6_ping ipv6_tcp ipv6_udp ipv6_addr_bind ipv6_runtime ipv6_netfilter" +TESTS_IPV4="ipv4_ping ipv4_tcp ipv4_udp ipv4_bind ipv4_runtime ipv4_netfilter" +TESTS_IPV6="ipv6_ping ipv6_tcp ipv6_udp ipv6_bind ipv6_runtime ipv6_netfilter" TESTS_OTHER="use_cases" PAUSE_ON_FAIL=no @@ -4108,8 +4131,6 @@ do # setup namespaces and config, but do not run any tests setup) setup; exit 0;; vrf_setup) setup "yes"; exit 0;; - - help) echo "Test names: $TESTS"; exit 0;; esac done @@ -4117,3 +4138,11 @@ cleanup 2>/dev/null printf "\nTests passed: %3d\n" ${nsuccess} printf "Tests failed: %3d\n" ${nfail} + +if [ $nfail -ne 0 ]; then + exit 1 # KSFT_FAIL +elif [ $nsuccess -eq 0 ]; then + exit $ksft_skip +fi + +exit 0 # KSFT_PASS diff --git a/tools/testing/selftests/net/fib_nexthops.sh b/tools/testing/selftests/net/fib_nexthops.sh index b5a69ad191b0..d444ee6aa3cb 100755 --- a/tools/testing/selftests/net/fib_nexthops.sh +++ b/tools/testing/selftests/net/fib_nexthops.sh @@ -629,6 +629,66 @@ ipv6_fcnal() log_test $? 0 "Nexthops removed on admin down" } +ipv6_grp_refs() +{ + if [ ! -x "$(command -v mausezahn)" ]; then + echo "SKIP: Could not run test; need mausezahn tool" + return + fi + + run_cmd "$IP link set dev veth1 up" + run_cmd "$IP link add veth1.10 link veth1 up type vlan id 10" + run_cmd "$IP link add veth1.20 link veth1 up type vlan id 20" + run_cmd "$IP -6 addr add 2001:db8:91::1/64 dev veth1.10" + run_cmd "$IP -6 addr add 2001:db8:92::1/64 dev veth1.20" + run_cmd "$IP -6 neigh add 2001:db8:91::2 lladdr 00:11:22:33:44:55 dev veth1.10" + run_cmd "$IP -6 neigh add 2001:db8:92::2 lladdr 00:11:22:33:44:55 dev veth1.20" + run_cmd "$IP nexthop add id 100 via 2001:db8:91::2 dev veth1.10" + run_cmd "$IP nexthop add id 101 via 2001:db8:92::2 dev veth1.20" + run_cmd "$IP nexthop add id 102 group 100" + run_cmd "$IP route add 2001:db8:101::1/128 nhid 102" + + # create per-cpu dsts through nh 100 + run_cmd "ip netns exec me mausezahn -6 veth1.10 -B 2001:db8:101::1 -A 2001:db8:91::1 -c 5 -t tcp "dp=1-1023, flags=syn" >/dev/null 2>&1" + + # remove nh 100 from the group to delete the route potentially leaving + # a stale per-cpu dst which holds a reference to the nexthop's net + # device and to the IPv6 route + run_cmd "$IP nexthop replace id 102 group 101" + run_cmd "$IP route del 2001:db8:101::1/128" + + # add both nexthops to the group so a reference is taken on them + run_cmd "$IP nexthop replace id 102 group 100/101" + + # if the bug described in commit "net: nexthop: release IPv6 per-cpu + # dsts when replacing a nexthop group" exists at this point we have + # an unlinked IPv6 route (but not freed due to stale dst) with a + # reference over the group so we delete the group which will again + # only unlink it due to the route reference + run_cmd "$IP nexthop del id 102" + + # delete the nexthop with stale dst, since we have an unlinked + # group with a ref to it and an unlinked IPv6 route with ref to the + # group, the nh will only be unlinked and not freed so the stale dst + # remains forever and we get a net device refcount imbalance + run_cmd "$IP nexthop del id 100" + + # if a reference was lost this command will hang because the net device + # cannot be removed + timeout -s KILL 5 ip netns exec me ip link del veth1.10 
>/dev/null 2>&1 + + # we can't cleanup if the command is hung trying to delete the netdev + if [ $? -eq 137 ]; then + return 1 + fi + + # cleanup + run_cmd "$IP link del veth1.20" + run_cmd "$IP nexthop flush" + + return 0 +} + ipv6_grp_fcnal() { local rc @@ -734,6 +794,9 @@ ipv6_grp_fcnal() run_cmd "$IP nexthop add id 108 group 31/24" log_test $? 2 "Nexthop group can not have a blackhole and another nexthop" + + ipv6_grp_refs + log_test $? 0 "Nexthop group replace refcounts" } ipv6_res_grp_fcnal() diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh index 5abe92d55b69..996af1ae3d3d 100755 --- a/tools/testing/selftests/net/fib_tests.sh +++ b/tools/testing/selftests/net/fib_tests.sh @@ -444,24 +444,63 @@ fib_rp_filter_test() setup set -e + ip netns add ns2 + ip netns set ns2 auto + + ip -netns ns2 link set dev lo up + + $IP link add name veth1 type veth peer name veth2 + $IP link set dev veth2 netns ns2 + $IP address add 192.0.2.1/24 dev veth1 + ip -netns ns2 address add 192.0.2.1/24 dev veth2 + $IP link set dev veth1 up + ip -netns ns2 link set dev veth2 up + $IP link set dev lo address 52:54:00:6a:c7:5e - $IP link set dummy0 address 52:54:00:6a:c7:5e - $IP link add dummy1 type dummy - $IP link set dummy1 address 52:54:00:6a:c7:5e - $IP link set dev dummy1 up + $IP link set dev veth1 address 52:54:00:6a:c7:5e + ip -netns ns2 link set dev lo address 52:54:00:6a:c7:5e + ip -netns ns2 link set dev veth2 address 52:54:00:6a:c7:5e + + # 1. (ns2) redirect lo's egress to veth2's egress + ip netns exec ns2 tc qdisc add dev lo parent root handle 1: fq_codel + ip netns exec ns2 tc filter add dev lo parent 1: protocol arp basic \ + action mirred egress redirect dev veth2 + ip netns exec ns2 tc filter add dev lo parent 1: protocol ip basic \ + action mirred egress redirect dev veth2 + + # 2. (ns1) redirect veth1's ingress to lo's ingress + $NS_EXEC tc qdisc add dev veth1 ingress + $NS_EXEC tc filter add dev veth1 ingress protocol arp basic \ + action mirred ingress redirect dev lo + $NS_EXEC tc filter add dev veth1 ingress protocol ip basic \ + action mirred ingress redirect dev lo + + # 3. (ns1) redirect lo's egress to veth1's egress + $NS_EXEC tc qdisc add dev lo parent root handle 1: fq_codel + $NS_EXEC tc filter add dev lo parent 1: protocol arp basic \ + action mirred egress redirect dev veth1 + $NS_EXEC tc filter add dev lo parent 1: protocol ip basic \ + action mirred egress redirect dev veth1 + + # 4. 
(ns2) redirect veth2's ingress to lo's ingress + ip netns exec ns2 tc qdisc add dev veth2 ingress + ip netns exec ns2 tc filter add dev veth2 ingress protocol arp basic \ + action mirred ingress redirect dev lo + ip netns exec ns2 tc filter add dev veth2 ingress protocol ip basic \ + action mirred ingress redirect dev lo + $NS_EXEC sysctl -qw net.ipv4.conf.all.rp_filter=1 $NS_EXEC sysctl -qw net.ipv4.conf.all.accept_local=1 $NS_EXEC sysctl -qw net.ipv4.conf.all.route_localnet=1 - - $NS_EXEC tc qd add dev dummy1 parent root handle 1: fq_codel - $NS_EXEC tc filter add dev dummy1 parent 1: protocol arp basic action mirred egress redirect dev lo - $NS_EXEC tc filter add dev dummy1 parent 1: protocol ip basic action mirred egress redirect dev lo + ip netns exec ns2 sysctl -qw net.ipv4.conf.all.rp_filter=1 + ip netns exec ns2 sysctl -qw net.ipv4.conf.all.accept_local=1 + ip netns exec ns2 sysctl -qw net.ipv4.conf.all.route_localnet=1 set +e - run_cmd "ip netns exec ns1 ping -I dummy1 -w1 -c1 198.51.100.1" + run_cmd "ip netns exec ns2 ping -w1 -c1 192.0.2.1" log_test $? 0 "rp_filter passes local packets" - run_cmd "ip netns exec ns1 ping -I dummy1 -w1 -c1 127.0.0.1" + run_cmd "ip netns exec ns2 ping -w1 -c1 127.0.0.1" log_test $? 0 "rp_filter passes loopback packets" cleanup diff --git a/tools/testing/selftests/net/forwarding/bridge_vlan_mcast.sh b/tools/testing/selftests/net/forwarding/bridge_vlan_mcast.sh new file mode 100755 index 000000000000..8748d1b1d95b --- /dev/null +++ b/tools/testing/selftests/net/forwarding/bridge_vlan_mcast.sh @@ -0,0 +1,543 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +ALL_TESTS="vlmc_control_test vlmc_querier_test vlmc_igmp_mld_version_test \ + vlmc_last_member_test vlmc_startup_query_test vlmc_membership_test \ + vlmc_querier_intvl_test vlmc_query_intvl_test vlmc_query_response_intvl_test \ + vlmc_router_port_test vlmc_filtering_test" +NUM_NETIFS=4 +CHECK_TC="yes" +TEST_GROUP="239.10.10.10" + +source lib.sh + +h1_create() +{ + simple_if_init $h1 192.0.2.1/24 2001:db8:1::1/64 + ip link add l $h1 $h1.10 up type vlan id 10 +} + +h1_destroy() +{ + ip link del $h1.10 + simple_if_fini $h1 192.0.2.1/24 2001:db8:1::1/64 +} + +h2_create() +{ + simple_if_init $h2 192.0.2.2/24 2001:db8:1::2/64 + ip link add l $h2 $h2.10 up type vlan id 10 +} + +h2_destroy() +{ + ip link del $h2.10 + simple_if_fini $h2 192.0.2.2/24 2001:db8:1::2/64 +} + +switch_create() +{ + ip link add dev br0 type bridge mcast_snooping 1 mcast_querier 1 vlan_filtering 1 + + ip link set dev $swp1 master br0 + ip link set dev $swp2 master br0 + + ip link set dev br0 up + ip link set dev $swp1 up + ip link set dev $swp2 up + + tc qdisc add dev $swp1 clsact + tc qdisc add dev $swp2 clsact + + bridge vlan add vid 10-11 dev $swp1 master + bridge vlan add vid 10-11 dev $swp2 master + + ip link set dev br0 type bridge mcast_vlan_snooping 1 + check_err $? "Could not enable global vlan multicast snooping" + log_test "Vlan multicast snooping enable" +} + +switch_destroy() +{ + ip link set dev $swp2 down + ip link set dev $swp1 down + + ip link del dev br0 +} + +setup_prepare() +{ + h1=${NETIFS[p1]} + swp1=${NETIFS[p2]} + + swp2=${NETIFS[p3]} + h2=${NETIFS[p4]} + + vrf_prepare + + h1_create + h2_create + + switch_create +} + +cleanup() +{ + pre_cleanup + + switch_destroy + + h2_destroy + h1_destroy + + vrf_cleanup +} + +vlmc_v2join_test() +{ + local expect=$1 + + RET=0 + ip address add dev $h2.10 $TEST_GROUP/32 autojoin + check_err $? 
"Could not join $TEST_GROUP" + + sleep 5 + bridge -j mdb show dev br0 | + jq -e ".[].mdb[] | select(.grp == \"$TEST_GROUP\" and .vid == 10)" &>/dev/null + if [ $expect -eq 0 ]; then + check_err $? "IGMPv2 report didn't create mdb entry for $TEST_GROUP" + else + check_fail $? "IGMPv2 report shouldn't have created mdb entry for $TEST_GROUP" + fi + + # check if we need to cleanup + if [ $RET -eq 0 ]; then + ip address del dev $h2.10 $TEST_GROUP/32 2>&1 1>/dev/null + sleep 5 + bridge -j mdb show dev br0 | + jq -e ".[].mdb[] | select(.grp == \"$TEST_GROUP\" and \ + .vid == 10)" &>/dev/null + check_fail $? "IGMPv2 leave didn't remove mdb entry for $TEST_GROUP" + fi +} + +vlmc_control_test() +{ + RET=0 + local goutput=`bridge -j vlan global show` + echo -n $goutput | + jq -e ".[].vlans[] | select(.vlan == 10)" &>/dev/null + check_err $? "Could not find vlan 10's global options" + log_test "Vlan global options existence" + + RET=0 + echo -n $goutput | + jq -e ".[].vlans[] | select(.vlan == 10 and .mcast_snooping == 1) " &>/dev/null + check_err $? "Wrong default mcast_snooping global option value" + log_test "Vlan mcast_snooping global option default value" + + RET=0 + vlmc_v2join_test 0 + bridge vlan global set vid 10 dev br0 mcast_snooping 0 + check_err $? "Could not disable multicast snooping in vlan 10" + vlmc_v2join_test 1 + log_test "Vlan 10 multicast snooping control" +} + +# setup for general query counting +vlmc_query_cnt_xstats() +{ + local type=$1 + local version=$2 + local dev=$3 + + ip -j link xstats type bridge_slave dev $dev | \ + jq -e ".[].multicast.${type}_queries.tx_v${version}" +} + +vlmc_query_cnt_setup() +{ + local type=$1 + local dev=$2 + + if [[ $type == "igmp" ]]; then + tc filter add dev $dev egress pref 10 prot 802.1Q \ + flower vlan_id 10 vlan_ethtype ipv4 dst_ip 224.0.0.1 ip_proto 2 \ + action pass + else + tc filter add dev $dev egress pref 10 prot 802.1Q \ + flower vlan_id 10 vlan_ethtype ipv6 dst_ip ff02::1 ip_proto icmpv6 \ + action pass + fi + + ip link set dev br0 type bridge mcast_stats_enabled 1 +} + +vlmc_query_cnt_cleanup() +{ + local dev=$1 + + ip link set dev br0 type bridge mcast_stats_enabled 0 + tc filter del dev $dev egress pref 10 +} + +vlmc_check_query() +{ + local type=$1 + local version=$2 + local dev=$3 + local expect=$4 + local time=$5 + local ret=0 + + vlmc_query_cnt_setup $type $dev + + local pre_tx_xstats=$(vlmc_query_cnt_xstats $type $version $dev) + bridge vlan global set vid 10 dev br0 mcast_snooping 1 mcast_querier 1 + ret=$? + if [[ $ret -eq 0 ]]; then + sleep $time + + local tcstats=$(tc_rule_stats_get $dev 10 egress) + local post_tx_xstats=$(vlmc_query_cnt_xstats $type $version $dev) + + if [[ $tcstats != $expect || \ + $(($post_tx_xstats-$pre_tx_xstats)) != $expect || \ + $tcstats != $(($post_tx_xstats-$pre_tx_xstats)) ]]; then + ret=1 + fi + fi + + bridge vlan global set vid 10 dev br0 mcast_snooping 1 mcast_querier 0 + vlmc_query_cnt_cleanup $dev + + return $ret +} + +vlmc_querier_test() +{ + RET=0 + local goutput=`bridge -j vlan global show` + echo -n $goutput | + jq -e ".[].vlans[] | select(.vlan == 10)" &>/dev/null + check_err $? "Could not find vlan 10's global options" + + echo -n $goutput | + jq -e ".[].vlans[] | select(.vlan == 10 and .mcast_querier == 0) " &>/dev/null + check_err $? "Wrong default mcast_querier global vlan option value" + log_test "Vlan mcast_querier global option default value" + + RET=0 + bridge vlan global set vid 10 dev br0 mcast_snooping 1 mcast_querier 1 + check_err $? 
"Could not enable querier in vlan 10" + log_test "Vlan 10 multicast querier enable" + bridge vlan global set vid 10 dev br0 mcast_snooping 1 mcast_querier 0 + + RET=0 + vlmc_check_query igmp 2 $swp1 1 1 + check_err $? "No vlan tagged IGMPv2 general query packets sent" + log_test "Vlan 10 tagged IGMPv2 general query sent" + + RET=0 + vlmc_check_query mld 1 $swp1 1 1 + check_err $? "No vlan tagged MLD general query packets sent" + log_test "Vlan 10 tagged MLD general query sent" +} + +vlmc_igmp_mld_version_test() +{ + RET=0 + local goutput=`bridge -j vlan global show` + echo -n $goutput | + jq -e ".[].vlans[] | select(.vlan == 10)" &>/dev/null + check_err $? "Could not find vlan 10's global options" + + echo -n $goutput | + jq -e ".[].vlans[] | select(.vlan == 10 and .mcast_igmp_version == 2) " &>/dev/null + check_err $? "Wrong default mcast_igmp_version global vlan option value" + log_test "Vlan mcast_igmp_version global option default value" + + RET=0 + echo -n $goutput | + jq -e ".[].vlans[] | select(.vlan == 10 and .mcast_mld_version == 1) " &>/dev/null + check_err $? "Wrong default mcast_mld_version global vlan option value" + log_test "Vlan mcast_mld_version global option default value" + + RET=0 + bridge vlan global set vid 10 dev br0 mcast_snooping 1 mcast_igmp_version 3 + check_err $? "Could not set mcast_igmp_version in vlan 10" + log_test "Vlan 10 mcast_igmp_version option changed to 3" + + RET=0 + vlmc_check_query igmp 3 $swp1 1 1 + check_err $? "No vlan tagged IGMPv3 general query packets sent" + log_test "Vlan 10 tagged IGMPv3 general query sent" + + RET=0 + bridge vlan global set vid 10 dev br0 mcast_snooping 1 mcast_mld_version 2 + check_err $? "Could not set mcast_mld_version in vlan 10" + log_test "Vlan 10 mcast_mld_version option changed to 2" + + RET=0 + vlmc_check_query mld 2 $swp1 1 1 + check_err $? "No vlan tagged MLDv2 general query packets sent" + log_test "Vlan 10 tagged MLDv2 general query sent" + + bridge vlan global set vid 10 dev br0 mcast_snooping 1 mcast_igmp_version 2 + bridge vlan global set vid 10 dev br0 mcast_snooping 1 mcast_mld_version 1 +} + +vlmc_last_member_test() +{ + RET=0 + local goutput=`bridge -j vlan global show` + echo -n $goutput | + jq -e ".[].vlans[] | select(.vlan == 10)" &>/dev/null + check_err $? "Could not find vlan 10's global options" + + echo -n $goutput | + jq -e ".[].vlans[] | select(.vlan == 10 and \ + .mcast_last_member_count == 2) " &>/dev/null + check_err $? "Wrong default mcast_last_member_count global vlan option value" + log_test "Vlan mcast_last_member_count global option default value" + + RET=0 + echo -n $goutput | + jq -e ".[].vlans[] | select(.vlan == 10 and \ + .mcast_last_member_interval == 100) " &>/dev/null + check_err $? "Wrong default mcast_last_member_interval global vlan option value" + log_test "Vlan mcast_last_member_interval global option default value" + + RET=0 + bridge vlan global set vid 10 dev br0 mcast_snooping 1 mcast_last_member_count 3 + check_err $? "Could not set mcast_last_member_count in vlan 10" + log_test "Vlan 10 mcast_last_member_count option changed to 3" + bridge vlan global set vid 10 dev br0 mcast_snooping 1 mcast_last_member_count 2 + + RET=0 + bridge vlan global set vid 10 dev br0 mcast_snooping 1 mcast_last_member_interval 200 + check_err $? 
"Could not set mcast_last_member_interval in vlan 10" + log_test "Vlan 10 mcast_last_member_interval option changed to 200" + bridge vlan global set vid 10 dev br0 mcast_snooping 1 mcast_last_member_interval 100 +} + +vlmc_startup_query_test() +{ + RET=0 + local goutput=`bridge -j vlan global show` + echo -n $goutput | + jq -e ".[].vlans[] | select(.vlan == 10)" &>/dev/null + check_err $? "Could not find vlan 10's global options" + + echo -n $goutput | + jq -e ".[].vlans[] | select(.vlan == 10 and \ + .mcast_startup_query_interval == 3125) " &>/dev/null + check_err $? "Wrong default mcast_startup_query_interval global vlan option value" + log_test "Vlan mcast_startup_query_interval global option default value" + + RET=0 + echo -n $goutput | + jq -e ".[].vlans[] | select(.vlan == 10 and \ + .mcast_startup_query_count == 2) " &>/dev/null + check_err $? "Wrong default mcast_startup_query_count global vlan option value" + log_test "Vlan mcast_startup_query_count global option default value" + + RET=0 + bridge vlan global set vid 10 dev br0 mcast_snooping 1 mcast_startup_query_interval 100 + check_err $? "Could not set mcast_startup_query_interval in vlan 10" + vlmc_check_query igmp 2 $swp1 2 3 + check_err $? "Wrong number of tagged IGMPv2 general queries sent" + log_test "Vlan 10 mcast_startup_query_interval option changed to 100" + + RET=0 + bridge vlan global set vid 10 dev br0 mcast_snooping 1 mcast_startup_query_count 3 + check_err $? "Could not set mcast_startup_query_count in vlan 10" + vlmc_check_query igmp 2 $swp1 3 4 + check_err $? "Wrong number of tagged IGMPv2 general queries sent" + log_test "Vlan 10 mcast_startup_query_count option changed to 3" + + bridge vlan global set vid 10 dev br0 mcast_snooping 1 mcast_startup_query_interval 3125 + bridge vlan global set vid 10 dev br0 mcast_snooping 1 mcast_startup_query_count 2 +} + +vlmc_membership_test() +{ + RET=0 + local goutput=`bridge -j vlan global show` + echo -n $goutput | + jq -e ".[].vlans[] | select(.vlan == 10)" &>/dev/null + check_err $? "Could not find vlan 10's global options" + + echo -n $goutput | + jq -e ".[].vlans[] | select(.vlan == 10 and \ + .mcast_membership_interval == 26000) " &>/dev/null + check_err $? "Wrong default mcast_membership_interval global vlan option value" + log_test "Vlan mcast_membership_interval global option default value" + + RET=0 + bridge vlan global set vid 10 dev br0 mcast_snooping 1 mcast_membership_interval 200 + check_err $? "Could not set mcast_membership_interval in vlan 10" + log_test "Vlan 10 mcast_membership_interval option changed to 200" + + RET=0 + vlmc_v2join_test 1 + log_test "Vlan 10 mcast_membership_interval mdb entry expire" + + bridge vlan global set vid 10 dev br0 mcast_snooping 1 mcast_membership_interval 26000 +} + +vlmc_querier_intvl_test() +{ + RET=0 + local goutput=`bridge -j vlan global show` + echo -n $goutput | + jq -e ".[].vlans[] | select(.vlan == 10)" &>/dev/null + check_err $? "Could not find vlan 10's global options" + + echo -n $goutput | + jq -e ".[].vlans[] | select(.vlan == 10 and \ + .mcast_querier_interval == 25500) " &>/dev/null + check_err $? "Wrong default mcast_querier_interval global vlan option value" + log_test "Vlan mcast_querier_interval global option default value" + + RET=0 + bridge vlan global set vid 10 dev br0 mcast_snooping 1 mcast_querier_interval 100 + check_err $? 
"Could not set mcast_querier_interval in vlan 10" + log_test "Vlan 10 mcast_querier_interval option changed to 100" + + RET=0 + ip link add dev br1 type bridge mcast_snooping 1 mcast_querier 1 vlan_filtering 1 \ + mcast_vlan_snooping 1 + bridge vlan add vid 10 dev br1 self pvid untagged + ip link set dev $h1 master br1 + ip link set dev br1 up + bridge vlan add vid 10 dev $h1 master + bridge vlan global set vid 10 dev br1 mcast_snooping 1 mcast_querier 1 + sleep 2 + ip link del dev br1 + ip addr replace 2001:db8:1::1/64 dev $h1 + vlmc_check_query igmp 2 $swp1 1 1 + check_err $? "Wrong number of IGMPv2 general queries after querier interval" + log_test "Vlan 10 mcast_querier_interval expire after outside query" + + bridge vlan global set vid 10 dev br0 mcast_snooping 1 mcast_querier_interval 25500 +} + +vlmc_query_intvl_test() +{ + RET=0 + local goutput=`bridge -j vlan global show` + echo -n $goutput | + jq -e ".[].vlans[] | select(.vlan == 10)" &>/dev/null + check_err $? "Could not find vlan 10's global options" + + echo -n $goutput | + jq -e ".[].vlans[] | select(.vlan == 10 and \ + .mcast_query_interval == 12500) " &>/dev/null + check_err $? "Wrong default mcast_query_interval global vlan option value" + log_test "Vlan mcast_query_interval global option default value" + + RET=0 + bridge vlan global set vid 10 dev br0 mcast_snooping 1 mcast_startup_query_count 0 + bridge vlan global set vid 10 dev br0 mcast_snooping 1 mcast_query_interval 200 + check_err $? "Could not set mcast_query_interval in vlan 10" + # 1 is sent immediately, then 2 more in the next 5 seconds + vlmc_check_query igmp 2 $swp1 3 5 + check_err $? "Wrong number of tagged IGMPv2 general queries sent" + log_test "Vlan 10 mcast_query_interval option changed to 200" + + bridge vlan global set vid 10 dev br0 mcast_snooping 1 mcast_startup_query_count 2 + bridge vlan global set vid 10 dev br0 mcast_snooping 1 mcast_query_interval 12500 +} + +vlmc_query_response_intvl_test() +{ + RET=0 + local goutput=`bridge -j vlan global show` + echo -n $goutput | + jq -e ".[].vlans[] | select(.vlan == 10)" &>/dev/null + check_err $? "Could not find vlan 10's global options" + + echo -n $goutput | + jq -e ".[].vlans[] | select(.vlan == 10 and \ + .mcast_query_response_interval == 1000) " &>/dev/null + check_err $? "Wrong default mcast_query_response_interval global vlan option value" + log_test "Vlan mcast_query_response_interval global option default value" + + RET=0 + bridge vlan global set vid 10 dev br0 mcast_snooping 1 mcast_query_response_interval 200 + check_err $? "Could not set mcast_query_response_interval in vlan 10" + log_test "Vlan 10 mcast_query_response_interval option changed to 200" + + bridge vlan global set vid 10 dev br0 mcast_snooping 1 mcast_query_response_interval 1000 +} + +vlmc_router_port_test() +{ + RET=0 + local goutput=`bridge -j -d vlan show` + echo -n $goutput | + jq -e ".[] | select(.ifname == \"$swp1\" and \ + .vlans[].vlan == 10)" &>/dev/null + check_err $? "Could not find port vlan 10's options" + + echo -n $goutput | + jq -e ".[] | select(.ifname == \"$swp1\" and \ + .vlans[].vlan == 10 and \ + .vlans[].mcast_router == 1)" &>/dev/null + check_err $? "Wrong default port mcast_router option value" + log_test "Port vlan 10 option mcast_router default value" + + RET=0 + bridge vlan set vid 10 dev $swp1 mcast_router 2 + check_err $? 
"Could not set port vlan 10's mcast_router option" + log_test "Port vlan 10 mcast_router option changed to 2" + + RET=0 + tc filter add dev $swp1 egress pref 10 prot 802.1Q \ + flower vlan_id 10 vlan_ethtype ipv4 dst_ip 239.1.1.1 ip_proto udp action pass + tc filter add dev $swp2 egress pref 10 prot 802.1Q \ + flower vlan_id 10 vlan_ethtype ipv4 dst_ip 239.1.1.1 ip_proto udp action pass + bridge vlan set vid 10 dev $swp2 mcast_router 0 + # we need to enable querier and disable query response interval to + # make sure packets are flooded only to router ports + bridge vlan global set vid 10 dev br0 mcast_snooping 1 mcast_querier 1 \ + mcast_query_response_interval 0 + bridge vlan add vid 10 dev br0 self + sleep 1 + mausezahn br0 -Q 10 -c 10 -p 128 -b 01:00:5e:01:01:01 -B 239.1.1.1 \ + -t udp "dp=1024" &>/dev/null + local swp1_tcstats=$(tc_rule_stats_get $swp1 10 egress) + if [[ $swp1_tcstats != 10 ]]; then + check_err 1 "Wrong number of vlan 10 multicast packets flooded" + fi + local swp2_tcstats=$(tc_rule_stats_get $swp2 10 egress) + check_err $swp2_tcstats "Vlan 10 multicast packets flooded to non-router port" + log_test "Flood unknown vlan multicast packets to router port only" + + tc filter del dev $swp2 egress pref 10 + tc filter del dev $swp1 egress pref 10 + bridge vlan del vid 10 dev br0 self + bridge vlan global set vid 10 dev br0 mcast_snooping 1 mcast_query_response_interval 1000 + bridge vlan set vid 10 dev $swp2 mcast_router 1 + bridge vlan set vid 10 dev $swp1 mcast_router 1 +} + +vlmc_filtering_test() +{ + RET=0 + ip link set dev br0 type bridge vlan_filtering 0 + ip -j -d link show dev br0 | \ + jq -e "select(.[0].linkinfo.info_data.mcast_vlan_snooping == 1)" &>/dev/null + check_fail $? "Vlan filtering is disabled but multicast vlan snooping is still enabled" + log_test "Disable multicast vlan snooping when vlan filtering is disabled" +} + +trap cleanup EXIT + +setup_prepare +setup_wait + +tests_run + +exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/forwarding/forwarding.config.sample b/tools/testing/selftests/net/forwarding/forwarding.config.sample index bf17e485684f..b0980a2efa31 100644 --- a/tools/testing/selftests/net/forwarding/forwarding.config.sample +++ b/tools/testing/selftests/net/forwarding/forwarding.config.sample @@ -13,6 +13,8 @@ NETIFS[p5]=veth4 NETIFS[p6]=veth5 NETIFS[p7]=veth6 NETIFS[p8]=veth7 +NETIFS[p9]=veth8 +NETIFS[p10]=veth9 # Port that does not have a cable connected. 
NETIF_NO_CABLE=eth8 diff --git a/tools/testing/selftests/net/gro.c b/tools/testing/selftests/net/gro.c index cf37ce86b0fd..ee0b03f3dd06 100644 --- a/tools/testing/selftests/net/gro.c +++ b/tools/testing/selftests/net/gro.c @@ -64,10 +64,6 @@ #define NUM_PACKETS 4 #define START_SEQ 100 #define START_ACK 100 -#define SIP6 "fdaa::2" -#define DIP6 "fdaa::1" -#define SIP4 "192.168.1.200" -#define DIP4 "192.168.1.100" #define ETH_P_NONE 0 #define TOTAL_HDR_LEN (ETH_HLEN + sizeof(struct ipv6hdr) + sizeof(struct tcphdr)) #define MSS (4096 - sizeof(struct tcphdr) - sizeof(struct ipv6hdr)) @@ -75,6 +71,10 @@ #define NUM_LARGE_PKT (MAX_PAYLOAD / MSS) #define MAX_HDR_LEN (ETH_HLEN + sizeof(struct ipv6hdr) + sizeof(struct tcphdr)) +static const char *addr6_src = "fdaa::2"; +static const char *addr6_dst = "fdaa::1"; +static const char *addr4_src = "192.168.1.200"; +static const char *addr4_dst = "192.168.1.100"; static int proto = -1; static uint8_t src_mac[ETH_ALEN], dst_mac[ETH_ALEN]; static char *testname = "data"; @@ -178,18 +178,18 @@ static uint16_t tcp_checksum(void *buf, int payload_len) uint32_t sum = 0; if (proto == PF_INET6) { - if (inet_pton(AF_INET6, SIP6, &ph6.saddr) != 1) + if (inet_pton(AF_INET6, addr6_src, &ph6.saddr) != 1) error(1, errno, "inet_pton6 source ip pseudo"); - if (inet_pton(AF_INET6, DIP6, &ph6.daddr) != 1) + if (inet_pton(AF_INET6, addr6_dst, &ph6.daddr) != 1) error(1, errno, "inet_pton6 dest ip pseudo"); ph6.protocol = htons(IPPROTO_TCP); ph6.payload_len = htons(sizeof(struct tcphdr) + payload_len); sum = checksum_nofold(&ph6, sizeof(ph6), 0); } else if (proto == PF_INET) { - if (inet_pton(AF_INET, SIP4, &ph4.saddr) != 1) + if (inet_pton(AF_INET, addr4_src, &ph4.saddr) != 1) error(1, errno, "inet_pton source ip pseudo"); - if (inet_pton(AF_INET, DIP4, &ph4.daddr) != 1) + if (inet_pton(AF_INET, addr4_dst, &ph4.daddr) != 1) error(1, errno, "inet_pton dest ip pseudo"); ph4.protocol = htons(IPPROTO_TCP); ph4.payload_len = htons(sizeof(struct tcphdr) + payload_len); @@ -229,9 +229,9 @@ static void fill_networklayer(void *buf, int payload_len) ip6h->payload_len = htons(sizeof(struct tcphdr) + payload_len); ip6h->nexthdr = IPPROTO_TCP; ip6h->hop_limit = 8; - if (inet_pton(AF_INET6, SIP6, &ip6h->saddr) != 1) + if (inet_pton(AF_INET6, addr6_src, &ip6h->saddr) != 1) error(1, errno, "inet_pton source ip6"); - if (inet_pton(AF_INET6, DIP6, &ip6h->daddr) != 1) + if (inet_pton(AF_INET6, addr6_dst, &ip6h->daddr) != 1) error(1, errno, "inet_pton dest ip6"); } else if (proto == PF_INET) { memset(iph, 0, sizeof(*iph)); @@ -243,9 +243,9 @@ static void fill_networklayer(void *buf, int payload_len) iph->tot_len = htons(sizeof(struct tcphdr) + payload_len + sizeof(struct iphdr)); iph->frag_off = htons(0x4000); /* DF = 1, MF = 0 */ - if (inet_pton(AF_INET, SIP4, &iph->saddr) != 1) + if (inet_pton(AF_INET, addr4_src, &iph->saddr) != 1) error(1, errno, "inet_pton source ip"); - if (inet_pton(AF_INET, DIP4, &iph->daddr) != 1) + if (inet_pton(AF_INET, addr4_dst, &iph->daddr) != 1) error(1, errno, "inet_pton dest ip"); iph->check = checksum_fold(buf, sizeof(struct iphdr), 0); } @@ -731,7 +731,7 @@ static void set_timeout(int fd) { struct timeval timeout; - timeout.tv_sec = 120; + timeout.tv_sec = 3; timeout.tv_usec = 0; if (setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, (char *)&timeout, sizeof(timeout)) < 0) @@ -1023,11 +1023,13 @@ static void gro_receiver(void) static void parse_args(int argc, char **argv) { static const struct option opts[] = { + { "daddr", required_argument, NULL, 'd' }, { "dmac", 
required_argument, NULL, 'D' }, { "iface", required_argument, NULL, 'i' }, { "ipv4", no_argument, NULL, '4' }, { "ipv6", no_argument, NULL, '6' }, { "rx", no_argument, NULL, 'r' }, + { "saddr", required_argument, NULL, 's' }, { "smac", required_argument, NULL, 'S' }, { "test", required_argument, NULL, 't' }, { "verbose", no_argument, NULL, 'v' }, @@ -1035,7 +1037,7 @@ static void parse_args(int argc, char **argv) }; int c; - while ((c = getopt_long(argc, argv, "46D:i:rS:t:v", opts, NULL)) != -1) { + while ((c = getopt_long(argc, argv, "46d:D:i:rs:S:t:v", opts, NULL)) != -1) { switch (c) { case '4': proto = PF_INET; @@ -1045,6 +1047,9 @@ static void parse_args(int argc, char **argv) proto = PF_INET6; ethhdr_proto = htons(ETH_P_IPV6); break; + case 'd': + addr4_dst = addr6_dst = optarg; + break; case 'D': dmac = optarg; break; @@ -1054,6 +1059,9 @@ static void parse_args(int argc, char **argv) case 'r': tx_socket = false; break; + case 's': + addr4_src = addr6_src = optarg; + break; case 'S': smac = optarg; break; @@ -1091,5 +1099,7 @@ int main(int argc, char **argv) gro_sender(); else gro_receiver(); + + fprintf(stderr, "Gro::%s test passed.\n", testname); return 0; } diff --git a/tools/testing/selftests/net/icmp_redirect.sh b/tools/testing/selftests/net/icmp_redirect.sh index ecbf57f264ed..7b9d6e31b8e7 100755 --- a/tools/testing/selftests/net/icmp_redirect.sh +++ b/tools/testing/selftests/net/icmp_redirect.sh @@ -311,7 +311,7 @@ check_exception() ip -netns h1 ro get ${H1_VRF_ARG} ${H2_N2_IP} | \ grep -E -v 'mtu|redirected' | grep -q "cache" fi - log_test $? 0 "IPv4: ${desc}" + log_test $? 0 "IPv4: ${desc}" 0 # No PMTU info for test "redirect" and "mtu exception plus redirect" if [ "$with_redirect" = "yes" ] && [ "$desc" != "redirect exception plus mtu" ]; then diff --git a/tools/testing/selftests/net/mptcp/.gitignore b/tools/testing/selftests/net/mptcp/.gitignore index 7569d892967a..49daae73c41e 100644 --- a/tools/testing/selftests/net/mptcp/.gitignore +++ b/tools/testing/selftests/net/mptcp/.gitignore @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only mptcp_connect +mptcp_inq mptcp_sockopt pm_nl_ctl *.pcap diff --git a/tools/testing/selftests/net/mptcp/Makefile b/tools/testing/selftests/net/mptcp/Makefile index bbf4e448bad9..0356c4501c99 100644 --- a/tools/testing/selftests/net/mptcp/Makefile +++ b/tools/testing/selftests/net/mptcp/Makefile @@ -8,7 +8,7 @@ CFLAGS = -Wall -Wl,--no-as-needed -O2 -g -I$(top_srcdir)/usr/include TEST_PROGS := mptcp_connect.sh pm_netlink.sh mptcp_join.sh diag.sh \ simult_flows.sh mptcp_sockopt.sh -TEST_GEN_FILES = mptcp_connect pm_nl_ctl mptcp_sockopt +TEST_GEN_FILES = mptcp_connect pm_nl_ctl mptcp_sockopt mptcp_inq TEST_FILES := settings diff --git a/tools/testing/selftests/net/mptcp/config b/tools/testing/selftests/net/mptcp/config index 0faaccd21447..419e71560fd1 100644 --- a/tools/testing/selftests/net/mptcp/config +++ b/tools/testing/selftests/net/mptcp/config @@ -13,5 +13,9 @@ CONFIG_NFT_COUNTER=m CONFIG_NFT_COMPAT=m CONFIG_NETFILTER_XTABLES=m CONFIG_NETFILTER_XT_MATCH_BPF=m -CONFIG_NF_TABLES_IPV4=y -CONFIG_NF_TABLES_IPV6=y +CONFIG_NF_TABLES_INET=y +CONFIG_NFT_TPROXY=m +CONFIG_NFT_SOCKET=m +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IPV6_MULTIPLE_TABLES=y diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c index 95e81d557b08..a30e93c5c549 100644 --- a/tools/testing/selftests/net/mptcp/mptcp_connect.c +++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c @@ -59,7 
+59,6 @@ static enum cfg_peek cfg_peek = CFG_NONE_PEEK; static const char *cfg_host; static const char *cfg_port = "12000"; static int cfg_sock_proto = IPPROTO_MPTCP; -static bool tcpulp_audit; static int pf = AF_INET; static int cfg_sndbuf; static int cfg_rcvbuf; @@ -73,9 +72,22 @@ static uint32_t cfg_mark; struct cfg_cmsg_types { unsigned int cmsg_enabled:1; unsigned int timestampns:1; + unsigned int tcp_inq:1; }; +struct cfg_sockopt_types { + unsigned int transparent:1; +}; + +struct tcp_inq_state { + unsigned int last; + bool expect_eof; +}; + +static struct tcp_inq_state tcp_inq; + static struct cfg_cmsg_types cfg_cmsg_types; +static struct cfg_sockopt_types cfg_sockopt_types; static void die_usage(void) { @@ -90,9 +102,9 @@ static void die_usage(void) fprintf(stderr, "\t-s [MPTCP|TCP] -- use mptcp(default) or tcp sockets\n"); fprintf(stderr, "\t-m [poll|mmap|sendfile] -- use poll(default)/mmap+write/sendfile\n"); fprintf(stderr, "\t-M mark -- set socket packet mark\n"); - fprintf(stderr, "\t-u -- check mptcp ulp\n"); fprintf(stderr, "\t-w num -- wait num sec before closing the socket\n"); fprintf(stderr, "\t-c cmsg -- test cmsg type <cmsg>\n"); + fprintf(stderr, "\t-o option -- test sockopt <option>\n"); fprintf(stderr, "\t-P [saveWithPeek|saveAfterPeek] -- save data with/after MSG_PEEK form tcp socket\n"); exit(1); @@ -185,6 +197,58 @@ static void set_mark(int fd, uint32_t mark) } } +static void set_transparent(int fd, int pf) +{ + int one = 1; + + switch (pf) { + case AF_INET: + if (-1 == setsockopt(fd, SOL_IP, IP_TRANSPARENT, &one, sizeof(one))) + perror("IP_TRANSPARENT"); + break; + case AF_INET6: + if (-1 == setsockopt(fd, IPPROTO_IPV6, IPV6_TRANSPARENT, &one, sizeof(one))) + perror("IPV6_TRANSPARENT"); + break; + } +} + +static int do_ulp_so(int sock, const char *name) +{ + return setsockopt(sock, IPPROTO_TCP, TCP_ULP, name, strlen(name)); +} + +#define X(m) xerror("%s:%u: %s: failed for proto %d at line %u", __FILE__, __LINE__, (m), proto, line) +static void sock_test_tcpulp(int sock, int proto, unsigned int line) +{ + socklen_t buflen = 8; + char buf[8] = ""; + int ret = getsockopt(sock, IPPROTO_TCP, TCP_ULP, buf, &buflen); + + if (ret != 0) + X("getsockopt"); + + if (buflen > 0) { + if (strcmp(buf, "mptcp") != 0) + xerror("unexpected ULP '%s' for proto %d at line %u", buf, proto, line); + ret = do_ulp_so(sock, "tls"); + if (ret == 0) + X("setsockopt"); + } else if (proto == IPPROTO_MPTCP) { + ret = do_ulp_so(sock, "tls"); + if (ret != -1) + X("setsockopt"); + } + + ret = do_ulp_so(sock, "mptcp"); + if (ret != -1) + X("setsockopt"); + +#undef X +} + +#define SOCK_TEST_TCPULP(s, p) sock_test_tcpulp((s), (p), __LINE__) + static int sock_listen_mptcp(const char * const listenaddr, const char * const port) { @@ -208,10 +272,15 @@ static int sock_listen_mptcp(const char * const listenaddr, if (sock < 0) continue; + SOCK_TEST_TCPULP(sock, cfg_sock_proto); + if (-1 == setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one))) perror("setsockopt"); + if (cfg_sockopt_types.transparent) + set_transparent(sock, pf); + if (bind(sock, a->ai_addr, a->ai_addrlen) == 0) break; /* success */ @@ -227,50 +296,17 @@ static int sock_listen_mptcp(const char * const listenaddr, return sock; } + SOCK_TEST_TCPULP(sock, cfg_sock_proto); + if (listen(sock, 20)) { perror("listen"); close(sock); return -1; } - return sock; -} - -static bool sock_test_tcpulp(const char * const remoteaddr, - const char * const port) -{ - struct addrinfo hints = { - .ai_protocol = IPPROTO_TCP, - .ai_socktype = 
SOCK_STREAM, - }; - struct addrinfo *a, *addr; - int sock = -1, ret = 0; - bool test_pass = false; - - hints.ai_family = AF_INET; + SOCK_TEST_TCPULP(sock, cfg_sock_proto); - xgetaddrinfo(remoteaddr, port, &hints, &addr); - for (a = addr; a; a = a->ai_next) { - sock = socket(a->ai_family, a->ai_socktype, IPPROTO_TCP); - if (sock < 0) { - perror("socket"); - continue; - } - ret = setsockopt(sock, IPPROTO_TCP, TCP_ULP, "mptcp", - sizeof("mptcp")); - if (ret == -1 && errno == EOPNOTSUPP) - test_pass = true; - close(sock); - - if (test_pass) - break; - if (!ret) - fprintf(stderr, - "setsockopt(TCP_ULP) returned 0\n"); - else - perror("setsockopt(TCP_ULP)"); - } - return test_pass; + return sock; } static int sock_connect_mptcp(const char * const remoteaddr, @@ -293,6 +329,8 @@ static int sock_connect_mptcp(const char * const remoteaddr, continue; } + SOCK_TEST_TCPULP(sock, proto); + if (cfg_mark) set_mark(sock, cfg_mark); @@ -305,6 +343,8 @@ static int sock_connect_mptcp(const char * const remoteaddr, } freeaddrinfo(addr); + if (sock != -1) + SOCK_TEST_TCPULP(sock, proto); return sock; } @@ -364,7 +404,9 @@ static size_t do_write(const int fd, char *buf, const size_t len) static void process_cmsg(struct msghdr *msgh) { struct __kernel_timespec ts; + bool inq_found = false; bool ts_found = false; + unsigned int inq = 0; struct cmsghdr *cmsg; for (cmsg = CMSG_FIRSTHDR(msgh); cmsg ; cmsg = CMSG_NXTHDR(msgh, cmsg)) { @@ -373,12 +415,27 @@ static void process_cmsg(struct msghdr *msgh) ts_found = true; continue; } + if (cmsg->cmsg_level == IPPROTO_TCP && cmsg->cmsg_type == TCP_CM_INQ) { + memcpy(&inq, CMSG_DATA(cmsg), sizeof(inq)); + inq_found = true; + continue; + } + } if (cfg_cmsg_types.timestampns) { if (!ts_found) xerror("TIMESTAMPNS not present\n"); } + + if (cfg_cmsg_types.tcp_inq) { + if (!inq_found) + xerror("TCP_INQ not present\n"); + + if (inq > 1024) + xerror("tcp_inq %u is larger than one kbyte\n", inq); + tcp_inq.last = inq; + } } static ssize_t do_recvmsg_cmsg(const int fd, char *buf, const size_t len) @@ -395,10 +452,23 @@ static ssize_t do_recvmsg_cmsg(const int fd, char *buf, const size_t len) .msg_controllen = sizeof(msg_buf), }; int flags = 0; + unsigned int last_hint = tcp_inq.last; int ret = recvmsg(fd, &msg, flags); - if (ret <= 0) + if (ret <= 0) { + if (ret == 0 && tcp_inq.expect_eof) + return ret; + + if (ret == 0 && cfg_cmsg_types.tcp_inq) + if (last_hint != 1 && last_hint != 0) + xerror("EOF but last tcp_inq hint was %u\n", last_hint); + return ret; + } + + if (tcp_inq.expect_eof) + xerror("expected EOF, last_hint %u, now %u\n", + last_hint, tcp_inq.last); if (msg.msg_controllen && !cfg_cmsg_types.cmsg_enabled) xerror("got %lu bytes of cmsg data, expected 0\n", @@ -410,6 +480,19 @@ static ssize_t do_recvmsg_cmsg(const int fd, char *buf, const size_t len) if (msg.msg_controllen) process_cmsg(&msg); + if (cfg_cmsg_types.tcp_inq) { + if ((size_t)ret < len && last_hint > (unsigned int)ret) { + if (ret + 1 != (int)last_hint) { + int next = read(fd, msg_buf, sizeof(msg_buf)); + + xerror("read %u of %u, last_hint was %u tcp_inq hint now %u next_read returned %d/%m\n", + ret, (unsigned int)len, last_hint, tcp_inq.last, next); + } else { + tcp_inq.expect_eof = true; + } + } + } + return ret; } @@ -878,6 +961,8 @@ int main_loop_s(int listensock) check_sockaddr(pf, &ss, salen); check_getpeername(remotesock, &ss, salen); + SOCK_TEST_TCPULP(remotesock, 0); + return copyfd_io(0, remotesock, 1); } @@ -919,6 +1004,8 @@ static void apply_cmsg_types(int fd, const struct cfg_cmsg_types 
*cmsg) if (cmsg->timestampns) xsetsockopt(fd, SOL_SOCKET, SO_TIMESTAMPNS_NEW, &on, sizeof(on)); + if (cmsg->tcp_inq) + xsetsockopt(fd, IPPROTO_TCP, TCP_INQ, &on, sizeof(on)); } static void parse_cmsg_types(const char *type) @@ -940,10 +1027,36 @@ static void parse_cmsg_types(const char *type) return; } + if (strncmp(type, "TCPINQ", len) == 0) { + cfg_cmsg_types.tcp_inq = 1; + return; + } + fprintf(stderr, "Unrecognized cmsg option %s\n", type); exit(1); } +static void parse_setsock_options(const char *name) +{ + char *next = strchr(name, ','); + unsigned int len = 0; + + if (next) { + parse_setsock_options(next + 1); + len = next - name; + } else { + len = strlen(name); + } + + if (strncmp(name, "TRANSPARENT", len) == 0) { + cfg_sockopt_types.transparent = 1; + return; + } + + fprintf(stderr, "Unrecognized setsockopt option %s\n", name); + exit(1); +} + int main_loop(void) { int fd; @@ -955,6 +1068,8 @@ int main_loop(void) check_getpeername_connect(fd); + SOCK_TEST_TCPULP(fd, cfg_sock_proto); + if (cfg_rcvbuf) set_rcvbuf(fd, cfg_rcvbuf); if (cfg_sndbuf) @@ -1047,7 +1162,7 @@ static void parse_opts(int argc, char **argv) { int c; - while ((c = getopt(argc, argv, "6jr:lp:s:hut:T:m:S:R:w:M:P:c:")) != -1) { + while ((c = getopt(argc, argv, "6jr:lp:s:ht:T:m:S:R:w:M:P:c:o:")) != -1) { switch (c) { case 'j': cfg_join = true; @@ -1073,9 +1188,6 @@ static void parse_opts(int argc, char **argv) case 'h': die_usage(); break; - case 'u': - tcpulp_audit = true; - break; case '6': pf = AF_INET6; break; @@ -1108,6 +1220,9 @@ static void parse_opts(int argc, char **argv) case 'c': parse_cmsg_types(optarg); break; + case 'o': + parse_setsock_options(optarg); + break; } } @@ -1126,9 +1241,6 @@ int main(int argc, char *argv[]) signal(SIGUSR1, handle_signal); parse_opts(argc, argv); - if (tcpulp_audit) - return sock_test_tcpulp(cfg_host, cfg_port) ? 0 : 1; - if (listen_mode) { int fd = sock_listen_mptcp(cfg_host, cfg_port); diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh index 559173a8e387..ee28f82a6c89 100755 --- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh +++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh @@ -296,24 +296,6 @@ check_mptcp_disabled() return 0 } -check_mptcp_ulp_setsockopt() -{ - local t retval - t="ns_ulp-$sech-$(mktemp -u XXXXXX)" - - ip netns add ${t} || exit $ksft_skip - if ! ip netns exec ${t} ./mptcp_connect -u -p 10000 -s TCP 127.0.0.1 2>&1; then - printf "setsockopt(..., TCP_ULP, \"mptcp\", ...) allowed\t[ FAIL ]\n" - retval=1 - ret=$retval - else - printf "setsockopt(..., TCP_ULP, \"mptcp\", ...) blocked\t[ OK ]\n" - retval=0 - fi - ip netns del ${t} - return $retval -} - # $1: IP address is_v6() { @@ -671,6 +653,82 @@ run_tests() run_tests_lo $1 $2 $3 0 } +run_test_transparent() +{ + local connect_addr="$1" + local msg="$2" + + local connector_ns="$ns1" + local listener_ns="$ns2" + local lret=0 + local r6flag="" + + # skip if we don't want v6 + if ! $ipv6 && is_v6 "${connect_addr}"; then + return 0 + fi + +ip netns exec "$listener_ns" nft -f /dev/stdin <<"EOF" +flush ruleset +table inet mangle { + chain divert { + type filter hook prerouting priority -150; + + meta l4proto tcp socket transparent 1 meta mark set 1 accept + tcp dport 20000 tproxy to :20000 meta mark set 1 accept + } +} +EOF + if [ $? 
-ne 0 ]; then + echo "SKIP: $msg, could not load nft ruleset" + return + fi + + local local_addr + if is_v6 "${connect_addr}"; then + local_addr="::" + r6flag="-6" + else + local_addr="0.0.0.0" + fi + + ip -net "$listener_ns" $r6flag rule add fwmark 1 lookup 100 + if [ $? -ne 0 ]; then + ip netns exec "$listener_ns" nft flush ruleset + echo "SKIP: $msg, ip $r6flag rule failed" + return + fi + + ip -net "$listener_ns" route add local $local_addr/0 dev lo table 100 + if [ $? -ne 0 ]; then + ip netns exec "$listener_ns" nft flush ruleset + ip -net "$listener_ns" $r6flag rule del fwmark 1 lookup 100 + echo "SKIP: $msg, ip route add local $local_addr failed" + return + fi + + echo "INFO: test $msg" + + TEST_COUNT=10000 + local extra_args="-o TRANSPARENT" + do_transfer ${listener_ns} ${connector_ns} MPTCP MPTCP \ + ${connect_addr} ${local_addr} "${extra_args}" + lret=$? + + ip netns exec "$listener_ns" nft flush ruleset + ip -net "$listener_ns" $r6flag rule del fwmark 1 lookup 100 + ip -net "$listener_ns" route del local $local_addr/0 dev lo table 100 + + if [ $lret -ne 0 ]; then + echo "FAIL: $msg, mptcp connection error" 1>&2 + ret=$lret + return 1 + fi + + echo "PASS: $msg" + return 0 +} + run_tests_peekmode() { local peekmode="$1" @@ -704,8 +762,6 @@ make_file "$sin" "server" check_mptcp_disabled -check_mptcp_ulp_setsockopt - stop_if_error "The kernel configuration is not valid for MPTCP" echo "INFO: validating network environment with pings" @@ -794,5 +850,9 @@ run_tests_peekmode "saveWithPeek" run_tests_peekmode "saveAfterPeek" stop_if_error "Tests with peek mode have failed" +# connect to ns4 ip address, ns2 should intercept/proxy +run_test_transparent 10.0.3.1 "tproxy ipv4" +run_test_transparent dead:beef:3::1 "tproxy ipv6" + display_time exit $ret diff --git a/tools/testing/selftests/net/mptcp/mptcp_inq.c b/tools/testing/selftests/net/mptcp/mptcp_inq.c new file mode 100644 index 000000000000..29f75e2a1116 --- /dev/null +++ b/tools/testing/selftests/net/mptcp/mptcp_inq.c @@ -0,0 +1,602 @@ +// SPDX-License-Identifier: GPL-2.0 + +#define _GNU_SOURCE + +#include <assert.h> +#include <errno.h> +#include <fcntl.h> +#include <limits.h> +#include <string.h> +#include <stdarg.h> +#include <stdbool.h> +#include <stdint.h> +#include <inttypes.h> +#include <stdio.h> +#include <stdlib.h> +#include <strings.h> +#include <unistd.h> +#include <time.h> + +#include <sys/ioctl.h> +#include <sys/socket.h> +#include <sys/types.h> +#include <sys/wait.h> + +#include <netdb.h> +#include <netinet/in.h> + +#include <linux/tcp.h> +#include <linux/sockios.h> + +#ifndef IPPROTO_MPTCP +#define IPPROTO_MPTCP 262 +#endif +#ifndef SOL_MPTCP +#define SOL_MPTCP 284 +#endif + +static int pf = AF_INET; +static int proto_tx = IPPROTO_MPTCP; +static int proto_rx = IPPROTO_MPTCP; + +static void die_perror(const char *msg) +{ + perror(msg); + exit(1); +} + +static void die_usage(int r) +{ + fprintf(stderr, "Usage: mptcp_inq [-6] [ -t tcp|mptcp ] [ -r tcp|mptcp]\n"); + exit(r); +} + +static void xerror(const char *fmt, ...) 
+{ + va_list ap; + + va_start(ap, fmt); + vfprintf(stderr, fmt, ap); + va_end(ap); + fputc('\n', stderr); + exit(1); +} + +static const char *getxinfo_strerr(int err) +{ + if (err == EAI_SYSTEM) + return strerror(errno); + + return gai_strerror(err); +} + +static void xgetaddrinfo(const char *node, const char *service, + const struct addrinfo *hints, + struct addrinfo **res) +{ + int err = getaddrinfo(node, service, hints, res); + + if (err) { + const char *errstr = getxinfo_strerr(err); + + fprintf(stderr, "Fatal: getaddrinfo(%s:%s): %s\n", + node ? node : "", service ? service : "", errstr); + exit(1); + } +} + +static int sock_listen_mptcp(const char * const listenaddr, + const char * const port) +{ + int sock; + struct addrinfo hints = { + .ai_protocol = IPPROTO_TCP, + .ai_socktype = SOCK_STREAM, + .ai_flags = AI_PASSIVE | AI_NUMERICHOST + }; + + hints.ai_family = pf; + + struct addrinfo *a, *addr; + int one = 1; + + xgetaddrinfo(listenaddr, port, &hints, &addr); + hints.ai_family = pf; + + for (a = addr; a; a = a->ai_next) { + sock = socket(a->ai_family, a->ai_socktype, proto_rx); + if (sock < 0) + continue; + + if (-1 == setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, &one, + sizeof(one))) + perror("setsockopt"); + + if (bind(sock, a->ai_addr, a->ai_addrlen) == 0) + break; /* success */ + + perror("bind"); + close(sock); + sock = -1; + } + + freeaddrinfo(addr); + + if (sock < 0) + xerror("could not create listen socket"); + + if (listen(sock, 20)) + die_perror("listen"); + + return sock; +} + +static int sock_connect_mptcp(const char * const remoteaddr, + const char * const port, int proto) +{ + struct addrinfo hints = { + .ai_protocol = IPPROTO_TCP, + .ai_socktype = SOCK_STREAM, + }; + struct addrinfo *a, *addr; + int sock = -1; + + hints.ai_family = pf; + + xgetaddrinfo(remoteaddr, port, &hints, &addr); + for (a = addr; a; a = a->ai_next) { + sock = socket(a->ai_family, a->ai_socktype, proto); + if (sock < 0) + continue; + + if (connect(sock, a->ai_addr, a->ai_addrlen) == 0) + break; /* success */ + + die_perror("connect"); + } + + if (sock < 0) + xerror("could not create connect socket"); + + freeaddrinfo(addr); + return sock; +} + +static int protostr_to_num(const char *s) +{ + if (strcasecmp(s, "tcp") == 0) + return IPPROTO_TCP; + if (strcasecmp(s, "mptcp") == 0) + return IPPROTO_MPTCP; + + die_usage(1); + return 0; +} + +static void parse_opts(int argc, char **argv) +{ + int c; + + while ((c = getopt(argc, argv, "h6t:r:")) != -1) { + switch (c) { + case 'h': + die_usage(0); + break; + case '6': + pf = AF_INET6; + break; + case 't': + proto_tx = protostr_to_num(optarg); + break; + case 'r': + proto_rx = protostr_to_num(optarg); + break; + default: + die_usage(1); + break; + } + } +} + +/* wait up to timeout milliseconds */ +static void wait_for_ack(int fd, int timeout, size_t total) +{ + int i; + + for (i = 0; i < timeout; i++) { + int nsd, ret, queued = -1; + struct timespec req; + + ret = ioctl(fd, TIOCOUTQ, &queued); + if (ret < 0) + die_perror("TIOCOUTQ"); + + ret = ioctl(fd, SIOCOUTQNSD, &nsd); + if (ret < 0) + die_perror("SIOCOUTQNSD"); + + if ((size_t)queued > total) + xerror("TIOCOUTQ %u, but only %zu expected\n", queued, total); + assert(nsd <= queued); + + if (queued == 0) + return; + + /* wait for peer to ack rx of all data */ + req.tv_sec = 0; + req.tv_nsec = 1 * 1000 * 1000ul; /* 1ms */ + nanosleep(&req, NULL); + } + + xerror("still tx data queued after %u ms\n", timeout); +} + +static void connect_one_server(int fd, int unixfd) +{ + size_t len, i, total, sent; + char 
buf[4096], buf2[4096]; + ssize_t ret; + + len = rand() % (sizeof(buf) - 1); + + if (len < 128) + len = 128; + + for (i = 0; i < len ; i++) { + buf[i] = rand() % 26; + buf[i] += 'A'; + } + + buf[i] = '\n'; + + /* un-block server */ + ret = read(unixfd, buf2, 4); + assert(ret == 4); + + assert(strncmp(buf2, "xmit", 4) == 0); + + ret = write(unixfd, &len, sizeof(len)); + assert(ret == (ssize_t)sizeof(len)); + + ret = write(fd, buf, len); + if (ret < 0) + die_perror("write"); + + if (ret != (ssize_t)len) + xerror("short write"); + + ret = read(unixfd, buf2, 4); + assert(strncmp(buf2, "huge", 4) == 0); + + total = rand() % (16 * 1024 * 1024); + total += (1 * 1024 * 1024); + sent = total; + + ret = write(unixfd, &total, sizeof(total)); + assert(ret == (ssize_t)sizeof(total)); + + wait_for_ack(fd, 5000, len); + + while (total > 0) { + if (total > sizeof(buf)) + len = sizeof(buf); + else + len = total; + + ret = write(fd, buf, len); + if (ret < 0) + die_perror("write"); + total -= ret; + + /* we don't have to care about buf content, only + * number of total bytes sent + */ + } + + ret = read(unixfd, buf2, 4); + assert(ret == 4); + assert(strncmp(buf2, "shut", 4) == 0); + + wait_for_ack(fd, 5000, sent); + + ret = write(fd, buf, 1); + assert(ret == 1); + close(fd); + ret = write(unixfd, "closed", 6); + assert(ret == 6); + + close(unixfd); +} + +static void get_tcp_inq(struct msghdr *msgh, unsigned int *inqv) +{ + struct cmsghdr *cmsg; + + for (cmsg = CMSG_FIRSTHDR(msgh); cmsg ; cmsg = CMSG_NXTHDR(msgh, cmsg)) { + if (cmsg->cmsg_level == IPPROTO_TCP && cmsg->cmsg_type == TCP_CM_INQ) { + memcpy(inqv, CMSG_DATA(cmsg), sizeof(*inqv)); + return; + } + } + + xerror("could not find TCP_CM_INQ cmsg type"); +} + +static void process_one_client(int fd, int unixfd) +{ + unsigned int tcp_inq; + size_t expect_len; + char msg_buf[4096]; + char buf[4096]; + char tmp[16]; + struct iovec iov = { + .iov_base = buf, + .iov_len = 1, + }; + struct msghdr msg = { + .msg_iov = &iov, + .msg_iovlen = 1, + .msg_control = msg_buf, + .msg_controllen = sizeof(msg_buf), + }; + ssize_t ret, tot; + + ret = write(unixfd, "xmit", 4); + assert(ret == 4); + + ret = read(unixfd, &expect_len, sizeof(expect_len)); + assert(ret == (ssize_t)sizeof(expect_len)); + + if (expect_len > sizeof(buf)) + xerror("expect len %zu exceeds buffer size", expect_len); + + for (;;) { + struct timespec req; + unsigned int queued; + + ret = ioctl(fd, FIONREAD, &queued); + if (ret < 0) + die_perror("FIONREAD"); + if (queued > expect_len) + xerror("FIONREAD returned %u, but only %zu expected\n", + queued, expect_len); + if (queued == expect_len) + break; + + req.tv_sec = 0; + req.tv_nsec = 1000 * 1000ul; + nanosleep(&req, NULL); + } + + /* read one byte, expect cmsg to return expected - 1 */ + ret = recvmsg(fd, &msg, 0); + if (ret < 0) + die_perror("recvmsg"); + + if (msg.msg_controllen == 0) + xerror("msg_controllen is 0"); + + get_tcp_inq(&msg, &tcp_inq); + + assert((size_t)tcp_inq == (expect_len - 1)); + + iov.iov_len = sizeof(buf); + ret = recvmsg(fd, &msg, 0); + if (ret < 0) + die_perror("recvmsg"); + + /* should have gotten exact remainder of all pending data */ + assert(ret == (ssize_t)tcp_inq); + + /* should be 0, all drained */ + get_tcp_inq(&msg, &tcp_inq); + assert(tcp_inq == 0); + + /* request a large swath of data. 
*/ + ret = write(unixfd, "huge", 4); + assert(ret == 4); + + ret = read(unixfd, &expect_len, sizeof(expect_len)); + assert(ret == (ssize_t)sizeof(expect_len)); + + /* peer should send us a few mb of data */ + if (expect_len <= sizeof(buf)) + xerror("expect len %zu too small\n", expect_len); + + tot = 0; + do { + iov.iov_len = sizeof(buf); + ret = recvmsg(fd, &msg, 0); + if (ret < 0) + die_perror("recvmsg"); + + tot += ret; + + get_tcp_inq(&msg, &tcp_inq); + + if (tcp_inq > expect_len - tot) + xerror("inq %d, remaining %d total_len %d\n", + tcp_inq, expect_len - tot, (int)expect_len); + + assert(tcp_inq <= expect_len - tot); + } while ((size_t)tot < expect_len); + + ret = write(unixfd, "shut", 4); + assert(ret == 4); + + /* wait for hangup. Should have received one more byte of data. */ + ret = read(unixfd, tmp, sizeof(tmp)); + assert(ret == 6); + assert(strncmp(tmp, "closed", 6) == 0); + + sleep(1); + + iov.iov_len = 1; + ret = recvmsg(fd, &msg, 0); + if (ret < 0) + die_perror("recvmsg"); + assert(ret == 1); + + get_tcp_inq(&msg, &tcp_inq); + + /* tcp_inq should be 1 due to received fin. */ + assert(tcp_inq == 1); + + iov.iov_len = 1; + ret = recvmsg(fd, &msg, 0); + if (ret < 0) + die_perror("recvmsg"); + + /* expect EOF */ + assert(ret == 0); + get_tcp_inq(&msg, &tcp_inq); + assert(tcp_inq == 1); + + close(fd); +} + +static int xaccept(int s) +{ + int fd = accept(s, NULL, 0); + + if (fd < 0) + die_perror("accept"); + + return fd; +} + +static int server(int unixfd) +{ + int fd = -1, r, on = 1; + + switch (pf) { + case AF_INET: + fd = sock_listen_mptcp("127.0.0.1", "15432"); + break; + case AF_INET6: + fd = sock_listen_mptcp("::1", "15432"); + break; + default: + xerror("Unknown pf %d\n", pf); + break; + } + + r = write(unixfd, "conn", 4); + assert(r == 4); + + alarm(15); + r = xaccept(fd); + + if (-1 == setsockopt(r, IPPROTO_TCP, TCP_INQ, &on, sizeof(on))) + die_perror("setsockopt"); + + process_one_client(r, unixfd); + + return 0; +} + +static int client(int unixfd) +{ + int fd = -1; + + alarm(15); + + switch (pf) { + case AF_INET: + fd = sock_connect_mptcp("127.0.0.1", "15432", proto_tx); + break; + case AF_INET6: + fd = sock_connect_mptcp("::1", "15432", proto_tx); + break; + default: + xerror("Unknown pf %d\n", pf); + } + + connect_one_server(fd, unixfd); + + return 0; +} + +static void init_rng(void) +{ + int fd = open("/dev/urandom", O_RDONLY); + unsigned int foo; + + if (fd > 0) { + int ret = read(fd, &foo, sizeof(foo)); + + if (ret < 0) + srand(fd + foo); + close(fd); + } + + srand(foo); +} + +static pid_t xfork(void) +{ + pid_t p = fork(); + + if (p < 0) + die_perror("fork"); + else if (p == 0) + init_rng(); + + return p; +} + +static int rcheck(int wstatus, const char *what) +{ + if (WIFEXITED(wstatus)) { + if (WEXITSTATUS(wstatus) == 0) + return 0; + fprintf(stderr, "%s exited, status=%d\n", what, WEXITSTATUS(wstatus)); + return WEXITSTATUS(wstatus); + } else if (WIFSIGNALED(wstatus)) { + xerror("%s killed by signal %d\n", what, WTERMSIG(wstatus)); + } else if (WIFSTOPPED(wstatus)) { + xerror("%s stopped by signal %d\n", what, WSTOPSIG(wstatus)); + } + + return 111; +} + +int main(int argc, char *argv[]) +{ + int e1, e2, wstatus; + pid_t s, c, ret; + int unixfds[2]; + + parse_opts(argc, argv); + + e1 = socketpair(AF_UNIX, SOCK_DGRAM, 0, unixfds); + if (e1 < 0) + die_perror("socketpair"); + + s = xfork(); + if (s == 0) + return server(unixfds[1]); + + close(unixfds[1]); + + /* wait until server bound a socket */ + e1 = read(unixfds[0], &e1, 4); + assert(e1 == 4); + + c = xfork(); + if
(c == 0) + return client(unixfds[0]); + + close(unixfds[0]); + + ret = waitpid(s, &wstatus, 0); + if (ret == -1) + die_perror("waitpid"); + e1 = rcheck(wstatus, "server"); + ret = waitpid(c, &wstatus, 0); + if (ret == -1) + die_perror("waitpid"); + e2 = rcheck(wstatus, "client"); + + return e1 ? e1 : e2; +} diff --git a/tools/testing/selftests/net/mptcp/mptcp_sockopt.c b/tools/testing/selftests/net/mptcp/mptcp_sockopt.c index 417b11cafafe..ac9a4d9c1764 100644 --- a/tools/testing/selftests/net/mptcp/mptcp_sockopt.c +++ b/tools/testing/selftests/net/mptcp/mptcp_sockopt.c @@ -4,6 +4,7 @@ #include <assert.h> #include <errno.h> +#include <fcntl.h> #include <limits.h> #include <string.h> #include <stdarg.h> @@ -13,6 +14,7 @@ #include <stdio.h> #include <stdlib.h> #include <strings.h> +#include <time.h> #include <unistd.h> #include <sys/socket.h> @@ -594,6 +596,44 @@ static int server(int pipefd) return 0; } +static void test_ip_tos_sockopt(int fd) +{ + uint8_t tos_in, tos_out; + socklen_t s; + int r; + + tos_in = rand() & 0xfc; + r = setsockopt(fd, SOL_IP, IP_TOS, &tos_in, sizeof(tos_out)); + if (r != 0) + die_perror("setsockopt IP_TOS"); + + tos_out = 0; + s = sizeof(tos_out); + r = getsockopt(fd, SOL_IP, IP_TOS, &tos_out, &s); + if (r != 0) + die_perror("getsockopt IP_TOS"); + + if (tos_in != tos_out) + xerror("tos %x != %x socklen_t %d\n", tos_in, tos_out, s); + + if (s != 1) + xerror("tos should be 1 byte"); + + s = 0; + r = getsockopt(fd, SOL_IP, IP_TOS, &tos_out, &s); + if (r != 0) + die_perror("getsockopt IP_TOS 0"); + if (s != 0) + xerror("expect socklen_t == 0"); + + s = -1; + r = getsockopt(fd, SOL_IP, IP_TOS, &tos_out, &s); + if (r != -1 && errno != EINVAL) + die_perror("getsockopt IP_TOS did not indicate -EINVAL"); + if (s != -1) + xerror("expect socklen_t == -1"); +} + static int client(int pipefd) { int fd = -1; @@ -611,6 +651,8 @@ static int client(int pipefd) xerror("Unknown pf %d\n", pf); } + test_ip_tos_sockopt(fd); + connect_one_server(fd, pipefd); return 0; @@ -642,6 +684,25 @@ static int rcheck(int wstatus, const char *what) return 111; } +static void init_rng(void) +{ + int fd = open("/dev/urandom", O_RDONLY); + + if (fd >= 0) { + unsigned int foo; + ssize_t ret; + + /* can't fail */ + ret = read(fd, &foo, sizeof(foo)); + assert(ret == sizeof(foo)); + + close(fd); + srand(foo); + } else { + srand(time(NULL)); + } +} + int main(int argc, char *argv[]) { int e1, e2, wstatus; @@ -650,6 +711,8 @@ int main(int argc, char *argv[]) parse_opts(argc, argv); + init_rng(); + e1 = pipe(pipefds); if (e1 < 0) die_perror("pipe"); diff --git a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh index 41de643788b8..0879da915014 100755 --- a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh +++ b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh @@ -178,7 +178,7 @@ do_transfer() timeout ${timeout_test} \ ip netns exec ${listener_ns} \ - $mptcp_connect -t ${timeout_poll} -l -M 1 -p $port -s ${srv_proto} -c TIMESTAMPNS \ + $mptcp_connect -t ${timeout_poll} -l -M 1 -p $port -s ${srv_proto} -c TIMESTAMPNS,TCPINQ \ ${local_addr} < "$sin" > "$sout" & spid=$! @@ -186,7 +186,7 @@ do_transfer() timeout ${timeout_test} \ ip netns exec ${connector_ns} \ - $mptcp_connect -t ${timeout_poll} -M 2 -p $port -s ${cl_proto} -c TIMESTAMPNS \ + $mptcp_connect -t ${timeout_poll} -M 2 -p $port -s ${cl_proto} -c TIMESTAMPNS,TCPINQ \ $connect_addr < "$cin" > "$cout" & cpid=$! 
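The TCPINQ cmsg requested in the two commands above is what the new mptcp_inq.c and the TCP_INQ handling in mptcp_connect.c exercise: once setsockopt(IPPROTO_TCP, TCP_INQ) is enabled, each recvmsg() carries a TCP_CM_INQ control message with the number of bytes still queued for reading. A minimal sketch of that receive pattern, not part of the patch itself; the helper name is illustrative and error handling is elided, assuming a connected TCP or MPTCP socket on which TCP_INQ was already set (as server() in mptcp_inq.c does):

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <linux/tcp.h>

/* Receive into buf and report the kernel's "bytes still queued" hint,
 * delivered as a TCP_CM_INQ control message. Callers should initialize
 * *inq, since it is left untouched when no such cmsg arrives.
 */
static ssize_t recv_with_inq(int fd, char *buf, size_t len, int *inq)
{
	char cbuf[CMSG_SPACE(sizeof(*inq))];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	ssize_t ret = recvmsg(fd, &msg, 0);

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == IPPROTO_TCP &&
		    cmsg->cmsg_type == TCP_CM_INQ)
			memcpy(inq, CMSG_DATA(cmsg), sizeof(*inq));
	}
	return ret;
}

get_tcp_inq() in mptcp_inq.c and process_cmsg() in mptcp_connect.c walk the control messages with the same loop.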
@@ -279,6 +279,45 @@ run_tests() fi } +do_tcpinq_test() +{ + ip netns exec "$ns1" ./mptcp_inq "$@" + lret=$? + if [ $lret -ne 0 ];then + ret=$lret + echo "FAIL: mptcp_inq $@" 1>&2 + return $lret + fi + + echo "PASS: TCP_INQ cmsg/ioctl $@" + return $lret +} + +do_tcpinq_tests() +{ + local lret=0 + + ip netns exec "$ns1" iptables -F + ip netns exec "$ns1" ip6tables -F + + for args in "-t tcp" "-r tcp"; do + do_tcpinq_test $args + lret=$? + if [ $lret -ne 0 ] ; then + return $lret + fi + do_tcpinq_test -6 $args + lret=$? + if [ $lret -ne 0 ] ; then + return $lret + fi + done + + do_tcpinq_test -r tcp -t tcp + + return $? +} + sin=$(mktemp) sout=$(mktemp) cin=$(mktemp) @@ -300,4 +339,5 @@ if [ $ret -eq 0 ];then echo "PASS: SOL_MPTCP getsockopt has expected information" fi +do_tcpinq_tests exit $ret diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c index e61fc4c32ba2..6e468e0f42f7 100644 --- a/tools/testing/selftests/net/tls.c +++ b/tools/testing/selftests/net/tls.c @@ -31,6 +31,8 @@ struct tls_crypto_info_keys { struct tls12_crypto_info_chacha20_poly1305 chacha20; struct tls12_crypto_info_sm4_gcm sm4gcm; struct tls12_crypto_info_sm4_ccm sm4ccm; + struct tls12_crypto_info_aes_ccm_128 aesccm128; + struct tls12_crypto_info_aes_gcm_256 aesgcm256; }; size_t len; }; @@ -61,6 +63,16 @@ static void tls_crypto_info_init(uint16_t tls_version, uint16_t cipher_type, tls12->sm4ccm.info.version = tls_version; tls12->sm4ccm.info.cipher_type = cipher_type; break; + case TLS_CIPHER_AES_CCM_128: + tls12->len = sizeof(struct tls12_crypto_info_aes_ccm_128); + tls12->aesccm128.info.version = tls_version; + tls12->aesccm128.info.cipher_type = cipher_type; + break; + case TLS_CIPHER_AES_GCM_256: + tls12->len = sizeof(struct tls12_crypto_info_aes_gcm_256); + tls12->aesgcm256.info.version = tls_version; + tls12->aesgcm256.info.cipher_type = cipher_type; + break; default: break; } @@ -78,26 +90,21 @@ static void memrnd(void *s, size_t n) *byte++ = rand(); } -FIXTURE(tls_basic) -{ - int fd, cfd; - bool notls; -}; - -FIXTURE_SETUP(tls_basic) +static void ulp_sock_pair(struct __test_metadata *_metadata, + int *fd, int *cfd, bool *notls) { struct sockaddr_in addr; socklen_t len; int sfd, ret; - self->notls = false; + *notls = false; len = sizeof(addr); addr.sin_family = AF_INET; addr.sin_addr.s_addr = htonl(INADDR_ANY); addr.sin_port = 0; - self->fd = socket(AF_INET, SOCK_STREAM, 0); + *fd = socket(AF_INET, SOCK_STREAM, 0); sfd = socket(AF_INET, SOCK_STREAM, 0); ret = bind(sfd, &addr, sizeof(addr)); @@ -108,26 +115,96 @@ FIXTURE_SETUP(tls_basic) ret = getsockname(sfd, &addr, &len); ASSERT_EQ(ret, 0); - ret = connect(self->fd, &addr, sizeof(addr)); + ret = connect(*fd, &addr, sizeof(addr)); ASSERT_EQ(ret, 0); - self->cfd = accept(sfd, &addr, &len); - ASSERT_GE(self->cfd, 0); + *cfd = accept(sfd, &addr, &len); + ASSERT_GE(*cfd, 0); close(sfd); - ret = setsockopt(self->fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")); + ret = setsockopt(*fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")); if (ret != 0) { ASSERT_EQ(errno, ENOENT); - self->notls = true; + *notls = true; printf("Failure setting TCP_ULP, testing without tls\n"); return; } - ret = setsockopt(self->cfd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")); + ret = setsockopt(*cfd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")); ASSERT_EQ(ret, 0); } +/* Produce a basic cmsg */ +static int tls_send_cmsg(int fd, unsigned char record_type, + void *data, size_t len, int flags) +{ + char cbuf[CMSG_SPACE(sizeof(char))]; + int cmsg_len = sizeof(char); + 
struct cmsghdr *cmsg; + struct msghdr msg; + struct iovec vec; + + vec.iov_base = data; + vec.iov_len = len; + memset(&msg, 0, sizeof(struct msghdr)); + msg.msg_iov = &vec; + msg.msg_iovlen = 1; + msg.msg_control = cbuf; + msg.msg_controllen = sizeof(cbuf); + cmsg = CMSG_FIRSTHDR(&msg); + cmsg->cmsg_level = SOL_TLS; + /* test sending non-record types. */ + cmsg->cmsg_type = TLS_SET_RECORD_TYPE; + cmsg->cmsg_len = CMSG_LEN(cmsg_len); + *CMSG_DATA(cmsg) = record_type; + msg.msg_controllen = cmsg->cmsg_len; + + return sendmsg(fd, &msg, flags); +} + +static int tls_recv_cmsg(struct __test_metadata *_metadata, + int fd, unsigned char record_type, + void *data, size_t len, int flags) +{ + char cbuf[CMSG_SPACE(sizeof(char))]; + struct cmsghdr *cmsg; + unsigned char ctype; + struct msghdr msg; + struct iovec vec; + int n; + + vec.iov_base = data; + vec.iov_len = len; + memset(&msg, 0, sizeof(struct msghdr)); + msg.msg_iov = &vec; + msg.msg_iovlen = 1; + msg.msg_control = cbuf; + msg.msg_controllen = sizeof(cbuf); + + n = recvmsg(fd, &msg, flags); + + cmsg = CMSG_FIRSTHDR(&msg); + EXPECT_NE(cmsg, NULL); + EXPECT_EQ(cmsg->cmsg_level, SOL_TLS); + EXPECT_EQ(cmsg->cmsg_type, TLS_GET_RECORD_TYPE); + ctype = *((unsigned char *)CMSG_DATA(cmsg)); + EXPECT_EQ(ctype, record_type); + + return n; +} + +FIXTURE(tls_basic) +{ + int fd, cfd; + bool notls; +}; + +FIXTURE_SETUP(tls_basic) +{ + ulp_sock_pair(_metadata, &self->fd, &self->cfd, &self->notls); +} + FIXTURE_TEARDOWN(tls_basic) { close(self->fd); @@ -196,63 +273,48 @@ FIXTURE_VARIANT_ADD(tls, 13_sm4_ccm) .cipher_type = TLS_CIPHER_SM4_CCM, }; +FIXTURE_VARIANT_ADD(tls, 12_aes_ccm) +{ + .tls_version = TLS_1_2_VERSION, + .cipher_type = TLS_CIPHER_AES_CCM_128, +}; + +FIXTURE_VARIANT_ADD(tls, 13_aes_ccm) +{ + .tls_version = TLS_1_3_VERSION, + .cipher_type = TLS_CIPHER_AES_CCM_128, +}; + +FIXTURE_VARIANT_ADD(tls, 12_aes_gcm_256) +{ + .tls_version = TLS_1_2_VERSION, + .cipher_type = TLS_CIPHER_AES_GCM_256, +}; + +FIXTURE_VARIANT_ADD(tls, 13_aes_gcm_256) +{ + .tls_version = TLS_1_3_VERSION, + .cipher_type = TLS_CIPHER_AES_GCM_256, +}; + FIXTURE_SETUP(tls) { struct tls_crypto_info_keys tls12; - struct sockaddr_in addr; - socklen_t len; - int sfd, ret; - - self->notls = false; - len = sizeof(addr); + int ret; tls_crypto_info_init(variant->tls_version, variant->cipher_type, &tls12); - addr.sin_family = AF_INET; - addr.sin_addr.s_addr = htonl(INADDR_ANY); - addr.sin_port = 0; - - self->fd = socket(AF_INET, SOCK_STREAM, 0); - sfd = socket(AF_INET, SOCK_STREAM, 0); + ulp_sock_pair(_metadata, &self->fd, &self->cfd, &self->notls); - ret = bind(sfd, &addr, sizeof(addr)); - ASSERT_EQ(ret, 0); - ret = listen(sfd, 10); - ASSERT_EQ(ret, 0); + if (self->notls) + return; - ret = getsockname(sfd, &addr, &len); + ret = setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, tls12.len); ASSERT_EQ(ret, 0); - ret = connect(self->fd, &addr, sizeof(addr)); + ret = setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12, tls12.len); ASSERT_EQ(ret, 0); - - ret = setsockopt(self->fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")); - if (ret != 0) { - self->notls = true; - printf("Failure setting TCP_ULP, testing without tls\n"); - } - - if (!self->notls) { - ret = setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, - tls12.len); - ASSERT_EQ(ret, 0); - } - - self->cfd = accept(sfd, &addr, &len); - ASSERT_GE(self->cfd, 0); - - if (!self->notls) { - ret = setsockopt(self->cfd, IPPROTO_TCP, TCP_ULP, "tls", - sizeof("tls")); - ASSERT_EQ(ret, 0); - - ret = setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12, - tls12.len); - 
ASSERT_EQ(ret, 0); - } - - close(sfd); } FIXTURE_TEARDOWN(tls) @@ -613,6 +675,95 @@ TEST_F(tls, splice_to_pipe) EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0); } +TEST_F(tls, splice_cmsg_to_pipe) +{ + char *test_str = "test_read"; + char record_type = 100; + int send_len = 10; + char buf[10]; + int p[2]; + + ASSERT_GE(pipe(p), 0); + EXPECT_EQ(tls_send_cmsg(self->fd, 100, test_str, send_len, 0), 10); + EXPECT_EQ(splice(self->cfd, NULL, p[1], NULL, send_len, 0), -1); + EXPECT_EQ(errno, EINVAL); + EXPECT_EQ(recv(self->cfd, buf, send_len, 0), -1); + EXPECT_EQ(errno, EIO); + EXPECT_EQ(tls_recv_cmsg(_metadata, self->cfd, record_type, + buf, sizeof(buf), MSG_WAITALL), + send_len); + EXPECT_EQ(memcmp(test_str, buf, send_len), 0); +} + +TEST_F(tls, splice_dec_cmsg_to_pipe) +{ + char *test_str = "test_read"; + char record_type = 100; + int send_len = 10; + char buf[10]; + int p[2]; + + ASSERT_GE(pipe(p), 0); + EXPECT_EQ(tls_send_cmsg(self->fd, 100, test_str, send_len, 0), 10); + EXPECT_EQ(recv(self->cfd, buf, send_len, 0), -1); + EXPECT_EQ(errno, EIO); + EXPECT_EQ(splice(self->cfd, NULL, p[1], NULL, send_len, 0), -1); + EXPECT_EQ(errno, EINVAL); + EXPECT_EQ(tls_recv_cmsg(_metadata, self->cfd, record_type, + buf, sizeof(buf), MSG_WAITALL), + send_len); + EXPECT_EQ(memcmp(test_str, buf, send_len), 0); +} + +TEST_F(tls, recv_and_splice) +{ + int send_len = TLS_PAYLOAD_MAX_LEN; + char mem_send[TLS_PAYLOAD_MAX_LEN]; + char mem_recv[TLS_PAYLOAD_MAX_LEN]; + int half = send_len / 2; + int p[2]; + + ASSERT_GE(pipe(p), 0); + EXPECT_EQ(send(self->fd, mem_send, send_len, 0), send_len); + /* Recv half of the record, splice the other half */ + EXPECT_EQ(recv(self->cfd, mem_recv, half, MSG_WAITALL), half); + EXPECT_EQ(splice(self->cfd, NULL, p[1], NULL, half, SPLICE_F_NONBLOCK), + half); + EXPECT_EQ(read(p[0], &mem_recv[half], half), half); + EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0); +} + +TEST_F(tls, peek_and_splice) +{ + int send_len = TLS_PAYLOAD_MAX_LEN; + char mem_send[TLS_PAYLOAD_MAX_LEN]; + char mem_recv[TLS_PAYLOAD_MAX_LEN]; + int chunk = TLS_PAYLOAD_MAX_LEN / 4; + int n, i, p[2]; + + memrnd(mem_send, sizeof(mem_send)); + + ASSERT_GE(pipe(p), 0); + for (i = 0; i < 4; i++) + EXPECT_EQ(send(self->fd, &mem_send[chunk * i], chunk, 0), + chunk); + + EXPECT_EQ(recv(self->cfd, mem_recv, chunk * 5 / 2, + MSG_WAITALL | MSG_PEEK), + chunk * 5 / 2); + EXPECT_EQ(memcmp(mem_send, mem_recv, chunk * 5 / 2), 0); + + n = 0; + while (n < send_len) { + i = splice(self->cfd, NULL, p[1], NULL, send_len - n, 0); + EXPECT_GT(i, 0); + n += i; + } + EXPECT_EQ(n, send_len); + EXPECT_EQ(read(p[0], mem_recv, send_len), send_len); + EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0); +} + TEST_F(tls, recvmsg_single) { char const *test_str = "test_recvmsg_single"; @@ -1193,60 +1344,30 @@ TEST_F(tls, mutliproc_sendpage_writers) TEST_F(tls, control_msg) { - if (self->notls) - return; - - char cbuf[CMSG_SPACE(sizeof(char))]; - char const *test_str = "test_read"; - int cmsg_len = sizeof(char); + char *test_str = "test_read"; char record_type = 100; - struct cmsghdr *cmsg; - struct msghdr msg; int send_len = 10; - struct iovec vec; char buf[10]; - vec.iov_base = (char *)test_str; - vec.iov_len = 10; - memset(&msg, 0, sizeof(struct msghdr)); - msg.msg_iov = &vec; - msg.msg_iovlen = 1; - msg.msg_control = cbuf; - msg.msg_controllen = sizeof(cbuf); - cmsg = CMSG_FIRSTHDR(&msg); - cmsg->cmsg_level = SOL_TLS; - /* test sending non-record types.
*/ - cmsg->cmsg_type = TLS_SET_RECORD_TYPE; - cmsg->cmsg_len = CMSG_LEN(cmsg_len); - *CMSG_DATA(cmsg) = record_type; - msg.msg_controllen = cmsg->cmsg_len; + if (self->notls) + SKIP(return, "no TLS support"); - EXPECT_EQ(sendmsg(self->fd, &msg, 0), send_len); + EXPECT_EQ(tls_send_cmsg(self->fd, record_type, test_str, send_len, 0), + send_len); /* Should fail because we didn't provide a control message */ EXPECT_EQ(recv(self->cfd, buf, send_len, 0), -1); - vec.iov_base = buf; - EXPECT_EQ(recvmsg(self->cfd, &msg, MSG_WAITALL | MSG_PEEK), send_len); - - cmsg = CMSG_FIRSTHDR(&msg); - EXPECT_NE(cmsg, NULL); - EXPECT_EQ(cmsg->cmsg_level, SOL_TLS); - EXPECT_EQ(cmsg->cmsg_type, TLS_GET_RECORD_TYPE); - record_type = *((unsigned char *)CMSG_DATA(cmsg)); - EXPECT_EQ(record_type, 100); + EXPECT_EQ(tls_recv_cmsg(_metadata, self->cfd, record_type, + buf, sizeof(buf), MSG_WAITALL | MSG_PEEK), + send_len); EXPECT_EQ(memcmp(buf, test_str, send_len), 0); /* Recv the message again without MSG_PEEK */ - record_type = 0; memset(buf, 0, sizeof(buf)); - EXPECT_EQ(recvmsg(self->cfd, &msg, MSG_WAITALL), send_len); - cmsg = CMSG_FIRSTHDR(&msg); - EXPECT_NE(cmsg, NULL); - EXPECT_EQ(cmsg->cmsg_level, SOL_TLS); - EXPECT_EQ(cmsg->cmsg_type, TLS_GET_RECORD_TYPE); - record_type = *((unsigned char *)CMSG_DATA(cmsg)); - EXPECT_EQ(record_type, 100); + EXPECT_EQ(tls_recv_cmsg(_metadata, self->cfd, record_type, + buf, sizeof(buf), MSG_WAITALL), + send_len); EXPECT_EQ(memcmp(buf, test_str, send_len), 0); } @@ -1301,6 +1422,160 @@ TEST_F(tls, shutdown_reuse) EXPECT_EQ(errno, EISCONN); } +FIXTURE(tls_err) +{ + int fd, cfd; + int fd2, cfd2; + bool notls; +}; + +FIXTURE_VARIANT(tls_err) +{ + uint16_t tls_version; +}; + +FIXTURE_VARIANT_ADD(tls_err, 12_aes_gcm) +{ + .tls_version = TLS_1_2_VERSION, +}; + +FIXTURE_VARIANT_ADD(tls_err, 13_aes_gcm) +{ + .tls_version = TLS_1_3_VERSION, +}; + +FIXTURE_SETUP(tls_err) +{ + struct tls_crypto_info_keys tls12; + int ret; + + tls_crypto_info_init(variant->tls_version, TLS_CIPHER_AES_GCM_128, + &tls12); + + ulp_sock_pair(_metadata, &self->fd, &self->cfd, &self->notls); + ulp_sock_pair(_metadata, &self->fd2, &self->cfd2, &self->notls); + if (self->notls) + return; + + ret = setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, tls12.len); + ASSERT_EQ(ret, 0); + + ret = setsockopt(self->cfd2, SOL_TLS, TLS_RX, &tls12, tls12.len); + ASSERT_EQ(ret, 0); +} + +FIXTURE_TEARDOWN(tls_err) +{ + close(self->fd); + close(self->cfd); + close(self->fd2); + close(self->cfd2); +} + +TEST_F(tls_err, bad_rec) +{ + char buf[64]; + + if (self->notls) + SKIP(return, "no TLS support"); + + memset(buf, 0x55, sizeof(buf)); + EXPECT_EQ(send(self->fd2, buf, sizeof(buf), 0), sizeof(buf)); + EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1); + EXPECT_EQ(errno, EMSGSIZE); + EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), MSG_DONTWAIT), -1); + EXPECT_EQ(errno, EAGAIN); +} + +TEST_F(tls_err, bad_auth) +{ + char buf[128]; + int n; + + if (self->notls) + SKIP(return, "no TLS support"); + + memrnd(buf, sizeof(buf) / 2); + EXPECT_EQ(send(self->fd, buf, sizeof(buf) / 2, 0), sizeof(buf) / 2); + n = recv(self->cfd, buf, sizeof(buf), 0); + EXPECT_GT(n, sizeof(buf) / 2); + + buf[n - 1]++; + + EXPECT_EQ(send(self->fd2, buf, n, 0), n); + EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1); + EXPECT_EQ(errno, EBADMSG); + EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1); + EXPECT_EQ(errno, EBADMSG); +} + +TEST_F(tls_err, bad_in_large_read) +{ + char txt[3][64]; + char cip[3][128]; + char buf[3 * 128]; + int i, n; + + if (self->notls) + 
SKIP(return, "no TLS support"); + + /* Put 3 records in the sockets */ + for (i = 0; i < 3; i++) { + memrnd(txt[i], sizeof(txt[i])); + EXPECT_EQ(send(self->fd, txt[i], sizeof(txt[i]), 0), + sizeof(txt[i])); + n = recv(self->cfd, cip[i], sizeof(cip[i]), 0); + EXPECT_GT(n, sizeof(txt[i])); + /* Break the third message */ + if (i == 2) + cip[2][n - 1]++; + EXPECT_EQ(send(self->fd2, cip[i], n, 0), n); + } + + /* We should be able to receive the first two messages */ + EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), sizeof(txt[0]) * 2); + EXPECT_EQ(memcmp(buf, txt[0], sizeof(txt[0])), 0); + EXPECT_EQ(memcmp(buf + sizeof(txt[0]), txt[1], sizeof(txt[1])), 0); + /* Third message is bad */ + EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1); + EXPECT_EQ(errno, EBADMSG); + EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1); + EXPECT_EQ(errno, EBADMSG); +} + +TEST_F(tls_err, bad_cmsg) +{ + char *test_str = "test_read"; + int send_len = 10; + char cip[128]; + char buf[128]; + char txt[64]; + int n; + + if (self->notls) + SKIP(return, "no TLS support"); + + /* Queue up one data record */ + memrnd(txt, sizeof(txt)); + EXPECT_EQ(send(self->fd, txt, sizeof(txt), 0), sizeof(txt)); + n = recv(self->cfd, cip, sizeof(cip), 0); + EXPECT_GT(n, sizeof(txt)); + EXPECT_EQ(send(self->fd2, cip, n, 0), n); + + EXPECT_EQ(tls_send_cmsg(self->fd, 100, test_str, send_len, 0), 10); + n = recv(self->cfd, cip, sizeof(cip), 0); + cip[n - 1]++; /* Break it */ + EXPECT_GT(n, send_len); + EXPECT_EQ(send(self->fd2, cip, n, 0), n); + + EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), sizeof(txt)); + EXPECT_EQ(memcmp(buf, txt, sizeof(txt)), 0); + EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1); + EXPECT_EQ(errno, EBADMSG); + EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1); + EXPECT_EQ(errno, EBADMSG); +} + TEST(non_established) { struct tls12_crypto_info_aes_gcm_256 tls12; struct sockaddr_in addr;
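The tls_err cases above all rely on the same trick, worth spelling out: the first socket pair (fd/cfd) has TLS_TX enabled only on fd, so cfd hands back raw ciphertext records; the second pair (fd2/cfd2) has TLS_RX only on cfd2, so a record replayed through fd2 gets decrypted and authenticated on cfd2. A sketch of the injection step distilled from bad_auth, in the context of tls.c (the helper name is illustrative and not part of the patch; memrnd() is the test file's own):

/* Encrypt one record on fd, corrupt its trailing byte (part of the
 * authentication tag), and feed it to the TLS_RX pair via fd2. The
 * next recv() on cfd2 then fails with EBADMSG.
 */
static void inject_bad_record(int fd, int cfd, int fd2)
{
	char txt[64], cip[128];
	ssize_t n;

	memrnd(txt, sizeof(txt));
	if (send(fd, txt, sizeof(txt), 0) != sizeof(txt))
		return;
	n = recv(cfd, cip, sizeof(cip), 0);	/* raw ciphertext record */
	if (n <= 0)
		return;
	cip[n - 1]++;				/* break the auth tag */
	send(fd2, cip, n, 0);			/* replay into TLS_RX side */
}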
@@ -1355,64 +1630,82 @@ TEST(non_established) { TEST(keysizes) { struct tls12_crypto_info_aes_gcm_256 tls12; - struct sockaddr_in addr; - int sfd, ret, fd, cfd; - socklen_t len; + int ret, fd, cfd; bool notls; - notls = false; - len = sizeof(addr); - memset(&tls12, 0, sizeof(tls12)); tls12.info.version = TLS_1_2_VERSION; tls12.info.cipher_type = TLS_CIPHER_AES_GCM_256; - addr.sin_family = AF_INET; - addr.sin_addr.s_addr = htonl(INADDR_ANY); - addr.sin_port = 0; + ulp_sock_pair(_metadata, &fd, &cfd, &notls); - fd = socket(AF_INET, SOCK_STREAM, 0); - sfd = socket(AF_INET, SOCK_STREAM, 0); + if (!notls) { + ret = setsockopt(fd, SOL_TLS, TLS_TX, &tls12, + sizeof(tls12)); + EXPECT_EQ(ret, 0); + + ret = setsockopt(cfd, SOL_TLS, TLS_RX, &tls12, + sizeof(tls12)); + EXPECT_EQ(ret, 0); + } + + close(fd); + close(cfd); +} + +TEST(tls_v6ops) { + struct tls_crypto_info_keys tls12; + struct sockaddr_in6 addr, addr2; + int sfd, ret, fd; + socklen_t len, len2; + + tls_crypto_info_init(TLS_1_2_VERSION, TLS_CIPHER_AES_GCM_128, &tls12); + + addr.sin6_family = AF_INET6; + addr.sin6_addr = in6addr_any; + addr.sin6_port = 0; + + fd = socket(AF_INET6, SOCK_STREAM, 0); + sfd = socket(AF_INET6, SOCK_STREAM, 0); ret = bind(sfd, &addr, sizeof(addr)); ASSERT_EQ(ret, 0); ret = listen(sfd, 10); ASSERT_EQ(ret, 0); + len = sizeof(addr); ret = getsockname(sfd, &addr, &len); ASSERT_EQ(ret, 0); ret = connect(fd, &addr, sizeof(addr)); ASSERT_EQ(ret, 0); + len = sizeof(addr); + ret = getsockname(fd, &addr, &len); + ASSERT_EQ(ret, 0); + ret = setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")); - if (ret != 0) { - notls = true; - printf("Failure setting TCP_ULP, testing without tls\n"); + if (ret) { + ASSERT_EQ(errno, ENOENT); + SKIP(return, "no TLS support"); } + ASSERT_EQ(ret, 0); - if (!notls) { - ret = setsockopt(fd, SOL_TLS, TLS_TX, &tls12, - sizeof(tls12)); - EXPECT_EQ(ret, 0); - } + ret = setsockopt(fd, SOL_TLS, TLS_TX, &tls12, tls12.len); + ASSERT_EQ(ret, 0); - cfd = accept(sfd, &addr, &len); - ASSERT_GE(cfd, 0); + ret = setsockopt(fd, SOL_TLS, TLS_RX, &tls12, tls12.len); + ASSERT_EQ(ret, 0); - if (!notls) { - ret = setsockopt(cfd, IPPROTO_TCP, TCP_ULP, "tls", - sizeof("tls")); - EXPECT_EQ(ret, 0); + len2 = sizeof(addr2); + ret = getsockname(fd, &addr2, &len2); + ASSERT_EQ(ret, 0); - ret = setsockopt(cfd, SOL_TLS, TLS_RX, &tls12, - sizeof(tls12)); - EXPECT_EQ(ret, 0); - } + EXPECT_EQ(len2, len); + EXPECT_EQ(memcmp(&addr, &addr2, len), 0); - close(sfd); close(fd); - close(cfd); + close(sfd); } TEST_HARNESS_MAIN diff --git a/tools/testing/selftests/net/toeplitz.c b/tools/testing/selftests/net/toeplitz.c index 710ac956bdb3..c5489341cfb8 100644 --- a/tools/testing/selftests/net/toeplitz.c +++ b/tools/testing/selftests/net/toeplitz.c @@ -498,7 +498,7 @@ static void parse_opts(int argc, char **argv) bool have_toeplitz = false; int index, c; - while ((c = getopt_long(argc, argv, "46C:d:i:k:r:stT:u:v", long_options, &index)) != -1) { + while ((c = getopt_long(argc, argv, "46C:d:i:k:r:stT:uv", long_options, &index)) != -1) { switch (c) { case '4': cfg_family = AF_INET; diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile index 8748199ac109..ffca314897c4 100644 --- a/tools/testing/selftests/netfilter/Makefile +++ b/tools/testing/selftests/netfilter/Makefile @@ -5,7 +5,8 @@ TEST_PROGS := nft_trans_stress.sh nft_fib.sh nft_nat.sh bridge_brouter.sh \ conntrack_icmp_related.sh nft_flowtable.sh ipvs.sh \ nft_concat_range.sh nft_conntrack_helper.sh \ nft_queue.sh nft_meta.sh nf_nat_edemux.sh \ - ipip-conntrack-mtu.sh conntrack_tcp_unreplied.sh + ipip-conntrack-mtu.sh conntrack_tcp_unreplied.sh \ + conntrack_vrf.sh LDLIBS = -lmnl TEST_GEN_FILES = nf-queue diff --git a/tools/testing/selftests/netfilter/conntrack_vrf.sh b/tools/testing/selftests/netfilter/conntrack_vrf.sh new file mode 100755 index 000000000000..8b5ea9234588 --- /dev/null +++ b/tools/testing/selftests/netfilter/conntrack_vrf.sh @@ -0,0 +1,241 @@ +#!/bin/sh + +# This script demonstrates interaction of conntrack and vrf. +# The vrf driver calls the netfilter hooks again, with oif/iif +# pointing at the VRF device. +# +# For ingress, this means first iteration has iifname of lower/real +# device. In this script, that's veth0. +# Second iteration is iifname set to vrf device, tvrf in this script. +# +# For egress, this is reversed: first iteration has the vrf device, +# second iteration is done with the lower/real/veth0 device. +# +# test_ct_zone_in demonstrates unexpected change of nftables +# behavior caused by commit 09e856d54bda5f28 "vrf: Reset skb conntrack +# connection on VRF rcv" +# +# It was possible to assign conntrack zone to a packet (or mark it for +# `notracking`) in the prerouting chain before conntrack, based on real iif. +# +# After the change, the zone assignment is lost and the zone is assigned based +# on the VRF master interface (in case such a rule exists). +# Thus it is impossible to distinguish packets based on the original +# interface. +# +# test_masquerade_vrf and test_masquerade_veth demonstrate the problem +# that was supposed to be fixed by the commit mentioned above to make sure +# that any fix to test case 1 won't break masquerade again.
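The pre-regression behavior the header comment describes can be written as a single rule: a raw-priority prerouting rule keyed on the lower device pins the conntrack zone during the first hook traversal, before the VRF driver re-runs the hooks. A minimal nft sketch, illustrative only and not part of the test's ruleset (veth0 matches the script's lower device):

table ip raw {
	chain prerouting {
		type filter hook prerouting priority raw;
		# first traversal: iif is the real ingress device, not the VRF
		iif veth0 ct zone set 1
	}
}

test_ct_zone_in below extends this idea with a second rule matching the tvrf iteration, so the test can tell which traversal actually assigned the zone.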
+# +# test_masquerade_vrf and test_masquerade_veth demonstrate the problem +# that the commit mentioned above was supposed to fix, and make sure +# that any fix for test case 1 won't break masquerade again. + +ksft_skip=4 + +IP0=172.30.30.1 +IP1=172.30.30.2 +PFXL=30 +ret=0 + +sfx=$(mktemp -u "XXXXXXXX") +ns0="ns0-$sfx" +ns1="ns1-$sfx" + +cleanup() +{ + ip netns pids $ns0 | xargs kill 2>/dev/null + ip netns pids $ns1 | xargs kill 2>/dev/null + + ip netns del $ns0 $ns1 +} + +nft --version > /dev/null 2>&1 +if [ $? -ne 0 ];then + echo "SKIP: Could not run test without nft tool" + exit $ksft_skip +fi + +ip -Version > /dev/null 2>&1 +if [ $? -ne 0 ];then + echo "SKIP: Could not run test without ip tool" + exit $ksft_skip +fi + +ip netns add "$ns0" +if [ $? -ne 0 ];then + echo "SKIP: Could not create net namespace $ns0" + exit $ksft_skip +fi +ip netns add "$ns1" + +trap cleanup EXIT + +ip netns exec $ns0 sysctl -q -w net.ipv4.conf.default.rp_filter=0 +ip netns exec $ns0 sysctl -q -w net.ipv4.conf.all.rp_filter=0 + +ip link add veth0 netns "$ns0" type veth peer name veth0 netns "$ns1" > /dev/null 2>&1 +if [ $? -ne 0 ];then + echo "SKIP: Could not add veth device" + exit $ksft_skip +fi + +ip -net $ns0 li add tvrf type vrf table 9876 +if [ $? -ne 0 ];then + echo "SKIP: Could not add vrf device" + exit $ksft_skip +fi + +ip -net $ns0 li set lo up + +ip -net $ns0 li set veth0 master tvrf +ip -net $ns0 li set tvrf up +ip -net $ns0 li set veth0 up +ip -net $ns1 li set veth0 up + +ip -net $ns0 addr add $IP0/$PFXL dev veth0 +ip -net $ns1 addr add $IP1/$PFXL dev veth0 + +ip netns exec $ns1 iperf3 -s > /dev/null 2>&1& +if [ $? -ne 0 ];then + echo "SKIP: Could not start iperf3" + exit $ksft_skip +fi + +# test vrf ingress handling. +# The incoming connection should be placed in conntrack zone 1, +# as decided by the first iteration of the ruleset. +test_ct_zone_in() +{ +ip netns exec $ns0 nft -f - <<EOF +table testct { + chain rawpre { + type filter hook prerouting priority raw; + + iif { veth0, tvrf } counter meta nftrace set 1 + iif veth0 counter ct zone set 1 counter return + iif tvrf counter ct zone set 2 counter return + ip protocol icmp counter + notrack counter + } + + chain rawout { + type filter hook output priority raw; + + oif veth0 counter ct zone set 1 counter return + oif tvrf counter ct zone set 2 counter return + notrack counter + } +} +EOF + ip netns exec $ns1 ping -W 1 -c 1 -I veth0 $IP0 > /dev/null + + # should be in zone 1, not zone 2 + count=$(ip netns exec $ns0 conntrack -L -s $IP1 -d $IP0 -p icmp --zone 1 2>/dev/null | wc -l) + if [ $count -eq 1 ]; then + echo "PASS: entry found in conntrack zone 1" + else + echo "FAIL: entry not found in conntrack zone 1" + count=$(ip netns exec $ns0 conntrack -L -s $IP1 -d $IP0 -p icmp --zone 2 2> /dev/null | wc -l) + if [ $count -eq 1 ]; then + echo "FAIL: entry found in zone 2 instead" + else + echo "FAIL: entry not in zone 1 or 2, dumping table" + ip netns exec $ns0 conntrack -L + ip netns exec $ns0 nft list ruleset + fi + fi +} + +# add masq rule that gets evaluated with outif set to vrf device. +# This tests the first iteration of the packet through conntrack, +# oifname is the vrf device.
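+# On egress the packet traverses postrouting twice: first with oifname tvrf +# (where the masquerade rule may rewrite the source port), then with oifname +# veth0; the 'counter packets 2' check below verifies both passes happened.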
+test_masquerade_vrf() +{ + local qdisc=$1 + + if [ "$qdisc" != "default" ]; then + tc -net $ns0 qdisc add dev tvrf root $qdisc + fi + + ip netns exec $ns0 conntrack -F 2>/dev/null + +ip netns exec $ns0 nft -f - <<EOF +flush ruleset +table ip nat { + chain rawout { + type filter hook output priority raw; + + oif tvrf ct state untracked counter + } + chain postrouting2 { + type filter hook postrouting priority mangle; + + oif tvrf ct state untracked counter + } + chain postrouting { + type nat hook postrouting priority 0; + # NB: masquerade should always be combined with 'oif(name) bla', + # lack of this is intentional here, we want to exercise double-snat. + ip saddr 172.30.30.0/30 counter masquerade random + } +} +EOF + ip netns exec $ns0 ip vrf exec tvrf iperf3 -t 1 -c $IP1 >/dev/null + if [ $? -ne 0 ]; then + echo "FAIL: iperf3 connect failure with masquerade + sport rewrite on vrf device" + ret=1 + return + fi + + # must also check that nat table was evaluated on second (lower device) iteration. + ip netns exec $ns0 nft list table ip nat |grep -q 'counter packets 2' && + ip netns exec $ns0 nft list table ip nat |grep -q 'untracked counter packets [1-9]' + if [ $? -eq 0 ]; then + echo "PASS: iperf3 connect with masquerade + sport rewrite on vrf device ($qdisc qdisc)" + else + echo "FAIL: vrf rules have unexpected counter value" + ret=1 + fi + + if [ "$qdisc" != "default" ]; then + tc -net $ns0 qdisc del dev tvrf root + fi +} + +# add masq rule that gets evaluated w. outif set to veth device. +# This tests the 2nd iteration of the packet through conntrack, +# oifname is the lower device (veth0 in this case). +test_masquerade_veth() +{ + ip netns exec $ns0 conntrack -F 2>/dev/null +ip netns exec $ns0 nft -f - <<EOF +flush ruleset +table ip nat { + chain postrouting { + type nat hook postrouting priority 0; + meta oif veth0 ip saddr 172.30.30.0/30 counter masquerade random + } +} +EOF + ip netns exec $ns0 ip vrf exec tvrf iperf3 -t 1 -c $IP1 > /dev/null + if [ $? -ne 0 ]; then + echo "FAIL: iperf3 connect failure with masquerade + sport rewrite on veth device" + ret=1 + return + fi + + # must also check that nat table was evaluated on second (lower device) iteration. + ip netns exec $ns0 nft list table ip nat |grep -q 'counter packets 2' + if [ $? -eq 0 ]; then + echo "PASS: iperf3 connect with masquerade + sport rewrite on veth device" + else + echo "FAIL: vrf masq rule has unexpected counter value" + ret=1 + fi +} + +test_ct_zone_in +test_masquerade_vrf "default" +test_masquerade_vrf "pfifo" +test_masquerade_veth + +exit $ret diff --git a/tools/testing/selftests/netfilter/nft_concat_range.sh b/tools/testing/selftests/netfilter/nft_concat_range.sh index 5a4938d6dcf2..ed61f6cab60f 100755 --- a/tools/testing/selftests/netfilter/nft_concat_range.sh +++ b/tools/testing/selftests/netfilter/nft_concat_range.sh @@ -23,8 +23,8 @@ TESTS="reported_issues correctness concurrency timeout" # Set types, defined by TYPE_ variables below TYPES="net_port port_net net6_port port_proto net6_port_mac net6_port_mac_proto - net_port_net net_mac net_mac_icmp net6_mac_icmp net6_port_net6_port - net_port_mac_proto_net" + net_port_net net_mac mac_net net_mac_icmp net6_mac_icmp + net6_port_net6_port net_port_mac_proto_net" # Reported bugs, also described by TYPE_ variables below BUGS="flush_remove_add" @@ -277,6 +277,23 @@ perf_entries 1000 perf_proto ipv4 " +TYPE_mac_net=" +display mac,net +type_spec ether_addr . ipv4_addr +chain_spec ether saddr . 
ip saddr +dst +src mac addr4 +start 1 +count 5 +src_delta 2000 +tools sendip nc bash +proto udp + +race_repeat 0 + +perf_duration 0 +" + TYPE_net_mac_icmp=" display net,mac - ICMP type_spec ipv4_addr . ether_addr @@ -984,7 +1001,8 @@ format() { fi done for f in ${src}; do - __expr="${__expr} . " + [ "${__expr}" != "{ " ] && __expr="${__expr} . " + __start="$(eval format_"${f}" "${srcstart}")" __end="$(eval format_"${f}" "${srcend}")" diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh index da1c1e4b6c86..d88867d2fed7 100755 --- a/tools/testing/selftests/netfilter/nft_nat.sh +++ b/tools/testing/selftests/netfilter/nft_nat.sh @@ -759,19 +759,21 @@ test_port_shadow() local result="" local logmsg="" - echo ROUTER | ip netns exec "$ns0" nc -w 5 -u -l -p 1405 >/dev/null 2>&1 & - nc_r=$! + # make shadow entry, from client (ns2), going to (ns1), port 41404, sport 1405. + echo "fake-entry" | ip netns exec "$ns2" timeout 1 socat -u STDIN UDP:"$daddrc":41404,sourceport=1405 - echo CLIENT | ip netns exec "$ns2" nc -w 5 -u -l -p 1405 >/dev/null 2>&1 & - nc_c=$! + echo ROUTER | ip netns exec "$ns0" timeout 5 socat -u STDIN UDP4-LISTEN:1405 & + sc_r=$! - # make shadow entry, from client (ns2), going to (ns1), port 41404, sport 1405. - echo "fake-entry" | ip netns exec "$ns2" nc -w 1 -p 1405 -u "$daddrc" 41404 > /dev/null + echo CLIENT | ip netns exec "$ns2" timeout 5 socat -u STDIN UDP4-LISTEN:1405,reuseport & + sc_c=$! + + sleep 0.3 # ns1 tries to connect to ns0:1405. With default settings this should connect # to client, it matches the conntrack entry created above. - result=$(echo "" | ip netns exec "$ns1" nc -w 1 -p 41404 -u "$daddrs" 1405) + result=$(echo "data" | ip netns exec "$ns1" timeout 1 socat - UDP:"$daddrs":1405,sourceport=41404) if [ "$result" = "$expect" ] ;then echo "PASS: portshadow test $test: got reply from ${expect}${logmsg}" @@ -780,7 +782,7 @@ test_port_shadow() ret=1 fi - kill $nc_r $nc_c 2>/dev/null + kill $sc_r $sc_c 2>/dev/null # flush udp entries for next test round, if any ip netns exec "$ns0" conntrack -F >/dev/null 2>&1 @@ -816,11 +818,10 @@ table $family raw { chain prerouting { type filter hook prerouting priority -300; policy accept; meta iif veth0 udp dport 1405 notrack - udp dport 1405 notrack } chain output { type filter hook output priority -300; policy accept; - udp sport 1405 notrack + meta oif veth0 udp sport 1405 notrack } } EOF @@ -851,6 +852,18 @@ test_port_shadowing() { local family="ip" + conntrack -h >/dev/null 2>&1 + if [ $? -ne 0 ];then + echo "SKIP: Could not run nat port shadowing test without conntrack tool" + return + fi + + socat -h > /dev/null 2>&1 + if [ $? 
-ne 0 ];then + echo "SKIP: Could not run nat port shadowing test without socat tool" + return + fi + ip netns exec "$ns0" sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null ip netns exec "$ns0" sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null diff --git a/tools/testing/selftests/netfilter/nft_queue.sh b/tools/testing/selftests/netfilter/nft_queue.sh index 3d202b90b33d..7d27f1f3bc01 100755 --- a/tools/testing/selftests/netfilter/nft_queue.sh +++ b/tools/testing/selftests/netfilter/nft_queue.sh @@ -16,6 +16,10 @@ timeout=4 cleanup() { + ip netns pids ${ns1} | xargs kill 2>/dev/null + ip netns pids ${ns2} | xargs kill 2>/dev/null + ip netns pids ${nsrouter} | xargs kill 2>/dev/null + ip netns del ${ns1} ip netns del ${ns2} ip netns del ${nsrouter} @@ -332,6 +336,55 @@ EOF echo "PASS: tcp via loopback and re-queueing" } +test_icmp_vrf() { + ip -net $ns1 link add tvrf type vrf table 9876 + if [ $? -ne 0 ];then + echo "SKIP: Could not add vrf device" + return + fi + + ip -net $ns1 li set eth0 master tvrf + ip -net $ns1 li set tvrf up + + ip -net $ns1 route add 10.0.2.0/24 via 10.0.1.1 dev eth0 table 9876 +ip netns exec ${ns1} nft -f /dev/stdin <<EOF +flush ruleset +table inet filter { + chain output { + type filter hook output priority 0; policy accept; + meta oifname "tvrf" icmp type echo-request counter queue num 1 + meta oifname "eth0" icmp type echo-request counter queue num 1 + } + chain post { + type filter hook postrouting priority 0; policy accept; + meta oifname "tvrf" icmp type echo-request counter queue num 1 + meta oifname "eth0" icmp type echo-request counter queue num 1 + } +} +EOF + ip netns exec ${ns1} ./nf-queue -q 1 -t $timeout & + local nfqpid=$! + + sleep 1 + ip netns exec ${ns1} ip vrf exec tvrf ping -c 1 10.0.2.99 > /dev/null + + for n in output post; do + for d in tvrf eth0; do + ip netns exec ${ns1} nft list chain inet filter $n | grep -q "oifname \"$d\" icmp type echo-request counter packets 1" + if [ $? -ne 0 ] ; then + echo "FAIL: chain $n: icmp packet counter mismatch for device $d" 1>&2 + ip netns exec ${ns1} nft list ruleset + ret=1 + return + fi + done + done + + wait $nfqpid + [ $? -eq 0 ] && echo "PASS: icmp+nfqueue via vrf" + wait 2>/dev/null +} + ip netns exec ${nsrouter} sysctl net.ipv6.conf.all.forwarding=1 > /dev/null ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null @@ -372,5 +425,6 @@ test_queue 20 test_tcp_forward test_tcp_localhost test_tcp_localhost_requeue +test_icmp_vrf exit $ret diff --git a/tools/testing/selftests/netfilter/nft_zones_many.sh b/tools/testing/selftests/netfilter/nft_zones_many.sh index ac646376eb01..04633119b29a 100755 --- a/tools/testing/selftests/netfilter/nft_zones_many.sh +++ b/tools/testing/selftests/netfilter/nft_zones_many.sh @@ -18,11 +18,17 @@ cleanup() ip netns del $ns } -ip netns add $ns -if [ $? -ne 0 ];then - echo "SKIP: Could not create net namespace $gw" - exit $ksft_skip -fi +checktool (){ + if ! 
$1 > /dev/null 2>&1; then + echo "SKIP: Could not $2" + exit $ksft_skip + fi +} + +checktool "nft --version" "run test without nft tool" +checktool "ip -Version" "run test without ip tool" +checktool "socat -V" "run test without socat tool" +checktool "ip netns add $ns" "create net namespace" trap cleanup EXIT @@ -71,7 +77,8 @@ EOF local start=$(date +%s%3N) i=$((i + 10000)) j=$((j + 1)) - dd if=/dev/zero of=/dev/stdout bs=8k count=10000 2>/dev/null | ip netns exec "$ns" nc -w 1 -q 1 -u -p 12345 127.0.0.1 12345 > /dev/null + # nft rule in output places each packet in a different zone. + dd if=/dev/zero of=/dev/stdout bs=8k count=10000 2>/dev/null | ip netns exec "$ns" socat STDIN UDP:127.0.0.1:12345,sourceport=12345 if [ $? -ne 0 ] ;then ret=1 break diff --git a/tools/testing/selftests/tc-testing/config b/tools/testing/selftests/tc-testing/config index b71828df5a6d..a3239d5e40c7 100644 --- a/tools/testing/selftests/tc-testing/config +++ b/tools/testing/selftests/tc-testing/config @@ -60,6 +60,8 @@ CONFIG_NET_IFE_SKBTCINDEX=m CONFIG_NET_SCH_FIFO=y CONFIG_NET_SCH_ETS=m CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_FQ_PIE=m +CONFIG_NETDEVSIM=m # ## Network testing diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json b/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json index 503982b8f295..91832400ddbd 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json @@ -68,7 +68,7 @@ "cmdUnderTest": "$TC action add action bpf object-file $EBPFDIR/action.o section action-ok index 667", "expExitCode": "0", "verifyCmd": "$TC action get action bpf index 667", - "matchPattern": "action order [0-9]*: bpf action.o:\\[action-ok\\] id [0-9]* tag [0-9a-f]{16}( jited)? default-action pipe.*index 667 ref", + "matchPattern": "action order [0-9]*: bpf action.o:\\[action-ok\\] id [0-9].* tag [0-9a-f]{16}( jited)? 
default-action pipe.*index 667 ref", "matchCount": "1", "teardown": [ "$TC action flush action bpf" diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/police.json b/tools/testing/selftests/tc-testing/tc-tests/actions/police.json index 8e45792703ed..b7205a069534 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/police.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/police.json @@ -812,5 +812,29 @@ "teardown": [ "$TC actions flush action police" ] + }, + { + "id": "7d64", + "name": "Add police action with skip_hw option", + "category": [ + "actions", + "police" + ], + "setup": [ + [ + "$TC actions flush action police", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action police rate 1kbit burst 10k index 100 skip_hw", + "expExitCode": "0", + "verifyCmd": "$TC actions ls action police | grep skip_hw", + "matchPattern": "skip_hw", + "matchCount": "1", + "teardown": [ + "$TC actions flush action police" + ] } ] diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/matchall.json b/tools/testing/selftests/tc-testing/tc-tests/filters/matchall.json index 51799874a972..2df68017dfb8 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/filters/matchall.json +++ b/tools/testing/selftests/tc-testing/tc-tests/filters/matchall.json @@ -387,5 +387,77 @@ "$TC qdisc del dev $DUMMY ingress", "$IP link del dev $DUMMY type dummy" ] + }, + { + "id": "3329", + "name": "Validate flags of the matchall filter with skip_sw and police action with skip_hw", + "category": [ + "filter", + "matchall" + ], + "setup": [ + "$IP link add dev $DUMMY type dummy || /bin/true", + "$TC qdisc add dev $DUMMY ingress", + "$TC actions flush action police", + "$TC actions add action police rate 1mbit burst 100k index 199 skip_hw" + ], + "cmdUnderTest": "$TC filter add dev $DUMMY parent ffff: handle 0x1 prio 1 protocol ipv4 matchall skip_sw action police index 199", + "expExitCode": "2", + "verifyCmd": "$TC filter get dev $DUMMY parent ffff: handle 1 prio 1 protocol ipv4 matchall", + "matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DUMMY ingress", + "$IP link del dev $DUMMY type dummy", + "$TC actions del action police index 199" + ] + }, + { + "id": "0eeb", + "name": "Validate flags of the matchall filter with skip_hw and police action", + "category": [ + "filter", + "matchall" + ], + "setup": [ + "$IP link add dev $DUMMY type dummy || /bin/true", + "$TC qdisc add dev $DUMMY ingress", + "$TC actions flush action police", + "$TC actions add action police rate 1mbit burst 100k index 199" + ], + "cmdUnderTest": "$TC filter add dev $DUMMY parent ffff: handle 0x1 prio 1 protocol ipv4 matchall skip_hw action police index 199", + "expExitCode": "2", + "verifyCmd": "$TC filter get dev $DUMMY parent ffff: handle 1 prio 1 protocol ipv4 matchall", + "matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DUMMY ingress", + "$IP link del dev $DUMMY type dummy", + "$TC actions del action police index 199" + ] + }, + { + "id": "eee4", + "name": "Validate flags of the matchall filter with skip_sw and police action", + "category": [ + "filter", + "matchall" + ], + "setup": [ + "$IP link add dev $DUMMY type dummy || /bin/true", + "$TC qdisc add dev $DUMMY ingress", + "$TC actions flush action police", + "$TC actions add action police rate 1mbit burst 100k index 199" + ], + "cmdUnderTest": "$TC filter add dev 
$DUMMY parent ffff: handle 0x1 prio 1 protocol ipv4 matchall skip_sw action police index 199", + "expExitCode": "2", + "verifyCmd": "$TC filter get dev $DUMMY parent ffff: handle 1 prio 1 protocol ipv4 matchall", + "matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DUMMY ingress", + "$IP link del dev $DUMMY type dummy", + "$TC actions del action police index 199" + ] } ] diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/mq.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/mq.json index 88a20c781e49..c6046096d9db 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/mq.json +++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/mq.json @@ -15,7 +15,7 @@ "cmdUnderTest": "$TC qdisc add dev $ETH root handle 1: mq", "expExitCode": "0", "verifyCmd": "$TC qdisc show dev $ETH", - "matchPattern": "qdisc pfifo_fast 0: parent 1:[1-4] bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1", + "matchPattern": "qdisc [a-zA-Z0-9_]+ 0: parent 1:[1-4]", "matchCount": "4", "teardown": [ "echo \"1\" > /sys/bus/netdevsim/del_device" @@ -37,7 +37,7 @@ "cmdUnderTest": "$TC qdisc add dev $ETH root handle 1: mq", "expExitCode": "0", "verifyCmd": "$TC qdisc show dev $ETH", - "matchPattern": "qdisc pfifo_fast 0: parent 1:[1-9,a-f][0-9,a-f]{0,2} bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1", + "matchPattern": "qdisc [a-zA-Z0-9_]+ 0: parent 1:[1-9,a-f][0-9,a-f]{0,2}", "matchCount": "256", "teardown": [ "echo \"1\" > /sys/bus/netdevsim/del_device" @@ -60,7 +60,7 @@ "cmdUnderTest": "$TC qdisc add dev $ETH root handle 1: mq", "expExitCode": "2", "verifyCmd": "$TC qdisc show dev $ETH", - "matchPattern": "qdisc pfifo_fast 0: parent 1:[1-4] bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1", + "matchPattern": "qdisc [a-zA-Z0-9_]+ 0: parent 1:[1-4]", "matchCount": "4", "teardown": [ "echo \"1\" > /sys/bus/netdevsim/del_device" @@ -82,7 +82,7 @@ "cmdUnderTest": "$TC qdisc del dev $ETH root handle 1: mq", "expExitCode": "2", "verifyCmd": "$TC qdisc show dev $ETH", - "matchPattern": "qdisc pfifo_fast 0: parent 1:[1-4] bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1", + "matchPattern": "qdisc [a-zA-Z0-9_]+ 0: parent 1:[1-4]", "matchCount": "0", "teardown": [ "echo \"1\" > /sys/bus/netdevsim/del_device" @@ -106,7 +106,7 @@ "cmdUnderTest": "$TC qdisc del dev $ETH root handle 1: mq", "expExitCode": "2", "verifyCmd": "$TC qdisc show dev $ETH", - "matchPattern": "qdisc pfifo_fast 0: parent 1:[1-4] bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1", + "matchPattern": "qdisc [a-zA-Z0-9_]+ 0: parent 1:[1-4]", "matchCount": "0", "teardown": [ "echo \"1\" > /sys/bus/netdevsim/del_device" @@ -128,7 +128,7 @@ "cmdUnderTest": "$TC qdisc add dev $ETH root handle 1: mq", "expExitCode": "2", "verifyCmd": "$TC qdisc show dev $ETH", - "matchPattern": "qdisc pfifo_fast 0: parent 1:[1-4] bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1", + "matchPattern": "qdisc [a-zA-Z0-9_]+ 0: parent 1:[1-4]", "matchCount": "0", "teardown": [ "echo \"1\" > /sys/bus/netdevsim/del_device" diff --git a/tools/testing/selftests/tc-testing/tdc.py b/tools/testing/selftests/tc-testing/tdc.py index a3e43189d940..ee22e3447ec7 100755 --- a/tools/testing/selftests/tc-testing/tdc.py +++ b/tools/testing/selftests/tc-testing/tdc.py @@ -716,6 +716,7 @@ def set_operation_mode(pm, parser, args, remaining): list_test_cases(alltests) exit(0) + exit_code = 0 # KSFT_PASS if len(alltests): req_plugins = pm.get_required_plugins(alltests) try: @@ -724,6 +725,8 @@ def 
set_operation_mode(pm, parser, args, remaining): print('The following plugins were not found:') print('{}'.format(pde.missing_pg)) catresults = test_runner(pm, args, alltests) + if catresults.count_failures() != 0: + exit_code = 1 # KSFT_FAIL if args.format == 'none': print('Test results output suppression requested\n') else: @@ -748,6 +751,8 @@ def set_operation_mode(pm, parser, args, remaining): gid=int(os.getenv('SUDO_GID'))) else: print('No tests found\n') + exit_code = 4 # KSFT_SKIP + exit(exit_code) def main(): """ @@ -767,8 +772,5 @@ def main(): set_operation_mode(pm, parser, args, remaining) - exit(0) - - if __name__ == "__main__": main() diff --git a/tools/testing/selftests/tc-testing/tdc.sh b/tools/testing/selftests/tc-testing/tdc.sh index 7fe38c76db44..afb0cd86fa3d 100755 --- a/tools/testing/selftests/tc-testing/tdc.sh +++ b/tools/testing/selftests/tc-testing/tdc.sh @@ -1,5 +1,6 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 +modprobe netdevsim ./tdc.py -c actions --nobuildebpf ./tdc.py -c qdisc diff --git a/tools/testing/selftests/wireguard/netns.sh b/tools/testing/selftests/wireguard/netns.sh index ebc4ee0fe179..8a9461aa0878 100755 --- a/tools/testing/selftests/wireguard/netns.sh +++ b/tools/testing/selftests/wireguard/netns.sh @@ -276,7 +276,11 @@ n0 ping -W 1 -c 1 192.168.241.2 n1 wg set wg0 peer "$pub2" endpoint 192.168.241.2:7 ip2 link del wg0 ip2 link del wg1 -! n0 ping -W 1 -c 10 -f 192.168.241.2 || false # Should not crash kernel +read _ _ tx_bytes_before < <(n0 wg show wg1 transfer) +! n0 ping -W 1 -c 10 -f 192.168.241.2 || false +sleep 1 +read _ _ tx_bytes_after < <(n0 wg show wg1 transfer) +(( tx_bytes_after - tx_bytes_before < 70000 )) ip0 link del wg1 ip1 link del wg0 @@ -609,6 +613,28 @@ ip0 link set wg0 up kill $ncat_pid ip0 link del wg0 +# Ensure that dst_cache references don't outlive netns lifetime +ip1 link add dev wg0 type wireguard +ip2 link add dev wg0 type wireguard +configure_peers +ip1 link add veth1 type veth peer name veth2 +ip1 link set veth2 netns $netns2 +ip1 addr add fd00:aa::1/64 dev veth1 +ip2 addr add fd00:aa::2/64 dev veth2 +ip1 link set veth1 up +ip2 link set veth2 up +waitiface $netns1 veth1 +waitiface $netns2 veth2 +ip1 -6 route add default dev veth1 via fd00:aa::2 +ip2 -6 route add default dev veth2 via fd00:aa::1 +n1 wg set wg0 peer "$pub2" endpoint [fd00:aa::2]:2 +n2 wg set wg0 peer "$pub1" endpoint [fd00:aa::1]:1 +n1 ping6 -c 1 fd00::2 +pp ip netns delete $netns1 +pp ip netns delete $netns2 +pp ip netns add $netns1 +pp ip netns add $netns2 + # Ensure there aren't circular reference loops ip1 link add wg1 type wireguard ip2 link add wg2 type wireguard @@ -627,7 +653,7 @@ while read -t 0.1 -r line 2>/dev/null || [[ $? 
-ne 142 ]]; do done < /dev/kmsg alldeleted=1 for object in "${!objects[@]}"; do - if [[ ${objects["$object"]} != *createddestroyed ]]; then + if [[ ${objects["$object"]} != *createddestroyed && ${objects["$object"]} != *createdcreateddestroyeddestroyed ]]; then echo "Error: $object: merely ${objects["$object"]}" >&3 alldeleted=0 fi diff --git a/tools/testing/selftests/wireguard/qemu/debug.config b/tools/testing/selftests/wireguard/qemu/debug.config index fe07d97df9fa..2b321b8a96cf 100644 --- a/tools/testing/selftests/wireguard/qemu/debug.config +++ b/tools/testing/selftests/wireguard/qemu/debug.config @@ -47,7 +47,7 @@ CONFIG_DEBUG_ATOMIC_SLEEP=y CONFIG_TRACE_IRQFLAGS=y CONFIG_DEBUG_BUGVERBOSE=y CONFIG_DEBUG_LIST=y -CONFIG_DEBUG_PI_LIST=y +CONFIG_DEBUG_PLIST=y CONFIG_PROVE_RCU=y CONFIG_SPARSE_RCU_POINTER=y CONFIG_RCU_CPU_STALL_TIMEOUT=21 diff --git a/tools/testing/selftests/wireguard/qemu/kernel.config b/tools/testing/selftests/wireguard/qemu/kernel.config index 74db83a0aedd..a9b5a520a1d2 100644 --- a/tools/testing/selftests/wireguard/qemu/kernel.config +++ b/tools/testing/selftests/wireguard/qemu/kernel.config @@ -66,6 +66,7 @@ CONFIG_PROC_SYSCTL=y CONFIG_SYSFS=y CONFIG_TMPFS=y CONFIG_CONSOLE_LOGLEVEL_DEFAULT=15 +CONFIG_LOG_BUF_SHIFT=18 CONFIG_PRINTK_TIME=y CONFIG_BLK_DEV_INITRD=y CONFIG_LEGACY_VSYSCALL_NONE=y
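The nc-to-socat conversions above all follow one pattern: a one-shot UDP listener plus a client pinned to a fixed source port, guarded by a tool check. Below is a minimal standalone sketch of that pattern, combining the checktool helper from nft_zones_many.sh with the exchange from test_port_shadow; the loopback address is an illustrative assumption, only the port numbers and socat invocations are taken from the patches.

#!/bin/sh
# Sketch: one-shot UDP request/reply over socat, as used by the
# selftests above. Assumes socat and timeout are installed.

ksft_skip=4

checktool (){
	if ! $1 > /dev/null 2>&1; then
		echo "SKIP: Could not $2"
		exit $ksft_skip
	fi
}

checktool "socat -V" "run sketch without socat tool"

# Listener: wait up to 5s for a first datagram on port 1405, then
# answer the sender with ROUTER (-u: data flows stdin -> socket).
echo ROUTER | timeout 5 socat -u STDIN UDP4-LISTEN:1405 &
listener=$!

sleep 0.3	# give the listener time to bind, as the tests do

# Client: send from fixed sport 41404 so conntrack sees a predictable
# tuple, then print the single reply.
reply=$(echo "data" | timeout 1 socat - UDP:127.0.0.1:1405,sourceport=41404)
echo "got: $reply"

kill $listener 2>/dev/null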